| repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
hack4sec/ws-cli | classes/jobs/MongoJob.py | 1 | 5376 | # -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Common class for jobs that work with MongoDB
"""
import Queue
from classes.Registry import Registry
from classes.kernel.WSJob import WSJob
class MongoJob(WSJob):
""" Common class for jobs works with MongoDB """
unique = True
collection = None
select_limit = 50
skip_blank_rows = True
counter = 0
collection_name = None
def __init__(self, maxsize=0):
WSJob.__init__(self, maxsize)
self.collection = Registry().get('mongo')[self.collection_name]
def build_row(self, _str):
""" Common build row method for MongoDB """
return {
"name": _str.strip(),
"checked": 0,
"getted": 0
}
def qsize(self):
""" Size of queue """
return self.collection.find({"checked": 0}).count()
def set_unique(self, unique=True):
""" Enable remove dups in queue """
self.unique = unique
def set_skip_blank_rows(self, value=True):
""" If True - we will skip blank rows then fill queue from dict or file """
self.skip_blank_rows = value
def task_done(self, name):
""" Mark current row as done """
self.counter += 1
self.collection.update({'name': str(unicode(name)), "getted": 1}, {"$set": {"checked": 1}})
WSJob.task_done(self)
def get(self, block=False, timeout=None):
""" Get next item from queue """
if self.empty() or self.qsize() < 50:
self.load_data()
if self.empty():
raise Queue.Empty
return WSJob.get(self, block, timeout)
def load_data(self):
""" Load data into queue from MongoDB """
data = self.collection.find(
{"checked": 0, "getted": 0},
limit=int(Registry().get('config')['main']['mongo_data_load_per_once'])
)
for row in data:
self.put(row['name'])
self.collection.update({"name": row['name']}, {"$set": {"getted": 1}})
return True
def load_dict(self, dict_for_load, drop=True):
""" Fill collection from dict """
if drop:
self.collection.drop()
counter = 0
last = "START OF FILE"
for line in dict_for_load:
try:
line = line.strip()
unicode(line)
self.collection.insert(self.build_row(line))
except UnicodeDecodeError:
_str = " UNICODE ERROR: In file '{0}' skip word '{1}', after word '{2}' !".format(file, line, last)
if Registry().isset('logger'):
Registry().get('logger').log(_str)
else:
print _str
continue
counter += 1
last = line
self.load_data()
return counter
def load_dom(self, dom):
""" Fill queue from DictOfMask """
self.collection.drop()
while True:
word = dom.get()
if word is None:
break
self.collection.insert(self.build_row(word))
self.collection.create_index('name', drop_dups=True, unique=self.unique)
self.load_data()
return self.collection.count()
def load_file(self, _file):
""" Fill queue from text file """
self.collection.drop()
fh = open(_file)
last = "START OF FILE"
while True:
line = fh.readline()
if not line:
break
if not line.strip() and self.skip_blank_rows:
continue
try:
line = line.strip()
unicode(line)
self.collection.insert(self.build_row(line))
except UnicodeDecodeError:
_str = " UNICODE ERROR: In file '{0}' skip word '{1}', after word '{2}' !".format(_file, line, last)
if Registry().isset('logger'):
Registry().get('logger').log(_str)
else:
print _str
continue
last = line
fh.close()
self.collection.create_index('name', drop_dups=True, unique=self.unique)
self.load_data()
return self.collection.count()
# The two methods below are taken from
# http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
# A recipe for keeping tasks in the queue unique
def _init(self, maxsize):
WSJob._init(self, maxsize)
if self.unique:
self.all_items = set()
def _put(self, item):
if self.unique:
if item not in self.all_items:
WSJob._put(self, item)
self.all_items.add(item)
else:
_str = "WARNING: try to add not unique item `{0}`".format(item)
if Registry().isset('logger'):
#Registry().get('logger').log(_str)
pass
else:
#print _str
pass
else:
WSJob._put(self, item)
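# Hedged usage sketch, not part of the original module: the collection name,
# wordlist path and surrounding Registry/MongoDB setup below are assumptions.
# A concrete job subclasses MongoJob, points it at a collection and drains the
# queue roughly like this:
#
#     class HostsJob(MongoJob):
#         collection_name = 'hosts'
#
#     job = HostsJob()
#     job.load_file('/tmp/wordlist.txt')  # fills the collection and the queue
#     while True:
#         try:
#             name = job.get()
#         except Queue.Empty:
#             break
#         # ... process `name`, then mark the row as checked
#         job.task_done(name)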
| mit | 5,420,918,640,989,109,000 | 28.594444 | 116 | 0.523372 | false |
gannetson/django | django/contrib/flatpages/forms.py | 357 | 2024 | from django import forms
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext, ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$',
help_text=_("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_messages={
"invalid": _("This value must contain only letters, numbers,"
" dots, underscores, dashes, slashes or tildes."),
},
)
class Meta:
model = FlatPage
fields = '__all__'
def clean_url(self):
url = self.cleaned_data['url']
if not url.startswith('/'):
raise forms.ValidationError(
ugettext("URL is missing a leading slash."),
code='missing_leading_slash',
)
if (settings.APPEND_SLASH and
'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE_CLASSES and
not url.endswith('/')):
raise forms.ValidationError(
ugettext("URL is missing a trailing slash."),
code='missing_trailing_slash',
)
return url
def clean(self):
url = self.cleaned_data.get('url')
sites = self.cleaned_data.get('sites')
same_url = FlatPage.objects.filter(url=url)
if self.instance.pk:
same_url = same_url.exclude(pk=self.instance.pk)
if sites and same_url.filter(sites__in=sites).exists():
for site in sites:
if same_url.filter(sites=site).exists():
raise forms.ValidationError(
_('Flatpage with url %(url)s already exists for site %(site)s'),
code='duplicate_url',
params={'url': url, 'site': site},
)
return super(FlatpageForm, self).clean()
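# Hedged usage sketch, not part of the original module: the field values and
# the site primary key below are assumptions. The form can be exercised
# directly, e.g. from a test, like any other ModelForm:
#
#     form = FlatpageForm(data={'url': '/about/contact/', 'title': 'Contact',
#                               'content': '...', 'sites': [1]})
#     if form.is_valid():
#         page = form.save()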
| bsd-3-clause | -8,797,956,800,796,315,000 | 36.481481 | 94 | 0.554348 | false |
fenginx/django | tests/admin_inlines/models.py | 14 | 6885 | """
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super().save(*args, **kwargs)
class NonAutoPKBookChild(NonAutoPKBook):
pass
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
text = models.CharField(max_length=40)
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class NovelReadonlyChapter(Novel):
class Meta:
proxy = True
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
readonly_field = models.CharField(max_length=1)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause | -1,111,888,167,839,457,700 | 24.21978 | 93 | 0.717357 | false |
boompig/passzero | passzero/main_routes.py | 1 | 12491 | from functools import wraps
from flask import (Blueprint, abort, current_app, escape, flash, make_response,
redirect, render_template, request, session, url_for)
from sqlalchemy.orm.exc import NoResultFound
from passzero.api_utils import check_auth
from passzero.backend import (activate_account, decrypt_entries, get_entries,
get_link_by_id, get_services_map,
password_strength_scores, get_document_by_id)
from passzero.models import AuthToken, User, db
from . import export_utils
main_routes = Blueprint("main_routes", __name__)
def auth_or_redirect_login(function):
"""This is a decorator which does authentication for GET requests to templates.
If not authenticated, return a redirect to the login screen.
If authenticated, call the function."""
@wraps(function)
def inner(*args, **kwargs):
if check_auth():
return function(*args, **kwargs)
else:
return redirect(url_for("main_routes.login"))
return inner
def auth_or_abort(function):
"""This is a decorator which does authentication for GET requests to templates.
If not authenticated, show the 401 screen.
If authenticated, call the function."""
@wraps(function)
def inner(*args, **kwargs):
if check_auth():
return function(*args, **kwargs)
else:
return abort(401)
return inner
@main_routes.route("/", methods=["GET"])
def index():
if check_auth():
return redirect(url_for("main_routes.view_entries"))
else:
return render_template("landing.jinja2")
@main_routes.route("/done_login", methods=["GET"])
@auth_or_abort
def post_login():
flash(f"Successfully logged in as {escape(session['email'])}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/login", methods=["GET"])
def login():
return render_template("login.jinja2", login=True, error=None)
@main_routes.route("/logout", methods=["GET", "POST"])
def logout():
if 'email' in session:
session.pop("email")
if 'password' in session:
session.pop("password")
if 'user_id' in session:
session.pop("user_id")
return redirect(url_for("main_routes.login"))
@main_routes.route("/post_account_delete", methods=["GET", "POST"])
def post_account_delete():
flash("Account successfully deleted")
return redirect(url_for("main_routes.logout"))
@main_routes.route("/done_signup/<email>", methods=["GET"])
def post_signup(email: str):
flash("Successfully created account with email %s. A confirmation email was sent to this address." % escape(email))
return redirect(url_for("main_routes.login"))
# --- entries --- #
@main_routes.route("/entries/post_delete/<account_name>", methods=["GET"])
@auth_or_abort
def post_delete(account_name: str):
flash(f"Successfully deleted account {escape(account_name)}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/entries/new", methods=["GET"])
@auth_or_redirect_login
def new_entry_view():
user = db.session.query(User).filter_by(id=session["user_id"]).one()
user_prefs = {
"default_random_password_length": user.default_random_password_length,
"default_random_passphrase_length": user.default_random_passphrase_length
}
return render_template("new.jinja2", title="PassZero · New Entry",
user_prefs=user_prefs, error=None)
@main_routes.route("/entries/done_edit/<account_name>")
@auth_or_abort
def post_edit(account_name):
flash(f"Successfully changed entry for account {escape(account_name)}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/entries/done_new/<account_name>", methods=["GET"])
@auth_or_abort
def post_create(account_name):
flash(f"Successfully created entry for account {escape(account_name)}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/entries", methods=["GET"])
@auth_or_redirect_login
def view_entries():
return render_template("entries.jinja2")
# --- entries --- #
# --- links --- #
@main_routes.route("/links", methods=["GET"])
@auth_or_redirect_login
def view_links():
return render_template("links/links.jinja2")
@main_routes.route("/links/new", methods=["GET"])
@auth_or_redirect_login
def new_link_view():
return render_template("links/new-link.jinja2", title="PassZero · New Link", link_id=-1)
@main_routes.route("/links/<int:link_id>", methods=["GET"])
@auth_or_redirect_login
def edit_link(link_id: int):
user = db.session.query(User).filter_by(id=session["user_id"]).one()
link = get_link_by_id(db.session, user.id, link_id)
if link is None:
flash("Error: no link with ID %d" % link_id, "error")
return redirect(url_for("main_routes.view_links"))
dec_link = link.decrypt(session["password"])
return render_template("links/new-link.jinja2", title="PassZero · Edit Link",
link_id=link_id,
service_name=dec_link.service_name,
link=dec_link.link)
# --- links --- #
# --- documents --- #
@main_routes.route("/docs", methods=["GET"])
@auth_or_redirect_login
def view_docs():
return render_template("docs/docs.jinja2")
@main_routes.route("/docs/new", methods=["GET"])
@auth_or_redirect_login
def new_doc_view():
return render_template("docs/new-doc.jinja2", title="PassZero · New Document",
document_id=-1)
@main_routes.route("/docs/<int:document_id>/edit", methods=["GET"])
@auth_or_redirect_login
def edit_doc(document_id: int):
# get the document
user = db.session.query(User).filter_by(id=session["user_id"]).one()
doc = get_document_by_id(db.session, user.id, document_id)
if doc is None:
flash(f"Error: no document with ID {document_id}", "error")
return redirect(url_for("main_routes.view_docs"))
return render_template("docs/new-doc.jinja2", title="PassZero · New Document",
document_id=document_id)
@main_routes.route("/docs/<int:document_id>/view", methods=["GET"])
@auth_or_redirect_login
def view_decrypted_doc(document_id: int):
user = db.session.query(User).filter_by(id=session["user_id"]).one()
doc = get_document_by_id(db.session, user.id, document_id)
if doc is None:
flash(f"Error: no document with ID {document_id}", "error")
return redirect(url_for("main_routes.view_docs"))
dec_doc = doc.decrypt(session["password"])
return render_template(
"docs/view-doc.jinja2",
title="PassZero · View Document",
document_id=document_id,
document_mimetype=dec_doc.mimetype,
document_name=dec_doc.name
)
# --- documents --- #
@main_routes.route("/signup", methods=["GET"])
def signup():
error = None
return render_template("login.jinja2", login=False, error=error)
@main_routes.route("/signup/post_confirm")
def post_confirm_signup():
flash("Successfully signed up! Login with your newly created account")
return redirect(url_for("main_routes.login"))
@main_routes.route("/signup/confirm")
def confirm_signup():
try:
token = request.args["token"]
token_obj = db.session.query(AuthToken).filter_by(token=token).one()
if token_obj.is_expired():
flash("Token has expired", "error")
# delete old token from database
db.session.delete(token_obj)
db.session.commit()
return redirect(url_for("main_routes.signup"))
else:
# token deleted when password changed
db.session.delete(token_obj)
user = db.session.query(User).filter_by(id=token_obj.user_id).one()
activate_account(db.session, user)
return redirect(url_for("main_routes.post_confirm_signup"))
except NoResultFound:
flash("Token is invalid", "error")
return redirect(url_for("main_routes.signup"))
except KeyError:
flash("Token is mandatory", "error")
return redirect(url_for("main_routes.signup"))
@main_routes.route("/advanced/export", methods=["GET"])
@auth_or_abort
def export_entries():
export_contents = export_utils.export_decrypted_entries(
db.session,
session["user_id"],
session["password"]
)
response = make_response(export_contents)
response.headers["Content-Disposition"] = (
"attachment; filename=%s" % current_app.config['DUMP_FILE']
)
return response
@main_routes.route("/advanced/done_export")
@auth_or_abort
def done_export():
flash("database successfully dumped to file %s" % current_app.config['DUMP_FILE'])
return redirect("/advanced")
@main_routes.route("/edit/<int:entry_id>", methods=["GET"])
@main_routes.route("/entries/<int:entry_id>", methods=["GET"])
@auth_or_redirect_login
def edit_entry(entry_id: int):
user = db.session.query(User).filter_by(id=session["user_id"]).one()
entries = get_entries(db.session, session["user_id"])
my_entries = [e for e in entries if e.id == entry_id]
if len(my_entries) == 0:
flash("Error: no entry with ID %d" % entry_id, "error")
return redirect(url_for("main_routes.view_entries"))
else:
fe = decrypt_entries(my_entries, session['password'])
user_prefs = {
"default_random_password_length": user.default_random_password_length,
"default_random_passphrase_length": user.default_random_passphrase_length
}
return render_template(
"new.jinja2",
user_prefs=user_prefs,
e_id=entry_id,
entry=fe[0],
error=None
)
@main_routes.route("/entries/strength")
@auth_or_redirect_login
def password_strength():
entries = get_entries(db.session, session["user_id"])
dec_entries = decrypt_entries(entries, session['password'])
entry_scores = password_strength_scores(session["email"], dec_entries)
return render_template("password_strength.jinja2", entry_scores=entry_scores)
@main_routes.route("/entries/2fa")
@auth_or_redirect_login
def two_factor():
entries = get_entries(db.session, session["user_id"])
services_map = get_services_map(db.session)
two_factor_map = {}
for entry in entries:
account = entry.account.lower()
two_factor_map[entry.account] = {
"service_has_2fa": services_map.get(account, {}).get("has_two_factor", False),
"entry_has_2fa": entry.has_2fa,
"entry_id": entry.id
}
return render_template("entries_2fa.jinja2", two_factor_map=two_factor_map)
@main_routes.route("/advanced")
@auth_or_redirect_login
def advanced():
return render_template("advanced.jinja2")
@main_routes.route("/profile")
@auth_or_redirect_login
def profile():
user = db.session.query(User).filter_by(id=session["user_id"]).one()
user_prefs = {
"default_random_password_length": user.default_random_password_length,
"default_random_passphrase_length": user.default_random_passphrase_length,
}
return render_template(
"profile.jinja2",
title="PassZero · Profile",
user_prefs=user_prefs
)
@main_routes.route("/recover")
def recover_password():
return render_template("recover.jinja2")
@main_routes.route("/recover/confirm")
def recover_account_confirm():
try:
token = request.args['token']
token_obj = db.session.query(AuthToken).filter_by(token=token).one()
if token_obj.is_expired():
flash("Token has expired", "error")
# delete old token from database
db.session.delete(token_obj)
db.session.commit()
return redirect(url_for("main_routes.recover_password"))
else:
# token deleted when password changed
return render_template("recover.jinja2", confirm=True)
except NoResultFound:
flash("Token is invalid", "error")
return redirect(url_for("main_routes.recover_password"))
except KeyError:
flash("Token is mandatory", "error")
return redirect(url_for("main_routes.recover_password"))
@main_routes.route("/about")
def about():
return render_template("about.jinja2")
@main_routes.route("/version")
def get_version():
return current_app.config['BUILD_ID']
| gpl-3.0 | -8,464,349,113,962,936,000 | 32.942935 | 119 | 0.647987 | false |
LoHChina/nova | nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py | 39 | 2104 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
authorize = extensions.soft_extension_authorizer('compute', 'extended_vif_net')
class ExtendedServerVIFNetController(wsgi.Controller):
def __init__(self):
super(ExtendedServerVIFNetController, self).__init__()
self.network_api = network.API()
@wsgi.extends
def index(self, req, resp_obj, server_id):
key = "%s:net_id" % Extended_virtual_interfaces_net.alias
context = req.environ['nova.context']
if authorize(context):
for vif in resp_obj.obj['virtual_interfaces']:
vif1 = self.network_api.get_vif_by_mac_address(context,
vif['mac_address'])
vif[key] = vif1.net_uuid
class Extended_virtual_interfaces_net(extensions.ExtensionDescriptor):
"""Adds network id parameter to the virtual interface list."""
name = "ExtendedVIFNet"
alias = "OS-EXT-VIF-NET"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended-virtual-interfaces-net/api/v1.1")
updated = "2013-03-07T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedServerVIFNetController()
extension = extensions.ControllerExtension(self,
'os-virtual-interfaces',
controller)
return [extension]
| apache-2.0 | 8,469,629,465,449,983,000 | 38.698113 | 79 | 0.639259 | false |
aferr/LatticeMemCtl | src/python/m5/util/attrdict.py | 84 | 3421 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
__all__ = [ 'attrdict', 'multiattrdict', 'optiondict' ]
class attrdict(dict):
"""Wrap dict, so you can use attribute access to get/set elements"""
def __getattr__(self, attr):
if attr in self:
return self.__getitem__(attr)
return super(attrdict, self).__getattribute__(attr)
def __setattr__(self, attr, value):
if attr in dir(self) or attr.startswith('_'):
return super(attrdict, self).__setattr__(attr, value)
return self.__setitem__(attr, value)
def __delattr__(self, attr):
if attr in self:
return self.__delitem__(attr)
return super(attrdict, self).__delattr__(attr)
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
class multiattrdict(attrdict):
"""Wrap attrdict so that nested attribute accesses automatically create
nested dictionaries."""
def __getattr__(self, attr):
try:
return super(multiattrdict, self).__getattr__(attr)
except AttributeError:
if attr.startswith('_'):
raise
d = multiattrdict()
setattr(self, attr, d)
return d
class optiondict(attrdict):
"""Modify attrdict so that a missing attribute just returns None"""
def __getattr__(self, attr):
try:
return super(optiondict, self).__getattr__(attr)
except AttributeError:
return None
if __name__ == '__main__':
x = attrdict()
x.y = 1
x['z'] = 2
print x['y'], x.y
print x['z'], x.z
print dir(x)
print x
print
del x['y']
del x.z
print dir(x)
print(x)
print
print "multiattrdict"
x = multiattrdict()
x.x.x.x = 9
x.y.z = 9
print x
print x.y
print x.y.z
print x.z.z
| bsd-3-clause | 3,028,026,996,137,915,000 | 33.21 | 75 | 0.666179 | false |
hynnet/openwrt-mt7620 | staging_dir/host/lib/python2.7/distutils/dep_util.py | 177 | 3509 | """distutils.dep_util
Utility functions for simple, timestamp-based dependency of files
and groups of files; also, function based entirely on such
timestamp dependency analysis."""
__revision__ = "$Id$"
import os
from stat import ST_MTIME
from distutils.errors import DistutilsFileError
def newer(source, target):
"""Tells if the target is newer than the source.
Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Return false if both exist and 'target' is the same age or younger
than 'source'. Raise DistutilsFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same second
will have the same "age".
"""
if not os.path.exists(source):
raise DistutilsFileError("file '%s' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source)[ST_MTIME] > os.stat(target)[ST_MTIME]
def newer_pairwise(sources, targets):
"""Walk two filename lists in parallel, testing if each source is newer
than its corresponding target. Return a pair of lists (sources,
targets) where source is newer than target, according to the semantics
of 'newer()'.
"""
if len(sources) != len(targets):
raise ValueError, "'sources' and 'targets' must be same length"
# build a pair of lists (sources, targets) where source is newer
n_sources = []
n_targets = []
for source, target in zip(sources, targets):
if newer(source, target):
n_sources.append(source)
n_targets.append(target)
return n_sources, n_targets
def newer_group(sources, target, missing='error'):
"""Return true if 'target' is out-of-date with respect to any file
listed in 'sources'.
In other words, if 'target' exists and is newer
than every file in 'sources', return false; otherwise return true.
'missing' controls what we do when a source file is missing; the
default ("error") is to blow up with an OSError from inside 'stat()';
if it is "ignore", we silently drop any missing source files; if it is
"newer", any missing source files make us assume that 'target' is
out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
carry out commands that wouldn't work because inputs are missing, but
that doesn't matter because you're not actually going to run the
commands).
"""
# If the target doesn't even exist, then it's definitely out-of-date.
if not os.path.exists(target):
return True
# Otherwise we have to find out the hard way: if *any* source file
# is more recent than 'target', then 'target' is out-of-date and
# we can immediately return true. If we fall through to the end
# of the loop, then 'target' is up-to-date and we return false.
target_mtime = os.stat(target)[ST_MTIME]
for source in sources:
if not os.path.exists(source):
if missing == 'error': # blow up when we stat() the file
pass
elif missing == 'ignore': # missing source dropped from
continue # target's dependency list
elif missing == 'newer': # missing source means target is
return True # out-of-date
if os.stat(source)[ST_MTIME] > target_mtime:
return True
return False
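# Hedged usage sketch: the file names and the rebuild step below are made up.
# These helpers support make-style decisions, regenerating an output only when
# one of its inputs is more recent (or missing, depending on 'missing'):
#
#     if newer('setup.cfg', 'setup.cfg.bak'):
#         shutil.copyfile('setup.cfg', 'setup.cfg.bak')
#     if newer_group(['main.c', 'util.c', 'config.h'], 'main.o', missing='newer'):
#         recompile('main.o')  # hypothetical rebuild step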
| gpl-2.0 | 8,281,611,961,159,469,000 | 38.426966 | 78 | 0.655172 | false |
shakamunyi/docker-registry | depends/docker-registry-core/docker_registry/testing/driver.py | 30 | 13434 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import random
import string
from ..core import compat
from ..core import driver
from ..core import exceptions
from nose import SkipTest # noqa
from nose import tools
logger = logging.getLogger(__name__)
class Driver(object):
def __init__(self, scheme=None, path=None, config=None):
self.scheme = scheme
self.path = path
self.config = config
# Load the requested driver
def setUp(self):
storage = driver.fetch(self.scheme)
self._storage = storage(self.path, self.config)
def tearDown(self):
pass
def gen_random_string(self, length=16):
return ''.join([random.choice(string.ascii_uppercase + string.digits)
for x in range(length)]).lower()
def simplehelp(self, path, content, expected, size=0):
self._storage.put_content(path, content)
assert self._storage.get_content(path) == expected
assert self._storage.get_content(path) == expected
if size:
assert self._storage.get_size(path) == size
def unicodehelp(self, path, content, expected):
self._storage.put_unicode(path, content)
assert self._storage.get_unicode(path) == expected
assert self._storage.get_unicode(path) == expected
def jsonhelp(self, path, content, expected):
self._storage.put_json(path, content)
assert self._storage.get_json(path) == expected
assert self._storage.get_json(path) == expected
def test_exists_non_existent(self):
filename = self.gen_random_string()
assert not self._storage.exists(filename)
def test_exists_existent(self):
filename = self.gen_random_string()
self._storage.put_content(filename, b'')
assert self._storage.exists(filename)
# get / put
def test_write_read_1(self):
filename = self.gen_random_string()
content = b'a'
expected = b'a'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_2(self):
filename = self.gen_random_string()
content = b'\xc3\x9f'
expected = b'\xc3\x9f'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_3(self):
filename = self.gen_random_string()
content = u'ß'.encode('utf8')
expected = b'\xc3\x9f'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_4(self):
filename = self.gen_random_string()
content = 'ß'
if compat.is_py2:
content = content.decode('utf8')
content = content.encode('utf8')
expected = b'\xc3\x9f'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_5(self):
filename = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
expected = content
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_6(self):
filename = self.gen_random_string()
content = self.gen_random_string(1024 * 1024).encode('utf8')
expected = content
self.simplehelp(filename, content, expected, len(expected))
# get / put unicode
def test_unicode_1(self):
filename = self.gen_random_string()
content = 'a'
expected = u'a'
self.unicodehelp(filename, content, expected)
def test_unicode_2(self):
filename = self.gen_random_string()
content = b'\xc3\x9f'.decode('utf8')
expected = u'ß'
self.unicodehelp(filename, content, expected)
def test_unicode_3(self):
filename = self.gen_random_string()
content = u'ß'
expected = u'ß'
self.unicodehelp(filename, content, expected)
def test_unicode_4(self):
filename = self.gen_random_string()
content = 'ß'
if compat.is_py2:
content = content.decode('utf8')
expected = u'ß'
self.unicodehelp(filename, content, expected)
def test_unicode_5(self):
filename = self.gen_random_string()
content = self.gen_random_string()
expected = content
self.unicodehelp(filename, content, expected)
def test_unicode_6(self):
filename = self.gen_random_string()
content = self.gen_random_string(1024 * 1024)
expected = content
self.unicodehelp(filename, content, expected)
# JSON
def test_json(self):
filename = self.gen_random_string()
content = {u"ß": u"ß"}
expected = {u"ß": u"ß"}
self.jsonhelp(filename, content, expected)
# Removes
def test_remove_existent(self):
filename = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
self._storage.put_content(filename, content)
self._storage.remove(filename)
assert not self._storage.exists(filename)
def test_remove_folder(self):
dirname = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
self._storage.put_content('%s/%s' % (dirname, filename1), content)
self._storage.put_content('%s/%s' % (dirname, filename2), content)
self._storage.remove(dirname)
assert not self._storage.exists(filename1)
assert not self._storage.exists(filename2)
assert not self._storage.exists(dirname)
# Check the lru is ok
try:
self._storage.get_content(filename1)
assert False
except Exception:
pass
try:
self._storage.get_content(filename2)
assert False
except Exception:
pass
@tools.raises(exceptions.FileNotFoundError)
def test_remove_inexistent(self):
filename = self.gen_random_string()
self._storage.remove(filename)
@tools.raises(exceptions.FileNotFoundError)
def test_read_inexistent(self):
filename = self.gen_random_string()
self._storage.get_content(filename)
@tools.raises(exceptions.FileNotFoundError)
def test_get_size_inexistent(self):
filename = self.gen_random_string()
self._storage.get_size(filename)
def test_stream(self):
filename = self.gen_random_string()
# test 7MB
content = self.gen_random_string(7).encode('utf8') # * 1024 * 1024
# test exists
io = compat.StringIO(content)
logger.debug("%s should NOT exists still" % filename)
assert not self._storage.exists(filename)
self._storage.stream_write(filename, io)
io.close()
logger.debug("%s should exist now" % filename)
assert self._storage.exists(filename)
# test read / write
data = compat.bytes()
for buf in self._storage.stream_read(filename):
data += buf
assert content == data
# test bytes_range only if the storage backend suppports it
if self._storage.supports_bytes_range:
b = random.randint(0, math.floor(len(content) / 2))
bytes_range = (b, random.randint(b + 1, len(content) - 1))
data = compat.bytes()
for buf in self._storage.stream_read(filename, bytes_range):
data += buf
expected_content = content[bytes_range[0]:bytes_range[1] + 1]
assert data == expected_content
# logger.debug("Content length is %s" % len(content))
# logger.debug("And retrieved content length should equal it: %s" %
# len(data))
# logger.debug("got content %s" % content)
# logger.debug("got data %s" % data)
# test remove
self._storage.remove(filename)
assert not self._storage.exists(filename)
@tools.raises(exceptions.FileNotFoundError)
def test_stream_read_inexistent(self):
filename = self.gen_random_string()
data = compat.bytes()
for buf in self._storage.stream_read(filename):
data += buf
@tools.raises(exceptions.FileNotFoundError)
def test_inexistent_list_directory(self):
notexist = self.gen_random_string()
iterator = self._storage.list_directory(notexist)
next(iterator)
# XXX only elliptics return StopIteration for now - though we should
# return probably that for all
@tools.raises(exceptions.FileNotFoundError, StopIteration)
def test_empty_list_directory(self):
path = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
self._storage.put_content(path, content)
iterator = self._storage.list_directory(path)
next(iterator)
def test_list_directory(self):
base = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
fb1 = '%s/%s' % (base, filename1)
fb2 = '%s/%s' % (base, filename2)
content = self.gen_random_string().encode('utf8')
self._storage.put_content(fb1, content)
self._storage.put_content(fb2, content)
assert sorted([fb1, fb2]
) == sorted(list(self._storage.list_directory(base)))
def test_list_directory_with_subdir(self):
if self.scheme == 's3':
raise SkipTest("Check GH #596.")
base = self.gen_random_string()
dir1 = self.gen_random_string()
dir2 = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
fd1 = '%s/%s' % (base, dir1)
fd2 = '%s/%s' % (base, dir2)
fb1 = '%s/%s' % (fd1, filename1)
fb2 = '%s/%s' % (fd2, filename2)
content = self.gen_random_string().encode('utf8')
self._storage.put_content(fb1, content)
self._storage.put_content(fb2, content)
assert sorted([fd1, fd2]
) == sorted(list(self._storage.list_directory(base)))
# def test_root_list_directory(self):
# fb1 = self.gen_random_string()
# fb2 = self.gen_random_string()
# content = self.gen_random_string()
# self._storage.put_content(fb1, content)
# self._storage.put_content(fb2, content)
# print(list(self._storage.list_directory()))
# assert sorted([fb1, fb2]
# ) == sorted(list(self._storage.list_directory()))
@tools.raises(exceptions.FileNotFoundError, StopIteration)
def test_empty_after_remove_list_directory(self):
base = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
fb1 = '%s/%s' % (base, filename1)
fb2 = '%s/%s' % (base, filename2)
content = self.gen_random_string().encode('utf8')
self._storage.put_content(fb1, content)
self._storage.put_content(fb2, content)
self._storage.remove(fb1)
self._storage.remove(fb2)
iterator = self._storage.list_directory(base)
next(iterator)
def test_paths(self):
namespace = 'namespace'
repository = 'repository'
tag = 'sometag'
image_id = 'imageid'
p = self._storage.images_list_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.image_json_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_mark_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_checksum_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_layer_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_ancestry_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_files_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_diff_path(image_id)
assert not self._storage.exists(p)
p = self._storage.repository_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.tag_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.tag_path(namespace, repository, tag)
assert not self._storage.exists(p)
p = self._storage.repository_json_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.repository_tag_json_path(namespace, repository, tag)
assert not self._storage.exists(p)
p = self._storage.index_images_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.private_flag_path(namespace, repository)
assert not self._storage.exists(p)
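# Hedged illustration: the scheme, path and config values below are
# assumptions. A backend's test module typically subclasses Driver so that
# nose runs every generic check above against that particular storage driver:
#
#     class TestFileDriver(Driver):
#         def __init__(self):
#             super(TestFileDriver, self).__init__(
#                 scheme='file', path='/tmp/test-registry', config=None)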
| apache-2.0 | -834,524,187,108,370,700 | 35.674863 | 78 | 0.617746 | false |
rastaman/ansible-modules-core | cloud/amazon/ec2_metric_alarm.py | 61 | 10684 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
options: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
options: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions', {})
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict'),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
main()
| gpl-3.0 | 115,073,352,208,283,900 | 36.487719 | 435 | 0.632815 | false |
wolverineav/neutron | neutron/db/common_db_mixin.py | 3 | 14029 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import weakref
from oslo_log import log as logging
from oslo_utils import excutils
import six
from sqlalchemy import and_
from sqlalchemy.ext import associationproxy
from sqlalchemy import or_
from sqlalchemy import sql
from neutron._i18n import _LE
from neutron.db import sqlalchemyutils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def _noop_context_manager():
yield
def safe_creation(context, create_fn, delete_fn, create_bindings,
transaction=True):
'''This function wraps logic of object creation in safe atomic way.
In case of exception, object is deleted.
More information when this method could be used can be found in
developer guide - Effective Neutron: Database interaction section.
http://docs.openstack.org/developer/neutron/devref/effective_neutron.html
:param context: context
:param create_fn: function without arguments that is called to create
object and returns this object.
:param delete_fn: function that is called to delete an object. It is
called with object's id field as an argument.
:param create_bindings: function that is called to create bindings for
an object. It is called with object's id field as an argument.
:param transaction: if true the whole operation will be wrapped in a
transaction. if false, no transaction will be used.
'''
cm = (context.session.begin(subtransactions=True)
if transaction else _noop_context_manager())
with cm:
obj = create_fn()
try:
value = create_bindings(obj['id'])
except Exception:
with excutils.save_and_reraise_exception():
try:
delete_fn(obj['id'])
except Exception as e:
LOG.error(_LE("Cannot clean up created object %(obj)s. "
"Exception: %(exc)s"), {'obj': obj['id'],
'exc': e})
return obj, value
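# Hedged usage sketch: the plugin callables below are hypothetical and not part
# of this module. safe_creation() wires the three callables together so that a
# failure while creating the bindings deletes the object created just before:
#
#     def _create():
#         return plugin.create_port(context, port_data)
#
#     def _delete(port_id):
#         plugin.delete_port(context, port_id)
#
#     def _create_bindings(port_id):
#         return plugin.create_port_binding(context, port_id, host)
#
#     port, binding = safe_creation(context, _create, _delete, _create_bindings)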
def model_query_scope(context, model):
# Unless a context has 'admin' or 'advanced-service' rights the
# query will be scoped to a single tenant_id
return ((not context.is_admin and hasattr(model, 'tenant_id')) and
(not context.is_advsvc and hasattr(model, 'tenant_id')))
def model_query(context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if model_query_scope(context, model):
query_filter = (model.tenant_id == context.tenant_id)
if query_filter is not None:
query = query.filter(query_filter)
return query
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
Filter hooks take as input the filter expression being built and return
a transformed filter expression
"""
cls._model_query_hooks.setdefault(model, {})[name] = {
'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
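# Hedged illustration: the model and hook callables below are assumptions.
# An extension mixin typically registers its hooks once, so that every
# query built by _model_query() for that model is transformed by them:
#
#     def _port_ext_query_hook(context, original_model, query):
#         return query.outerjoin(PortExtension)
#
#     def _port_ext_filter_hook(context, original_model, conditions):
#         return conditions
#
#     CommonDbMixin.register_model_query_hook(
#         models_v2.Port, 'port_extension',
#         _port_ext_query_hook, _port_ext_filter_hook)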
@classmethod
def register_dict_extend_funcs(cls, resource, funcs):
cls._dict_extend_functions.setdefault(resource, []).extend(funcs)
@property
def safe_reference(self):
"""Return a weakref to the instance.
Minimize the potential for the instance persisting
unnecessarily in memory by returning a weakref proxy that
won't prevent deallocation.
"""
return weakref.proxy(self)
def model_query_scope(self, context, model):
return model_query_scope(context, model)
def _model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if self.model_query_scope(context, model):
if hasattr(model, 'rbac_entries'):
query = query.outerjoin(model.rbac_entries)
rbac_model = model.rbac_entries.property.mapper.class_
query_filter = (
(model.tenant_id == context.tenant_id) |
((rbac_model.action == 'access_as_shared') &
((rbac_model.target_tenant == context.tenant_id) |
(rbac_model.target_tenant == '*'))))
elif hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == sql.true()))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
query_hook = hooks.get('query')
if isinstance(query_hook, six.string_types):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, six.string_types):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
def _apply_filters_to_query(self, query, model, filters, context=None):
if filters:
for key, value in six.iteritems(filters):
column = getattr(model, key, None)
# NOTE(kevinbenton): if column is a hybrid property that
# references another expression, attempting to convert to
# a boolean will fail so we must compare to None.
# See "An Important Expression Language Gotcha" in:
# docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
if column is not None:
if not value:
query = query.filter(sql.false())
return query
if isinstance(column, associationproxy.AssociationProxy):
# association proxies don't support in_ so we have to
# do multiple equals matches
query = query.filter(
or_(*[column == v for v in value]))
else:
query = query.filter(column.in_(value))
elif key == 'shared' and hasattr(model, 'rbac_entries'):
# translate a filter on shared into a query against the
# object's rbac entries
query = query.outerjoin(model.rbac_entries)
rbac = model.rbac_entries.property.mapper.class_
matches = [rbac.target_tenant == '*']
if context:
matches.append(rbac.target_tenant == context.tenant_id)
# any 'access_as_shared' records that match the
# wildcard or requesting tenant
is_shared = and_(rbac.action == 'access_as_shared',
or_(*matches))
if not value[0]:
# NOTE(kevinbenton): we need to find objects that don't
# have an entry that matches the criteria above so
# we use a subquery to exclude them.
# We can't just filter the inverse of the query above
# because that will still give us a network shared to
# our tenant (or wildcard) if it's shared to another
# tenant.
# This is the column joining the table to rbac via
# the object_id. We can't just use model.id because
# subnets join on network.id so we have to inspect the
# relationship.
join_cols = model.rbac_entries.property.local_columns
oid_col = list(join_cols)[0]
is_shared = ~oid_col.in_(
query.session.query(rbac.object_id).
filter(is_shared)
)
query = query.filter(is_shared)
for _nam, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, six.string_types):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
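    # Illustrative call (MyModel and the filter values are hypothetical):
    # restrict a query to two names owned by the requesting tenant.
    #
    #     query = self._apply_filters_to_query(
    #         query, MyModel,
    #         {'name': ['foo', 'bar'], 'tenant_id': [context.tenant_id]},
    #         context)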
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, six.string_types):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters,
context)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters=filters,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
def _get_marker_obj(self, context, resource, limit, marker):
if limit and marker:
return getattr(self, '_get_%s' % resource)(context, marker)
return None
def _filter_non_model_columns(self, data, model):
"""Remove all the attributes from data which are not columns or
association proxies of the model passed as second parameter
"""
columns = [c.name for c in model.__table__.columns]
return dict((k, v) for (k, v) in
six.iteritems(data) if k in columns or
isinstance(getattr(model, k, None),
associationproxy.AssociationProxy))
| apache-2.0 | 6,460,679,173,305,752,000 | 43.536508 | 79 | 0.569463 | false |
cgstudiomap/cgstudiomap | main/eggs/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/graphics/barcode/qrencoder.py | 36 | 34126 | # QRCode for Python
#
# Support for Kanji, Hanzi, ECI, FNC1 and Structured append,
# and optimizations by Anders Hammarquist <[email protected]>
#
# Copyright (c) 2014 Open End AB http://www.openend.se/
#
# Ported from the Javascript library by Sam Curren
#
# QRCode for Javascript
# http://d-project.googlecode.com/svn/trunk/misc/qrcode/js/qrcode.js
#
# Copyright (c) 2009 Kazuhiko Arase
#
# URL: http://www.d-project.com/
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# The word "QR Code" is registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
import re
import itertools
try:
from itertools import zip_longest
except:
from itertools import izip_longest as zip_longest
try:
unicode
except NameError:
# No unicode in Python 3
unicode = str
class QR:
valid = None
bits = None
group = 0
def __init__(self, data):
if self.valid and not self.valid(data):
raise ValueError
self.data = data
def __len__(self):
return len(self.data)
@property
def bitlength(self):
if self.bits is None:
return 0
q, r = divmod(len(self), len(self.bits))
return q * sum(self.bits) + sum(self.bits[:r])
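    # Worked example: for QRNumber, bits == (4, 3, 3), so 7 digits give
    # divmod(7, 3) == (2, 1) -> 2 * (4+3+3) bits for the two full groups
    # plus 4 bits for the leftover digit, i.e. bitlength == 24.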
def getLengthBits(self, ver):
if 0 < ver < 10:
return self.lengthbits[0]
elif ver < 27:
return self.lengthbits[1]
elif ver < 41:
return self.lengthbits[2]
raise ValueError("Unknown version: " + ver)
def getLength(self):
return len(self.data)
def __repr__(self):
return repr(self.data)
def write_header(self, buffer, version):
buffer.put(self.mode, 4)
lenbits = self.getLengthBits(version)
if lenbits:
buffer.put(len(self.data), lenbits )
def write(self, buffer, version):
self.write_header(buffer, version)
for g in zip_longest(*[iter(self.data)] * self.group):
bits = 0
n = 0
for i in range(self.group):
if g[i] is not None:
n *= len(self.chars)
n += self.chars.index(g[i])
bits += self.bits[i]
buffer.put(n, bits)
class QRNumber(QR):
valid = re.compile(u'[0-9]*$').match
chars = u'0123456789'
bits = (4,3,3)
group = 3
mode = 0x1
lengthbits = (10, 12, 14)
class QRAlphaNum(QR):
valid = re.compile(u'[-0-9A-Z $%*+-./:]*$').match
chars = u'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
bits = (6,5)
group = 2
mode = 0x2
lengthbits = (9, 11, 13)
class QR8bitByte(QR):
bits = (8,)
group = 1
mode = 0x4
lengthbits = (8, 16, 16)
def __init__(self, data):
if isinstance(data, unicode):
self.data = data.encode('utf-8') # XXX This really needs an ECI too
else:
self.data = data # It'd better be byte data
def write(self, buffer, version):
self.write_header(buffer, version)
for c in self.data:
if isinstance(c, str):
c = ord(c)
buffer.put(c, 8)
class QRKanji(QR):
bits = (13,)
group = 1
mode = 0x8
lengthbits = (8, 10, 12)
def __init__(self, data):
try:
self.data = self.unicode_to_qrkanji(data)
except UnicodeEncodeError:
raise ValueError('Not valid kanji')
def unicode_to_qrkanji(self, data):
codes = []
for i,c in enumerate(data):
try:
c = c.encode('shift-jis')
try:
c,d = map(ord, c)
except TypeError:
# Python 3
c,d = c
except UnicodeEncodeError as e:
raise UnicodeEncodeError('qrkanji', data, i, i+1, e.args[4])
except ValueError:
raise UnicodeEncodeError('qrkanji', data, i, i+1,
'illegal multibyte sequence')
c = c << 8 | d
if 0x8140 <= c <=0x9ffc:
c -= 0x8140
c = (((c & 0xff00) >> 8) * 0xc0) + (c & 0xff)
elif 0xe040 <= c <= 0xebbf:
c -= 0xc140
c = (((c & 0xff00) >> 8) * 0xc0) + (c & 0xff)
else:
raise UnicodeEncodeError('qrkanji', data, i, i+1,
'illegal multibyte sequence')
codes.append(c)
return codes
def write(self, buffer, version):
self.write_header(buffer, version)
for d in self.data:
buffer.put(d, 13)
class QRHanzi(QR):
bits = (13,)
group = 1
mode = 0xD
lengthbits = (8, 10, 12)
def __init__(self, data):
try:
self.data = self.unicode_to_qrhanzi(data)
except UnicodeEncodeError:
raise ValueError('Not valid hanzi')
def unicode_to_qrhanzi(self, data):
codes = []
for i,c in enumerate(data):
try:
c = c.encode('gb2312')
try:
c,d = map(ord, c)
except TypeError:
# Python 3
c,d = c
except UnicodeEncodeError as e:
raise UnicodeEncodeError('qrhanzi', data, i, i+1, e.args[4])
except ValueError:
raise UnicodeEncodeError('qrhanzi', data, i, i+1,
'illegal multibyte sequence')
c = c << 8 | d
if 0xa1a1 <= c <=0xaafe:
c -= 0xa1a1
c = (((c & 0xff00) >> 8) * 0x60) + (c & 0xff)
elif 0xb0a1 <= c <= 0xfafe:
c -= 0xa6a1
c = (((c & 0xff00) >> 8) * 0x60) + (c & 0xff)
else:
raise UnicodeEncodeError('qrhanzi', data, i, i+1,
'illegal multibyte sequence')
codes.append(c)
return codes
def write_header(self, buffer, version):
buffer.put(self.mode, 4)
buffer.put(1, 4) # Subset 1: GB2312 encoding
lenbits = self.getLengthBits(version)
if lenbits:
buffer.put(len(self.data), lenbits )
def write(self, buffer, version):
self.write_header(buffer, version)
for d in self.data:
buffer.put(d, 13)
# Special modes
class QRECI(QR):
mode = 0x7
lengthbits = (0, 0, 0)
def __init__(self, data):
if not 0 < data < 999999:
# Spec says 999999, format supports up to 0x1fffff = 2097151
raise ValueError("ECI out of range")
self.data = data
def write(self, buffer, version):
self.write_header(buffer, version)
if self.data <= 0x7f:
buffer.put(self.data, 8)
elif self.data <= 0x3fff:
buffer.put(self.data | 0x8000, 16)
elif self.data <= 0x1fffff:
buffer.put(self.data | 0xC00000, 24)
class QRStructAppend(QR):
mode = 0x3
lengthbits = (0, 0, 0)
def __init__(self, part, total, parity):
if not 0 < part <= 16:
raise ValueError("part out of range [1,16]")
if not 0 < total <= 16:
raise ValueError("total out of range [1,16]")
self.part = part
self.total = total
self.parity = parity
def write(self, buffer, version):
self.write_header(buffer, version)
buffer.put(self.part, 4)
buffer.put(self.total, 4)
buffer.put(self.parity, 8)
class QRFNC1First(QR):
mode = 0x5
lengthbits = (0, 0, 0)
def __init__(self):
pass
def write(self, buffer, version):
self.write_header(buffer, version)
class QRFNC1Second(QR):
valid = re.compile('^([A-Za-z]|[0-9][0-9])$').match
mode = 0x9
lengthbits = (0, 0, 0)
def write(self, buffer, version):
self.write_header(buffer, version)
d = self.data
if len(d) == 1:
d = ord(d) + 100
else:
d = int(d)
buffer.put(d, 8)
class QRCode:
def __init__(self, version, errorCorrectLevel):
self.version = version
self.errorCorrectLevel = errorCorrectLevel
self.modules = None
self.moduleCount = 0
self.dataCache = None
self.dataList = []
def addData(self, data):
if isinstance(data, QR):
newData = data
else:
for conv in (QRNumber, QRAlphaNum, QRKanji, QR8bitByte):
try:
newData = conv(data)
break
except ValueError:
pass
else:
raise ValueError
self.dataList.append(newData)
self.dataCache = None
def isDark(self, row, col):
return self.modules[row][col]
def getModuleCount(self):
return self.moduleCount
def calculate_version(self):
# Calculate version for data to fit the QR Code capacity
for version in range(1, 40):
rsBlocks = QRRSBlock.getRSBlocks(version, self.errorCorrectLevel)
totalDataCount = sum(block.dataCount for block in rsBlocks)
length = 0
for data in self.dataList:
length += 4
length += data.getLengthBits(version)
length += data.bitlength
if length <= totalDataCount * 8:
break
return version
def make(self):
if self.version is None:
self.version = self.calculate_version()
self.makeImpl(False, self.getBestMaskPattern())
def makeImpl(self, test, maskPattern):
self.moduleCount = self.version * 4 + 17
self.modules = [ [False] * self.moduleCount
for x in range(self.moduleCount) ]
self.setupPositionProbePattern(0, 0)
self.setupPositionProbePattern(self.moduleCount - 7, 0)
self.setupPositionProbePattern(0, self.moduleCount - 7)
self.setupPositionAdjustPattern()
self.setupTimingPattern()
self.setupTypeInfo(test, maskPattern)
if (self.version >= 7):
self.setupTypeNumber(test)
if (self.dataCache == None):
self.dataCache = QRCode.createData(self.version,
self.errorCorrectLevel,
self.dataList)
self.mapData(self.dataCache, maskPattern)
_positionProbePattern = [
[True, True, True, True, True, True, True],
[True, False, False, False, False, False, True],
[True, False, True, True, True, False, True],
[True, False, True, True, True, False, True],
[True, False, True, True, True, False, True],
[True, False, False, False, False, False, True],
[True, True, True, True, True, True, True],
]
def setupPositionProbePattern(self, row, col):
if row == 0:
self.modules[row+7][col:col+7] = [False] * 7
if col == 0:
self.modules[row+7][col+7] = False
else:
self.modules[row+7][col-1] = False
else:
# col == 0
self.modules[row-1][col:col+8] = [False] * 8
for r, data in enumerate(self._positionProbePattern):
self.modules[row+r][col:col+7] = data
if col == 0:
self.modules[row+r][col+7] = False
else:
self.modules[row+r][col-1] = False
def getBestMaskPattern(self):
minLostPoint = 0
pattern = 0
for i in range(8):
self.makeImpl(True, i);
lostPoint = QRUtil.getLostPoint(self);
if (i == 0 or minLostPoint > lostPoint):
minLostPoint = lostPoint
pattern = i
return pattern
def setupTimingPattern(self):
for r in range(8, self.moduleCount - 8):
self.modules[r][6] = (r % 2 == 0)
self.modules[6][8:self.moduleCount - 8] = itertools.islice(
itertools.cycle([True, False]), self.moduleCount - 16)
_positionAdjustPattern = [
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, True, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
]
def setupPositionAdjustPattern(self):
pos = QRUtil.getPatternPosition(self.version)
maxpos = self.moduleCount - 8
for row, col in itertools.product(pos, pos):
if col <= 8 and (row <= 8 or row >= maxpos):
continue
elif col >= maxpos and row <= 8:
continue
for r, data in enumerate(self._positionAdjustPattern):
self.modules[row + r - 2][col-2:col+3] = data
def setupTypeNumber(self, test):
bits = QRUtil.getBCHTypeNumber(self.version)
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i // 3][i % 3 + self.moduleCount - 8 - 3] = mod;
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i % 3 + self.moduleCount - 8 - 3][i // 3] = mod;
def setupTypeInfo(self, test, maskPattern):
data = (self.errorCorrectLevel << 3) | maskPattern
bits = QRUtil.getBCHTypeInfo(data)
# vertical
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1)
if (i < 6):
self.modules[i][8] = mod
elif (i < 8):
self.modules[i + 1][8] = mod
else:
self.modules[self.moduleCount - 15 + i][8] = mod
# horizontal
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1);
if (i < 8):
self.modules[8][self.moduleCount - i - 1] = mod
elif (i < 9):
self.modules[8][15 - i - 1 + 1] = mod
else:
self.modules[8][15 - i - 1] = mod
# fixed module
self.modules[self.moduleCount - 8][8] = (not test)
def _dataPosIterator(self):
cols = itertools.chain(range(self.moduleCount - 1, 6, -2),
range(5, 0, -2))
rows = (list(range(9, self.moduleCount - 8)),
list(itertools.chain(range(6), range(7, self.moduleCount))),
list(range(9, self.moduleCount)))
rrows = tuple( list(reversed(r)) for r in rows)
ppos = QRUtil.getPatternPosition(self.version)
ppos = set(itertools.chain.from_iterable(
(p-2, p-1, p, p+1, p+2) for p in ppos))
maxpos = self.moduleCount - 11
for col in cols:
rows, rrows = rrows, rows
if col <= 8: rowidx = 0
elif col >= self.moduleCount - 8: rowidx = 2
else: rowidx = 1
for row in rows[rowidx]:
for c in range(2):
c = col - c
if self.version >= 7:
if row < 6 and c >= self.moduleCount - 11:
continue
elif col < 6 and row >= self.moduleCount - 11:
continue
if row in ppos and c in ppos:
if not (row < 11 and (c < 11 or c > maxpos) or
c < 11 and (row < 11 or row > maxpos)):
continue
yield (c, row)
_dataPosList = None
def dataPosIterator(self):
if not self._dataPosList:
self._dataPosList = list(self._dataPosIterator())
return self._dataPosList
def _dataBitIterator(self, data):
for byte in data:
for bit in [0x80, 0x40, 0x20, 0x10,
0x08, 0x04, 0x02, 0x01]:
yield bool(byte & bit)
_dataBitList = None
def dataBitIterator(self, data):
if not self._dataBitList:
self._dataBitList = list(self._dataBitIterator(data))
return iter(self._dataBitList)
def mapData(self, data, maskPattern):
bits = self.dataBitIterator(data)
mask = QRUtil.getMask(maskPattern)
for (col, row), dark in zip_longest(self.dataPosIterator(), bits,
fillvalue=False):
self.modules[row][col] = dark ^ mask(row, col)
PAD0 = 0xEC
PAD1 = 0x11
@staticmethod
def createData(version, errorCorrectLevel, dataList):
rsBlocks = QRRSBlock.getRSBlocks(version, errorCorrectLevel)
buffer = QRBitBuffer();
for data in dataList:
data.write(buffer, version)
# calc num max data.
totalDataCount = 0;
for block in rsBlocks:
totalDataCount += block.dataCount
if (buffer.getLengthInBits() > totalDataCount * 8):
raise Exception("code length overflow. (%d > %d)" %
(buffer.getLengthInBits(), totalDataCount * 8))
# end code
if (buffer.getLengthInBits() + 4 <= totalDataCount * 8):
buffer.put(0, 4)
# padding
while (buffer.getLengthInBits() % 8 != 0):
buffer.putBit(False)
# padding
while (True):
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD0, 8)
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD1, 8)
return QRCode.createBytes(buffer, rsBlocks)
@staticmethod
def createBytes(buffer, rsBlocks):
offset = 0
maxDcCount = 0
maxEcCount = 0
totalCodeCount = 0
dcdata = []
ecdata = []
for block in rsBlocks:
totalCodeCount += block.totalCount
dcCount = block.dataCount
ecCount = block.totalCount - dcCount
maxDcCount = max(maxDcCount, dcCount)
maxEcCount = max(maxEcCount, ecCount)
dcdata.append(buffer.buffer[offset:offset+dcCount])
offset += dcCount
rsPoly = QRUtil.getErrorCorrectPolynomial(ecCount)
rawPoly = QRPolynomial(dcdata[-1], rsPoly.getLength() - 1)
modPoly = rawPoly.mod(rsPoly)
rLen = rsPoly.getLength() - 1
mLen = modPoly.getLength()
ecdata.append([ (modPoly.get(i) if i >= 0 else 0)
for i in range(mLen - rLen, mLen) ])
data = [ d for dd in itertools.chain(
zip_longest(*dcdata), zip_longest(*ecdata))
for d in dd if d is not None]
return data
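# Usage sketch (the data below is only an example): build a symbol with
# automatic version selection, then read back the module matrix.
#
#     qr = QRCode(None, QRErrorCorrectLevel.M)
#     qr.addData(u'HELLO WORLD')      # auto-selects QRAlphaNum
#     qr.make()
#     size = qr.getModuleCount()
#     for r in range(size):
#         print(''.join('#' if qr.isDark(r, c) else ' ' for c in range(size)))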
class QRErrorCorrectLevel:
L = 1
M = 0
Q = 3
H = 2
class QRMaskPattern:
PATTERN000 = 0
PATTERN001 = 1
PATTERN010 = 2
PATTERN011 = 3
PATTERN100 = 4
PATTERN101 = 5
PATTERN110 = 6
PATTERN111 = 7
class QRUtil(object):
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170]
]
G15 = ((1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) |
(1 << 0))
G18 = ((1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) |
(1 << 5) | (1 << 2) | (1 << 0))
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
@staticmethod
def getBCHTypeInfo(data):
d = data << 10;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0):
d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) -
QRUtil.getBCHDigit(QRUtil.G15) ) )
return ( (data << 10) | d) ^ QRUtil.G15_MASK
@staticmethod
def getBCHTypeNumber(data):
d = data << 12;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0):
d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) -
QRUtil.getBCHDigit(QRUtil.G18) ) )
return (data << 12) | d
@staticmethod
def getBCHDigit(data):
digit = 0;
while (data != 0):
digit += 1
data >>= 1
return digit
@staticmethod
def getPatternPosition(version):
return QRUtil.PATTERN_POSITION_TABLE[version - 1]
maskPattern = {
0: lambda i,j: (i + j) % 2 == 0,
1: lambda i,j: i % 2 == 0,
2: lambda i,j: j % 3 == 0,
3: lambda i,j: (i + j) % 3 == 0,
4: lambda i,j: (i // 2 + j // 3) % 2 == 0,
5: lambda i,j: (i*j)%2 + (i*j)%3 == 0,
6: lambda i,j: ( (i * j) % 2 + (i * j) % 3) % 2 == 0,
7: lambda i,j: ( (i * j) % 3 + (i + j) % 2) % 2 == 0
}
@classmethod
def getMask(cls, maskPattern):
return cls.maskPattern[maskPattern]
@staticmethod
def getErrorCorrectPolynomial(errorCorrectLength):
a = QRPolynomial([1], 0);
for i in range(errorCorrectLength):
a = a.multiply(QRPolynomial([1, QRMath.gexp(i)], 0) )
return a
@classmethod
def maskScoreRule1vert(cls, modules):
score = 0
lastCount = [0]
lastRow = None
for row in modules:
# Vertical patterns
if lastRow:
changed = [a ^ b for a,b in zip(row, lastRow)]
scores = [a and (b-4+3) for a,b in
zip_longest(changed, lastCount, fillvalue=0)
if b >= 4]
score += sum(scores)
lastCount = [0 if a else b + 1
for a,b in zip_longest(changed, lastCount,
fillvalue=0)]
lastRow = row
score += sum([b-4+3 for b in lastCount if b >= 4]) # final counts
return score
@classmethod
def maskScoreRule2(cls, modules):
score = 0
lastRow = modules[0]
for row in modules[1:]:
lastCol0, lastCol1 = row[0], lastRow[0]
for col0, col1 in zip(row[1:], lastRow[1:]):
if col0 == col1 == lastCol0 == lastCol1:
score += 3
lastCol0, lastCol1 = col0, col1
lastRow = row
return score
@classmethod
def maskScoreRule3hor(
cls, modules,
pattern = [True, False, True, True, True, False, True,
False, False, False, False]):
patternlen = len(pattern)
score = 0
for row in modules:
j = 0
maxj = len(row) - patternlen
while j < maxj:
if row[j:j+patternlen] == pattern:
score += 40
j += patternlen
else:
j += 1
return score
@classmethod
def maskScoreRule4(cls, modules):
cellCount = len(modules)**2
count = sum(sum(row) for row in modules)
return 10 * (abs(100 * count // cellCount - 50) // 5)
@classmethod
def getLostPoint(cls, qrCode):
lostPoint = 0;
# LEVEL1
lostPoint += cls.maskScoreRule1vert(qrCode.modules)
lostPoint += cls.maskScoreRule1vert(zip(*qrCode.modules))
# LEVEL2
lostPoint += cls.maskScoreRule2(qrCode.modules)
# LEVEL3
lostPoint += cls.maskScoreRule3hor(qrCode.modules)
lostPoint += cls.maskScoreRule3hor(zip(*qrCode.modules))
# LEVEL4
lostPoint += cls.maskScoreRule4(qrCode.modules)
return lostPoint
class QRMath:
@staticmethod
def glog(n):
if (n < 1):
raise Exception("glog(" + n + ")")
return LOG_TABLE[n];
@staticmethod
def gexp(n):
while n < 0:
n += 255
while n >= 256:
n -= 255
return EXP_TABLE[n];
EXP_TABLE = [x for x in range(256)]
LOG_TABLE = [x for x in range(256)]
for i in range(8):
EXP_TABLE[i] = 1 << i;
for i in range(8, 256):
EXP_TABLE[i] = (EXP_TABLE[i - 4] ^ EXP_TABLE[i - 5] ^
EXP_TABLE[i - 6] ^ EXP_TABLE[i - 8])
for i in range(255):
LOG_TABLE[EXP_TABLE[i] ] = i
class QRPolynomial:
def __init__(self, num, shift):
if (len(num) == 0):
raise Exception(len(num) + "/" + shift)
offset = 0
while offset < len(num) and num[offset] == 0:
offset += 1
self.num = num[offset:] + [0]*shift
def get(self, index):
return self.num[index]
def getLength(self):
return len(self.num)
def multiply(self, e):
num = [0] * (self.getLength() + e.getLength() - 1);
for i in range(self.getLength()):
for j in range(e.getLength()):
num[i + j] ^= QRMath.gexp(QRMath.glog(self.get(i) ) +
QRMath.glog(e.get(j) ) )
return QRPolynomial(num, 0);
def mod(self, e):
if (self.getLength() < e.getLength()):
return self;
ratio = QRMath.glog(self.num[0] ) - QRMath.glog(e.num[0] )
num = [nn ^ QRMath.gexp(QRMath.glog(en) + ratio)
for nn,en in zip(self.num, e.num)]
num += self.num[e.getLength():]
# recursive call
return QRPolynomial(num, 0).mod(e);
class QRRSBlock:
RS_BLOCK_TABLE = [
# L
# M
# Q
# H
# 1
[1, 26, 19],
[1, 26, 16],
[1, 26, 13],
[1, 26, 9],
# 2
[1, 44, 34],
[1, 44, 28],
[1, 44, 22],
[1, 44, 16],
# 3
[1, 70, 55],
[1, 70, 44],
[2, 35, 17],
[2, 35, 13],
# 4
[1, 100, 80],
[2, 50, 32],
[2, 50, 24],
[4, 25, 9],
# 5
[1, 134, 108],
[2, 67, 43],
[2, 33, 15, 2, 34, 16],
[2, 33, 11, 2, 34, 12],
# 6
[2, 86, 68],
[4, 43, 27],
[4, 43, 19],
[4, 43, 15],
# 7
[2, 98, 78],
[4, 49, 31],
[2, 32, 14, 4, 33, 15],
[4, 39, 13, 1, 40, 14],
# 8
[2, 121, 97],
[2, 60, 38, 2, 61, 39],
[4, 40, 18, 2, 41, 19],
[4, 40, 14, 2, 41, 15],
# 9
[2, 146, 116],
[3, 58, 36, 2, 59, 37],
[4, 36, 16, 4, 37, 17],
[4, 36, 12, 4, 37, 13],
# 10
[2, 86, 68, 2, 87, 69],
[4, 69, 43, 1, 70, 44],
[6, 43, 19, 2, 44, 20],
[6, 43, 15, 2, 44, 16],
# 11
[4, 101, 81],
[1, 80, 50, 4, 81, 51],
[4, 50, 22, 4, 51, 23],
[3, 36, 12, 8, 37, 13],
# 12
[2, 116, 92, 2, 117, 93],
[6, 58, 36, 2, 59, 37],
[4, 46, 20, 6, 47, 21],
[7, 42, 14, 4, 43, 15],
# 13
[4, 133, 107],
[8, 59, 37, 1, 60, 38],
[8, 44, 20, 4, 45, 21],
[12, 33, 11, 4, 34, 12],
# 14
[3, 145, 115, 1, 146, 116],
[4, 64, 40, 5, 65, 41],
[11, 36, 16, 5, 37, 17],
[11, 36, 12, 5, 37, 13],
# 15
[5, 109, 87, 1, 110, 88],
[5, 65, 41, 5, 66, 42],
[5, 54, 24, 7, 55, 25],
[11, 36, 12],
# 16
[5, 122, 98, 1, 123, 99],
[7, 73, 45, 3, 74, 46],
[15, 43, 19, 2, 44, 20],
[3, 45, 15, 13, 46, 16],
# 17
[1, 135, 107, 5, 136, 108],
[10, 74, 46, 1, 75, 47],
[1, 50, 22, 15, 51, 23],
[2, 42, 14, 17, 43, 15],
# 18
[5, 150, 120, 1, 151, 121],
[9, 69, 43, 4, 70, 44],
[17, 50, 22, 1, 51, 23],
[2, 42, 14, 19, 43, 15],
# 19
[3, 141, 113, 4, 142, 114],
[3, 70, 44, 11, 71, 45],
[17, 47, 21, 4, 48, 22],
[9, 39, 13, 16, 40, 14],
# 20
[3, 135, 107, 5, 136, 108],
[3, 67, 41, 13, 68, 42],
[15, 54, 24, 5, 55, 25],
[15, 43, 15, 10, 44, 16],
# 21
[4, 144, 116, 4, 145, 117],
[17, 68, 42],
[17, 50, 22, 6, 51, 23],
[19, 46, 16, 6, 47, 17],
# 22
[2, 139, 111, 7, 140, 112],
[17, 74, 46],
[7, 54, 24, 16, 55, 25],
[34, 37, 13],
# 23
[4, 151, 121, 5, 152, 122],
[4, 75, 47, 14, 76, 48],
[11, 54, 24, 14, 55, 25],
[16, 45, 15, 14, 46, 16],
# 24
[6, 147, 117, 4, 148, 118],
[6, 73, 45, 14, 74, 46],
[11, 54, 24, 16, 55, 25],
[30, 46, 16, 2, 47, 17],
# 25
[8, 132, 106, 4, 133, 107],
[8, 75, 47, 13, 76, 48],
[7, 54, 24, 22, 55, 25],
[22, 45, 15, 13, 46, 16],
# 26
[10, 142, 114, 2, 143, 115],
[19, 74, 46, 4, 75, 47],
[28, 50, 22, 6, 51, 23],
[33, 46, 16, 4, 47, 17],
# 27
[8, 152, 122, 4, 153, 123],
[22, 73, 45, 3, 74, 46],
[8, 53, 23, 26, 54, 24],
[12, 45, 15, 28, 46, 16],
# 28
[3, 147, 117, 10, 148, 118],
[3, 73, 45, 23, 74, 46],
[4, 54, 24, 31, 55, 25],
[11, 45, 15, 31, 46, 16],
# 29
[7, 146, 116, 7, 147, 117],
[21, 73, 45, 7, 74, 46],
[1, 53, 23, 37, 54, 24],
[19, 45, 15, 26, 46, 16],
# 30
[5, 145, 115, 10, 146, 116],
[19, 75, 47, 10, 76, 48],
[15, 54, 24, 25, 55, 25],
[23, 45, 15, 25, 46, 16],
# 31
[13, 145, 115, 3, 146, 116],
[2, 74, 46, 29, 75, 47],
[42, 54, 24, 1, 55, 25],
[23, 45, 15, 28, 46, 16],
# 32
[17, 145, 115],
[10, 74, 46, 23, 75, 47],
[10, 54, 24, 35, 55, 25],
[19, 45, 15, 35, 46, 16],
# 33
[17, 145, 115, 1, 146, 116],
[14, 74, 46, 21, 75, 47],
[29, 54, 24, 19, 55, 25],
[11, 45, 15, 46, 46, 16],
# 34
[13, 145, 115, 6, 146, 116],
[14, 74, 46, 23, 75, 47],
[44, 54, 24, 7, 55, 25],
[59, 46, 16, 1, 47, 17],
# 35
[12, 151, 121, 7, 152, 122],
[12, 75, 47, 26, 76, 48],
[39, 54, 24, 14, 55, 25],
[22, 45, 15, 41, 46, 16],
# 36
[6, 151, 121, 14, 152, 122],
[6, 75, 47, 34, 76, 48],
[46, 54, 24, 10, 55, 25],
[2, 45, 15, 64, 46, 16],
# 37
[17, 152, 122, 4, 153, 123],
[29, 74, 46, 14, 75, 47],
[49, 54, 24, 10, 55, 25],
[24, 45, 15, 46, 46, 16],
# 38
[4, 152, 122, 18, 153, 123],
[13, 74, 46, 32, 75, 47],
[48, 54, 24, 14, 55, 25],
[42, 45, 15, 32, 46, 16],
# 39
[20, 147, 117, 4, 148, 118],
[40, 75, 47, 7, 76, 48],
[43, 54, 24, 22, 55, 25],
[10, 45, 15, 67, 46, 16],
# 40
[19, 148, 118, 6, 149, 119],
[18, 75, 47, 31, 76, 48],
[34, 54, 24, 34, 55, 25],
[20, 45, 15, 61, 46, 16]
]
def __init__(self, totalCount, dataCount):
self.totalCount = totalCount
self.dataCount = dataCount
@staticmethod
def getRSBlocks(version, errorCorrectLevel):
rsBlock = QRRSBlock.getRsBlockTable(version, errorCorrectLevel);
if rsBlock == None:
raise Exception("bad rs block @ version:" + version +
"/errorCorrectLevel:" + errorCorrectLevel)
length = len(rsBlock) // 3
list = []
for i in range(length):
count = rsBlock[i * 3 + 0]
totalCount = rsBlock[i * 3 + 1]
dataCount = rsBlock[i * 3 + 2]
for j in range(count):
list.append(QRRSBlock(totalCount, dataCount))
return list;
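    # Worked example from RS_BLOCK_TABLE above: version 5 at level Q is
    # [2, 33, 15, 2, 34, 16], so getRSBlocks(5, QRErrorCorrectLevel.Q) returns
    # two blocks of 33 total / 15 data codewords followed by two blocks of
    # 34 total / 16 data codewords.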
@staticmethod
def getRsBlockTable(version, errorCorrectLevel):
if errorCorrectLevel == QRErrorCorrectLevel.L:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 0];
elif errorCorrectLevel == QRErrorCorrectLevel.M:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 1];
elif errorCorrectLevel == QRErrorCorrectLevel.Q:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 2];
elif errorCorrectLevel == QRErrorCorrectLevel.H:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 3];
else:
return None;
class QRBitBuffer:
def __init__(self):
self.buffer = []
self.length = 0
def __repr__(self):
return ".".join([str(n) for n in self.buffer])
def get(self, index):
bufIndex = index // 8
return ( (self.buffer[bufIndex] >> (7 - index % 8) ) & 1) == 1
def put(self, num, length):
for i in range(length):
self.putBit( ( (num >> (length - i - 1) ) & 1) == 1)
def getLengthInBits(self):
return self.length
def putBit(self, bit):
bufIndex = self.length // 8
if len(self.buffer) <= bufIndex:
self.buffer.append(0)
if bit:
self.buffer[bufIndex] |= (0x80 >> (self.length % 8) )
self.length += 1
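    # Illustrative use: b = QRBitBuffer(); b.put(5, 4) appends the bits 0101,
    # leaving b.buffer == [0x50] and b.getLengthInBits() == 4.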
| agpl-3.0 | -6,790,555,099,693,506,000 | 29.2 | 80 | 0.475063 | false |
ashwyn/eden-message_parser | private/templates/default/menus.py | 8 | 4154 | # -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from eden.layouts import *
try:
from .layouts import *
except ImportError:
pass
import eden.menus as default
# Below is an example which you can base your own template's menus.py on
# - there are also other examples in the other templates folders
# =============================================================================
#class S3MainMenu(default.S3MainMenu):
#"""
#Custom Application Main Menu:
#The main menu consists of several sub-menus, each of which can
#be customized separately as a method of this class. The overall
#composition of the menu is defined in the menu() method, which can
#be customized as well:
#Function Sub-Menu Access to (standard)
#menu_modules() the modules menu the Eden modules
#menu_gis() the GIS menu GIS configurations
#menu_admin() the Admin menu System/User Administration
#menu_lang() the Language menu Selection of the GUI locale
#menu_auth() the User menu Login, Logout, User Profile
#menu_help() the Help menu Contact page, About page
#The standard uses the MM layout class for main menu items - but you
#can of course use a custom layout class which you define in layouts.py.
#Additional sub-menus can simply be defined as additional functions in
#this class, and then be included in the menu() method.
#Each sub-menu function returns a list of menu items, only the menu()
#function must return a layout class instance.
#"""
# -------------------------------------------------------------------------
#@classmethod
#def menu(cls):
#""" Compose Menu """
#main_menu = MM()(
## Modules-menu, align-left
#cls.menu_modules(),
## Service menus, align-right
## Note: always define right-hand items in reverse order!
#cls.menu_help(right=True),
#cls.menu_auth(right=True),
#cls.menu_lang(right=True),
#cls.menu_admin(right=True),
#cls.menu_gis(right=True)
#)
#return main_menu
# -------------------------------------------------------------------------
#@classmethod
#def menu_modules(cls):
#""" Custom Modules Menu """
#return [
#homepage(),
#homepage("gis"),
#homepage("pr")(
#MM("Persons", f="person"),
#MM("Groups", f="group")
#),
#MM("more", link=False)(
#homepage("dvi"),
#homepage("irs")
#),
#]
# =============================================================================
#class S3OptionsMenu(default.S3OptionsMenu):
#"""
#Custom Controller Menus
#The options menu (left-hand options menu) is individual for each
#controller, so each controller has its own options menu function
#in this class.
#Each of these option menu functions can be customized separately,
#by simply overriding (re-defining) the default function. The
#options menu function must return an instance of the item layout.
#The standard menu uses the M item layout class, but you can of
#course also use any other layout class which you define in
#layouts.py (can also be mixed).
#Make sure additional helper functions in this class don't match
#any current or future controller prefix (e.g. by using an
#underscore prefix).
#"""
#def cr(self):
#""" CR / Shelter Registry """
#return M(c="cr")(
#M("Camp", f="shelter")(
#M("New", m="create"),
#M("List All"),
#M("Map", m="map"),
#M("Import", m="import"),
#)
#)
# END =========================================================================
| mit | -1,046,064,666,999,466,600 | 34.810345 | 80 | 0.505296 | false |
kampanita/pelisalacarta | python/main-classic/platformcode/xbmc_info_window.py | 1 | 21922 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 [email protected]
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
import inspect
import os
import re
import xbmcgui
from core.tmdb import Tmdb
from core.item import Item
from core import logger
class InfoWindow(xbmcgui.WindowXMLDialog):
otmdb = None
item_title = ""
item_serie = ""
item_temporada = 0
item_episodio = 0
result = {}
@staticmethod
def get_language(lng):
        # Convert the language code into its full English name
languages = {
'aa': 'Afar', 'ab': 'Abkhazian', 'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'am': 'Amharic',
'ar': 'Arabic', 'an': 'Aragonese', 'as': 'Assamese', 'av': 'Avaric', 'ae': 'Avestan',
'ay': 'Aymara', 'az': 'Azerbaijani', 'ba': 'Bashkir', 'bm': 'Bambara', 'eu': 'Basque',
'be': 'Belarusian', 'bn': 'Bengali', 'bh': 'Bihari languages', 'bi': 'Bislama',
'bo': 'Tibetan', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'my': 'Burmese',
'ca': 'Catalan; Valencian', 'cs': 'Czech', 'ch': 'Chamorro', 'ce': 'Chechen', 'zh': 'Chinese',
'cu': 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic',
'cv': 'Chuvash', 'kw': 'Cornish', 'co': 'Corsican', 'cr': 'Cree', 'cy': 'Welsh',
'da': 'Danish', 'de': 'German', 'dv': 'Divehi; Dhivehi; Maldivian', 'nl': 'Dutch; Flemish',
'dz': 'Dzongkha', 'en': 'English', 'eo': 'Esperanto',
'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese', 'fa': 'Persian', 'fj': 'Fijian',
'fi': 'Finnish', 'fr': 'French', 'fy': 'Western Frisian', 'ff': 'Fulah',
'Ga': 'Georgian', 'gd': 'Gaelic; Scottish Gaelic', 'ga': 'Irish', 'gl': 'Galician',
'gv': 'Manx', 'el': 'Greek, Modern (1453-)', 'gn': 'Guarani', 'gu': 'Gujarati',
'ht': 'Haitian; Haitian Creole', 'ha': 'Hausa', 'he': 'Hebrew', 'hz': 'Herero', 'hi': 'Hindi',
'ho': 'Hiri Motu', 'hr': 'Croatian', 'hu': 'Hungarian', 'hy': 'Armenian', 'ig': 'Igbo',
'is': 'Icelandic', 'io': 'Ido', 'ii': 'Sichuan Yi; Nuosu', 'iu': 'Inuktitut',
'ie': 'Interlingue; Occidental', 'ia': 'Interlingua (International Auxiliary Language Association)',
'id': 'Indonesian', 'ik': 'Inupiaq', 'it': 'Italian', 'jv': 'Javanese',
'ja': 'Japanese', 'kl': 'Kalaallisut; Greenlandic', 'kn': 'Kannada', 'ks': 'Kashmiri',
'ka': 'Georgian', 'kr': 'Kanuri', 'kk': 'Kazakh', 'km': 'Central Khmer', 'ki': 'Kikuyu; Gikuyu',
'rw': 'Kinyarwanda', 'ky': 'Kirghiz; Kyrgyz', 'kv': 'Komi', 'kg': 'Kongo', 'ko': 'Korean',
'kj': 'Kuanyama; Kwanyama', 'ku': 'Kurdish', 'lo': 'Lao', 'la': 'Latin', 'lv': 'Latvian',
'li': 'Limburgan; Limburger; Limburgish', 'ln': 'Lingala', 'lt': 'Lithuanian',
'lb': 'Luxembourgish; Letzeburgesch', 'lu': 'Luba-Katanga', 'lg': 'Ganda', 'mk': 'Macedonian',
'mh': 'Marshallese', 'ml': 'Malayalam', 'mi': 'Maori', 'mr': 'Marathi', 'ms': 'Malay', 'Mi': 'Micmac',
'mg': 'Malagasy', 'mt': 'Maltese', 'mn': 'Mongolian', 'na': 'Nauru',
'nv': 'Navajo; Navaho', 'nr': 'Ndebele, South; South Ndebele', 'nd': 'Ndebele, North; North Ndebele',
'ng': 'Ndonga', 'ne': 'Nepali', 'nn': 'Norwegian Nynorsk; Nynorsk, Norwegian',
'nb': 'Bokmål, Norwegian; Norwegian Bokmål', 'no': 'Norwegian', 'oc': 'Occitan (post 1500)',
'oj': 'Ojibwa', 'or': 'Oriya', 'om': 'Oromo', 'os': 'Ossetian; Ossetic', 'pa': 'Panjabi; Punjabi',
'pi': 'Pali', 'pl': 'Polish', 'pt': 'Portuguese', 'ps': 'Pushto; Pashto', 'qu': 'Quechua',
'ro': 'Romanian; Moldavian; Moldovan', 'rn': 'Rundi', 'ru': 'Russian', 'sg': 'Sango', 'rm': 'Romansh',
'sa': 'Sanskrit', 'si': 'Sinhala; Sinhalese', 'sk': 'Slovak', 'sl': 'Slovenian', 'se': 'Northern Sami',
'sm': 'Samoan', 'sn': 'Shona', 'sd': 'Sindhi', 'so': 'Somali', 'st': 'Sotho, Southern', 'es': 'Spanish',
'sc': 'Sardinian', 'sr': 'Serbian', 'ss': 'Swati', 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
'ty': 'Tahitian', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu', 'tg': 'Tajik', 'tl': 'Tagalog',
'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tonga (Tonga Islands)', 'tn': 'Tswana', 'ts': 'Tsonga',
'tk': 'Turkmen', 'tr': 'Turkish', 'tw': 'Twi', 'ug': 'Uighur; Uyghur', 'uk': 'Ukrainian',
'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Venda', 'vi': 'Vietnamese', 'vo': 'Volapük',
'wa': 'Walloon', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish', 'yo': 'Yoruba', 'za': 'Zhuang; Chuang',
'zu': 'Zulu'}
return languages.get(lng, lng)
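    # Illustrative use: get_language('es') returns 'Spanish'; an unknown code
    # such as 'xx' is returned unchanged by languages.get(lng, lng).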
@staticmethod
def get_date(date):
        # Convert the date format (YYYY-MM-DD -> DD/MM/YYYY)
if date:
return date.split("-")[2] + "/" + date.split("-")[1] + "/" + date.split("-")[0]
else:
return "N/A"
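    # Illustrative use: get_date("2016-07-22") returns "22/07/2016"; an empty
    # value returns "N/A".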
def get_episode_from_title(self, item):
        # Pattern for season and episode, e.g. "1x01"
pattern = re.compile("([0-9]+)[ ]*[x|X][ ]*([0-9]+)")
        # Search in title
matches = pattern.findall(item.title)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
        # Search in fulltitle
matches = pattern.findall(item.fulltitle)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
        # Search in contentTitle
matches = pattern.findall(item.contentTitle)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
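    # Illustrative match (hypothetical title): for "Mi Serie 2x05" the pattern
    # above yields [("2", "05")], so item_temporada is "2" and
    # item_episodio is "05".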
def get_item_info(self, item):
        # Collect the Item parameters we are interested in:
if "title" in item and item.title != "":
self.item_title = item.title
if "fulltitle" in item and item.fulltitle != "":
self.item_title = item.fulltitle
if "contentTitle" in item and item.contentTitle != "":
self.item_title = item.contentTitle
if "show" in item and item.show != "":
self.item_serie = item.show
if "contentSerieName" in item and item.contentSerieName != "":
self.item_serie = item.contentSerieName
if "contentSeason" in item and item.contentSeason != "":
self.item_temporada = item.contentSeason
if "contentepisodeNumber" in item and item.contentepisodeNumber != "":
self.item_episodio = item.contentepisodeNumber
        # If contentepisodeNumber or contentSeason are missing, try to extract them from the title
if not self.item_episodio or not self.item_temporada:
self.get_episode_from_title(item)
def get_dict_info(self, dct):
self.result = dct
def get_tmdb_movie_data(self, text):
        # Look up the movie if it has not been looked up already
if not self.otmdb:
self.otmdb = Tmdb(texto_buscado=text, idioma_busqueda="es", tipo="movie")
        # If there are no results, bail out
if not self.otmdb.get_id():
return False
        # Movie information
self.result["type"] = "movie"
self.result["id_Tmdb"] = self.otmdb.get_id()
self.result["title"] = self.otmdb.result["title"]
self.result["original_title"] = self.otmdb.result["original_title"]
self.result["date"] = self.get_date(self.otmdb.result["release_date"])
self.result["language"] = self.get_language(self.otmdb.result["original_language"])
self.result["rating"] = self.otmdb.result["vote_average"] + "/10 (" + self.otmdb.result["vote_count"] + ")"
self.result["genres"] = ", ".join(self.otmdb.result["genres"])
self.result["thumbnail"] = self.otmdb.get_poster()
self.result["fanart"] = self.otmdb.get_backdrop()
self.result["overview"] = self.otmdb.result["overview"]
return True
def get_tmdb_tv_data(self, text, season=0, episode=0):
        # Cast the season and episode to int()
season = int(season)
episode = int(episode)
        # Look up the series if it is not already loaded
if not self.otmdb:
self.otmdb = Tmdb(texto_buscado=text, idioma_busqueda="es", tipo="tv")
_id = self.otmdb.get_id()
        # If there are no results, bail out
if not _id:
return False
        # Generic information about the series
self.result["type"] = "tv"
self.result["id_Tmdb"] = self.otmdb.get_id()
self.result["title"] = self.otmdb.result.get("name", "N/A")
self.result["rating"] = self.otmdb.result["vote_average"] + "/10 (" + self.otmdb.result["vote_count"] + ")"
self.result["genres"] = ", ".join(self.otmdb.result["genres"])
self.result["language"] = self.get_language(self.otmdb.result["original_language"])
self.result["thumbnail"] = self.otmdb.get_poster()
self.result["fanart"] = self.otmdb.get_backdrop()
self.result["overview"] = self.otmdb.result.get("overview", "N/A")
        # If we have season and episode information
if season and episode:
if "seasons" not in self.result or self.result["seasons"] == "":
                self.otmdb = Tmdb(id_Tmdb=_id, idioma_busqueda="es", tipo="tv")
self.result["seasons"] = str(self.otmdb.result.get("number_of_seasons", 0))
if season > self.result["seasons"]:
season = self.result["season_count"]
if episode > self.otmdb.result.get("seasons")[season-1]["episode_count"]:
episode = self.otmdb.result.get("seasons")[season]["episode_count"]
            # Request information about the specific episode
episode_info = self.otmdb.get_episodio(season, episode)
            # Season information
self.result["season"] = str(season)
if episode_info.get("temporada_poster"):
self.result["thumbnail"] = episode_info.get("temporada_poster")
if self.otmdb.result.get("overview"):
self.result["overview"] = self.otmdb.result.get("overview")
            # Episode information
self.result["episode"] = str(episode)
self.result["episodes"] = str(episode_info.get('temporada_num_episodios', 0))
self.result["episode_title"] = episode_info.get("episodio_titulo", "N/A")
self.result["date"] = self.get_date(self.otmdb.temporada[season]["episodes"][episode-1].get("air_date"))
if episode_info.get("episodio_imagen"):
self.result["fanart"] = episode_info.get("episodio_imagen")
if episode_info.get("episodio_sinopsis"):
self.result["overview"] = episode_info.get("episodio_sinopsis")
return True
def get_tmdb_data(self, data_in):
self.otmdb = None
if self.listData:
data = {}
if data_in["type"] == "movie":
                # Movie listing mode
data["title"] = data_in["title"]
data["original_title"] = data_in["original_title"]
data["date"] = self.get_date(data_in["release_date"])
else:
                # Series listing mode
data["title"] = data_in.get("name", "N/A")
            # Data common to all listings
data["type"] = data_in["type"]
data["id_Tmdb"] = data_in["id"]
data["language"] = self.get_language(data_in["original_language"])
data["rating"] = data_in["vote_average"] + "/10 (" + data_in["vote_count"] + ")"
data["genres"] = ", ".join(data_in["genres"])
data["thumbnail"] = data_in["thumbnail"]
data["fanart"] = data_in["fanart"]
data["overview"] = data_in.get("overview")
self.from_tmdb = False
self.result = data
else:
if type(data_in) == Item:
self.from_tmdb = True
self.get_item_info(data_in)
                # Movie mode
if not self.item_serie:
encontrado = self.get_tmdb_movie_data(self.item_title)
if not encontrado:
encontrado = self.get_tmdb_tv_data(self.item_title, self.item_temporada, self.item_episodio)
else:
encontrado = self.get_tmdb_tv_data(self.item_serie, self.item_temporada, self.item_episodio)
if not encontrado:
encontrado = self.get_tmdb_movie_data(self.item_serie)
if type(data_in) == dict:
self.from_tmdb = False
self.get_dict_info(data_in)
def Start(self, data, caption="Información del vídeo", callback=None):
        # Capture the parameters
self.caption = caption
self.callback = callback
self.indexList = -1
self.listData = None
        # Get the channel the call was made from and load the settings available for that channel
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
self.channel = os.path.basename(channelpath).replace(".py", "")
if type(data) == list:
self.listData = data
self.indexList = 0
data = self.listData[self.indexList]
self.get_tmdb_data(data)
        # Show the window
self.return_value = None
self.doModal()
return self.return_value
def onInit(self):
        # Put the focus on the close button [X]
self.setFocus(self.getControl(10003))
        # Set the title and the images
self.getControl(10002).setLabel(self.caption)
self.getControl(10004).setImage(self.result.get("fanart", ""))
self.getControl(10005).setImage(self.result.get("thumbnail", "InfoWindow/img_no_disponible.png"))
        # Load the data for the movie layout
if self.result.get("type", "movie") == "movie":
self.getControl(10006).setLabel("Titulo:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Titulo Original:")
self.getControl(10009).setLabel(self.result.get("original_title", "N/A"))
self.getControl(100010).setLabel("Idioma original:")
self.getControl(100011).setLabel(self.result.get("language", "N/A"))
self.getControl(100012).setLabel("Puntuacion:")
self.getControl(100013).setLabel(self.result.get("rating", "N/A"))
self.getControl(100014).setLabel("Lanzamiento:")
self.getControl(100015).setLabel(self.result.get("date", "N/A"))
self.getControl(100016).setLabel("Generos:")
self.getControl(100017).setLabel(self.result.get("genres", "N/A"))
        # Load the data for the series layout
else:
self.getControl(10006).setLabel("Serie:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Idioma original:")
self.getControl(10009).setLabel(self.result.get("language", "N/A"))
self.getControl(100010).setLabel("Puntuacion:")
self.getControl(100011).setLabel(self.result.get("rating", "N/A"))
self.getControl(100012).setLabel("Generos:")
self.getControl(100013).setLabel(self.result.get("genres", "N/A"))
if self.result.get("season") and self.result.get("episode"):
self.getControl(100014).setLabel("Titulo:")
self.getControl(100015).setLabel(self.result.get("episode_title", "N/A"))
self.getControl(100016).setLabel("Temporada:")
self.getControl(100017).setLabel(self.result.get("season", "N/A") + " de " +
self.result.get("seasons", "N/A"))
self.getControl(100018).setLabel("Episodio:")
self.getControl(100019).setLabel(self.result.get("episode", "N/A") + " de " +
self.result.get("episodes", "N/A"))
self.getControl(100020).setLabel("Emision:")
self.getControl(100021).setLabel(self.result.get("date", "N/A"))
        # Synopsis
if "overview" in self.result and self.result['overview']:
self.getControl(100022).setLabel("Sinopsis:")
self.getControl(100023).setText(self.result.get("overview", "N/A"))
else:
self.getControl(100022).setLabel("")
self.getControl(100023).setText("")
        # Load the buttons if necessary
self.getControl(10024).setVisible(self.indexList > -1)
self.getControl(10025).setEnabled(self.indexList > 0)
self.getControl(10026).setEnabled(self.indexList + 1 != len(self.listData))
self.getControl(100029).setLabel("({0}/{1})".format(self.indexList + 1, len(self.listData)))
        # Put the focus on the "Previous" button;
        # if it were disabled the focus would go to the "Next" button, and likewise on to the "Cancel" button
self.setFocus(self.getControl(10024))
def onClick(self, id):
logger.info("pelisalacarta.platformcode.xbmc_info_window onClick id="+repr(id))
        # "Cancel" button and [X]
if id == 10003 or id == 10027:
self.close()
        # "Previous" button
if id == 10025 and self.indexList > 0:
self.indexList -= 1
self.get_tmdb_data(self.listData[self.indexList])
self.onInit()
        # "Next" button
if id == 10026 and self.indexList < len(self.listData) - 1:
self.indexList += 1
self.get_tmdb_data(self.listData[self.indexList])
self.onInit()
        # "OK", "Cancel" and [X] buttons
if id == 10028 or id == 10003 or id == 10027:
self.close()
if self.callback:
cb_channel = None
try:
cb_channel = __import__('platformcode.%s' % self.channel,
fromlist=["platformcode.%s" % self.channel])
except ImportError:
logger.error('Imposible importar %s' % self.channel)
                if id == 10028:  # "OK" button
if cb_channel:
self.return_value = getattr(cb_channel, self.callback)(self.result)
                else:  # "Cancel" button and [X]
if cb_channel:
self.return_value = getattr(cb_channel, self.callback)(None)
def onAction(self, action):
# logger.info("pelisalacarta.platformcode.xbmc_info_window onAction action="+repr(action.getId()))
        # Action 1: left arrow
if action == 1:
            # Get the focus
focus = self.getFocusId()
            # "OK" button
if focus == 10028:
self.setFocus(self.getControl(10027))
            # "Cancel" button
elif focus == 10027:
if self.indexList + 1 != len(self.listData):
                    # go to the "Next" button
self.setFocus(self.getControl(10026))
elif self.indexList > 0:
                    # go to the "Previous" button since "Next" is not active (we are at the end of the list)
self.setFocus(self.getControl(10025))
            # "Next" button
elif focus == 10026:
if self.indexList > 0:
                    # go to the "Previous" button
self.setFocus(self.getControl(10025))
        # Action 2: right arrow
if action == 2:
            # Get the focus
focus = self.getFocusId()
            # "Previous" button
if focus == 10025:
if self.indexList + 1 != len(self.listData):
                    # go to the "Next" button
self.setFocus(self.getControl(10026))
else:
                    # go to the "Cancel" button since "Next" is not active (we are at the end of the list)
self.setFocus(self.getControl(10027))
            # "Next" button
elif focus == 10026:
self.setFocus(self.getControl(10027))
            # "Cancel" button
elif focus == 10027:
self.setFocus(self.getControl(10028))
        # Pressing OK simulates a click on the OK button
        # if action == 107:  # that is mouse movement
        #     logger.info("onAction OK was pressed")
        #     # self.onClick(10028)
        # Pressing ESC or Back simulates a click on the Cancel button
if action in [10, 92]:
            # TODO fix this
# self.close()
self.onClick(10027)
| gpl-3.0 | 7,880,560,362,160,596,000 | 46.503254 | 116 | 0.557834 | false |
luzheqi1987/nova-annotation | nova/tests/unit/test_hooks.py | 22 | 6320 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for hook customization."""
import stevedore
from nova import hooks
from nova import test
class SampleHookA(object):
name = "a"
def _add_called(self, op, kwargs):
called = kwargs.get('called', None)
if called is not None:
called.append(op + self.name)
def pre(self, *args, **kwargs):
self._add_called("pre", kwargs)
class SampleHookB(SampleHookA):
name = "b"
def post(self, rv, *args, **kwargs):
self._add_called("post", kwargs)
class SampleHookC(SampleHookA):
name = "c"
def pre(self, f, *args, **kwargs):
self._add_called("pre" + f.__name__, kwargs)
def post(self, f, rv, *args, **kwargs):
self._add_called("post" + f.__name__, kwargs)
class SampleHookExceptionPre(SampleHookA):
name = "epre"
exception = Exception()
def pre(self, f, *args, **kwargs):
raise self.exception
class SampleHookExceptionPost(SampleHookA):
name = "epost"
exception = Exception()
def post(self, f, rv, *args, **kwargs):
raise self.exception
class MockEntryPoint(object):
def __init__(self, cls):
self.cls = cls
def load(self):
return self.cls
class MockedHookTestCase(test.BaseHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return []
def setUp(self):
super(MockedHookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
class HookTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
]
def setUp(self):
super(HookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
@hooks.add_hook('test_hook')
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['test_hook']
self.assert_has_hook('test_hook', self._hooked)
self.assertEqual(2, len(mgr.extensions))
self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['prea', 'preb', 'postb'], called_order)
class HookTestCaseWithFunction(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('function_hook',
MockEntryPoint(SampleHookC), SampleHookC, SampleHookC()),
]
@hooks.add_hook('function_hook', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['function_hook']
self.assert_has_hook('function_hook', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookC, mgr.extensions[0].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['pre_hookedc', 'post_hookedc'], called_order)
class HookFailPreTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_pre',
MockEntryPoint(SampleHookExceptionPre),
SampleHookExceptionPre, SampleHookExceptionPre()),
]
@hooks.add_hook('fail_pre', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_pre']
self.assert_has_hook('fail_pre', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPre, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPre, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
class HookFailPostTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_post',
MockEntryPoint(SampleHookExceptionPost),
SampleHookExceptionPost, SampleHookExceptionPost()),
]
@hooks.add_hook('fail_post', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_post']
self.assert_has_hook('fail_post', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPost, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPost, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
| apache-2.0 | 5,709,721,052,965,497,000 | 29.829268 | 78 | 0.631329 | false |
dario61081/koalixcrm | koalixcrm/crm/migrations/0045_auto_20180805_2047.py | 2 | 1557 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-05 20:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crm', '0044_reportingperiod_status'),
]
operations = [
migrations.AddField(
model_name='work',
name='worked_hours',
field=models.DateTimeField(blank=True, null=True, verbose_name='Stop Time'),
),
migrations.AlterField(
model_name='task',
name='planned_end_date',
field=models.DateField(blank=True, null=True, verbose_name='Planned End'),
),
migrations.AlterField(
model_name='task',
name='planned_start_date',
field=models.DateField(blank=True, null=True, verbose_name='Planned Start'),
),
migrations.AlterField(
model_name='task',
name='status',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='crm.TaskStatus', verbose_name='Status'),
),
migrations.AlterField(
model_name='work',
name='start_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='Start Time'),
),
migrations.AlterField(
model_name='work',
name='stop_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='Stop Time'),
),
]
| bsd-3-clause | 7,300,502,120,805,449,000 | 32.847826 | 148 | 0.5851 | false |
odicraig/kodi2odi | addons/plugin.radio.disney/common.py | 2 | 7722 | #############################################################################
#############################################################################
import os,xbmc,xbmcgui,xbmcaddon,sys,logging,re,urllib,urllib2
#############################################################################
#############################################################################
addon=xbmcaddon.Addon();
addon_id =addon.getAddonInfo('id');
addon_name =addon.getAddonInfo('name');
addon_path =addon.getAddonInfo('path'); addon_path8=addon.getAddonInfo('path').decode("utf-8");
#MediaPath =xbmc.translatePath( os.path.join(addon_path8,'resources','skins','default','media').encode("utf-8") ).decode("utf-8");
MediaPath =xbmc.translatePath( os.path.join(addon_path,'resources','skins','default','media') );
addonPath=addon_path; addonId=addon_id; addonName=addon_name;
#############################################################################
#############################################################################
def tP(p): return xbmc.translatePath(p)
def MediaFile(n,e='',p=MediaPath): return os.path.join(p,n+e)
def MediaFileP(n,e='',p=MediaPath): return MediaFile(n,e='.png')
def MediaFileG(n,e='',p=MediaPath): return MediaFile(n,e='.gif')
def MediaFileJ(n,e='',p=MediaPath): return MediaFile(n,e='.jpg')
def getSet(id,d=''):
try: return addon.getSetting(id)
except: return d
def setSet(id,v):
try: return addon.setSetting(id,v)
except: pass
def tfalse(r,d=False): ## Get True / False
if (r.lower()=='true' ) or (r.lower()=='t') or (r.lower()=='y') or (r.lower()=='1') or (r.lower()=='yes'): return True
elif (r.lower()=='false') or (r.lower()=='f') or (r.lower()=='n') or (r.lower()=='0') or (r.lower()=='no'): return False
else: return d
def isPath(path): return os.path.exists(path)
def isFile(filename): return os.path.isfile(filename)
def deb(a,b):
try: print "%s: %s"%(str(a),str(b))
except: pass
def debob(o):
try: print o
except: pass
def nolines(t):
it=t.splitlines(); t=''
for L in it: t=t+L
t=((t.replace("\r","")).replace("\n",""))
return t
def cFL( t,c='tan'): ### For Coloring Text ###
try: return '[COLOR '+c+']'+t+'[/COLOR]'
except: pass
def cFL_(t,c='tan'): ### For Coloring Text (First Letter-Only) ###
try: return '[COLOR '+c+']'+t[0:1]+'[/COLOR]'+t[1:]
except: pass
def DoE(e): xbmc.executebuiltin(e)
def DoAW(e): xbmc.executebuiltin("ActivateWindow(%s)"%str(e))
def DoRW(e): xbmc.executebuiltin("ReplaceWindow(%s)"%str(e))
def DoRA(e): xbmc.executebuiltin("RunAddon(%s)"%str(e))
def DoRA2(e,e2="1",e3=""): xbmc.executebuiltin('RunAddon(%s,"%s","%s")'%(str(e),str(e2),e3));
def DoA(a): xbmc.executebuiltin("Action(%s)"%str(a))
def DoCM(a): xbmc.executebuiltin("Control.Message(windowid=%s)"%(str(a)))
def DoSC(a): xbmc.executebuiltin("SendClick(%s)"%(str(a)))
def DoSC2(a,Id): xbmc.executebuiltin("SendClick(%s,%s)"%(str(a),str(Id)))
def DoStopScript(e): xbmc.executebuiltin("StopScript(%s)"%str(e))
def showAddonSettings(): addon.openSettings()
def note(title='',msg='',delay=5000,image='http://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/US_99_%281961%29.svg/40px-US_99_%281961%29.svg.png'): xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")'%(title,msg,delay,image))
def popYN(title='',line1='',line2='',line3='',n='',y=''):
diag=xbmcgui.Dialog()
r=diag.yesno(title,line1,line2,line3,n,y)
if r: return r
else: return False
#del diag
def popOK(msg="",title="",line2="",line3=""):
dialog=xbmcgui.Dialog()
#ok=dialog.ok(title,msg,line2,line3)
dialog.ok(title,msg,line2,line3)
def FileSAVE(path,data): file=open(path,'w'); file.write(data); file.close()
def FileOPEN(path,d=''):
try:
#deb('File',path)
if os.path.isfile(path): ## File found.
#deb('Found',path)
file = open(path, 'r')
contents=file.read()
file.close()
return contents
else: return d ## File not found.
except: return d
def FolderNEW(dir_path):
dir_path=dir_path.strip()
if not os.path.exists(dir_path): os.makedirs(dir_path)
def FolderLIST(mypath,dirname): #...creates sub-directories if they are not found.
subpath=os.path.join(mypath,dirname)
if not os.path.exists(subpath): os.makedirs(subpath)
return subpath
def getURL(url):
try:
req=urllib2.Request(url)
#req.add_header(MyBrowser[0],MyBrowser[1])
response=urllib2.urlopen(req)
link=response.read()
response.close()
return(link)
except: deb('Failed to fetch url',url); return ''
def postURL(url,form_data={},headers={},compression=True):
try:
req=urllib2.Request(url)
if form_data: form_data=urllib.urlencode(form_data); req=urllib2.Request(url,form_data)
#req.add_header(MyBrowser[0],MyBrowser[1])
for k, v in headers.items(): req.add_header(k, v)
if compression: req.add_header('Accept-Encoding', 'gzip')
response=urllib2.urlopen(req)
link=response.read()
response.close()
return link
except: deb('Failed to fetch url',url); return ''
def postURL2(url,form_data={}):
try:
postData=urllib.urlencode(form_data)
req=urllib2.Request(url,postData)
#req.add_header(MyBrowser[0], MyBrowser[1])
response=urllib2.urlopen(req)
link=response.read()
response.close()
return(link)
except: deb('Failed to fetch url',url); return ''
def showkeyboard(txtMessage="",txtHeader="",passwordField=False):
try:
if txtMessage=='None': txtMessage=''
keyboard = xbmc.Keyboard(txtMessage, txtHeader, passwordField)#("text to show","header text", True="password field"/False="show text")
keyboard.doModal()
if keyboard.isConfirmed(): return keyboard.getText()
else: return '' #return False
except: return ''
def art(f,fe=''):
fe1='.png'; fe2='.jpg'; fe3='.gif';
if fe1 in f: f=f.replace(fe1,''); fe=fe1;
elif fe2 in f: f=f.replace(fe2,''); fe=fe2;
elif fe3 in f: f=f.replace(fe3,''); fe=fe3;
return xbmc.translatePath(os.path.join(addonPath,'art',f+fe))
def artp(f,fe='.png'):
return art(f,fe)
def artj(f,fe='.jpg'):
return art(f,fe)
def addonPath2(f,fe=''):
return xbmc.translatePath(os.path.join(addonPath,f+fe))
def get_xbmc_os():
try: xbmc_os=str(os.environ.get('OS'))
except:
try: xbmc_os=str(sys.platform)
except: xbmc_os="unknown"
return xbmc_os
def doCtoS(c,s="",d=""): ## Puts an array (Example: [68,68,68,68]) into a string, converting each number in the array into its character form to make up a string.
try:
if len(c)==0: return d
for k in range(0,len(c)):
s+=str(chr(c[k]))
except: return d
#############################################################################
#############################################################################
def SKgetStringValue(Tag,ErResult=''):
try: return xbmc.getInfoLabel('Skin.String('+Tag+')')
except: return ErResult
def SKchange(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetString('+Tag+', %s)' % NewValue)
def SKnchange(Tag,NewValue=0): xbmc.executebuiltin('Skin.SetNumeric('+Tag+', %s)' % NewValue)
def SKgchange(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetImage('+Tag+', %s)' % NewValue)
def SKgLchange(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetLargeImage('+Tag+', %s)' % NewValue)
def SKbchange(Tag,NewValue=False): xbmc.executebuiltin('Skin.SetBool('+Tag+', %s)' % NewValue)
def SKtchange(Tag,NewValue=False): xbmc.executebuiltin('Skin.ToggleSetting('+Tag+', %s)' % NewValue)
def SKsetStringValue(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetString('+Tag+', %s)' % NewValue)
#############################################################################
#############################################################################
| gpl-3.0 | 1,457,990,216,350,937,000 | 43.423529 | 237 | 0.596348 | false |
rgabo/awsh | awsh/repl.py | 1 | 3713 | from __future__ import unicode_literals, print_function
import shlex
import sys
import traceback
from codeop import compile_command
from pathlib import Path
from shutil import which
from awsh.commands import *
from awsh.providers import Provider, PosixProvider
from awsh.util import lazy_property
from prompt_toolkit import prompt
from prompt_toolkit.history import InMemoryHistory
from pyspark.sql import SparkSession
class Context(object):
def __init__(self):
self.globals = {
"context": self,
}
def sql(self, sql):
self.provider(self.cwd).create_df(self.cwd).registerTempTable('cwd')
return self.spark.sql(sql)
@property
def cwd(self):
return Path.cwd()
@property
def sc(self):
return self.spark.sparkContext
@lazy_property
def spark(self):
return SparkSession.builder \
.appName("awsh") \
.getOrCreate()
def provider(self, path):
for provider in Provider.providers:
if str(path).startswith(provider.prefix):
return provider(self)
return PosixProvider(self)
class Session(object):
keyword_commands = ["import"]
def __init__(self):
self.context = Context()
self.history = InMemoryHistory()
def command(self, cmd, args):
for command in Command.commands:
if command.name == cmd:
return command(args, context=self.context)
def prompt(self):
text = prompt(self.get_prompt(), history=self.history)
if text:
self.handle_input(text)
def handle_input(self, input):
# handle input modifiers
if input.startswith('>'):
return self.exec_code(input[1:])
if input.startswith('!'):
return self.exec_shell(input[1:])
if input.startswith('%'):
return self.exec_sql(input[1:])
# parse input as single cmd with args
cmd, *args = self.parse_input(input)
command = self.command(cmd, args)
# 1. execute builtin command
if command:
self.exec_command(command)
# 2. execute Python keywords
elif cmd in self.keyword_commands:
self.exec_code(input)
# 3. execute shell command
elif which(cmd) is not None:
self.exec_shell(input)
# 4. execute as code
else:
self.exec_code(input)
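    # Editor's illustrative note (assumed behaviour, not part of the original
    # source): a few hypothetical inputs and where handle_input dispatches them.
    #   ">1 + 1"              -> exec_code("1 + 1")             (leading '>')
    #   "!ls -la"             -> exec_shell("ls -la")           (leading '!')
    #   "%select * from cwd"  -> exec_sql("select * from cwd")  (leading '%')
    #   "install foo"         -> a registered Command subclass named "install", if any
    #   "import os"           -> exec_code, since 'import' is in keyword_commands
    #   "ls -la"              -> exec_shell, because which('ls') finds a binary
    #   anything else         -> exec_code as Python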
def exec_code(self, input):
exec(compile_command(input), self.context.globals)
@staticmethod
def exec_command(command):
command.exec()
@staticmethod
def exec_shell(input):
call(input, shell=True)
def exec_sql(self, input):
self.context.sql(input).show()
def get_prompt(self):
return "{} $ ".format(self.context.cwd.name)
@staticmethod
def parse_input(input):
return shlex.split(input, posix=True)
def run():
session = Session()
print("""
Welcome to __
____ __ _______/ /_
/ __ `/ | /| / / ___/ __ \\
/ /_/ /| |/ |/ (__ ) / / /
\__,_/ |__/|__/____/_/ /_/
""")
while True:
# noinspection PyBroadException
try:
session.prompt()
except (KeyboardInterrupt, EOFError):
break
except Exception:
handle_exception(sys.exc_info())
def handle_exception(exc_tuple):
last_type, last_value, last_traceback = exc_tuple
print(traceback.format_exception_only(last_type, last_value)[-1].rstrip('\n'))
sys.last_type = last_type
sys.last_value = last_value
sys.last_traceback = last_traceback
if __name__ == '__main__':
run()
| bsd-3-clause | 2,869,738,789,390,848,500 | 24.784722 | 82 | 0.58174 | false |
partofthething/home-assistant | tests/components/nest/test_events.py | 3 | 9106 | """Test for Nest binary sensor platform for the Smart Device Management API.
These tests fake out the subscriber/devicemanager, and are not using a real
pubsub subscriber.
"""
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventMessage
from homeassistant.util.dt import utcnow
from .common import async_setup_sdm_platform
from tests.common import async_capture_events
DOMAIN = "nest"
DEVICE_ID = "some-device-id"
PLATFORM = "camera"
NEST_EVENT = "nest_event"
EVENT_SESSION_ID = "CjY5Y3VKaTZwR3o4Y19YbTVfMF..."
EVENT_ID = "FWWVQVUdGNUlTU2V4MGV2aTNXV..."
async def async_setup_devices(hass, device_type, traits={}):
"""Set up the platform and prerequisites."""
devices = {
DEVICE_ID: Device.MakeDevice(
{
"name": DEVICE_ID,
"type": device_type,
"traits": traits,
},
auth=None,
),
}
return await async_setup_sdm_platform(hass, PLATFORM, devices=devices)
def create_device_traits(event_trait):
"""Create fake traits for a device."""
return {
"sdm.devices.traits.Info": {
"customName": "Front",
},
event_trait: {},
"sdm.devices.traits.CameraLiveStream": {
"maxVideoResolution": {
"width": 640,
"height": 480,
},
"videoCodecs": ["H264"],
"audioCodecs": ["AAC"],
},
}
def create_event(event_type, device_id=DEVICE_ID, timestamp=None):
"""Create an EventMessage for a single event type."""
events = {
event_type: {
"eventSessionId": EVENT_SESSION_ID,
"eventId": EVENT_ID,
},
}
return create_events(events=events, device_id=device_id)
def create_events(events, device_id=DEVICE_ID, timestamp=None):
"""Create an EventMessage for events."""
if not timestamp:
timestamp = utcnow()
return EventMessage(
{
"eventId": "some-event-id",
"timestamp": timestamp.isoformat(timespec="seconds"),
"resourceUpdate": {
"name": device_id,
"events": events,
},
},
auth=None,
)
async def test_doorbell_chime_event(hass):
"""Test a pubsub message for a doorbell event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
assert entry.unique_id == "some-device-id-camera"
assert entry.original_name == "Front"
assert entry.domain == "camera"
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "Front"
assert device.model == "Doorbell"
assert device.identifiers == {("nest", DEVICE_ID)}
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.DoorbellChime.Chime", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "doorbell_chime",
"timestamp": event_time,
}
async def test_camera_motion_event(hass):
"""Test a pubsub message for a camera motion event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.CAMERA",
create_device_traits("sdm.devices.traits.CameraMotion"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.CameraMotion.Motion", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_motion",
"timestamp": event_time,
}
async def test_camera_sound_event(hass):
"""Test a pubsub message for a camera sound event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.CAMERA",
create_device_traits("sdm.devices.traits.CameraSound"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.CameraSound.Sound", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_sound",
"timestamp": event_time,
}
async def test_camera_person_event(hass):
"""Test a pubsub message for a camera person event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.CameraEventImage"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.CameraPerson.Person", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_person",
"timestamp": event_time,
}
async def test_camera_multiple_event(hass):
"""Test a pubsub message for a camera person event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.CameraEventImage"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
event_map = {
"sdm.devices.events.CameraMotion.Motion": {
"eventSessionId": EVENT_SESSION_ID,
"eventId": EVENT_ID,
},
"sdm.devices.events.CameraPerson.Person": {
"eventSessionId": EVENT_SESSION_ID,
"eventId": EVENT_ID,
},
}
timestamp = utcnow()
await subscriber.async_receive_event(create_events(event_map, timestamp=timestamp))
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 2
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_motion",
"timestamp": event_time,
}
assert events[1].data == {
"device_id": entry.device_id,
"type": "camera_person",
"timestamp": event_time,
}
async def test_unknown_event(hass):
"""Test a pubsub message for an unknown event type."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
await subscriber.async_receive_event(create_event("some-event-id"))
await hass.async_block_till_done()
assert len(events) == 0
async def test_unknown_device_id(hass):
"""Test a pubsub message for an unknown event type."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
await subscriber.async_receive_event(
create_event("sdm.devices.events.DoorbellChime.Chime", "invalid-device-id")
)
await hass.async_block_till_done()
assert len(events) == 0
async def test_event_message_without_device_event(hass):
"""Test a pubsub message for an unknown event type."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
timestamp = utcnow()
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": timestamp.isoformat(timespec="seconds"),
},
auth=None,
)
await subscriber.async_receive_event(event)
await hass.async_block_till_done()
assert len(events) == 0
| mit | -7,789,226,528,352,010,000 | 30.078498 | 87 | 0.631562 | false |
RefugeeMatchmaking/HackZurich | GAE_Playground/libs/networkx/algorithms/bipartite/tests/test_cluster.py | 85 | 2709 | import networkx as nx
from nose.tools import *
from networkx.algorithms.bipartite.cluster import cc_dot,cc_min,cc_max
import networkx.algorithms.bipartite as bipartite
def test_pairwise_bipartite_cc_functions():
# Test functions for different kinds of bipartite clustering coefficients
# between pairs of nodes using 3 example graphs from figure 5 p. 40
# Latapy et al (2008)
G1 = nx.Graph([(0,2),(0,3),(0,4),(0,5),(0,6),(1,5),(1,6),(1,7)])
G2 = nx.Graph([(0,2),(0,3),(0,4),(1,3),(1,4),(1,5)])
G3 = nx.Graph([(0,2),(0,3),(0,4),(0,5),(0,6),(1,5),(1,6),(1,7),(1,8),(1,9)])
result = {0:[1/3.0, 2/3.0, 2/5.0],
1:[1/2.0, 2/3.0, 2/3.0],
2:[2/8.0, 2/5.0, 2/5.0]}
for i, G in enumerate([G1, G2, G3]):
assert(bipartite.is_bipartite(G))
assert(cc_dot(set(G[0]), set(G[1])) == result[i][0])
assert(cc_min(set(G[0]), set(G[1])) == result[i][1])
assert(cc_max(set(G[0]), set(G[1])) == result[i][2])
def test_star_graph():
G=nx.star_graph(3)
# all modes are the same
answer={0:0,1:1,2:1,3:1}
assert_equal(bipartite.clustering(G,mode='dot'),answer)
assert_equal(bipartite.clustering(G,mode='min'),answer)
assert_equal(bipartite.clustering(G,mode='max'),answer)
@raises(nx.NetworkXError)
def test_not_bipartite():
bipartite.clustering(nx.complete_graph(4))
@raises(nx.NetworkXError)
def test_bad_mode():
bipartite.clustering(nx.path_graph(4),mode='foo')
def test_path_graph():
G=nx.path_graph(4)
answer={0:0.5,1:0.5,2:0.5,3:0.5}
assert_equal(bipartite.clustering(G,mode='dot'),answer)
assert_equal(bipartite.clustering(G,mode='max'),answer)
answer={0:1,1:1,2:1,3:1}
assert_equal(bipartite.clustering(G,mode='min'),answer)
def test_average_path_graph():
G=nx.path_graph(4)
assert_equal(bipartite.average_clustering(G,mode='dot'),0.5)
assert_equal(bipartite.average_clustering(G,mode='max'),0.5)
assert_equal(bipartite.average_clustering(G,mode='min'),1)
def test_ra_clustering_davis():
G = nx.davis_southern_women_graph()
cc4 = round(bipartite.robins_alexander_clustering(G), 3)
assert_equal(cc4, 0.468)
def test_ra_clustering_square():
G = nx.path_graph(4)
G.add_edge(0, 3)
assert_equal(bipartite.robins_alexander_clustering(G), 1.0)
def test_ra_clustering_zero():
G = nx.Graph()
assert_equal(bipartite.robins_alexander_clustering(G), 0)
G.add_nodes_from(range(4))
assert_equal(bipartite.robins_alexander_clustering(G), 0)
G.add_edges_from([(0,1),(2,3),(3,4)])
assert_equal(bipartite.robins_alexander_clustering(G), 0)
G.add_edge(1,2)
assert_equal(bipartite.robins_alexander_clustering(G), 0)
| mit | -8,219,858,428,909,908,000 | 37.7 | 80 | 0.640827 | false |
devekko/ansible-role-manager | arm/commands/install.py | 1 | 5123 | import os, shutil, re
from . import Command, CommandException
from arm.conf import settings
from arm.odict import odict
from arm.util import retrieve_role, retrieve_all_roles, get_playbook_root
from arm import Role, Module
class install(Command):
help = "install playbook role"
def __init__(self, parser):
parser.description = self.help
parser.add_argument('-U','--upgrade', action='store_true')
parser.add_argument('-n', '--no-deps', action='store_true', help="install without this item's dependencies")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-r', '--requirements', nargs=1, help="install from requirements file (see `arm help freeze`)")
group.add_argument('role_or_module', nargs='?', help="specifier of role or module to install locally")
# TODO : add argument of where the role is to be installed
# TODO : add argument of where the installed role should be linked
def run(self, argv):
root = get_playbook_root(os.getcwd())
if not root:
print '''
can't find playbook.
use `arm init` to create recommended structure.
    or use the `--no-deps` option.'''
return 1
roles = odict()
if getattr(argv, 'requirements', ''):
for role_ident in open(argv.requirements[0],'r'):
roles = self._fetch(role_ident, argv.no_deps, roles)
else:
roles = self._fetch(argv.role_or_module, argv.no_deps, roles )
for alias,role in roles.items():
self._install_and_link(alias, role, getattr(argv, 'upgrade', False))
print "\nrole(s) '%s' installed succesfully.\n" % (", ".join(roles.keys()))
exit(0)
def _fetch(self, role_ident, no_deps, roles):
aliasRE = re.compile(r'^(?P<ident>.+?)(\#alias\=(?P<alias>[a-zA-Z][a-zA-Z0-9]+?)){0,1}$')
alias_match = aliasRE.match(role_ident)
if not alias_match:
print "error : could not find format"
return 1
role_ident = alias_match.groupdict()['ident']
alias = alias_match.groupdict().get('alias',None)
if no_deps:
role = retrieve_role(role_ident)
if alias:
roles.update( { alias:role } )
return roles
roles.update( { role.get_name():role } )
return roles
return retrieve_all_roles(role_ident, alias, roles)
def _install_and_link(self, alias, rmp, upgrade=False):
root = get_playbook_root(os.getcwd())
source_path = rmp.get_path()
library_path = None
link_path = None
if type(rmp) == Role:
installed_rmp_dir = settings.installed_roles_dir
ansible_rmp_dir = settings.ansible_roles_dir
elif type(rmp) == Module:
installed_rmp_dir = settings.installed_modules_dir
ansible_rmp_dir = settings.ansible_modules_dir
installed_rmp_path = os.path.join(installed_rmp_dir, rmp.get_name())
library_path = os.path.join(root, installed_rmp_path)
link_path = os.path.join(root, ansible_rmp_dir, alias)
# TODO : test if a 'local' route makes sense for a role dependency
# if the library path is also the role, local role dependency
#if os.path.realpath(link_path) == os.path.realpath(library_path):
#return
if os.path.exists(library_path) and not upgrade:
raise CommandException("'%s' already installed in library, use --upgrade to install latest" % rmp.get_name())
if os.path.exists(link_path):
if not os.path.islink(link_path):
if type(rmp) == Role:
raise Exception("role '%s' already exists as a non-installed role" % rmp)
elif type(rmp) == Module:
raise Exception("module '%s' aleady exists as a non-installed module" % rmp)
if not upgrade:
raise CommandException("'%s' already installed in library, use --upgrade to install latest" % rmp.get_name())
if upgrade:
if os.path.exists(library_path):
print "\t upgrading :: removing old version"
shutil.rmtree(library_path)
if os.path.islink(link_path):
print "\t upgrading :: unlinking old version"
os.unlink(link_path)
shutil.copytree(source_path, library_path)
ansible_rmp_path = os.path.join(root,ansible_rmp_dir)
if not os.path.exists(ansible_rmp_path):
os.mkdir(ansible_rmp_path)
os.symlink(
os.path.relpath(installed_rmp_path, ansible_rmp_dir),
os.path.join(link_path)
)
| apache-2.0 | -8,093,591,074,921,309,000 | 34.576389 | 125 | 0.553191 | false |
ianyh/heroku-buildpack-python-nodejs | vendor/distribute-0.6.36/setuptools/tests/test_markerlib.py | 71 | 2237 | import os
import unittest
from setuptools.tests.py26compat import skipIf
try:
import ast
except ImportError:
pass
class TestMarkerlib(unittest.TestCase):
@skipIf('ast' not in globals(),
"ast not available (Python < 2.6?)")
def test_markers(self):
from _markerlib import interpret, default_environment, compile
os_name = os.name
self.assertTrue(interpret(""))
self.assertTrue(interpret("os.name != 'buuuu'"))
self.assertTrue(interpret("python_version > '1.0'"))
self.assertTrue(interpret("python_version < '5.0'"))
self.assertTrue(interpret("python_version <= '5.0'"))
self.assertTrue(interpret("python_version >= '1.0'"))
self.assertTrue(interpret("'%s' in os.name" % os_name))
self.assertTrue(interpret("'buuuu' not in os.name"))
self.assertFalse(interpret("os.name == 'buuuu'"))
self.assertFalse(interpret("python_version < '1.0'"))
self.assertFalse(interpret("python_version > '5.0'"))
self.assertFalse(interpret("python_version >= '5.0'"))
self.assertFalse(interpret("python_version <= '1.0'"))
self.assertFalse(interpret("'%s' not in os.name" % os_name))
self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'"))
environment = default_environment()
environment['extra'] = 'test'
self.assertTrue(interpret("extra == 'test'", environment))
self.assertFalse(interpret("extra == 'doc'", environment))
def raises_nameError():
try:
interpret("python.version == '42'")
except NameError:
pass
else:
raise Exception("Expected NameError")
raises_nameError()
def raises_syntaxError():
try:
interpret("(x for x in (4,))")
except SyntaxError:
pass
else:
raise Exception("Expected SyntaxError")
raises_syntaxError()
statement = "python_version == '5'"
self.assertEqual(compile(statement).__doc__, statement)
| mit | 2,657,379,931,597,587,000 | 33.953125 | 89 | 0.563701 | false |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/ctypes/test/test_prototypes.py | 81 | 6738 | from ctypes import *
import unittest
# IMPORTANT INFO:
#
# Consider this call:
# func.restype = c_char_p
# func(c_char_p("123"))
# It returns
# "123"
#
# WHY IS THIS SO?
#
# argument tuple (c_char_p("123"), ) is destroyed after the function
# func is called, but NOT before the result is actually built.
#
# If the arglist would be destroyed BEFORE the result has been built,
# the c_char_p("123") object would already have a zero refcount,
# and the pointer passed to (and returned by) the function would
# probably point to deallocated space.
#
# In this case, there would have to be an additional reference to the argument...
import _ctypes_test
testdll = CDLL(_ctypes_test.__file__)
# Return machine address `a` as a (possibly long) non-negative integer.
# Starting with Python 2.5, id(anything) is always non-negative, and
# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
def positive_address(a):
if a >= 0:
return a
# View the bits in `a` as unsigned instead.
import struct
num_bits = struct.calcsize("P") * 8 # num bits in native machine address
a += 1L << num_bits
assert a >= 0
return a
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
class CharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_long
func.argtypes = None
def test_paramflags(self):
# function returns c_void_p result,
# and has a required parameter named 'input'
prototype = CFUNCTYPE(c_void_p, c_void_p)
func = prototype(("_testfunc_p_p", testdll),
((1, "input"),))
try:
func()
except TypeError, details:
self.assertEqual(str(details), "required argument 'input' missing")
else:
self.fail("TypeError not raised")
self.assertEqual(func(None), None)
self.assertEqual(func(input=None), None)
def test_int_pointer_arg(self):
func = testdll._testfunc_p_p
func.restype = c_long
self.assertEqual(0, func(0))
ci = c_int(0)
func.argtypes = POINTER(c_int),
self.assertEqual(positive_address(addressof(ci)),
positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_short),
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_double),
self.assertRaises(ArgumentError, func, byref(ci))
def test_POINTER_c_char_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = POINTER(c_char),
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_char_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_char_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_void_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_void_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
func(byref(c_int()))
func(pointer(c_int()))
func((c_int * 3)())
try:
func.restype = c_wchar_p
except NameError:
pass
else:
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
def test_instance(self):
func = testdll._testfunc_p_p
func.restype = c_void_p
class X:
_as_parameter_ = None
func.argtypes = c_void_p,
self.assertEqual(None, func(X()))
func.argtypes = None
self.assertEqual(None, func(X()))
try:
c_wchar
except NameError:
pass
else:
class WCharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_int
func.argtypes = None
def test_POINTER_c_wchar_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = POINTER(c_wchar),
self.assertEqual(None, func(None))
self.assertEqual(u"123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
self.assertEqual(u"123", func(c_wbuffer(u"123")))
ca = c_wchar("a")
self.assertEqual(u"a", func(pointer(ca))[0])
self.assertEqual(u"a", func(byref(ca))[0])
def test_c_wchar_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_wchar_p,
c_wchar_p.from_param(u"123")
self.assertEqual(None, func(None))
self.assertEqual("123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
# XXX Currently, these raise TypeErrors, although they shouldn't:
self.assertEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
func = testdll._testfunc_ai8
func.restype = POINTER(c_int)
func.argtypes = c_int * 8,
func((c_int * 8)(1, 2, 3, 4, 5, 6, 7, 8))
# This did crash before:
def func(): pass
CFUNCTYPE(None, c_int * 3)(func)
################################################################
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -3,905,336,734,486,215,000 | 29.488688 | 81 | 0.570347 | false |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/ConfigParser.py | 186 | 27746 | """Configuration file parser.
A setup file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT or any of it's
case-insensitive variants.
"""
if section.lower() == "default":
raise ValueError, 'Invalid section name: %s' % section
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
# =), followed by any #
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
        Each section in the setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
import UserDict as _UserDict
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)
| gpl-2.0 | -8,483,117,190,821,577,000 | 35.847278 | 80 | 0.54534 | false |
huntxu/fuel-web | nailgun/manage.py | 2 | 11684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __main__
import argparse
import code
import os
import sys
def add_config_parameter(parser):
parser.add_argument(
'-c', '--config', dest='config_file', action='store', type=str,
help='custom config file', default=None
)
def load_run_parsers(subparsers):
run_parser = subparsers.add_parser(
'run', help='run application locally'
)
run_parser.add_argument(
'-p', '--port', dest='port', action='store', type=str,
help='application port', default='8000'
)
run_parser.add_argument(
'-a', '--address', dest='address', action='store', type=str,
help='application address', default='0.0.0.0'
)
run_parser.add_argument(
'--fake-tasks', action='store_true', help='fake tasks'
)
run_parser.add_argument(
'--fake-tasks-amqp', action='store_true',
help='fake tasks with real AMQP'
)
run_parser.add_argument(
'--keepalive',
action='store_true',
help='run keep alive thread'
)
add_config_parameter(run_parser)
run_parser.add_argument(
'--fake-tasks-tick-count', action='store', type=int,
help='Fake tasks tick count'
)
run_parser.add_argument(
'--fake-tasks-tick-interval', action='store', type=int,
help='Fake tasks tick interval in seconds'
)
run_parser.add_argument(
'--authentication-method', action='store', type=str,
help='Choose authentication type',
choices=['none', 'fake', 'keystone'],
)
def load_db_parsers(subparsers):
subparsers.add_parser(
'syncdb', help='sync application database'
)
subparsers.add_parser(
'dropdb', help='drop application database'
)
# fixtures
loaddata_parser = subparsers.add_parser(
'loaddata', help='load data from fixture'
)
loaddata_parser.add_argument(
'fixture', action='store', help='json fixture to load'
)
dumpdata_parser = subparsers.add_parser(
'dumpdata', help='dump models as fixture'
)
dumpdata_parser.add_argument(
'model', action='store', help='model name to dump; underscored name'
'should be used, e.g. network_group for NetworkGroup model'
)
generate_parser = subparsers.add_parser(
'generate_nodes_fixture', help='generate new nodes fixture'
)
generate_parser.add_argument(
'-n', '--total-nodes', dest='total_nodes', action='store', type=int,
help='total nodes count to generate', required=True
)
generate_parser.add_argument(
'-e', '--error-nodes', dest='error_nodes', action='store', type=int,
help='error nodes count to generate'
)
generate_parser.add_argument(
'-o', '--offline-nodes', dest='offline_nodes', action='store', type=int,
help='offline nodes count to generate'
)
generate_parser.add_argument(
'-i', '--min-ifaces-num', dest='min_ifaces_num', action='store',
type=int, default=1,
help='minimal number of ethernet interfaces for node'
)
subparsers.add_parser(
'loaddefault',
help='load data from default fixtures (settings.FIXTURES_TO_UPLOAD) '
'and apply fake deployment tasks for all releases in database'
)
def load_alembic_parsers(migrate_parser):
alembic_parser = migrate_parser.add_subparsers(
dest="alembic_command",
help='alembic command'
)
for name in ['current', 'history', 'branches']:
parser = alembic_parser.add_parser(name)
for name in ['upgrade', 'downgrade']:
parser = alembic_parser.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser = alembic_parser.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser = alembic_parser.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
def load_db_migrate_parsers(subparsers):
migrate_parser = subparsers.add_parser(
'migrate', help='dealing with DB migration'
)
load_alembic_parsers(migrate_parser)
def load_dbshell_parsers(subparsers):
dbshell_parser = subparsers.add_parser(
'dbshell', help='open database shell'
)
add_config_parameter(dbshell_parser)
def load_test_parsers(subparsers):
subparsers.add_parser(
'test', help='run unit tests'
)
def load_shell_parsers(subparsers):
shell_parser = subparsers.add_parser(
'shell', help='open python REPL'
)
add_config_parameter(shell_parser)
def load_settings_parsers(subparsers):
subparsers.add_parser(
'dump_settings', help='dump current settings to YAML'
)
def load_extensions_parsers(subparsers):
extensions_parser = subparsers.add_parser(
'extensions', help='extensions related actions')
load_alembic_parsers(extensions_parser)
def action_dumpdata(params):
import logging
logging.disable(logging.WARNING)
from nailgun.db.sqlalchemy import fixman
fixman.dump_fixture(params.model)
sys.exit(0)
def action_generate_nodes_fixture(params):
try:
from oslo.serialization import jsonutils
except ImportError:
from oslo_serialization import jsonutils
from nailgun.logger import logger
from nailgun.utils import fake_generator
logger.info('Generating new nodes fixture...')
total_nodes_count = params.total_nodes
fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'nailgun/fixtures/')
file_path = os.path.join(
fixtures_dir,
'{0}_fake_nodes_environment.json'.format(total_nodes_count)
)
generator = fake_generator.FakeNodesGenerator()
res = generator.generate_fake_nodes(
total_nodes_count, error_nodes_count=params.error_nodes,
offline_nodes_count=params.offline_nodes,
min_ifaces_num=params.min_ifaces_num)
with open(file_path, 'w') as file_to_write:
jsonutils.dump(res, file_to_write, indent=4)
logger.info('Done. New fixture was stored in {0} file'.format(file_path))
def action_loaddata(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
with open(params.fixture, "r") as fileobj:
fixman.upload_fixture(fileobj)
logger.info("Done")
def action_loadfakedeploymenttasks(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Applying fake deployment tasks to all releases...")
fixman.load_fake_deployment_tasks()
logger.info("Done")
def action_loaddefault(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
fixman.upload_fixtures()
logger.info("Applying fake deployment tasks to all releases...")
fixman.load_fake_deployment_tasks()
logger.info("Done")
def action_syncdb(params):
from nailgun.db import syncdb
from nailgun.logger import logger
logger.info("Syncing database...")
syncdb()
logger.info("Done")
def action_dropdb(params):
from nailgun.db import dropdb
from nailgun.logger import logger
logger.info("Dropping database...")
dropdb()
logger.info("Done")
def action_migrate(params):
from nailgun.db.migration import action_migrate_alembic_core
action_migrate_alembic_core(params)
def action_extensions(params):
from nailgun.logger import logger
from nailgun.db.migration import action_migrate_alembic_extension
from nailgun.extensions import get_all_extensions
for extension in get_all_extensions():
if extension.alembic_migrations_path():
logger.info('Running command for extension {0}'.format(
extension.full_name()))
action_migrate_alembic_extension(params, extension=extension)
else:
logger.info(
'Extension {0} does not have migrations. '
'Skipping...'.format(extension.full_name()))
def action_test(params):
from nailgun.logger import logger
from nailgun.unit_test import TestRunner
logger.info("Running tests...")
TestRunner.run()
logger.info("Done")
def action_dbshell(params):
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
args = ['psql']
env = {}
if settings.DATABASE['passwd']:
env['PGPASSWORD'] = settings.DATABASE['passwd']
if settings.DATABASE['user']:
args += ["-U", settings.DATABASE['user']]
if settings.DATABASE['host']:
args.extend(["-h", settings.DATABASE['host']])
if settings.DATABASE['port']:
args.extend(["-p", str(settings.DATABASE['port'])])
args += [settings.DATABASE['name']]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvpe('psql', args, env)
def action_dump_settings(params):
from nailgun.settings import settings
sys.stdout.write(settings.dump())
def action_shell(params):
from nailgun.db import db
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
try:
from IPython import embed
embed()
except ImportError:
code.interact(local={'db': db, 'settings': settings})
def action_run(params):
from nailgun.settings import settings
settings.update({
'LISTEN_PORT': int(params.port),
'LISTEN_ADDRESS': params.address,
})
for attr in ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']:
param = getattr(params, attr.lower())
if param is not None:
settings.update({attr: param})
if params.authentication_method:
auth_method = params.authentication_method
settings.AUTH.update({'AUTHENTICATION_METHOD': auth_method})
if params.config_file:
settings.update_from_file(params.config_file)
from nailgun.app import appstart
appstart()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
dest="action", help='actions'
)
load_run_parsers(subparsers)
load_db_parsers(subparsers)
load_db_migrate_parsers(subparsers)
load_dbshell_parsers(subparsers)
load_test_parsers(subparsers)
load_shell_parsers(subparsers)
load_settings_parsers(subparsers)
load_extensions_parsers(subparsers)
params, other_params = parser.parse_known_args()
sys.argv.pop(1)
action = getattr(
__main__,
"action_{0}".format(params.action)
)
action(params) if action else parser.print_help()
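# --- Illustrative usage (editor's note; not part of the original file) ---
# These actions are normally driven from a shell; the sub-commands below match
# the parsers defined above, while the settings path is purely hypothetical:
#   python manage.py syncdb
#   python manage.py migrate upgrade head
#   python manage.py run -p 8000 --fake-tasks
#   python manage.py dbshell -c /etc/nailgun/settings.yaml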
| apache-2.0 | 2,955,147,169,767,911,400 | 29.348052 | 80 | 0.653115 | false |
petewarden/tensorflow | tensorflow/python/data/kernel_tests/as_numpy_iterator_test.py | 9 | 3745 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.numpy()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class AsNumpyIteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testBasic(self):
ds = dataset_ops.Dataset.range(3)
self.assertEqual([0, 1, 2], list(ds.as_numpy_iterator()))
@combinations.generate(test_base.eager_only_combinations())
def testImmutable(self):
ds = dataset_ops.Dataset.from_tensors([1, 2, 3])
arr = next(ds.as_numpy_iterator())
with self.assertRaisesRegex(ValueError,
'assignment destination is read-only'):
arr[0] = 0
@combinations.generate(test_base.eager_only_combinations())
def testNestedStructure(self):
point = collections.namedtuple('Point', ['x', 'y'])
ds = dataset_ops.Dataset.from_tensor_slices({
'a': ([1, 2], [3, 4]),
'b': [5, 6],
'c': point([7, 8], [9, 10])
})
self.assertEqual([{
'a': (1, 3),
'b': 5,
'c': point(7, 9)
}, {
'a': (2, 4),
'b': 6,
'c': point(8, 10)
}], list(ds.as_numpy_iterator()))
@combinations.generate(test_base.graph_only_combinations())
def testNonEager(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaises(RuntimeError):
ds.as_numpy_iterator()
def _testInvalidElement(self, element):
ds = dataset_ops.Dataset.from_tensors(element)
with self.assertRaisesRegex(TypeError,
'.*does not support datasets containing.*'):
ds.as_numpy_iterator()
@combinations.generate(test_base.eager_only_combinations())
def testSparseElement(self):
self._testInvalidElement(sparse_tensor.SparseTensorValue([[0]], [0], [1]))
@combinations.generate(test_base.eager_only_combinations())
def testRaggedElement(self):
lst = [[1, 2], [3], [4, 5, 6]]
rt = ragged_factory_ops.constant(lst)
ds = dataset_ops.Dataset.from_tensor_slices(rt)
for actual, expected in zip(ds.as_numpy_iterator(), lst):
self.assertTrue(np.array_equal(actual, expected))
@combinations.generate(test_base.eager_only_combinations())
def testDatasetElement(self):
self._testInvalidElement(dataset_ops.Dataset.range(3))
@combinations.generate(test_base.eager_only_combinations())
def testNestedNonTensorElement(self):
tuple_elem = (constant_op.constant([1, 2, 3]), dataset_ops.Dataset.range(3))
self._testInvalidElement(tuple_elem)
if __name__ == '__main__':
test.main()
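# --- Illustrative sketch (editor's addition; not part of the original test file) ---
# What the API under test looks like in normal use: in eager mode (the TF2
# default), as_numpy_iterator() yields plain numpy values that ordinary Python
# code can consume. The helper below is never called by the tests.
def _as_numpy_iterator_example():
  ds = dataset_ops.Dataset.range(3).map(lambda x: x * 2)
  return [int(v) for v in ds.as_numpy_iterator()]  # [0, 2, 4]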
| apache-2.0 | -3,758,278,383,399,535,600 | 35.715686 | 80 | 0.676903 | false |
gmnamra/python-image-utils | Tools/guimaker.py | 1 | 6773 | """
###############################################################################
An extended Frame that makes window menus and toolbars automatically.
Use GuiMakerFrameMenu for embedded components (makes frame-based menus).
Use GuiMakerWindowMenu for top-level windows (makes Tk8.0 window menus).
See the self-test code (and PyEdit) for an example layout tree format.
###############################################################################
"""
import sys
from Tkinter import * # widget classes
from tkMessageBox import showinfo          # Python 2 name for the messagebox module
class GuiMaker(Frame):
menuBar = [] # class defaults
toolBar = [] # change per instance in subclasses
helpButton = True # set these in start() if need self
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack(expand=YES, fill=BOTH) # make frame stretchable
self.start() # for subclass: set menu/toolBar
self.makeMenuBar() # done here: build menu bar
self.makeToolBar() # done here: build toolbar
self.makeWidgets() # for subclass: add middle part
def makeMenuBar(self):
"""
make menu bar at the top (Tk8.0 menus below)
expand=no, fill=x so same width on resize
"""
menubar = Frame(self, relief=RAISED, bd=2)
menubar.pack(side=TOP, fill=X)
for (name, key, items) in self.menuBar:
mbutton = Menubutton(menubar, text=name, underline=key)
mbutton.pack(side=LEFT)
pulldown = Menu(mbutton)
self.addMenuItems(pulldown, items)
mbutton.config(menu=pulldown)
if self.helpButton:
Button(menubar, text = 'Help',
cursor = 'gumby',
relief = FLAT,
command = self.help).pack(side=RIGHT)
def addMenuItems(self, menu, items):
for item in items: # scan nested items list
if item == 'separator': # string: add separator
menu.add_separator({})
elif type(item) == list: # list: disabled item list
for num in item:
menu.entryconfig(num, state=DISABLED)
elif type(item[2]) != list:
menu.add_command(label = item[0], # command:
underline = item[1], # add command
command = item[2]) # cmd=callable
else:
pullover = Menu(menu)
self.addMenuItems(pullover, item[2]) # sublist:
menu.add_cascade(label = item[0], # make submenu
underline = item[1], # add cascade
menu = pullover)
def makeToolBar(self):
"""
make button bar at bottom, if any
expand=no, fill=x so same width on resize
this could support images too: see Chapter 9,
would need prebuilt gifs or PIL for thumbnails
"""
if self.toolBar:
toolbar = Frame(self, cursor='hand2', relief=SUNKEN, bd=2)
toolbar.pack(side=BOTTOM, fill=X)
for (name, action, where) in self.toolBar:
Button(toolbar, text=name, command=action).pack(where)
def makeWidgets(self):
"""
make 'middle' part last, so menu/toolbar
is always on top/bottom and clipped last;
override this default, pack middle any side;
for grid: grid middle part in a packed frame
"""
name = Label(self,
width=40, height=10,
relief=SUNKEN, bg='white',
text = self.__class__.__name__,
cursor = 'crosshair')
name.pack(expand=YES, fill=BOTH, side=TOP)
def help(self):
"override me in subclass"
showinfo('Help', 'Sorry, no help for ' + self.__class__.__name__)
def start(self):
"override me in subclass: set menu/toolbar with self"
pass
###############################################################################
# Customize for Tk 8.0 main window menu bar, instead of a frame
###############################################################################
GuiMakerFrameMenu = GuiMaker # use this for embedded component menus
class GuiMakerWindowMenu(GuiMaker): # use this for top-level window menus
def makeMenuBar(self):
menubar = Menu(self.master)
self.master.config(menu=menubar)
for (name, key, items) in self.menuBar:
pulldown = Menu(menubar)
self.addMenuItems(pulldown, items)
menubar.add_cascade(label=name, underline=key, menu=pulldown)
if self.helpButton:
if sys.platform[:3] == 'win':
menubar.add_command(label='Help', command=self.help)
else:
pulldown = Menu(menubar) # Linux needs real pull down
pulldown.add_command(label='About', command=self.help)
menubar.add_cascade(label='Help', menu=pulldown)
###############################################################################
# Self-test when file run standalone: 'python guimaker.py'
###############################################################################
if __name__ == '__main__':
from guimixin import GuiMixin # mix in a help method
menuBar = [
('File', 0,
[('Open', 0, lambda:0), # lambda:0 is a no-op
('Quit', 0, sys.exit)]), # use sys, no self here
('Edit', 0,
[('Cut', 0, lambda:0),
('Paste', 0, lambda:0)]) ]
toolBar = [('Quit', sys.exit, {'side': LEFT})]
class TestAppFrameMenu(GuiMixin, GuiMakerFrameMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar
class TestAppWindowMenu(GuiMixin, GuiMakerWindowMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar
class TestAppWindowMenuBasic(GuiMakerWindowMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar # guimaker help, not guimixin
root = Tk()
TestAppFrameMenu(Toplevel())
TestAppWindowMenu(Toplevel())
TestAppWindowMenuBasic(root)
root.mainloop()
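# --- Illustrative sketch (editor's addition; not part of the original module) ---
# addMenuItems also accepts nested sublists for cascading submenus; a layout
# tree using that form could look like this (all callbacks are placeholders):
def _cascade_menu_example():
    return [
        ('File', 0,
            [('Open', 0, lambda: 0),
             ('Export', 1, [('As text', 3, lambda: 0),     # sublist -> cascade
                            ('As image', 3, lambda: 0)]),
             'separator',
             ('Quit', 0, sys.exit)])]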
| mit | -6,838,384,200,539,205,000 | 39.808642 | 80 | 0.490034 | false |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/shell/demos/explorer_browser.py | 10 | 4758 | # A sample of using Vista's IExplorerBrowser interfaces...
# Currently doesn't quite work:
# * CPU sits at 100% while running.
import sys
import pythoncom
from win32com.shell import shell, shellcon
import win32gui, win32con, win32api
from win32com.server.util import wrap, unwrap
# event handler for the browser.
IExplorerBrowserEvents_Methods = """OnNavigationComplete OnNavigationFailed
OnNavigationPending OnViewCreated""".split()
class EventHandler:
_com_interfaces_ = [shell.IID_IExplorerBrowserEvents]
_public_methods_ = IExplorerBrowserEvents_Methods
def OnNavigationComplete(self, pidl):
print("OnNavComplete", pidl)
def OnNavigationFailed(self, pidl):
print("OnNavigationFailed", pidl)
def OnNavigationPending(self, pidl):
print("OnNavigationPending", pidl)
def OnViewCreated(self, view):
print("OnViewCreated", view)
# And if our demo view has been registered, it may well
# be that view!
try:
pyview = unwrap(view)
print("and look - its a Python implemented view!", pyview)
except ValueError:
pass
class MainWindow:
def __init__(self):
message_map = {
win32con.WM_DESTROY: self.OnDestroy,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_SIZE: self.OnSize,
}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = "test_explorer_browser"
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
self.hwnd = win32gui.CreateWindow( classAtom, "Python IExplorerBrowser demo", style, \
0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
0, 0, hinst, None)
eb = pythoncom.CoCreateInstance(shellcon.CLSID_ExplorerBrowser, None, pythoncom.CLSCTX_ALL, shell.IID_IExplorerBrowser)
# as per MSDN docs, hook up events early
self.event_cookie = eb.Advise(wrap(EventHandler()))
eb.SetOptions(shellcon.EBO_SHOWFRAMES)
rect = win32gui.GetClientRect(self.hwnd)
# Set the flags such that the folders autoarrange and non web view is presented
flags = (shellcon.FVM_LIST, shellcon.FWF_AUTOARRANGE | shellcon.FWF_NOWEBVIEW)
eb.Initialize(self.hwnd, rect, (0, shellcon.FVM_DETAILS))
if len(sys.argv)==2:
# If an arg was specified, ask the desktop parse it.
# You can pass anything explorer accepts as its '/e' argument -
# eg, "::{guid}\::{guid}" etc.
# "::{20D04FE0-3AEA-1069-A2D8-08002B30309D}" is "My Computer"
pidl = shell.SHGetDesktopFolder().ParseDisplayName(0, None, sys.argv[1])[1]
else:
# And start browsing at the root of the namespace.
pidl = []
eb.BrowseToIDList(pidl, shellcon.SBSP_ABSOLUTE)
# and for some reason the "Folder" view in the navigator pane doesn't
# magically synchronize itself - so let's do that ourself.
# Get the tree control.
sp = eb.QueryInterface(pythoncom.IID_IServiceProvider)
try:
tree = sp.QueryService(shell.IID_INameSpaceTreeControl,
shell.IID_INameSpaceTreeControl)
except pythoncom.com_error as exc:
# this should really only fail if no "nav" frame exists...
print("Strange - failed to get the tree control even though " \
"we asked for a EBO_SHOWFRAMES")
print(exc)
else:
# get the IShellItem for the selection.
si = shell.SHCreateItemFromIDList(pidl, shell.IID_IShellItem)
# set it to selected.
tree.SetItemState(si, shellcon.NSTCIS_SELECTED, shellcon.NSTCIS_SELECTED)
#eb.FillFromObject(None, shellcon.EBF_NODROPTARGET);
#eb.SetEmptyText("No known folders yet...");
self.eb = eb
def OnCommand(self, hwnd, msg, wparam, lparam):
pass
def OnDestroy(self, hwnd, msg, wparam, lparam):
print("tearing down ExplorerBrowser...")
self.eb.Unadvise(self.event_cookie)
self.eb.Destroy()
self.eb = None
print("shutting down app...")
win32gui.PostQuitMessage(0)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self.eb.SetRect(None, (0, 0, x, y))
def main():
w=MainWindow()
win32gui.PumpMessages()
if __name__=='__main__':
main()
| apache-2.0 | -393,237,709,399,099,460 | 39.666667 | 127 | 0.627785 | false |
synopat/pyload | module/lib/thrift/transport/TTransport.py | 74 | 8384 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from struct import pack,unpack
from thrift.Thrift import TException
class TTransportException(TException):
"""Custom Transport Exception class"""
UNKNOWN = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TTransportBase:
"""Base class for Thrift transport layer."""
def isOpen(self):
pass
def open(self):
pass
def close(self):
pass
def read(self, sz):
pass
def readAll(self, sz):
buff = ''
have = 0
while (have < sz):
chunk = self.read(sz-have)
have += len(chunk)
buff += chunk
if len(chunk) == 0:
raise EOFError()
return buff
def write(self, buf):
pass
def flush(self):
pass
# This class should be thought of as an interface.
class CReadableTransport:
"""base class for transports that are readable from C"""
# TODO(dreiss): Think about changing this interface to allow us to use
# a (Python, not c) StringIO instead, because it allows
# you to write after reading.
# NOTE: This is a classic class, so properties will NOT work
# correctly for setting.
@property
def cstringio_buf(self):
"""A cStringIO buffer that contains the current chunk we are reading."""
pass
def cstringio_refill(self, partialread, reqlen):
"""Refills cstringio_buf.
Returns the currently used buffer (which can but need not be the same as
the old cstringio_buf). partialread is what the C code has read from the
buffer, and should be inserted into the buffer before any more reads. The
return value must be a new, not borrowed reference. Something along the
lines of self._buf should be fine.
If reqlen bytes can't be read, throw EOFError.
"""
pass
class TServerTransportBase:
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
pass
def close(self):
pass
class TTransportFactoryBase:
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
class TBufferedTransportFactory:
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
class TBufferedTransport(TTransportBase,CReadableTransport):
"""Class that wraps another transport and buffers its I/O.
The implementation uses a (configurable) fixed-size read buffer
but buffers all writes until a flush is performed.
"""
DEFAULT_BUFFER = 4096
def __init__(self, trans, rbuf_size = DEFAULT_BUFFER):
self.__trans = trans
self.__wbuf = StringIO()
self.__rbuf = StringIO("")
self.__rbuf_size = rbuf_size
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
return self.__rbuf.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
out = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
self.__trans.write(out)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
retstring = partialread
if reqlen < self.__rbuf_size:
# try to make a read of as much as we can.
retstring += self.__trans.read(self.__rbuf_size)
# but make sure we do read reqlen bytes.
if len(retstring) < reqlen:
retstring += self.__trans.readAll(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
"""Wraps a cStringIO object as a TTransport.
NOTE: Unlike the C++ version of this class, you cannot write to it
then immediately read from it. If you want to read from a
  TMemoryBuffer, you must pass a string to the constructor.
TODO(dreiss): Make this work like the C++ version.
"""
def __init__(self, value=None):
"""value -- a value to read from for stringio
If value is set, this will be a transport for reading,
otherwise, it is for writing"""
if value is not None:
self._buffer = StringIO(value)
else:
self._buffer = StringIO()
def isOpen(self):
return not self._buffer.closed
def open(self):
pass
def close(self):
self._buffer.close()
def read(self, sz):
return self._buffer.read(sz)
def write(self, buf):
self._buffer.write(buf)
def flush(self):
pass
def getvalue(self):
return self._buffer.getvalue()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._buffer
def cstringio_refill(self, partialread, reqlen):
# only one shot at reading...
raise EOFError()
class TFramedTransportFactory:
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
class TFramedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans,):
self.__trans = trans
self.__rbuf = StringIO()
self.__wbuf = StringIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = StringIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = StringIO(prefix)
return self.__rbuf
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return True
def close(self):
self.fileobj.close()
def read(self, sz):
return self.fileobj.read(sz)
def write(self, buf):
self.fileobj.write(buf)
def flush(self):
self.fileobj.flush()
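# --- Illustrative sketch (editor's addition; not part of the original module) ---
# How these transports compose: a framed transport prepends a 4-byte length on
# every flush, which can be shown entirely in memory, no socket required.
def _framed_roundtrip_example():
  out_buf = TMemoryBuffer()
  framed_out = TFramedTransport(out_buf)
  framed_out.write('hello')
  framed_out.flush()                     # writes '\x00\x00\x00\x05' + 'hello'
  framed_in = TFramedTransport(TMemoryBuffer(out_buf.getvalue()))
  return framed_in.read(5)               # -> 'hello'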
| gpl-3.0 | 2,303,665,774,153,999,600 | 24.329305 | 78 | 0.674499 | false |
wwj718/edx-platform | common/djangoapps/track/views/tests/test_views.py | 81 | 10225 | # pylint: disable=missing-docstring,maybe-no-member
from mock import patch, sentinel
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from track import views
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase, FROZEN_TIME
from openedx.core.lib.tests.assertions.events import assert_event_matches
class TestTrackViews(EventTrackingTestCase):
def setUp(self):
super(TestTrackViews, self).setUp()
self.request_factory = RequestFactory()
patcher = patch('track.views.tracker', autospec=True)
self.mock_tracker = patcher.start()
self.addCleanup(patcher.stop)
self.path_with_course = '/courses/foo/bar/baz/xmod/'
self.url_with_course = 'http://www.edx.org' + self.path_with_course
self.event = {
sentinel.key: sentinel.value
}
def test_user_track(self):
request = self.request_factory.get('/event', {
'page': self.url_with_course,
'event_type': sentinel.event_type,
'event': '{}'
})
views.user_track(request)
actual_event = self.get_event()
expected_event = {
'context': {
'course_id': 'foo/bar/baz',
'org_id': 'foo',
'event_source': 'browser',
'page': self.url_with_course,
'username': 'anonymous'
},
'data': {},
'timestamp': FROZEN_TIME,
'name': str(sentinel.event_type)
}
assert_event_matches(expected_event, actual_event)
def test_user_track_with_missing_values(self):
request = self.request_factory.get('/event')
views.user_track(request)
actual_event = self.get_event()
expected_event = {
'context': {
'course_id': '',
'org_id': '',
'event_source': 'browser',
'page': '',
'username': 'anonymous'
},
'data': {},
'timestamp': FROZEN_TIME,
'name': 'unknown'
}
assert_event_matches(expected_event, actual_event)
views.user_track(request)
def test_user_track_with_empty_event(self):
request = self.request_factory.get('/event', {
'page': self.url_with_course,
'event_type': sentinel.event_type,
'event': ''
})
views.user_track(request)
actual_event = self.get_event()
expected_event = {
'context': {
'course_id': 'foo/bar/baz',
'org_id': 'foo',
'event_source': 'browser',
'page': self.url_with_course,
'username': 'anonymous'
},
'data': {},
'timestamp': FROZEN_TIME,
'name': str(sentinel.event_type)
}
assert_event_matches(expected_event, actual_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=[{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'}],
)
def test_user_track_with_middleware_and_processors(self):
self.recreate_tracker()
middleware = TrackMiddleware()
payload = '{"foo": "bar"}'
user_id = 1
request = self.request_factory.get('/event', {
'page': self.url_with_course,
'event_type': sentinel.event_type,
'event': payload
})
request.user = User.objects.create(pk=user_id, username=str(sentinel.username))
request.META['REMOTE_ADDR'] = '10.0.0.1'
request.META['HTTP_REFERER'] = str(sentinel.referer)
request.META['HTTP_ACCEPT_LANGUAGE'] = str(sentinel.accept_language)
request.META['HTTP_USER_AGENT'] = str(sentinel.user_agent)
request.META['SERVER_NAME'] = 'testserver2'
middleware.process_request(request)
try:
views.user_track(request)
expected_event = {
'accept_language': str(sentinel.accept_language),
'referer': str(sentinel.referer),
'username': str(sentinel.username),
'session': '',
'ip': '10.0.0.1',
'event_source': 'browser',
'event_type': str(sentinel.event_type),
'name': str(sentinel.event_type),
'event': payload,
'agent': str(sentinel.user_agent),
'page': self.url_with_course,
'time': FROZEN_TIME,
'host': 'testserver2',
'context': {
'course_id': 'foo/bar/baz',
'org_id': 'foo',
'user_id': user_id,
'path': u'/event'
},
}
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
def test_server_track(self):
request = self.request_factory.get(self.path_with_course)
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {},
}
self.assert_mock_tracker_call_matches(expected_event)
def assert_mock_tracker_call_matches(self, expected_event):
self.assertEqual(len(self.mock_tracker.send.mock_calls), 1)
actual_event = self.mock_tracker.send.mock_calls[0][1][0]
assert_event_matches(expected_event, actual_event)
def test_server_track_with_middleware(self):
middleware = TrackMiddleware()
request = self.request_factory.get(self.path_with_course)
middleware.process_request(request)
# The middleware emits an event, reset the mock to ignore it since we aren't testing that feature.
self.mock_tracker.reset_mock()
try:
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {
'user_id': '',
'course_id': u'foo/bar/baz',
'org_id': 'foo',
'path': u'/courses/foo/bar/baz/xmod/'
},
}
finally:
middleware.process_response(request, None)
self.assert_mock_tracker_call_matches(expected_event)
def test_server_track_with_middleware_and_google_analytics_cookie(self):
middleware = TrackMiddleware()
request = self.request_factory.get(self.path_with_course)
request.COOKIES['_ga'] = 'GA1.2.1033501218.1368477899'
middleware.process_request(request)
# The middleware emits an event, reset the mock to ignore it since we aren't testing that feature.
self.mock_tracker.reset_mock()
try:
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {
'user_id': '',
'course_id': u'foo/bar/baz',
'org_id': 'foo',
'path': u'/courses/foo/bar/baz/xmod/'
},
}
finally:
middleware.process_response(request, None)
self.assert_mock_tracker_call_matches(expected_event)
def test_server_track_with_no_request(self):
request = None
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': '',
'context': {},
}
self.assert_mock_tracker_call_matches(expected_event)
def test_task_track(self):
request_info = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'agent': 'agent',
'host': 'testserver',
}
task_info = {
sentinel.task_key: sentinel.task_value
}
expected_event_data = dict(task_info)
expected_event_data.update(self.event)
views.task_track(request_info, task_info, str(sentinel.event_type), self.event)
expected_event = {
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'task',
'event_type': str(sentinel.event_type),
'event': expected_event_data,
'agent': 'agent',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {
'course_id': '',
'org_id': ''
},
}
self.assert_mock_tracker_call_matches(expected_event)
| agpl-3.0 | 2,877,390,146,717,725,000 | 33.197324 | 106 | 0.506993 | false |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_4/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0  # assign (the original '==' was a no-op comparison)
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
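# --- Illustrative sketch (editor's addition; not part of the original script) ---
# The contour panels above boil down to scattered-data interpolation with Rbf;
# the numbers below are toy values, not atlas output.
def _rbf_demo():
	xs, ys, zs = [0., 1., 2.], [0., 1., 0.], [1., 2., 3.]
	rbf = scipy.interpolate.Rbf(xs, ys, zs, function='linear')
	return rbf(0.5, 0.5)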
| gpl-2.0 | 6,015,122,248,314,438,000 | 29.188525 | 203 | 0.594217 | false |
matthiask/django-chet | chet/views.py | 1 | 1373 | from django.shortcuts import get_object_or_404, render
from django.views import generic
from chet.models import Album, Photo
def visible_albums(user):
if user.is_staff:
return Album.objects.active()
else:
return Album.objects.public()
def visible_photos(user):
if user.is_staff:
return Photo.objects.active()
else:
return Photo.objects.public()
class AlbumMixin(object):
allow_empty = True
date_field = 'date'
make_object_list = True
month_format = '%m'
paginate_by = 10
paginate_orphans = 3
def get_queryset(self):
return visible_albums(self.request.user)
class AlbumArchiveView(AlbumMixin, generic.ArchiveIndexView):
pass
def album_detail(request, year, slug):
album = get_object_or_404(
visible_albums(request.user),
date__year=year,
slug=slug,
)
return render(request, 'chet/album_detail.html', {
'album': album,
'object': album,
'photos': visible_photos(request.user).filter(album=album),
})
def photo_detail(request, year, slug, photo):
photo = get_object_or_404(
visible_photos(request.user),
album__date__year=year,
album__slug=slug,
pk=photo,
)
return render(request, 'chet/photo_detail.html', {
'photo': photo,
'object': photo,
})
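# --- Illustrative sketch (editor's addition; not part of the original module) ---
# One plausible URLconf wiring for these views; the patterns and names below
# are assumptions for illustration, not this app's shipped urls module.
#
# from django.conf.urls import url
# from chet import views
#
# urlpatterns = [
#     url(r'^$', views.AlbumArchiveView.as_view(), name='album_archive'),
#     url(r'^(?P<year>\d{4})/(?P<slug>[\w-]+)/$',
#         views.album_detail, name='album_detail'),
#     url(r'^(?P<year>\d{4})/(?P<slug>[\w-]+)/(?P<photo>\d+)/$',
#         views.photo_detail, name='photo_detail'),
# ]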
| bsd-3-clause | -7,310,885,411,515,405,000 | 21.145161 | 67 | 0.625637 | false |
nerzhul/ansible | lib/ansible/modules/files/acl.py | 25 | 11472 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
name:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['path']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
recursive:
version_added: "2.0"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
- "Brian Coca (@bcoca)"
- "Jérémie Astori (@astorije)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
- As of Ansible 2.0, this module only supports Linux distributions.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl:
name: /etc/foo.conf
entity: joe
etype: user
permissions: r
state: present
# Removes the acl for Joe on a specific file
- acl:
name: /etc/foo.conf
entity: joe
etype: user
state: absent
# Sets default acl for joe on foo.d
- acl:
name: /etc/foo.d
entity: joe
etype: user
permissions: rw
default: yes
state: present
# Same as previous but using entry shorthand
- acl:
name: /etc/foo.d
entry: "default:user:joe:rw-"
state: present
# Obtain the acl for a specific file
- acl:
name: /etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
d = None
if entry.lower().startswith("d"):
d = True
a.pop(0)
if len(a) == 2:
a.append(None)
t, e, p = a
t = t.lower()
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
return [d, t, e, p]
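# Editor's note, for illustration: split_entry() normalizes shorthand entries, e.g.
#   split_entry('d:u:joe:rw-')  ->  [True, 'user', 'joe', 'rw-']   (default acl)
#   split_entry('g:admins')     ->  [None, 'group', 'admins', None]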
def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False):
'''Builds and returns an entry string. Does not include the permissions bit if they are not provided.'''
if use_nfsv4_acls:
return ':'.join([etype, entity, permissions, 'allow'])
if permissions:
return etype + ':' + entity + ':' + permissions
else:
return etype + ':' + entity
def build_command(module, mode, path, follow, default, recursive, entry=''):
'''Builds and returns a getfacl/setfacl command.'''
if mode == 'set':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-m "%s"' % entry)
elif mode == 'rm':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-x "%s"' % entry)
else: # mode == 'get'
cmd = [module.get_bin_path('getfacl', True)]
# prevents absolute path warnings and removes headers
if get_platform().lower() == 'linux':
cmd.append('--omit-header')
cmd.append('--absolute-names')
if recursive:
cmd.append('--recursive')
if not follow:
if get_platform().lower() == 'linux':
cmd.append('--physical')
elif get_platform().lower() == 'freebsd':
cmd.append('-h')
if default:
if(mode == 'rm'):
cmd.insert(1, '-k')
else: # mode == 'set' or mode == 'get'
cmd.insert(1, '-d')
cmd.append(path)
return cmd
def acl_changed(module, cmd):
'''Returns true if the provided command affects the existing ACLs, false otherwise.'''
# FreeBSD do not have a --test flag, so by default, it is safer to always say "true"
if get_platform().lower() == 'freebsd':
return True
cmd = cmd[:] # lists are mutables so cmd would be overwritten without this
cmd.insert(1, '--test')
lines = run_acl(module, cmd)
for line in lines:
if not line.endswith('*,*'):
return True
return False
def run_acl(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception:
e = get_exception()
module.fail_json(msg=e.strerror)
lines = []
for l in out.splitlines():
if not l.startswith('#'):
lines.append(l.strip())
if lines and not lines[-1].split():
# trim last line only when it is empty
return lines[:-1]
else:
return lines
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['path'], type='path'),
entry=dict(required=False, type='str'),
entity=dict(required=False, type='str', default=''),
etype=dict(
required=False,
choices=['other', 'user', 'group', 'mask'],
type='str'
),
permissions=dict(required=False, type='str'),
state=dict(
required=False,
default='query',
choices=['query', 'present', 'absent'],
type='str'
),
follow=dict(required=False, type='bool', default=True),
default=dict(required=False, type='bool', default=False),
recursive=dict(required=False, type='bool', default=False),
use_nfsv4_acls=dict(required=False, type='bool', default=False)
),
supports_check_mode=True,
)
if get_platform().lower() not in ['linux', 'freebsd']:
module.fail_json(msg="The acl module is not available on this system.")
path = module.params.get('name')
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
recursive = module.params.get('recursive')
use_nfsv4_acls = module.params.get('use_nfsv4_acls')
if not os.path.exists(path):
module.fail_json(msg="Path not found or not accessible.")
if state == 'query' and recursive:
module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")
if not entry:
if state == 'absent' and permissions:
module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")
if state == 'absent' and not entity:
module.fail_json(msg="'entity' MUST be set when 'state=absent'.")
if state in ['present', 'absent'] and not etype:
module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")
if state == 'present' and not entry.count(":") in [2, 3]:
module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")
if state == 'absent' and not entry.count(":") in [1, 2]:
module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")
if state == 'query':
module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")
default_flag, etype, entity, permissions = split_entry(entry)
        if default_flag is not None:
default = default_flag
if get_platform().lower() == 'freebsd':
if recursive:
module.fail_json(msg="recursive is not supported on that platform.")
changed = False
msg = ""
if state == 'present':
entry = build_entry(etype, entity, permissions, use_nfsv4_acls)
command = build_command(
module, 'set', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command)
msg = "%s is present" % entry
elif state == 'absent':
        entry = build_entry(etype, entity, use_nfsv4_acls=use_nfsv4_acls)  # keyword arg; a positional bool would land in 'permissions'
command = build_command(
module, 'rm', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command, False)
msg = "%s is absent" % entry
elif state == 'query':
msg = "current acl"
acl = run_acl(
module,
build_command(module, 'get', path, follow, default, recursive)
)
module.exit_json(changed=changed, msg=msg, acl=acl)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 3,480,645,721,317,277,700 | 29.505319 | 341 | 0.603749 | false |
ybellavance/python-for-android | python3-alpha/extra_modules/atom/mock_service.py | 48 | 10344 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MockService provides CRUD ops. for mocking calls to AtomPub services.
MockService: Exposes the publicly used methods of AtomService to provide
a mock interface which can be used in unit tests.
"""
import atom.service
import pickle
__author__ = 'api.jscudder (Jeffrey Scudder)'
# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects.
recordings = []
# If set, the mock service HttpRequest are actually made through this object.
real_request_handler = None
def ConcealValueWithSha(source):
  # hashlib replaces the 'sha' module, which no longer exists on Python 3
  import hashlib
  data = source[:-5]
  if not isinstance(data, bytes):
    data = data.encode('utf-8')
  return hashlib.sha1(data).hexdigest()
def DumpRecordings(conceal_func=ConcealValueWithSha):
if conceal_func:
for recording_pair in recordings:
recording_pair[0].ConcealSecrets(conceal_func)
return pickle.dumps(recordings)
def LoadRecordings(recordings_file_or_string):
if isinstance(recordings_file_or_string, str):
atom.mock_service.recordings = pickle.loads(recordings_file_or_string)
elif hasattr(recordings_file_or_string, 'read'):
atom.mock_service.recordings = pickle.loads(
recordings_file_or_string.read())
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Simulates an HTTP call to the server, makes an actual HTTP request if
real_request_handler is set.
This function operates in two different modes depending on if
real_request_handler is set or not. If real_request_handler is not set,
HttpRequest will look in this module's recordings list to find a response
which matches the parameters in the function call. If real_request_handler
is set, this function will call real_request_handler.HttpRequest, add the
response to the recordings list, and respond with the actual response.
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string.
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
full_uri = atom.service.BuildUri(uri, url_params, escape_params)
(server, port, ssl, uri) = atom.service.ProcessUrl(service, uri)
current_request = MockRequest(operation, full_uri, host=server, ssl=ssl,
data=data, extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, content_type=content_type)
# If the request handler is set, we should actually make the request using
# the request handler and record the response to replay later.
if real_request_handler:
response = real_request_handler.HttpRequest(service, operation, data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, content_type=content_type)
# TODO: need to copy the HTTP headers from the real response into the
# recorded_response.
recorded_response = MockHttpResponse(body=response.read(),
status=response.status, reason=response.reason)
# Insert a tuple which maps the request to the response object returned
# when making an HTTP call using the real_request_handler.
recordings.append((current_request, recorded_response))
return recorded_response
else:
# Look through available recordings to see if one matches the current
# request.
for request_response_pair in recordings:
if request_response_pair[0].IsMatch(current_request):
return request_response_pair[1]
return None
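# Record/replay sketch for the two modes described in the docstring above; the
# AtomService client and the variable names are assumptions for illustration only:
#
#   import atom.service, atom.mock_service
#   atom.mock_service.real_request_handler = atom.service.AtomService()   # record mode
#   # ... exercise the client so HttpRequest() appends to recordings ...
#   saved = atom.mock_service.DumpRecordings()                            # pickled recordings
#
#   atom.mock_service.real_request_handler = None                         # replay mode
#   atom.mock_service.LoadRecordings(saved)                               # responses now come
#                                                                         # from the recordings list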
class MockRequest(object):
"""Represents a request made to an AtomPub server.
These objects are used to determine if a client request matches a recorded
HTTP request to determine what the mock server's response will be.
"""
def __init__(self, operation, uri, host=None, ssl=False, port=None,
data=None, extra_headers=None, url_params=None, escape_params=True,
content_type='application/atom+xml'):
"""Constructor for a MockRequest
Args:
operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the
HTTP operation requested on the resource.
uri: str The URL describing the resource to be modified or feed to be
retrieved. This should include the protocol (http/https) and the host
(aka domain). For example, these are some valid full_uris:
'http://example.com', 'https://www.google.com/accounts/ClientLogin'
host: str (optional) The server name which will be placed at the
beginning of the URL if the uri parameter does not begin with 'http'.
Examples include 'example.com', 'www.google.com', 'www.blogger.com'.
ssl: boolean (optional) If true, the request URL will begin with https
instead of http.
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string. (optional)
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, the constructor
will read the entire file into memory. If the data is a list of
parts to be sent, each part will be evaluated and stored.
extra_headers: dict (optional) HTTP headers included in the request.
url_params: dict (optional) Key value pairs which should be added to
the URL as URL parameters in the request. For example uri='/',
url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'.
escape_params: boolean (optional) Perform URL escaping on the keys and
values specified in url_params. Defaults to True.
content_type: str (optional) Provides the MIME type of the data being
sent.
"""
self.operation = operation
self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl)
self.data = data
self.extra_headers = extra_headers
self.url_params = url_params or {}
self.escape_params = escape_params
self.content_type = content_type
def ConcealSecrets(self, conceal_func):
"""Conceal secret data in this request."""
if 'Authorization' in self.extra_headers:
self.extra_headers['Authorization'] = conceal_func(
self.extra_headers['Authorization'])
def IsMatch(self, other_request):
"""Check to see if the other_request is equivalent to this request.
Used to determine if a recording matches an incoming request so that a
recorded response should be sent to the client.
The matching is not exact, only the operation and URL are examined
currently.
Args:
other_request: MockRequest The request which we want to check this
(self) MockRequest against to see if they are equivalent.
"""
# More accurate matching logic will likely be required.
return (self.operation == other_request.operation and self.uri ==
other_request.uri)
def _ConstructFullUrlBase(uri, host=None, ssl=False):
"""Puts URL components into the form http(s)://full.host.strinf/uri/path
Used to construct a roughly canonical URL so that URLs which begin with
'http://example.com/' can be compared to a uri of '/' when the host is
set to 'example.com'
If the uri contains 'http://host' already, the host and ssl parameters
are ignored.
Args:
uri: str The path component of the URL, examples include '/'
host: str (optional) The host name which should prepend the URL. Example:
'example.com'
ssl: boolean (optional) If true, the returned URL will begin with https
instead of http.
Returns:
String which has the form http(s)://example.com/uri/string/contents
"""
if uri.startswith('http'):
return uri
if ssl:
return 'https://%s%s' % (host, uri)
else:
return 'http://%s%s' % (host, uri)
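# For example, given the branches above:
#   _ConstructFullUrlBase('/feeds', host='example.com')           -> 'http://example.com/feeds'
#   _ConstructFullUrlBase('/feeds', host='example.com', ssl=True) -> 'https://example.com/feeds'
#   _ConstructFullUrlBase('http://example.com/feeds')             -> 'http://example.com/feeds'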
class MockHttpResponse(object):
"""Returned from MockService crud methods as the server's response."""
def __init__(self, body=None, status=None, reason=None, headers=None):
"""Construct a mock HTTPResponse and set members.
Args:
body: str (optional) The HTTP body of the server's response.
status: int (optional)
reason: str (optional)
headers: dict (optional)
"""
self.body = body
self.status = status
self.reason = reason
self.headers = headers or {}
def read(self):
return self.body
def getheader(self, header_name):
return self.headers[header_name]
| apache-2.0 | -1,612,177,898,058,422,500 | 41.567901 | 89 | 0.701663 | false |
niteoweb/libcloud | libcloud/container/drivers/dummy.py | 24 | 1589 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.base import ContainerDriver
class DummyContainerDriver(ContainerDriver):
"""
Dummy Container driver.
>>> from libcloud.container.drivers.dummy import DummyContainerDriver
>>> driver = DummyContainerDriver('key', 'secret')
>>> driver.name
'Dummy Container Provider'
"""
name = 'Dummy Container Provider'
website = 'http://example.com'
supports_clusters = False
def __init__(self, api_key, api_secret):
"""
:param api_key: API key or username to used (required)
:type api_key: ``str``
:param api_secret: Secret password to be used (required)
:type api_secret: ``str``
:rtype: ``None``
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 | 9,029,930,833,809,508,000 | 33.543478 | 74 | 0.687854 | false |
tedelhourani/ansible | lib/ansible/modules/cloud/amazon/ec2_vol.py | 10 | 20561 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance.
If both an instance ID and a device name are given and the instance has a device at the device name, then no volume is created and no attachment is made.
This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
"Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
kms_key_id:
description:
- Specify the id of the KMS key to use.
default: null
version_added: "2.3"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
tags:
description:
- tag:value pairs to add to the volume after creation
required: false
default: {}
version_added: "2.3"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
volume_size: 5
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
sample: "/def/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto
import boto.ec2
import boto.exception
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
"""
Check if Boto library supports kms_key_ids (added in 2.39.0)
Returns:
True if version is equal to or higher then the version needed, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
tags = module.params.get('tags')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
if kms_key_id is not None:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
tags["Name"] = name
if tags:
ec2.create_tags([volume.id], tags)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume, changed
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
volume_info = {}
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
iops = dict(),
encrypted = dict(type='bool', default=False),
kms_key_id = dict(),
device_name = dict(),
delete_on_termination = dict(type='bool', default=False),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present'),
tags = dict(type='dict', default={})
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
tags = module.params.get('tags')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
if kms_key_id is not None and not boto_supports_kms_key_id():
module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and id:
module.fail_json(msg="Cannot specify volume_size together with id")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
# Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
# deleteOnTermination is not correctly reflected on attachment
if module.params.get('delete_on_termination'):
for attempt in range(0, 8):
if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
break
time.sleep(5)
volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,842,771,345,494,299,000 | 31.277865 | 158 | 0.621322 | false |
HaydenFaulkner/phd | keras_code/cnns/model_defs/faulkner/c3d.py | 1 | 3008 | '''
C3D CNN architecture
Webpage for original:
http://vlg.cs.dartmouth.edu/c3d/
Paper for original:
D. Tran, L. Bourdev, R. Fergus, L. Torresani, and M. Paluri
Learning Spatiotemporal Features with 3D Convolutional Networks
ICCV 2015
http://vlg.cs.dartmouth.edu/c3d/c3d_video.pdf
Designed for Sports-1M Dataset
'''
from keras.layers import Flatten, Dense, Dropout, Input, Conv3D, MaxPool3D, ZeroPadding3D
from keras.models import Model
def c3d(t=16, input_tensor=None, fc_size=4096, image_dims=112, image_channels=3, output_channels=487, dropout=None, layers=[1,2,3,4,5,6,7]):
assert(input_tensor is not None)
x = input_tensor
if 1 in layers:
x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same', activation='relu', name='conv1')(x)
if 5 not in layers and 4 not in layers and 3 not in layers and 2 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((1, 2, 2), strides=(1, 2, 2), padding='valid', name="pool1")(x)
if 2 in layers:
x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv2')(x)
if 5 not in layers and 4 not in layers and 3 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name="pool2")(x)
if 3 in layers:
x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv3a')(x)
x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv3b')(x)
if 5 not in layers and 4 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name="pool3")(x)
if 4 in layers:
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv4a')(x)
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv4b')(x)
if 5 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name="pool4")(x)
if 5 in layers:
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv5a')(x)
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv5b')(x)
x = ZeroPadding3D((0, 1, 1), name='zeropad5')(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')(x)
x = Flatten(name='flatten_1')(x)
if 6 in layers:
x = Dense(fc_size, activation='relu', name='fc6')(x)
if dropout is not None:
x = Dropout(dropout, name='dropout_1')(x)
if 7 in layers:
x = Dense(fc_size, activation='relu', name='fc7')(x)
if dropout is not None:
x = Dropout(dropout, name='dropout_2')(x)
sm = Dense(output_channels, activation='softmax', name='pred')(x)
model = Model(inputs=input_tensor, outputs=sm)
return model
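# Minimal usage sketch; the channels-last clip shape and the dropout value are
# assumptions based on the defaults above, not on a specific training setup.
if __name__ == '__main__':
    from keras.layers import Input
    clip = Input(shape=(16, 112, 112, 3))  # (frames, height, width, channels)
    model = c3d(t=16, input_tensor=clip, dropout=0.5)
    model.summary()  # prints the conv/pool/fc stack defined in c3d()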
| mit | -3,190,853,827,328,618,000 | 41.366197 | 140 | 0.589761 | false |
mbernasocchi/QGIS | tests/src/python/test_qgsziputils.py | 45 | 3763 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for zip functions.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Paul Blottiere'
__date__ = '06/7/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import os
from qgis.core import QgsZipUtils
from qgis.testing import unittest
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QTemporaryFile, QTemporaryDir
def tmpPath():
f = QTemporaryFile()
f.open()
f.close()
os.remove(f.fileName())
return f.fileName()
class TestQgsZip(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.zipDir = os.path.join(unitTestDataPath(), "zip")
def test_zip_ok(self):
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(tmpPath(), [f0, f1, f2])
self.assertTrue(rc)
def test_zip_file_yet_exist(self):
zip = QTemporaryFile()
zip.open()
zip.close()
os.remove(zip.fileName())
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(zip.fileName(), [f0, f1, f2])
self.assertTrue(rc)
rc = QgsZipUtils.zip(zip.fileName(), [f0, f1, f2])
self.assertFalse(rc)
def test_zip_file_empty(self):
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip("", [f0, f1, f2])
self.assertFalse(rc)
def test_zip_input_file_not_exist(self):
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'fake.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(tmpPath(), [f0, f1, f2])
self.assertFalse(rc)
def test_unzip_ok(self):
outDir = QTemporaryDir()
zip = os.path.join(self.zipDir, 'testzip.zip')
rc, files = QgsZipUtils.unzip(zip, outDir.path())
self.assertTrue(rc)
self.assertEqual(len(files), 11)
def test_unzip_file_not_exist(self):
outDir = QTemporaryDir()
zip = os.path.join(self.zipDir, 'fake.zip')
rc, files = QgsZipUtils.unzip(zip, outDir.path())
self.assertFalse(rc)
def test_unzip_file_empty(self):
outDir = QTemporaryDir()
rc, files = QgsZipUtils.unzip("", outDir.path())
self.assertFalse(rc)
def test_unzip_dir_not_exist(self):
zip = os.path.join(self.zipDir, 'testzip.zip')
rc, files = QgsZipUtils.unzip(zip, '/tmp/fake')
self.assertFalse(rc)
def test_unzip_dir_empty(self):
zip = os.path.join(self.zipDir, 'testzip.zip')
rc, files = QgsZipUtils.unzip(zip, '')
self.assertFalse(rc)
def test_zip_unzip_ok(self):
zip = tmpPath()
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(zip, [f0, f1, f2])
self.assertTrue(rc)
outDir = QTemporaryDir()
rc, files = QgsZipUtils.unzip(zip, outDir.path())
self.assertTrue(rc)
self.assertEqual(len(files), 3)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,736,205,845,756,684,300 | 29.104 | 78 | 0.618124 | false |
krez13/scikit-learn | sklearn/externals/joblib/_memory_helpers.py | 303 | 3605 | try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
argument, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text | bsd-3-clause | 3,441,545,788,849,177,000 | 33.342857 | 79 | 0.560333 | false |
prarthitm/edxplatform | openedx/core/lib/block_structure/factory.py | 7 | 3851 | """
Module for factory class for BlockStructure objects.
"""
from .block_structure import BlockStructureModulestoreData, BlockStructureBlockData
class BlockStructureFactory(object):
"""
Factory class for BlockStructure objects.
"""
@classmethod
def create_from_modulestore(cls, root_block_usage_key, modulestore):
"""
Creates and returns a block structure from the modulestore
starting at the given root_block_usage_key.
Arguments:
root_block_usage_key (UsageKey) - The usage_key for the root
of the block structure that is to be created.
modulestore (ModuleStoreRead) - The modulestore that
contains the data for the xBlocks within the block
structure starting at root_block_usage_key.
Returns:
BlockStructureModulestoreData - The created block structure
with instantiated xBlocks from the given modulestore
starting at root_block_usage_key.
Raises:
xmodule.modulestore.exceptions.ItemNotFoundError if a block for
root_block_usage_key is not found in the modulestore.
"""
block_structure = BlockStructureModulestoreData(root_block_usage_key)
blocks_visited = set()
def build_block_structure(xblock):
"""
Recursively update the block structure with the given xBlock
and its descendants.
"""
# Check if the xblock was already visited (can happen in
# DAGs).
if xblock.location in blocks_visited:
return
# Add the xBlock.
blocks_visited.add(xblock.location)
block_structure._add_xblock(xblock.location, xblock) # pylint: disable=protected-access
# Add relations with its children and recurse.
for child in xblock.get_children():
block_structure._add_relation(xblock.location, child.location) # pylint: disable=protected-access
build_block_structure(child)
root_xblock = modulestore.get_item(root_block_usage_key, depth=None, lazy=False)
build_block_structure(root_xblock)
return block_structure
@classmethod
def create_from_cache(cls, root_block_usage_key, block_structure_cache):
"""
Deserializes and returns the block structure starting at
root_block_usage_key from the given cache, if it's found in the cache.
The given root_block_usage_key must equate the root_block_usage_key
previously passed to serialize_to_cache.
Arguments:
root_block_usage_key (UsageKey) - The usage_key for the root
of the block structure that is to be deserialized from
the given cache.
block_structure_cache (BlockStructureCache) - The
cache from which the block structure is to be
deserialized.
Returns:
BlockStructure - The deserialized block structure starting
at root_block_usage_key, if found in the cache.
NoneType - If the root_block_usage_key is not found in the cache.
"""
return block_structure_cache.get(root_block_usage_key)
@classmethod
def create_new(cls, root_block_usage_key, block_relations, transformer_data, block_data_map):
"""
Returns a new block structure for given the arguments.
"""
block_structure = BlockStructureBlockData(root_block_usage_key)
block_structure._block_relations = block_relations # pylint: disable=protected-access
block_structure.transformer_data = transformer_data
block_structure._block_data_map = block_data_map # pylint: disable=protected-access
return block_structure
| agpl-3.0 | 1,256,899,184,124,275,700 | 39.536842 | 114 | 0.64321 | false |
tal-nino/ansible | lib/ansible/plugins/lookup/ini.py | 82 | 3304 | # (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import StringIO
import os
import codecs
import ConfigParser
import re
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def read_properties(self, filename, key, dflt, is_regexp):
config = StringIO.StringIO()
config.write('[java_properties]\n' + open(filename).read())
config.seek(0, os.SEEK_SET)
self.cp.readfp(config)
return self.get_value(key, 'java_properties', dflt, is_regexp)
def read_ini(self, filename, key, section, dflt, is_regexp):
self.cp.readfp(open(filename))
return self.get_value(key, section, dflt, is_regexp)
def get_value(self, key, section, dflt, is_regexp):
# Retrieve all values from a section using a regexp
if is_regexp:
return [v for k, v in self.cp.items(section) if re.match(key, k)]
value = None
# Retrieve a single value
try:
value = self.cp.get(section, key)
except ConfigParser.NoOptionError as e:
return dflt
return value
def run(self, terms, variables=None, **kwargs):
basedir = self.get_basedir(variables)
self.basedir = basedir
self.cp = ConfigParser.ConfigParser()
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'file' : 'ansible.ini',
're' : False,
'default' : None,
'section' : "global",
'type' : "ini",
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
path = self._loader.path_dwim_relative(basedir, 'files', paramvals['file'])
if paramvals['type'] == "properties":
var = self.read_properties(path, key, paramvals['default'], paramvals['re'])
else:
var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])
if var is not None:
if type(var) is list:
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
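# Illustrative playbook usage of this lookup (file, section and key names are
# assumptions for the example only):
#   - debug: msg="{{ lookup('ini', 'user section=auth file=users.ini') }}"
#   - debug: msg="{{ lookup('ini', 'user.* file=users.properties type=properties re=true') }}"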
| gpl-3.0 | 4,063,612,942,060,546,600 | 34.526882 | 107 | 0.586562 | false |
davidsoncolin/IMS | UI/QActorWidget.py | 1 | 2542 | #!/usr/bin/env python
from PySide import QtCore, QtGui
class QActorWidget(QtGui.QWidget):
def __init__(self, cb, parent=None):
self.cb = cb
self.parent = parent
QtGui.QWidget.__init__(self, parent)
self.groupLayout = QtGui.QVBoxLayout(self)
self.groupTabs = QtGui.QTabWidget()
self.groupLayout.addWidget(self.groupTabs)
self.groupLayout.addStretch(1)
self.setLayout(self.groupLayout)
self.actors = {}
self.actorNames = []
self.notRecordingPixmap = QtGui.QPixmap("img/NotRecording.png").scaledToHeight(16)
self.recordingPixmap = QtGui.QPixmap("img/Recording.png").scaledToHeight(16)
def addActor(self, name):
if self.actors.has_key(name):
return self.actors[name]
self.actors[name] = actor = {}
self.actorNames.append(name)
actorGroup = QtGui.QWidget()
actorGroupLayout = QtGui.QVBoxLayout(actorGroup)
actorVisible = QtGui.QCheckBox('Visible', actorGroup)
actorVisible.setCheckState(QtCore.Qt.Checked)
actorGroup.setLayout(actorGroupLayout)
actorLabel = QtGui.QLabel()
actor['group'] = actorGroup
actor['layout'] = actorGroupLayout
actor['data'] = actorLabel
actor['visible'] = actorVisible
actorVisible.cb = lambda x : self.cb(name, x)
actorVisible.stateChanged.connect(actorVisible.cb)
self.groupTabs.addTab(actorGroup,name)
actorGroupLayout.addWidget(actorVisible)
actorGroupLayout.addWidget(actorLabel)
actorLabel.setPixmap(self.recordingPixmap)
return actor
def setActorDofs(self, name, dofNames, sharedDofs, cb):
actor = self.actors[name]
layout = actor['layout']
import QActorDofsWidget
actor['dofsWidget'] = dofsWidget = QActorDofsWidget.QActorDofsWidget(name, cb, self)
layout.addWidget(dofsWidget)
dofsWidget.setDofs(dofNames,sharedDofs)
def syncActorDofs(self, name, dofValues):
self.actors[name]['dofsWidget'].syncSliders(dofValues)
def setActorData(self, name, value):
self.actors[name]['data'].setPixmap(self.notRecordingPixmap if value else self.recordingPixmap)
def removeActor(self, name):
if self.actors.has_key(name):
self.actorNames.remove(name)
self.actors.pop(name)['group'].deleteLater() # mark for deletion!
if __name__ == '__main__':
import sys
global app, win
app = QtGui.QApplication(sys.argv)
def test(actor, value):
print 'cb',actor,value
win = QActorWidget(test)
win.addActor('charles')
win.addActor('colin')
win.addActor('fred')
win.setActorData('fred', True)
win.removeActor('colin')
win.show()
app.connect(app, QtCore.SIGNAL('lastWindowClosed()') , app.quit)
sys.exit(app.exec_())
| mit | 345,725,258,588,368,300 | 30.775 | 97 | 0.739182 | false |
fbossy/SickRage | lib/hachoir_parser/common/win32.py | 74 | 6210 | from hachoir_core.field import (FieldSet,
UInt16, UInt32, Enum, String, Bytes, Bits, TimestampUUID60)
from hachoir_parser.video.fourcc import video_fourcc_name
from hachoir_core.bits import str2hex
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.network.common import MAC48_Address
# Dictionary: Windows codepage => Python charset name
CODEPAGE_CHARSET = {
874: "CP874",
# 932: Japanese Shift-JIS
# 936: Simplified Chinese GBK
# 949: Korean
# 950: Traditional Chinese Big5
1250: "WINDOWS-1250",
1251: "WINDOWS-1251",
1252: "WINDOWS-1252",
1253: "WINDOWS-1253",
1254: "WINDOWS-1254",
1255: "WINDOWS-1255",
1256: "WINDOWS-1256",
1257: "WINDOWS-1257",
1258: "WINDOWS-1258",
65001: "UTF-8",
}
class PascalStringWin16(FieldSet):
def __init__(self, parent, name, description=None, strip=None, charset="UTF-16-LE"):
FieldSet.__init__(self, parent, name, description)
length = self["length"].value
self._size = 16 + length * 16
self.strip = strip
self.charset = charset
def createFields(self):
yield UInt16(self, "length", "Length in widechar characters")
size = self["length"].value
if size:
yield String(self, "text", size*2, charset=self.charset, strip=self.strip)
def createValue(self):
if "text" in self:
return self["text"].value
else:
return None
class PascalStringWin32(FieldSet):
def __init__(self, parent, name, description=None, strip=None, charset="UTF-16-LE"):
FieldSet.__init__(self, parent, name, description)
length = self["length"].value
self._size = 32 + length * 16
self.strip = strip
self.charset = charset
def createFields(self):
yield UInt32(self, "length", "Length in widechar characters")
size = self["length"].value
if size:
yield String(self, "text", size*2, charset=self.charset, strip=self.strip)
def createValue(self):
if "text" in self:
return self["text"].value
else:
return None
class GUID(FieldSet):
"""
Windows 128 bits Globally Unique Identifier (GUID)
See RFC 4122
"""
static_size = 128
NULL = "00000000-0000-0000-0000-000000000000"
FIELD_NAMES = {
3: ("sha1_high", "sha1_low"),
4: ("random_high", "random_low"),
5: ("md5_high", "md5_low"),
}
VERSION_NAME = {
1: "Timestamp & MAC-48",
2: "DCE Security version",
3: "Name SHA-1 hash",
4: "Randomly generated",
5: "Name MD5 hash",
}
VARIANT_NAME = {
0: "NCS",
2: "Leach-Salz",
# 5: Microsoft Corporation?
6: "Microsoft Corporation",
7: "Reserved Future",
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self.version = self.stream.readBits(self.absolute_address + 32 + 16 + 12, 4, self.endian)
def createFields(self):
if self.version == 1:
yield TimestampUUID60(self, "time")
yield Enum(Bits(self, "version", 4), self.VERSION_NAME)
yield Enum(Bits(self, "variant", 3), self.VARIANT_NAME)
yield textHandler(Bits(self, "clock", 13), hexadecimal)
# yield textHandler(Bits(self, "clock", 16), hexadecimal)
if self.version == 1:
yield MAC48_Address(self, "mac", "IEEE 802 MAC address")
else:
yield Bytes(self, "node", 6)
else:
namea, nameb = self.FIELD_NAMES.get(
self.version, ("data_a", "data_b"))
yield textHandler(Bits(self, namea, 60), hexadecimal)
yield Enum(Bits(self, "version", 4), self.VERSION_NAME)
yield Enum(Bits(self, "variant", 3), self.VARIANT_NAME)
yield textHandler(Bits(self, nameb, 61), hexadecimal)
def createValue(self):
addr = self.absolute_address
a = self.stream.readBits (addr, 32, self.endian)
b = self.stream.readBits (addr + 32, 16, self.endian)
c = self.stream.readBits (addr + 48, 16, self.endian)
d = self.stream.readBytes(addr + 64, 2)
e = self.stream.readBytes(addr + 80, 6)
return "%08X-%04X-%04X-%s-%s" % (a, b, c, str2hex(d), str2hex(e))
def createDisplay(self):
value = self.value
if value == self.NULL:
name = "Null GUID: "
else:
name = "GUID v%u (%s): " % (self.version, self["version"].display)
return name + value
def createRawDisplay(self):
value = self.stream.readBytes(self.absolute_address, 16)
return str2hex(value, format=r"\x%02x")
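# For reference, createValue() above renders the 128 bits in the familiar
# 8-4-4-4-12 grouped hexadecimal form (e.g. something like
# "550E8400-E29B-41D4-A716-446655440000"; the exact value is illustrative only).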
class BitmapInfoHeader(FieldSet):
""" Win32 BITMAPINFOHEADER structure from GDI """
static_size = 40*8
COMPRESSION_NAME = {
0: u"Uncompressed (RGB)",
1: u"RLE (8 bits)",
2: u"RLE (4 bits)",
3: u"Bitfields",
4: u"JPEG",
5: u"PNG"
}
def __init__(self, parent, name, use_fourcc=False):
FieldSet.__init__(self, parent, name)
self._use_fourcc = use_fourcc
def createFields(self):
yield UInt32(self, "hdr_size", "Header size (in bytes) (=40)")
yield UInt32(self, "width", "Width")
yield UInt32(self, "height", "Height")
yield UInt16(self, "nb_planes", "Color planes")
yield UInt16(self, "bpp", "Bits/pixel")
if self._use_fourcc:
yield Enum(String(self, "codec", 4, charset="ASCII"), video_fourcc_name)
else:
yield Enum(UInt32(self, "codec", "Compression"), self.COMPRESSION_NAME)
yield UInt32(self, "size", "Image size (in bytes)")
yield UInt32(self, "xres", "X pixels per meter")
yield UInt32(self, "yres", "Y pixels per meter")
yield UInt32(self, "color_used", "Number of used colors")
yield UInt32(self, "color_important", "Number of important colors")
def createDescription(self):
return "Bitmap info header: %ux%u pixels, %u bits/pixel" % \
(self["width"].value, self["height"].value, self["bpp"].value)
| gpl-3.0 | -2,656,384,322,158,293,500 | 34.689655 | 97 | 0.583414 | false |
p4datasystems/CarnotKEdist | dist/Lib/types.py | 30 | 2149 | """Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.
NoneType = type(None)
TypeType = type
ObjectType = object
IntType = int
LongType = long
FloatType = float
BooleanType = bool
try:
ComplexType = complex
except NameError:
pass
StringType = str
# StringTypes is already outdated. Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)". But
# we keep around for compatibility with Python 2.2.
try:
UnicodeType = unicode
StringTypes = (StringType, UnicodeType)
except NameError:
StringTypes = (StringType,)
# XXX: no buffer in jython
#BufferType = buffer
TupleType = tuple
ListType = list
DictType = DictionaryType = dict
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.func_code)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m) # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
# XXX: Jython sys is not a real module
#ModuleType = type(sys)
ModuleType = type(sys.modules[__name__])
FileType = file
XRangeType = xrange
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
del tb
SliceType = slice
EllipsisType = type(Ellipsis)
DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.func_code)
MemberDescriptorType = type(FunctionType.func_globals)
del sys, _f, _g, _C, _x # Not for export
| apache-2.0 | 801,455,557,047,614,700 | 23.701149 | 71 | 0.705444 | false |
rahuldhote/odoo | openerp/cli/start.py | 240 | 2748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import os
import sys
from . import Command
from .server import main
from openerp.modules.module import get_module_root, MANIFEST
from openerp.service.db import _create_empty_database, DatabaseExists
class Start(Command):
"""Quick start the Odoo server for your project"""
def get_module_list(self, path):
mods = glob.glob(os.path.join(path, '*/%s' % MANIFEST))
return [mod.split(os.path.sep)[-2] for mod in mods]
def run(self, cmdargs):
parser = argparse.ArgumentParser(
prog="%s start" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__
)
parser.add_argument('--path', default=".",
help="Directory where your project's modules are stored (will autodetect from current dir)")
parser.add_argument("-d", "--database", dest="db_name", default=None,
help="Specify the database name (default to project's directory name")
args, unknown = parser.parse_known_args(args=cmdargs)
project_path = os.path.abspath(os.path.expanduser(os.path.expandvars(args.path)))
module_root = get_module_root(project_path)
db_name = None
if module_root:
# started in a module so we choose this module name for database
db_name = project_path.split(os.path.sep)[-1]
# go to the parent's directory of the module root
project_path = os.path.abspath(os.path.join(project_path, os.pardir))
# check if one of the subfolders has at least one module
mods = self.get_module_list(project_path)
if mods and '--addons-path' not in cmdargs:
cmdargs.append('--addons-path=%s' % project_path)
if not args.db_name:
args.db_name = db_name or project_path.split(os.path.sep)[-1]
cmdargs.extend(('-d', args.db_name))
# TODO: forbid some database names ? eg template1, ...
try:
_create_empty_database(args.db_name)
except DatabaseExists, e:
pass
except Exception, e:
die("Could not create database `%s`. (%s)" % (args.db_name, e))
if '--db-filter' not in cmdargs:
cmdargs.append('--db-filter=^%s$' % args.db_name)
# Remove --path /-p options from the command arguments
def to_remove(i, l):
return l[i] == '-p' or l[i].startswith('--path') or \
(i > 0 and l[i-1] in ['-p', '--path'])
cmdargs = [v for i, v in enumerate(cmdargs)
if not to_remove(i, cmdargs)]
main(cmdargs)
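# Typical invocation of this command (the launcher name, path and database
# name are assumptions):
#   odoo.py start --path ~/projects/my_addons -d my_addons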
def die(message, code=1):
print >>sys.stderr, message
sys.exit(code)
| agpl-3.0 | 1,866,656,568,638,254,000 | 35.64 | 104 | 0.59607 | false |
onitake/ansible | test/units/mock/yaml_helper.py | 209 | 5267 | import io
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
class YamlTestUtils(object):
"""Mixin class to combine with a unittest.TestCase subclass."""
def _loader(self, stream):
"""Vault related tests will want to override this.
Vault cases should set up an AnsibleLoader that has the vault password."""
return AnsibleLoader(stream)
def _dump_stream(self, obj, stream, dumper=None):
"""Dump to a py2-unicode or py3-string stream."""
if PY3:
return yaml.dump(obj, stream, Dumper=dumper)
else:
return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
def _dump_string(self, obj, dumper=None):
"""Dump to a py2-unicode or py3-string"""
if PY3:
return yaml.dump(obj, Dumper=dumper)
else:
return yaml.dump(obj, Dumper=dumper, encoding=None)
def _dump_load_cycle(self, obj):
# Each pass through a dump or load revs the 'generation'
# obj to yaml string
string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
# wrap a stream/file like StringIO around that yaml
stream_from_object_dump = io.StringIO(string_from_object_dump)
loader = self._loader(stream_from_object_dump)
# load the yaml stream to create a new instance of the object (gen 2)
obj_2 = loader.get_data()
# dump the gen 2 objects directly to strings
string_from_object_dump_2 = self._dump_string(obj_2,
dumper=AnsibleDumper)
# The gen 1 and gen 2 yaml strings
self.assertEquals(string_from_object_dump, string_from_object_dump_2)
# the gen 1 (orig) and gen 2 py object
self.assertEquals(obj, obj_2)
# again! gen 3... load strings into py objects
stream_3 = io.StringIO(string_from_object_dump_2)
loader_3 = self._loader(stream_3)
obj_3 = loader_3.get_data()
string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
self.assertEquals(obj, obj_3)
# should be transitive, but...
self.assertEquals(obj_2, obj_3)
self.assertEquals(string_from_object_dump, string_from_object_dump_3)
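# Illustrative use from a concrete test case mixing in this class (the class
# and test names are assumptions):
#   class TestRoundTrip(unittest.TestCase, YamlTestUtils):
#       def test_plain_dict(self):
#           self._dump_load_cycle({u'key': [1, 2, 3]})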
def _old_dump_load_cycle(self, obj):
'''Dump the passed in object to yaml, load it back up, dump again, compare.'''
stream = io.StringIO()
yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
self._dump_stream(obj, stream, dumper=AnsibleDumper)
yaml_string_from_stream = stream.getvalue()
# reset stream
stream.seek(0)
loader = self._loader(stream)
# loader = AnsibleLoader(stream, vault_password=self.vault_password)
obj_from_stream = loader.get_data()
stream_from_string = io.StringIO(yaml_string)
loader2 = self._loader(stream_from_string)
# loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
obj_from_string = loader2.get_data()
stream_obj_from_stream = io.StringIO()
stream_obj_from_string = io.StringIO()
if PY3:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
else:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
stream_obj_from_stream.seek(0)
stream_obj_from_string.seek(0)
if PY3:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
else:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
assert yaml_string == yaml_string_obj_from_stream
assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
yaml_string_stream_obj_from_string)
assert obj == obj_from_stream
assert obj == obj_from_string
assert obj == yaml_string_obj_from_stream
assert obj == yaml_string_obj_from_string
assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
return {'obj': obj,
'yaml_string': yaml_string,
'yaml_string_from_stream': yaml_string_from_stream,
'obj_from_stream': obj_from_stream,
'obj_from_string': obj_from_string,
'yaml_string_obj_from_string': yaml_string_obj_from_string}
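# --- Illustrative sketch added for clarity; not part of the original module. ---
# Shows how this mixin is intended to be combined with unittest.TestCase.
# AnsibleUnicode is used only as an assumed example of an object that
# round-trips through AnsibleDumper/AnsibleLoader.
if __name__ == '__main__':
    import unittest
    from ansible.parsing.yaml.objects import AnsibleUnicode
    class _ExampleYamlTestCase(unittest.TestCase, YamlTestUtils):
        def test_roundtrip(self):
            # dump -> load -> dump and compare each generation
            self._dump_load_cycle(AnsibleUnicode(u'hello'))
    unittest.main()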
| gpl-3.0 | -2,129,113,732,131,423,700 | 42.528926 | 130 | 0.639453 | false |
jclakkis/discus-inferno | flaskenv/lib/python2.7/site-packages/psycopg2/tests/test_bug_gc.py | 62 | 1719 | #!/usr/bin/env python
# bug_gc.py - test for refcounting/GC bug
#
# Copyright (C) 2010-2011 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions
import unittest
import gc
from testutils import ConnectingTestCase, skip_if_no_uuid
class StolenReferenceTestCase(ConnectingTestCase):
@skip_if_no_uuid
def test_stolen_reference_bug(self):
def fish(val, cur):
gc.collect()
return 42
UUID = psycopg2.extensions.new_type((2950,), "UUID", fish)
psycopg2.extensions.register_type(UUID, self.conn)
curs = self.conn.cursor()
curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid")
curs.fetchone()
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit | 501,774,426,823,963,650 | 34.8125 | 75 | 0.727749 | false |
wolfv/uberwriter | uberwriter/plugins/bibtex/fuzzywuzzy/process.py | 11 | 8790 | #!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import itertools
from . import fuzz
from . import utils
def extract(query, choices, processor=None, scorer=None, limit=5):
"""Select the best match in a list or dictionary of choices.
Find best matches in a list or dictionary of choices, return a
list of tuples containing the match and it's score. If a dictionary
is used, also returns the key for each match.
Arguments:
query: An object representing the thing we want to find.
choices: An iterable or dictionary-like object containing choices
to be matched against the query. Dictionary arguments of
{key: value} pairs will attempt to match the query against
each value.
processor: Optional function of the form f(a) -> b, where a is an
individual choice and b is the choice to be used in matching.
This can be used to match against, say, the first element of
a list:
lambda x: x[0]
Defaults to fuzzywuzzy.utils.full_process().
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.WRatio() is used and expects both query and
choice to be strings.
limit: Optional maximum for the number of elements returned. Defaults
to 5.
Returns:
List of tuples containing the match and its score.
If a list is used for choices, then the result will be 2-tuples.
If a dictionary is used, then the result will be 3-tuples containing
the key for each match.
For example, searching for 'bird' in the dictionary
{'bard': 'train', 'dog': 'man'}
may return
[('train', 22, 'bard'), ('man', 0, 'dog')]
"""
if choices is None:
return []
# Catch generators without lengths
try:
if len(choices) == 0:
return []
except TypeError:
pass
# default, turn whatever the choice is into a workable string
if not processor:
processor = utils.full_process
# default: wratio
if not scorer:
scorer = fuzz.WRatio
sl = []
try:
# See if choices is a dictionary-like object.
for key, choice in choices.items():
processed = processor(choice)
score = scorer(query, processed)
sl.append((choice, score, key))
except AttributeError:
# It's a list; just iterate over it.
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
sl.append((choice, score))
sl.sort(key=lambda i: i[1], reverse=True)
return sl[:limit]
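# --- Illustrative usage sketch; added for clarity, not part of the original module. ---
# The choice strings below are invented purely for demonstration; the scores in
# the comment are indicative only, since they depend on the default WRatio scorer.
def _extract_example():  # pragma: no cover
    choices = ["new york jets", "new york giants", "dallas cowboys"]
    # Returns (choice, score) tuples ordered by descending score, e.g.
    # [('new york jets', 90), ('new york giants', 79)] (actual scores may vary).
    return extract("new york", choices, limit=2)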
def extractBests(query, choices, processor=None, scorer=None, score_cutoff=0, limit=5):
"""Get a list of the best matches to a collection of choices.
Convenience function for getting the choices with best scores.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for transforming choices before matching.
See extract().
scorer: Scoring function for extract().
score_cutoff: Optional argument for score threshold. No matches with
a score less than this number will be returned. Defaults to 0.
limit: Optional maximum for the number of elements returned. Defaults
to 5.
Returns: A list of (match, score) tuples.
"""
best_list = extract(query, choices, processor, scorer, limit)
return list(itertools.takewhile(lambda x: x[1] >= score_cutoff, best_list))
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
"""Find the single best match above a score in a list of choices.
This is a convenience method which returns the single best choice.
See extract() for the full arguments list.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for transforming choices before matching.
See extract().
scorer: Scoring function for extract().
score_cutoff: Optional argument for score threshold. If the best
match is found, but it is not greater than this number, then
return None anyway ("not a good enough match"). Defaults to 0.
Returns:
A tuple containing a single match and its score, if a match
was found that was above score_cutoff. Otherwise, returns None.
"""
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0 and best_list[0][1] >= score_cutoff:
return best_list[0]
return None
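# --- Illustrative usage sketch; added for clarity, not part of the original module. ---
# extractOne() is the "best guess or None" convenience; the cutoff of 80 below
# is an arbitrary example threshold.
def _extract_one_example():  # pragma: no cover
    best = extractOne("cowboys", ["new york jets", "dallas cowboys"],
                      score_cutoff=80)
    # best is ('dallas cowboys', <score>) when the score clears 80, else None.
    return best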
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
"""This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
and remove duplicates. Specifically, it uses process.extract to identify duplicates that
score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
since we assume this item contains the most entity information and returns that. It breaks string
length ties on an alphabetical sort.
Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
sensitive.
Args:
contains_dupes: A list of strings that we would like to dedupe.
threshold: the numerical value (0,100) point at which we expect to find duplicates.
Defaults to 70 out of 100
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.token_set_ratio() is used and expects both query and
choice to be strings.
Returns:
A deduplicated list. For example:
In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
In: fuzzy_dedupe(contains_dupes)
Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
"""
extractor = []
# iterate over items in *contains_dupes*
for item in contains_dupes:
# return all duplicate matches found
matches = extract(item, contains_dupes, limit=None, scorer=scorer)
# filter matches based on the threshold
filtered = [x for x in matches if x[1] > threshold]
# if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
if len(filtered) == 1:
extractor.append(filtered[0][0])
else:
# alpha sort
filtered = sorted(filtered, key=lambda x: x[0])
# length sort
filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
# take first item as our 'canonical example'
extractor.append(filter_sort[0][0])
# uniquify *extractor* list
keys = {}
for e in extractor:
keys[e] = 1
extractor = keys.keys()
# check that extractor differs from contain_dupes (e.g. duplicates were found)
# if not, then return the original list
if len(extractor) == len(contains_dupes):
return contains_dupes
else:
return extractor
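# --- Illustrative usage sketch; added for clarity, not part of the original module. ---
# dedupe() keeps the longest spelling of each fuzzy-duplicate group; the names
# below mirror the docstring example.
def _dedupe_example():  # pragma: no cover
    names = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf']
    # Expected to collapse the three Baggins variants into 'Frodo Baggins'.
    return dedupe(names, threshold=70)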
| gpl-3.0 | -1,416,555,686,416,905,700 | 37.722467 | 122 | 0.663481 | false |
thopiekar/Cura | cura/Machines/Models/MultiBuildPlateModel.py | 2 | 2526 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import QTimer, pyqtSignal, pyqtProperty
from UM.Application import Application
from UM.Scene.Camera import Camera
from UM.Scene.Selection import Selection
from UM.Qt.ListModel import ListModel
#
# This is the model for multi build plate feature.
# This has nothing to do with the build plate types you can choose on the sidebar for a machine.
#
class MultiBuildPlateModel(ListModel):
maxBuildPlateChanged = pyqtSignal()
activeBuildPlateChanged = pyqtSignal()
selectionChanged = pyqtSignal()
def __init__(self, parent = None):
super().__init__(parent)
self._update_timer = QTimer()
self._update_timer.setInterval(100)
self._update_timer.setSingleShot(True)
self._update_timer.timeout.connect(self._updateSelectedObjectBuildPlateNumbers)
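# Note added for clarity: the 100 ms single-shot timer above debounces bursts
# of sceneChanged signals so the selected build plate numbers are recomputed
# at most once per burst instead of on every scene change.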
self._application = Application.getInstance()
self._application.getController().getScene().sceneChanged.connect(self._updateSelectedObjectBuildPlateNumbersDelayed)
Selection.selectionChanged.connect(self._updateSelectedObjectBuildPlateNumbers)
self._max_build_plate = 1 # default
self._active_build_plate = -1
def setMaxBuildPlate(self, max_build_plate):
if self._max_build_plate != max_build_plate:
self._max_build_plate = max_build_plate
self.maxBuildPlateChanged.emit()
## Return the highest build plate number
@pyqtProperty(int, notify = maxBuildPlateChanged)
def maxBuildPlate(self):
return self._max_build_plate
def setActiveBuildPlate(self, nr):
if self._active_build_plate != nr:
self._active_build_plate = nr
self.activeBuildPlateChanged.emit()
@pyqtProperty(int, notify = activeBuildPlateChanged)
def activeBuildPlate(self):
return self._active_build_plate
def _updateSelectedObjectBuildPlateNumbersDelayed(self, *args):
if not isinstance(args[0], Camera):
self._update_timer.start()
def _updateSelectedObjectBuildPlateNumbers(self, *args):
result = set()
for node in Selection.getAllSelectedObjects():
result.add(node.callDecoration("getBuildPlateNumber"))
self._selection_build_plates = list(result)
self.selectionChanged.emit()
@pyqtProperty("QVariantList", notify = selectionChanged)
def selectionBuildPlates(self):
return self._selection_build_plates
| lgpl-3.0 | 3,507,922,057,515,765,000 | 35.608696 | 125 | 0.70388 | false |
adhoc-dev/odoo-addons | product_pack/models/pack.py | 5 | 3149 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import fields, models, api
class product_pack(models.Model):
_name = 'product.pack.line'
_rec_name = 'product_id'
parent_product_id = fields.Many2one(
'product.product',
'Parent Product',
ondelete='cascade',
required=True
)
quantity = fields.Float(
'Quantity',
required=True,
default=1.0,
)
product_id = fields.Many2one(
'product.product',
'Product',
ondelete='cascade',
required=True,
)
@api.multi
def get_sale_order_line_vals(self, line, order):
self.ensure_one()
# pack_price = 0.0
subproduct = self.product_id
quantity = self.quantity * line.product_uom_qty
taxes = order.fiscal_position.map_tax(
subproduct.taxes_id)
tax_id = [(6, 0, taxes.ids)]
if subproduct.uos_id:
uos_id = subproduct.uos_id.id
uos_qty = quantity * subproduct.uos_coeff
else:
uos_id = False
uos_qty = quantity
# if pack is fixed price or totalice price we don't want amount on
# pack lines
if line.product_id.pack_price_type in [
'fixed_price', 'totalice_price']:
price = 0.0
discount = 0.0
else:
pricelist = order.pricelist_id.id
price = self.env['product.pricelist'].price_get(
subproduct.id, quantity,
order.partner_id.id, context={
'uom': subproduct.uom_id.id,
'date': order.date_order})[pricelist]
discount = line.discount
# Obtain product name in partner's language
if order.partner_id.lang:
subproduct = subproduct.with_context(
lang=order.partner_id.lang)
subproduct_name = subproduct.name
vals = {
'order_id': order.id,
'name': '%s%s' % (
'> ' * (line.pack_depth + 1), subproduct_name
),
# 'delay': subproduct.sale_delay or 0.0,
'product_id': subproduct.id,
# 'procurement_ids': (
# [(4, x.id) for x in line.procurement_ids]
# ),
'price_unit': price,
'tax_id': tax_id,
'address_allotment_id': False,
'product_uom_qty': quantity,
'product_uom': subproduct.uom_id.id,
'product_uos_qty': uos_qty,
'product_uos': uos_id,
'product_packaging': False,
'discount': discount,
'number_packages': False,
'th_weight': False,
'state': 'draft',
'pack_parent_line_id': line.id,
'pack_depth': line.pack_depth + 1,
}
return vals
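# --- Illustrative sketch added for clarity; not part of the original module. ---
# Rough shape of how this helper is typically driven from the sale order side.
# `pack_line_ids` is an assumed one2many from product.product to
# product.pack.line; `line` and `order` are assumed browse records of
# sale.order.line and sale.order.
#
# for pack_line in line.product_id.pack_line_ids:
#     vals = pack_line.get_sale_order_line_vals(line, order)
#     line.env['sale.order.line'].create(vals)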
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,675,045,906,619,023,600 | 31.802083 | 78 | 0.49476 | false |
guiquanz/Dato-Core | src/unity/python/graphlab_util/lambda_closure_capture.py | 13 | 9429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import sys
import parser
import symbol
import token
import ast
import inspect
import graphlab.meta as meta
class expression_validator(ast.NodeVisitor):
"""
This tree walk attempts to validate an expression: that the expression
should *not* contain certain names.
This is used for the case
x = 10
lambda x: fn(x+15, x)
Really, the "x+15" expression is invalid since the expression uses an
lambda argument. However, it does evaluate correctly in the scope
since "x" also exists in the function scope.
We thus need to validate the expression before attempting to evaluate it
so that the expression must not contain a lambda argument.
This validator here is a lot stricter than it should be, since it will also
prevent all cases where something with the same name as the lambda argument
is created in an inner scope. For instance:
lambda x: fn((lambda x: x + 15)(5), x)
lambda x: fn([x for x in [1,2,3]], x)
"""
def __init__(self, blocked_symbols):
self.blocked_symbols = blocked_symbols
def visit_Name(self, node):
if node.id in self.blocked_symbols:
raise RuntimeError("Blocked symbols encountered")
class attribute_reader(ast.NodeVisitor):
"""
Things like gl.extensions._demo_add
get parsed as
Attribute(value=Attribute(value=Name(id='gl', ctx=Load()),
attr='extensions', ctx=Load()), attr='_demo_add', ctx=Load())
This causes problems for
lambda x: gl.extensions._demo_add(x, 5)
We need to break down the attribute into the original string
"""
def default(self, node):
raise NotImplementedError("Cannot process token at " +
str(node.lineno) + ":" + str(node.col_offset))
def visit_Name(self, node):
return node.id
def visit_Attribute(self, node):
s = self.visit(node.value)
return s + "." + node.attr
class Parameter(object):
def __init__(self, name):
self.name = name
def __str__(self):
return 'λ' + self.name
def __repr__(self):
return str(self)
class lambda_closure_visitor(ast.NodeVisitor):
"""
This implements a *very* limited decompiler. It only handles cases of
lambda x: fn(a, b, x, ...)
where a,b, etc are variables captured from the surrounding scope, and there
may be some occurances of x.
No additional statements or expressions are permitted
"""
FUNCTION = 0 # I am translating the wrapping lambda function
INNER_CALL = 1 # I am translating the function call inside
PARAMETER = 2 # I am just translating a function parameter
def __init__(self):
# The fn
self.closure_fn_name = ""
# A list of captured positional arguments
# lambda parameters are denoted by being of type Parameter
self.positional_args = []
# A dictionary of captured named arguments
# lambda parameters are denoted by being of type Parameter
self.named_args = {}
# List of all the input argument names
self.input_arg_names = []
self.caller_globals = []
self.state = self.FUNCTION
def default(self, node):
raise NotImplementedError("Cannot process token at " +
str(node.lineno) + ":" + str(node.col_offset))
def __repr__(self):
return str(self)
def __str__(self):
ret = self.closure_fn_name + "("
comma = False
for i in self.positional_args:
if comma:
ret = ret + ','
ret = ret + str(i)
comma = True
for i in self.named_args:
if comma:
ret = ret + ','
ret = ret + i + ":" + str(self.named_args[i])
comma = True
ret = ret + ")"
return ret
def translate_ast(self, ast_node):
#print(ast.dump(ast_node))
t = self.visit(ast_node)
def visit_Module(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected module in position " +
str(node.lineno) + ":" + str(node.col_offset))
for line in node.body:
self.visit(line)
def visit_Call(self, node):
if (self.state != self.INNER_CALL):
raise NotImplementedError("Unexpected call in position " +
str(node.lineno) + ":" + str(node.col_offset))
self.state = self.INNER_CALL
# this is the main closure function call
if self.closure_fn_name != "":
raise NotImplementedError("Cannot translate function call " +
str(node.lineno) + ":" + str(node.col_offset))
elif type(node.func) is ast.Name:
self.closure_fn_name = node.func.id
elif type(node.func) is ast.Attribute:
self.closure_fn_name = attribute_reader().visit(node.func)
else:
raise NotImplementedError("Unexpected type of function call.")
self.state = self.PARAMETER
for i in range(len(node.args)):
arg = node.args[i]
if type(arg) is ast.Name and arg.id in self.input_arg_names:
self.positional_args += [Parameter(arg.id)]
else:
try:
expression_validator(self.input_arg_names).visit(arg)
# try to evaluate the ast
result = eval(compile(ast.Expression(arg), '<string>', 'eval'), self.caller_globals)
except:
raise NotImplementedError("Only simple expressions not using the function arguments are permitted")
self.positional_args += [result]
# keyword arguments next
keywordargs = {i.arg:i.value for i in node.keywords}
for i in keywordargs:
arg = keywordargs[i]
if type(arg) is ast.Name and arg.id in self.input_arg_names:
self.named_args[i] = Parameter(arg.id)
else:
try:
expression_validator(self.input_arg_names).visit(arg)
# try to evaluate the ast
result = eval(compile(ast.Expression(arg), '<string>', 'eval'), self.caller_globals)
except:
raise NotImplementedError("Only simple expressions not using the function arguments are permitted")
self.named_args[i] = result
def visit_arguments(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected function")
self.input_arg_names = [arg.id for arg in node.args]
def visit_Name(self, node):
raise NotImplementedError("Unexpected name")
def visit_Return(self, node):
if (self.state != self.INNER_CALL):
raise NotImplementedError("Unexpected return")
return self.visit(node.value)
def visit_Lambda(self, node):
return self.visit_FunctionDef(node)
def visit_FunctionDef(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected function")
self.visit(node.args)
self.state = self.INNER_CALL
if type(node.body) is list:
next_node = node.body[0]
else:
next_node = node.body
if type(next_node) is ast.Call:
self.visit(next_node)
elif type(next_node) is ast.Return and type(next_node.value) is ast.Call:
self.visit(next_node.value)
else:
raise NotImplementedError("Function must comprise of just a function call ")
def visit_ClassDef(self, node):
raise NotImplementedError("Classes are not implemented")
def _isalambda(v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
def translate(fn):
visitor = lambda_closure_visitor()
visitor.caller_globals = fn.func_globals.copy()
# now. annoyingly enough certain captures are not here. We need to
# look in func_closures for it
if fn.func_closure:
closure = dict(zip(fn.func_code.co_freevars, (c.cell_contents for c in fn.func_closure)))
# inject closure into "caller_globals"
for i in closure:
visitor.caller_globals[i] = closure[i]
ast_node = None
try:
if not _isalambda(fn):
ast_node = ast.parse(inspect.getsource(fn))
except:
pass
try:
if ast_node == None:
ast_node = meta.decompiler.decompile_func(fn)
except:
pass
if ast_node is None:
raise RuntimeError("Cannot process provided function")
visitor.translate_ast(ast_node)
return visitor
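# --- Illustrative usage sketch added for clarity; not part of the original module. ---
# `increment` is a made-up captured variable; only the "call with captured
# values" shape documented above is supported by this limited decompiler.
def _translate_example():  # pragma: no cover
    increment = 10
    visitor = translate(lambda x: max(x, increment))
    # Roughly: visitor.closure_fn_name == 'max' and
    # visitor.positional_args == [<Parameter x>, 10]
    return visitor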
# if __name__ == "__main__":
# if len(sys.argv) <= 1:
# print("Usage:\n\t./Lua_Translator.py <FILENAME>\n")
# exit(-1)
# f = open(sys.argv[1] , 'r')
# l = f.readlines()
# f.close()
# s = ""
#
# for x in l:
# s = s + x
#
# ast_node = ast.parse(s)
#
# f = open(sys.argv[1].rpartition(".")[0] + "_trans.lua", 'w')
# test = translator_NodeVisitor(f)
# test.translate_ast(ast_node)
# f.close()
| agpl-3.0 | -2,257,631,989,928,932,900 | 31.622837 | 119 | 0.592384 | false |
visualputty/Landing-Lights | django/utils/datastructures.py | 239 | 15522 | from types import GeneratorType
from django.utils.copycompat import copy, deepcopy
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
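# --- Illustrative sketch added for clarity; not part of the original module. ---
# MergeDict consults its dictionaries left to right; the first hit wins.
def _merge_dict_example():  # pragma: no cover
    md = MergeDict({'a': 1}, {'a': 2, 'b': 3})
    return md['a'], md['b']  # (1, 3)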
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
# second time to setup keyOrder correctly
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
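# --- Illustrative sketch added for clarity; not part of the original module. ---
# SortedDict simply remembers insertion order; the keys below are arbitrary.
def _sorted_dict_example():  # pragma: no cover
    d = SortedDict()
    d['b'] = 1
    d['a'] = 2
    return d.keys()  # ['b', 'a'] -- insertion order, not alphabetical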
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
import django.utils.copycompat as copy
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
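# --- Illustrative sketch added for clarity; not part of the original module. ---
# The quoting lambda below stands in for the SQL quoting function mentioned in
# the DictWrapper docstring; the 'qn_' prefix is an arbitrary example.
def _dict_wrapper_example():  # pragma: no cover
    dw = DictWrapper({'name': 'x'}, lambda v: '"%s"' % v, 'qn_')
    return dw['qn_name'], dw['name']  # ('"x"', 'x')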
| bsd-3-clause | -4,056,238,526,838,558,000 | 30.294355 | 131 | 0.547417 | false |
nachandr/cfme_tests | cfme/tests/containers/test_static_custom_attributes.py | 3 | 10129 | import re
from copy import deepcopy
from os import path
from random import choice
from string import ascii_letters
from string import digits
import pytest
from manageiq_client.api import APIException
from cfme import test_requirements
from cfme.containers.provider import ContainersProvider
from cfme.containers.provider.openshift import CustomAttribute
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(2),
pytest.mark.provider([ContainersProvider], scope='function'),
test_requirements.containers
]
def get_random_string(length):
valid_chars = digits + ascii_letters + ' !@#$%^&*()'
out = ''.join([choice(valid_chars) for _ in range(length)])
return re.sub(r'\s+', ' ', out)
ATTRIBUTES_DATASET = [
CustomAttribute('exp date', '2017-01-02', 'Date'),
CustomAttribute('sales force acount', 'ADF231VRWQ1', None),
CustomAttribute('expected num of nodes', '2', None)
]
VALUE_UPDATES = ['2018-07-12', 'ADF231VRWQ1', '1']
# TODO These should be factored into a single CRUD test
@pytest.fixture(scope='function')
def add_delete_custom_attributes(provider):
provider.add_custom_attributes(*ATTRIBUTES_DATASET)
view = navigate_to(provider, 'Details', force=True)
assert view.entities.summary('Custom Attributes').is_displayed
yield
try:
provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
except APIException:
logger.info("No custom attributes to delete")
def test_add_static_custom_attributes(add_delete_custom_attributes, provider):
"""Tests adding of static custom attributes to provider
Steps:
* Add static custom attributes (API)
* Go to provider summary page
Expected results:
* The attributes was successfully added
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
view = navigate_to(provider, 'Details', force=True)
custom_attr_ui = view.entities.summary('Custom Attributes')
for attr in ATTRIBUTES_DATASET:
assert attr.name in custom_attr_ui.fields
assert custom_attr_ui.get_text_of(attr.name) == attr.value
def test_edit_static_custom_attributes(provider):
"""Tests editing of static custom attributes from provider
Prerequisite:
* test_add_static_custom_attributes passed.
Steps:
* Edit (update) the static custom attributes (API)
* Go to provider summary page
Expected results:
* The attributes was successfully updated to the new values
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
provider.add_custom_attributes(*ATTRIBUTES_DATASET)
edited_attribs = deepcopy(ATTRIBUTES_DATASET)
for ii, value in enumerate(VALUE_UPDATES):
edited_attribs[ii].value = value
provider.edit_custom_attributes(*edited_attribs)
view = navigate_to(provider, 'Details', force=True)
custom_attr_ui = view.entities.summary('Custom Attributes')
for attr in edited_attribs:
assert attr.name in custom_attr_ui.fields
assert custom_attr_ui.get_text_of(attr.name) == attr.value
provider.delete_custom_attributes(*edited_attribs)
def test_delete_static_custom_attributes(add_delete_custom_attributes, request, provider):
"""Tests deleting of static custom attributes from provider
Steps:
* Delete the static custom attributes that recently added (API)
* Go to provider summary page
Expected results:
* The attributes was successfully deleted
(you should not see a custom attributes table)
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
view = navigate_to(provider, 'Details', force=True)
if view.entities.summary('Custom Attributes').is_displayed:
for attr in ATTRIBUTES_DATASET:
assert attr.name not in view.entities.summary('Custom Attributes').fields
else:
logger.info("No custom attributes table to check")
assert True
ca = CustomAttribute('test_value', 'This is a test', None)
request.addfinalizer(lambda: provider.delete_custom_attributes(ca))
provider.add_custom_attributes(ca)
provider.add_custom_attributes(*ATTRIBUTES_DATASET)
provider.browser.refresh()
for attr in ATTRIBUTES_DATASET:
assert attr.name in view.entities.summary('Custom Attributes').fields
assert view.entities.summary('Custom Attributes').get_text_of(attr.name) == attr.value
provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
provider.browser.refresh()
if view.entities.summary('Custom Attributes').is_displayed:
for attr in ATTRIBUTES_DATASET:
assert attr.name not in view.entities.summary('Custom Attributes').fields
else:
logger.info("Custom Attributes Table does not exist. Expecting it to exist")
assert False
def test_add_attribute_with_empty_name(provider):
"""Tests adding of static custom attributes with empty field
Steps:
* add the static custom attribute with name "" (API)
* Go to provider summary page
Expected results:
* You should get an error
* You should not see this attribute in the custom attributes table
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
with pytest.raises(APIException):
provider.add_custom_attributes(
CustomAttribute('', "17")
)
pytest.fail('You have added a custom attribute with an empty name '
'and didn\'t get an error!')
view = navigate_to(provider, 'Details', force=True)
if view.entities.summary('Custom Attributes').is_displayed:
assert "" not in view.entities.summary('Custom Attributes').fields
def test_add_date_attr_with_wrong_value(provider):
"""Trying to add attribute of type date with non-date value
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = CustomAttribute('nondate', "koko", 'Date')
with pytest.raises(APIException):
provider.add_custom_attributes(ca)
pytest.fail('You have added a custom attribute of type '
'{} with value of {} and didn\'t get an error!'
.format(ca.field_type, ca.value))
view = navigate_to(provider, 'Details', force=True)
if view.entities.summary('Custom Attributes').is_displayed:
assert 'nondate' not in view.entities.summary('Custom Attributes').fields
def test_edit_non_exist_attribute(provider):
"""Trying to edit non-exist attribute
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = choice(ATTRIBUTES_DATASET)
# Note: we need to implement it inside the test instead of using
# the API (provider.edit_custom_attributes) in order to
# specify the href and yield the exception
payload = {
"action": "edit",
"resources": [{
"href": '{}/custom_attributes/9876543210000000'
.format(provider.href()),
"value": ca.value
}]}
with pytest.raises(APIException):
provider.appliance.rest_api.post(
path.join(provider.href(), 'custom_attributes'), **payload)
pytest.fail('You tried to edit a non-existent custom attribute '
'({}) and didn\'t get an error!'
.format(ca.value))
def test_delete_non_exist_attribute(provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = choice(ATTRIBUTES_DATASET)
with pytest.raises(APIException):
provider.delete_custom_attributes(ca)
pytest.fail('You tried to delete a non-existent custom attribute '
'({}) and didn\'t get an error!'
.format(ca.value))
def test_add_already_exist_attribute(provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = choice(ATTRIBUTES_DATASET)
provider.add_custom_attributes(ca)
try:
provider.add_custom_attributes(ca)
except APIException:
pytest.fail('You tried to add a custom attribute that already exists '
'({}) and didn\'t get an error!'
.format(ca.value))
finally:
provider.delete_custom_attributes(ca)
def test_very_long_name_with_special_characters(request, provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = CustomAttribute(get_random_string(1000), 'very_long_name', None)
request.addfinalizer(lambda: provider.delete_custom_attributes(ca))
provider.add_custom_attributes(ca)
view = navigate_to(provider, 'Details', force=True)
assert ca.name in view.entities.summary('Custom Attributes').fields
# BZ 540647 was closed as no fix. Code was added that strips underscores from attribute names.
def test_very_long_value_with_special_characters(request, provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = CustomAttribute('very long value', get_random_string(1000), None)
request.addfinalizer(lambda: provider.delete_custom_attributes(ca))
provider.add_custom_attributes(ca)
view = navigate_to(provider, 'Details', force=True)
assert ca.value == view.entities.summary('Custom Attributes').get_text_of(ca.name)
| gpl-2.0 | -5,497,219,186,626,041,000 | 34.292683 | 94 | 0.669563 | false |
MaxTyutyunnikov/lino | lino/modlib/vocbook/base.py | 1 | 33978 | # -*- coding: UTF-8 -*-
## Copyright 2011-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
Work in progress.
I currently use the :mod:`lino.modlib.vocbook`
module to generate teaching materials for my pupils,
the catering cooks at
`Vigala professional school <http://web.vigalattk.ee/>`_.
"""
import logging
#~ logging.basicConfig(filename='example.log',level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
import os
import codecs
import locale
import re
from lino.utils import AttrDict
from lino.utils import iif, curry
from lino.utils import memo
from lino.utils.html2xhtml import html2xhtml
#~ from lino.utils.xmlgen.html import html2rst as html2rst_
from lino.utils.xmlgen.html import html2rst
from lino.utils.xmlgen.html import E
from lino.utils.restify import restify
from atelier.rstgen import write_header
from atelier import rstgen
from lino.utils import htmlgen
USE_XHTML2ODT = False
#~ def html2rst(x):
#~ if isinstance(x,basestring): return x
#~ return html2rst_(x)
def e2s(g):
def fmt(e):
if isinstance(e,basestring): return e
return E.tostring(e)
return ' '.join([fmt(e) for e in g])
if USE_XHTML2ODT:
from Cheetah.Template import Template as CheetahTemplate
import xhtml2odt
class MyODTFile(xhtml2odt.ODTFile):
def render(self,context):
self.open()
tpl = CheetahTemplate(self.xml['content'],namespaces=[context])
nc = unicode(tpl) #.encode('utf-8')
if nc.startswith('<?xml version'):
#~ nc = nc.replace('<?xml version="1.0" encoding="UTF-8"?>','')
nc = nc.split('\n',1)[1]
self.xml['content'] = nc
#~ odt = self.xhtml_to_odt(xhtml)
#~ self.insert_content(odt)
if True:
f = open("content.xml","wt")
f.write(self.xml['content'].encode('utf-8'))
f.close()
self.add_styles()
self.save(self.options.output)
pronunciationRE = re.compile("^(.*)\s*(\[.*\])\s*",re.DOTALL)
def makedirs_if_missing(dirname):
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
class LanguageMeta(type):
def __new__(meta, classname, bases, classDict):
# Each subclass gets her own list of word types:
classDict['words'] = []
classDict['wordTypes'] = []
cls = type.__new__(meta, classname, bases, classDict)
return cls
class Language(object):
__metaclass__ = LanguageMeta
@classmethod
def add_wordtype(cls,wt):
cls.wordTypes.append(wt)
@classmethod
def register_word(cls,w):
for ew in cls.words:
if ew == w:
return ew
cls.words.append(w)
return w
class WordType(object):
text = None
@classmethod
def is_of_this_type(cls,w):
return w.type == cls
class Word(object):
type = None
text = None
pronounciation = ''
gender = None
form = None
def __init__(self,text,
type=None,
pronounciation=None,
gender=None,
form=None):
if not text:
raise Exception("Cannot create empty word!")
self.text = text
if type: self.type = type
if form is not None: self.form = form
if pronounciation:
assert pronounciation.startswith('[')
assert pronounciation.endswith(']')
self.pronounciation = pronounciation[1:-1]
if gender:
assert gender in ('m','f','mf','pl')
self.gender = gender
self.translations = []
self.units = []
def get_pron_html(self,article=False):
if not self.pronounciation:
return ''
#~ if article and Nom.is_of_this_type(self):
return "[%s]" % self.pronounciation
def add_to_unit(self,unit):
self.units.append(unit)
#~ unit.add_word(self)
def __repr__(self):
return "%r(%r)" % (self.text,self.type.__name__)
def __eq__(self,other):
if self.__class__ != other.__class__: return False
if self.text != other.text: return False
if self.pronounciation != other.pronounciation: return False
if self.gender != other.gender: return False
if self.type != other.type: return False
if self.form != other.form: return False
return True
def add_translations(self,translations):
for t in translations:
if not t in self.translations:
self.translations.append(t)
def opposite_gender(self):
if self.gender == 'f' : return 'm'
if self.gender == 'm' : return 'f'
return None
def get_partner(self,gender):
if self.gender == 'mf' or self.gender == gender:
return self
if not self.partner:
raise Exception("%r has no partner " % self)
return self.partner
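# --- Illustrative sketch added for clarity; not part of the original module. ---
# How a Word is typically built by a language-specific parse_word()
# implementation; the French noun and Estonian translation are made up.
def _word_example():  # pragma: no cover
    w = Word(u'pomme', pronounciation=u'[pɔm]', gender='f')
    w.add_translations([u'õun'])
    return w.get_pron_html()  # u'[pɔm]'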
class Column:
label = ''
def __init__(self,label):
self.label = label
@classmethod
def render(cls,w,book):
s = html2rst(E.div(*tuple(cls.word2html(w,book))))
#~ s = html2rst(e) for e (cls.word2html(w,book)))
if "<" in s:
raise Exception("2013116 %r" % cls.word2html(w,book))
if s.startswith('('):
s = '\\' + s
return s
@classmethod
def word2html(cls,w,book):
raise NotImplementedError()
class FR(Column):
label = 'prantsuse k.'
@classmethod
def word2html(cls,w,book):
for e in book.from_language.present_word2html(w,book):
yield e
class PRON(Column):
label = u'hääldamine'
@classmethod
def word2html(cls,w,book):
yield w.get_pron_html()
class ET(Column):
label = u'eesti k.'
@classmethod
def word2html(cls,w,book):
if len(w.translations) == 1:
yield w.translations[0]
else:
yield "; ".join(["(%d) %s" % (n+1,w) for n,w in enumerate(w.translations)])
class M(Column):
label = u'meessoost'
gender = 'm'
@classmethod
def word2html(cls,w,book):
#~ return html2rst(w.html_fr_lesson()) + ' ' + w.pronounciation
w = w.get_partner(cls.gender)
#~ return '<b>%s</b>' % w.text + ' ' + w.get_pron_html()
yield E.b(w.text)
yield ' '
yield w.get_pron_html()
class F(M):
label = u'naissoost'
gender = 'f'
class GEON(FR):
label = u'Maa'
class GEOM(Column):
gender = 'm'
label = u'omadussõna (m)'
@classmethod
def word2html(cls,w,book):
if not w.adjectif:
yield ''
return
w = w.adjectif
w = w.get_partner(cls.gender)
#~ return '<b>%s</b>' % w.text + ' ' + w.get_pron_html()
yield E.b(w.text)
yield ' '
yield w.get_pron_html()
class GEOF(GEOM):
label = u'omadussõna (n)'
gender = 'f'
#~ def mycmp(a,b):
#~ return locale.strcoll(a,b)
def sort_by_fr(a,b):
return locale.strcoll(a.text.lower(),b.text.lower())
#~ return locale.strcoll(S(a.fr),S(b.fr))
class Section:
def __init__(self,book,parent,
title=None,intro=None,
number=None,ref=None,
from_language=None,
to_language=None):
if from_language is None:
from_language = parent.from_language
if to_language is None:
to_language = parent.to_language
#~ if number is None:
#~ raise Exception("Section %r has no number" % title)
self.to_language = to_language
self.from_language = from_language
self.parent = parent
if number is not None:
if not isinstance(number,int):
raise Exception("Section %r got invalid number %r" % (title,number))
elif parent is not None:
number = len(parent.children) + 1
self.number = number
self.book = book
self.ref = ref
self.title = title
self.intro = intro
self.body = []
self.words = []
self.children = []
self.current_lesson = None
if self.ref:
if self.ref in self.book.ref2sect:
raise Exception("Duplicate reference %r" % self.ref)
self.book.ref2sect[self.ref] = self
def add_section(self,*args,**kw):
sect = Section(self.book,self,*args,**kw)
self.children.append(sect)
return sect
def add_index(self,*args,**kw):
sect = Index(self.book,self,*args,**kw)
self.children.append(sect)
return sect
def add_dictionary(self,*args,**kw):
sect = Dictionary(self.book,self,*args,**kw)
self.children.append(sect)
return sect
def add_lesson(self,*args,**kw):
self.current_lesson = Unit(self.book,self,*args,**kw)
self.children.append(self.current_lesson)
def add_after(self,chunk):
#~ self.current_lesson.body.append(chunk)
self.current_lesson.after.append(chunk)
def parse_words(self,cl,lines):
self.current_lesson.parse_words(cl,lines)
def name_parts(self):
if self.parent is None:
return ['index' ]
elif self.children:
return [ self.get_ref(), 'index' ]
else:
return [ self.get_ref() ]
def get_ref(self):
if self.ref:
return self.ref
if self.number is not None:
#~ return str(self.number)
return '%02d' % self.number
def rst_ref_to(self,text=None):
parts = self.name_parts()
#~ ref = self.get_ref()
p = self.parent
while p is not None:
pref = p.get_ref()
#~ if p.number is not None:
if pref is not None:
#~ parts = ['%02d' % p.number] + parts
parts = [pref] + parts
p = p.parent
if not text:
text = self.get_ref_text()
if self.book.writing_format == 'rst':
if text:
return ':doc:`%s </%s>`' % (text,'/'.join(parts))
return ':doc:`/%s`' % '/'.join(parts)
return "*" + text + "*"
#~ return ':doc:`%s </%s>`' % (self.title,'/'.join(parts))
def get_full_number(self):
number = str(self.number)
p = self.parent
while p is not None:
if p.number is not None:
number = str(p.number) + "." + number
p = p.parent
return number
def get_ref_text(self):
return self.title
def html_lines(self,level=1):
if self.number is None:
title = self.title
else:
title = "%s %s" % (self.get_full_number(),self.title)
if True:
if self.parent is not None:
title = restify(self.memo2rst(title)).strip()
if title.startswith('<p>') and title.endswith('</p>'):
title = title[3:-4]
#~ logger.info("20120311 title is %r", title)
else:
raise Exception("20120311 title is %r" % title)
yield htmlgen.H(level,title)
else:
tag = "H%d" % level
title = title.replace("<p>","<"+tag+">")
title = title.replace("</p>","</"+tag+">")
yield title
#~ yield "<H%d>%s</H%d>" % (level,,level)
if self.intro:
yield restify(self.memo2rst(self.intro))
if self.children:
for s in self.children:
for ln in s.html_lines(level+1):
yield ln
for chunk in self.body:
yield restify(self.memo2rst(chunk))
def write_rst_files(self,root):
fn = os.path.join(root,*self.name_parts()) + ".rst"
logger.info("Generate %s",fn)
newroot = os.path.dirname(fn)
makedirs_if_missing(newroot)
fd = codecs.open(fn,'w','utf-8')
if self.number is None:
title = self.title
else:
title = "%d. %s" % (self.number,self.title)
#~ if self.number is None:
#~ write_header(fd,1,"%s" % self.title)
#~ else:
#~ write_header(fd,1,"%d. %s" % (self.number,self.title))
write_header(fd,1,self.memo2rst(title))
self.write_body(fd)
fd.close()
for s in self.children:
s.write_rst_files(newroot)
def write_body(self,fd):
if self.intro:
fd.write(self.memo2rst(self.intro) + '\n\n')
for chunk in self.body:
fd.write(self.memo2rst(chunk) + '\n\n')
if self.children:
fd.write("""\
.. toctree::
:maxdepth: 2
""")
for s in self.children:
fd.write(" " + ("/".join(s.name_parts())) + "\n")
fd.write('\n\n')
def memo2rst(self,s):
return self.book.memo2rst(s)
class Unit(Section):
columns = [FR,PRON,ET]
def __init__(self,book,parent,title=None,intro=None,columns=None,show_headers=None,**kw):
if columns is not None:
self.columns = columns
if show_headers is None:
show_headers = True
elif show_headers is None:
show_headers = False
self.show_headers = show_headers
#~ self.parent = parent
Section.__init__(self,book,parent,title=title,intro=intro,**kw)
if not self.title:
self.title = u"Leçon %d" % self.number
self.after = []
#~ if after:
#~ self.add_after(after)
self.words = []
#~ def add_word(self,w):
#~ self.words.append(w)
def tablerow(self,w):
return [col.render(w,self) for col in self.columns]
def parse_words(self,cl,lines):
#~ lesson = self.current_lesson
for ln in lines.splitlines():
ln = ln.strip()
if ln and not ln.startswith('#'):
a = ln.split(':')
if len(a) != 2:
                    raise Exception("%r.split(':') did not yield exactly 2 parts" % ln)
fr_list = a[0].split('|')
et_list = a[1].split('|')
translations = []
for et in et_list:
et = et.strip()
if et == '-':
pass
elif et.startswith('#'):
pass
else:
w = self.to_language.parse_word(et)
translations.append(et)
main = None
for fr in fr_list:
w = self.from_language.parse_word(fr,cl,parent=main)
w.add_to_unit(self)
#~ w.add_lesson(self.current_lesson)
w.add_translations(translations)
if main:
main.marry(w)
else:
main = w
self.words.append(main)
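        # Hedged format note (the sample line below is an illustrative assumption,
        # not from this project): each non-empty, non-'#' input line must look like
        #   "fromword1|fromword2 : toword1|toword2"
        # Translations equal to '-' or starting with '#' are ignored, and the first
        # "from" word becomes the main entry that the remaining variants are
        # married to.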
def html_lines(self,level=1):
for ln in Section.html_lines(self,level):
yield ln
words = [w for w in self.words if w.parent is None]
if words:
t = htmlgen.TABLE([col.label for col in self.columns],
show_headers=self.show_headers)
def row(w):
return [col.word2html(w,self) for col in self.columns]
rows = [row(w) for w in words]
for ln in t.html_lines(rows):
yield ln
for chunk in self.after:
yield restify(self.memo2rst(chunk))
def write_body(self,fd):
Section.write_body(self,fd)
words = [w for w in self.words if w.parent is None]
if words:
t = rstgen.Table([col.label for col in self.columns],
show_headers=self.show_headers)
t.write(fd,[self.tablerow(w) for w in words])
for chunk in self.after:
fd.write('\n\n' + chunk + '\n\n')
#~ def uca_collator():
#~ """
#~ """
#~ logger.info("20120308 build uca_collator")
#~ c = Collator(fn)
#~ logger.info("20120308 uca_collator() done")
#~ return c
try:
from lino.utils.pyuca import Collator
#~ fn = os.path.join(os.path.dirname(__file__),'pyuca_allkeys.txt')
fn = 'uca_allkeys.txt'
UCA_COLLATOR = Collator(fn)
except Exception:
UCA_COLLATOR = None
import warnings
warnings.warn("""\
If you want serious alphabetic sorting, you need to download \
http://www.unicode.org/Public/UCA/latest/allkeys.txt \
to your current working directory (`%s`) and rename it to `uca_allkeys.txt`. \
""" % os.getcwd())
def uca_sort(l):
#~ c = uca_collator()
if UCA_COLLATOR:
def k(w): return UCA_COLLATOR.sort_key(w.text)
else:
def k(w): return w.text.upper()
l.sort(key=k)
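# Hedged usage note (the attribute chain below is illustrative, based on the
# classes in this file): uca_sort() sorts a list of word objects in place by
# their `.text` attribute, e.g.
#
#   words = [w for w in book.main.from_language.words if w.parent is None]
#   uca_sort(words)
#
# falling back to a plain upper-cased sort key when uca_allkeys.txt is absent.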
class Dictionary(Section):
columns = [FR,PRON,ET]
show_headers = True
def html_lines(self,level=1):
for ln in Section.html_lines(self,level):
yield ln
words = [w for w in self.from_language.words if w.parent is None]
if words:
uca_sort(words)
t = htmlgen.TABLE([col.label for col in self.columns],
show_headers=self.show_headers)
def row(w):
return [col.word2html(w,self) for col in self.columns]
rows = [row(w) for w in words]
for ln in t.html_lines(rows):
yield ln
class Index(Section):
def html_lines(self,level=1):
for ln in Section.html_lines(self,level):
yield ln
#~ self.from_language.words.sort(sort_by_fr)
uca_sort(self.from_language.words)
#~ self.from_language.words = uca_sorted(self.from_language.words)
def fmt(w):
return e2s(self.from_language.word2html(w)) \
+ " " + e2s(ET.word2html(w,self)) \
+ " " \
+ ", ".join([u.get_full_number() for u in w.units])
for w in self.from_language.words:
yield "<br>" + fmt(w)
def write_body(self,fd):
Section.write_body(self,fd)
self.from_language.words.sort(sort_by_fr)
uca_sort(self.from_language.words)
#~ self.from_language.words = uca_sorted(self.from_language.words)
def fmt(w):
for x in self.from_language.word2html(w):
yield x
yield " "
for x in ET.word2html(w,self):
yield x
yield " "
yield ", ".join([u.rst_ref_to() for u in w.units])
for w in self.from_language.words:
fd.write("| %s\n" % html2rst(E.div(*fmt(w))))
class MemoParser(memo.Parser):
def __init__(self,book,*args,**kw):
self.book = book
memo.Parser.__init__(self,*args,**kw)
self.register_command('ref',self.cmd_ref)
self.register_command('item',curry(self.cmd_item,'- '))
self.register_command('oitem',curry(self.cmd_item,'#. '))
self.register_command('ruleslist',self.cmd_ruleslist)
#~ self.register_command('url',self.cmd_url)
def cmd_ref(self,s):
sect = self.book.ref2sect[s]
return sect.rst_ref_to()
def cmd_item(self,prefix,ref,rulesmode=False):
indent = " " * len(prefix)
sect = self.book.ref2sect[ref]
r = prefix
if not rulesmode:
r += sect.rst_ref_to()
if sect.intro:
r += " -- "
if sect.intro:
intro = self.book.memo2rst(sect.intro.strip())
if "\n\n" in intro:
r += "\n"
for ln in intro.splitlines():
r += indent + ln + "\n"
r += "\n"
else:
intro = intro.replace('\n','\n'+indent)
r += intro
if rulesmode:
r += "\n" + indent + "-- " + sect.rst_ref_to(text=sect.get_full_number())
r += "\n"
return r
def cmd_ruleslist(self,s):
r = ''
for ref in s.split():
r += self.cmd_item('#. ',ref,rulesmode=True)
return r
#~ def cmd_url(self,s):
#~ if not s: return "XXX"
#~ url,text = s.split(None,1)
#~ # return '<a href="%s">%s</a>' % (url,text)
#~ return E.a(text,href=url)
class Book:
def __init__(self,from_language,to_language,
title=None,input_template=None,
memo_parser=None):
self.writing_format = None
self.input_template = input_template
self.ref2sect = dict()
self.memo_parser = memo_parser or MemoParser(self)
self.main = Section(self,None,title,
from_language=from_language,to_language=to_language)
def memo2rst(self,s):
return self.memo_parser.parse(s)
def add_section(self,*args,**kw): return self.main.add_section(*args,**kw)
def add_index(self,*args,**kw): return self.main.add_index(*args,**kw)
def add_dictionary(self,*args,**kw): return self.main.add_dictionary(*args,**kw)
def old_as_odt(self):
from xhtml2odt import ODTFile
from lino.utils import AttrDict
from lino.utils.html2xhtml import html2xhtml
options = AttrDict(
url = "",
with_network = False,
verbose = True,
template = self.input_template,
top_header_level = 1,
img_width = "8cm",
img_height = "6cm",
)
#~ version=False # help="Show the version and exit")
#~ input=input", metavar="FILE",
#~ help="Read the html from this file")
#~ parser.add_option("-o", "--output", dest="output", metavar="FILE",
#~ help="Location of the output ODT file")
#~ parser.add_option("-t", "--template", dest="template", metavar="FILE",
#~ help="Location of the template ODT file")
#~ parser.add_option("-u", "--url", dest="url",
#~ help="Use this URL for relative links")
#~ parser.add_option("-v", "--verbose", dest="verbose",
#~ action="store_true", default=False,
#~ help="Show what's going on")
#~ parser.add_option("--html-id", dest="htmlid", metavar="ID",
#~ help="Only export from the element with this ID")
#~ parser.add_option("--replace", dest="replace_keyword",
#~ default="ODT-INSERT", metavar="KEYWORD",
#~ help="Keyword to replace in the ODT template "
#~ "(default is %default)")
#~ parser.add_option("--cut-start", dest="cut_start",
#~ default="ODT-CUT-START", metavar="KEYWORD",
#~ help="Keyword to start cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--cut-stop", dest="cut_stop",
#~ default="ODT-CUT-STOP", metavar="KEYWORD",
#~ help="Keyword to stop cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--top-header-level", dest="top_header_level",
#~ type="int", default="1", metavar="LEVEL",
#~ help="Level of highest header in the HTML "
#~ "(default is %default)")
#~ parser.add_option("--img-default-width", dest="img_width",
#~ metavar="WIDTH", default="8cm",
#~ help="Default image width (default is %default)")
#~ parser.add_option("--img-default-height", dest="img_height",
#~ metavar="HEIGHT", default="6cm",
#~ help="Default image height (default is %default)")
#~ parser.add_option("--dpi", dest="img_dpi", type="int",
#~ default=96, metavar="DPI", help="Screen resolution "
#~ "in Dots Per Inch (default is %default)")
#~ parser.add_option("--no-network", dest="with_network",
#~ action="store_false", default=True,
#~ help="Do not download remote images")
#~ options, args = parser.parse_args()
odtfile = ODTFile(options)
odtfile.open()
xhtml = ''.join([ln for ln in self.main.html_lines()])
xhtml = html2xhtml(xhtml)
#~ xhtml = "<DIV>%s</DIV>" % xhtml
xhtml = """\
<html xmlns="http://www.w3.org/1999/xhtml"><body>%s</body></html>""" % xhtml
#~ xhtml = "<p>%s</p>" % xhtml
if True:
f = open("before.xml","wt")
f.write(xhtml.encode('utf-8'))
f.close()
#~ logger.info("Gonna do it with %r",xhtml)
xhtml = odtfile.xhtml_to_odt(xhtml)
if True:
f = open("after.xml","wt")
f.write(xhtml)
#~ f.write(xhtml.encode('utf-8'))
f.close()
return xhtml
def html(self):
#~ s = htmlgen.DIV(self.main.html_lines)
s = ''.join([ln for ln in self.main.html_lines()])
s = "<div>%s</div>" % s
if True:
f = open("odt_content.xml","wt")
f.write(s.encode('utf-8'))
f.close()
#~ logger.info(s)
return s
def write_rst_files(self,root='.'):
self.writing_format = 'rst'
self.main.write_rst_files(root)
if False: # must convert to new structure
fn = os.path.join('dict','et_fr.rst')
logger.info("Generate %s",fn)
fd = codecs.open(fn,'w','utf-8')
write_header(fd,1,'eesti-prantsuse')
t = rstgen.Table(['Nr.',"ET","FR",u"hääldamine","Tasand"])
self.words.sort(sort_by_et)
words_et = [w for w in self.words if not w.hide_et]
t.write(fd,[
(i,w.et,html2rst(w.html_fr()),w.pronounciation,w.lesson.rst_ref_to())
for i,w in enumerate(words_et)])
fd.close()
def write_odt_file(self,target):
#~ from appy.pod.renderer import Renderer
from lino.utils import iif
#~ from lino.utils.appy_pod import setup_renderer
from lino.utils.appy_pod import Renderer
assert os.path.abspath(self.input_template) != os.path.abspath(target)
if os.path.exists(target):
os.remove(target)
#~ tpl = os.path.join(os.path.dirname(__filename__),'cfr.odt')
ses = settings.SITE.login("root") # not tested after 20130327
context = dict(
self=self,
iif=iif,
)
appy_params = dict()
logger.info(u"appy.pod render %s -> %s (params=%s)",self.input_template,target,appy_params)
renderer = Renderer(ses,self.input_template, context, target,**appy_params)
#~ setup_renderer(renderer)
#~ renderer.context.update(restify=debug_restify)
self.writing_format = 'odt'
renderer.run()
if USE_XHTML2ODT:
class Book2(Book):
def write_odt_file(self,target):
#~ from lino.utils import iif
#~ from lino.utils import AttrDict
#~ from lino.utils.html2xhtml import html2xhtml
assert os.path.abspath(self.input_template) != os.path.abspath(target)
if os.path.exists(target):
os.remove(target)
options = AttrDict(
url = "",
template = self.input_template,
output = target,
with_network = True,
verbose = True,
top_header_level = 1,
img_width = "8cm",
img_height = "6cm",
)
#~ version=False # help="Show the version and exit")
#~ input=input", metavar="FILE",
#~ help="Read the html from this file")
#~ parser.add_option("-o", "--output", dest="output", metavar="FILE",
#~ help="Location of the output ODT file")
#~ parser.add_option("-t", "--template", dest="template", metavar="FILE",
#~ help="Location of the template ODT file")
#~ parser.add_option("-u", "--url", dest="url",
#~ help="Use this URL for relative links")
#~ parser.add_option("-v", "--verbose", dest="verbose",
#~ action="store_true", default=False,
#~ help="Show what's going on")
#~ parser.add_option("--html-id", dest="htmlid", metavar="ID",
#~ help="Only export from the element with this ID")
#~ parser.add_option("--replace", dest="replace_keyword",
#~ default="ODT-INSERT", metavar="KEYWORD",
#~ help="Keyword to replace in the ODT template "
#~ "(default is %default)")
#~ parser.add_option("--cut-start", dest="cut_start",
#~ default="ODT-CUT-START", metavar="KEYWORD",
#~ help="Keyword to start cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--cut-stop", dest="cut_stop",
#~ default="ODT-CUT-STOP", metavar="KEYWORD",
#~ help="Keyword to stop cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--top-header-level", dest="top_header_level",
#~ type="int", default="1", metavar="LEVEL",
#~ help="Level of highest header in the HTML "
#~ "(default is %default)")
#~ parser.add_option("--img-default-width", dest="img_width",
#~ metavar="WIDTH", default="8cm",
#~ help="Default image width (default is %default)")
#~ parser.add_option("--img-default-height", dest="img_height",
#~ metavar="HEIGHT", default="6cm",
#~ help="Default image height (default is %default)")
#~ parser.add_option("--dpi", dest="img_dpi", type="int",
#~ default=96, metavar="DPI", help="Screen resolution "
#~ "in Dots Per Inch (default is %default)")
#~ parser.add_option("--no-network", dest="with_network",
#~ action="store_false", default=True,
#~ help="Do not download remote images")
#~ options, args = parser.parse_args()
self.odtfile = MyODTFile(options)
context = dict(iif=iif)
context.update(book=self)
self.odtfile.render(context)
def as_odt(self):
xhtml = ''.join([ln for ln in self.main.html_lines()])
xhtml = html2xhtml(xhtml)
#~ xhtml = "<div>%s</div>" % xhtml
#~ xhtml = "<p>%s</p>" % xhtml
#~ xhtml = '<html><body>%s</body></html>' % xhtml
xhtml = '<html xmlns="http://www.w3.org/1999/xhtml"><body>%s</body></html>' % xhtml
if not True:
f = open("before.xml","wt")
f.write(xhtml.encode('utf-8'))
f.close()
#~ logger.info("Gonna do it with %r",xhtml)
xhtml = self.odtfile.xhtml_to_odt(xhtml)
if True:
f = open("after.xml","wt")
f.write(xhtml)
#~ f.write(xhtml.encode('utf-8'))
f.close()
return xhtml.decode('utf-8')
| gpl-3.0 | 8,913,107,689,133,865,000 | 33.64638 | 99 | 0.500368 | false |
tempbottle/Nuitka | nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/rpcgen.py | 4 | 2827 | """SCons.Tool.rpcgen
Tool-specific initialization for RPCGEN tools.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpcgen.py 2014/07/05 09:42:21 garyo"
from SCons.Builder import Builder
import SCons.Util
cmd = "cd ${SOURCE.dir} && $RPCGEN -%s $RPCGENFLAGS %s -o ${TARGET.abspath} ${SOURCE.file}"
rpcgen_client = cmd % ('l', '$RPCGENCLIENTFLAGS')
rpcgen_header = cmd % ('h', '$RPCGENHEADERFLAGS')
rpcgen_service = cmd % ('m', '$RPCGENSERVICEFLAGS')
rpcgen_xdr = cmd % ('c', '$RPCGENXDRFLAGS')
def generate(env):
"Add RPCGEN Builders and construction variables for an Environment."
client = Builder(action=rpcgen_client, suffix='_clnt.c', src_suffix='.x')
header = Builder(action=rpcgen_header, suffix='.h', src_suffix='.x')
service = Builder(action=rpcgen_service, suffix='_svc.c', src_suffix='.x')
xdr = Builder(action=rpcgen_xdr, suffix='_xdr.c', src_suffix='.x')
env.Append(BUILDERS={'RPCGenClient' : client,
'RPCGenHeader' : header,
'RPCGenService' : service,
'RPCGenXDR' : xdr})
env['RPCGEN'] = 'rpcgen'
env['RPCGENFLAGS'] = SCons.Util.CLVar('')
env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('')
env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('')
env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('')
env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('')
def exists(env):
return env.Detect('rpcgen')
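# Hedged usage sketch (the SConstruct snippet and file names below are
# illustrative assumptions, not part of this tool): the builders registered by
# generate() are typically driven like this:
#
#   env = Environment(tools=['default', 'rpcgen'])
#   env.RPCGenHeader('proto.x')   # -> proto.h
#   env.RPCGenClient('proto.x')   # -> proto_clnt.c
#   env.RPCGenService('proto.x')  # -> proto_svc.c
#   env.RPCGenXDR('proto.x')      # -> proto_xdr.c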
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | 1,978,389,670,858,955,800 | 39.385714 | 91 | 0.694729 | false |
jendap/tensorflow | tensorflow/contrib/tpu/python/tpu/datasets.py | 5 | 7480 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
The resulting dataset may return an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
TPUClusterResolver and has therefore a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
sloppy: (Optional.) If `False`, read input data while maintaining a
deterministic order. (This may have significant performance impacts.)
sloppy defaults to: True.
Returns:
A `tf.data.Dataset` with an infinite stream of elements generated by a
parallel interleaving of the set of files matched (or generated) by `files`
    with a type that is the output of the dataset specified by `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
with ops.device('/job:%s' % file_reader_job):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.DatasetV2):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
source_dataset = source_dataset.repeat(num_epochs)
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = dataset_ops.make_one_shot_iterator(source_dataset)
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, source_dataset.output_types, source_dataset.output_shapes)
return remote_iterator.get_next()
def MapFn(unused_input):
if isinstance(source_dataset.output_types, dtypes.DType):
output_types = [source_dataset.output_types]
elif isinstance(source_dataset.output_types, (list, tuple)):
output_types = source_dataset.output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
| apache-2.0 | 4,726,582,858,502,975,000 | 38.162304 | 80 | 0.706417 | false |
gnachman/iTerm2 | tools/ply/ply-3.4/test/yacc_badprec2.py | 174 | 1501 | # -----------------------------------------------------------------------------
# yacc_badprec2.py
#
# Bad precedence
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
42,
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
| gpl-2.0 | -5,641,118,034,247,557,000 | 21.073529 | 79 | 0.494337 | false |
wayoda/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
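# Hedged usage sketch (illustrative only; `prober` stands for a concrete
# subclass that fills self._mProbers): callers stream byte chunks through
# feed() and then read the verdict:
#
#   prober.feed(byte_chunk)
#   name = prober.get_charset_name()
#   confidence = prober.get_confidence()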
| lgpl-2.1 | -5,177,969,393,237,023,000 | 34.764151 | 69 | 0.56766 | false |
GeographicaGS/moocng | moocng/media_contents/handlers/prezi.py | 1 | 1737 | import re
from django.template.loader import get_template
from django.template import Context
from django.templatetags.static import static
from .base import MediaContentHandlerBase
class PreziMediaContentHandler(MediaContentHandlerBase):
def get_iframe_template(self, content_id, **kwargs):
template = get_template("media_contents/handlers/prezi_template.html")
context = Context({
'content_id': content_id,
'origin': kwargs.pop('host', ''),
})
return template.render(context)
def get_iframe_code(self, content_id, **kwargs):
template = get_template("media_contents/handlers/prezi.html")
context = Context({
'content_id': content_id,
'origin': kwargs.get('host', ''),
'height': kwargs.get('height', '349px'),
'width': kwargs.get('width', '620px'),
'extra_params': kwargs.get('extra_params', ''),
'extra_attribs': kwargs.get('extra_attribs', ''),
})
return template.render(context)
def get_javascript_code(self, **kwargs):
template = get_template("media_contents/handlers/prezi_js.html")
context = Context(kwargs)
return template.render(context)
def get_thumbnail_url(self, content_id):
return static('img/media_contents/prezi.png')
def get_last_frame(self, content_id, tmpdir):
return None
def extract_id(self, url):
patterns = [
'prezi\.com/([a-zA-Z\d\-\_]+)/.*',
'^([a-zA-Z\d\-\_]+)$',
]
for pattern in patterns:
result = re.search(pattern, url, re.IGNORECASE)
if result:
return result.group(1)
return ''
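    # Hedged illustration (the URL below is an example, not from this project):
    # for "https://prezi.com/abc123-xyz/some-title/", extract_id() returns
    # "abc123-xyz"; a bare id such as "abc123-xyz" is returned unchanged, and a
    # non-matching value yields ''.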
| apache-2.0 | -4,423,932,717,042,717,700 | 33.058824 | 78 | 0.593552 | false |
zero-ui/miniblink49 | v8_7_5/tools/unittests/run_tests_test.py | 3 | 23853 | #!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Global system tests for V8 test runners and fuzzers.
This hooks up the framework under tools/testrunner testing high-level scenarios
with different test suite extensions and build configurations.
"""
# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
# for py2/py3 compatibility
from __future__ import print_function
import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from cStringIO import StringIO
TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')
Result = collections.namedtuple(
'Result', ['stdout', 'stderr', 'returncode'])
Result.__str__ = lambda self: (
'\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
(self.returncode, self.stdout, self.stderr))
@contextlib.contextmanager
def temp_dir():
"""Wrapper making a temporary directory available."""
path = None
try:
path = tempfile.mkdtemp('v8_test_')
yield path
finally:
if path:
shutil.rmtree(path)
@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
"""Wrapper that sets up a temporary V8 test root.
Args:
baseroot: The folder with the test root blueprint. Relevant files will be
copied to the temporary test root, to guarantee a fresh setup with no
dirty state.
"""
basedir = os.path.join(TEST_DATA_ROOT, baseroot)
with temp_dir() as tempbase:
builddir = os.path.join(tempbase, 'out', 'Release')
testroot = os.path.join(tempbase, 'test')
os.makedirs(builddir)
shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
for suite in os.listdir(os.path.join(basedir, 'test')):
os.makedirs(os.path.join(testroot, suite))
for entry in os.listdir(os.path.join(basedir, 'test', suite)):
shutil.copy(
os.path.join(basedir, 'test', suite, entry),
os.path.join(testroot, suite))
yield tempbase
@contextlib.contextmanager
def capture():
"""Wrapper that replaces system stdout/stderr an provides the streams."""
oldout = sys.stdout
olderr = sys.stderr
try:
stdout=StringIO()
stderr=StringIO()
sys.stdout = stdout
sys.stderr = stderr
yield stdout, stderr
finally:
sys.stdout = oldout
sys.stderr = olderr
def run_tests(basedir, *args, **kwargs):
"""Executes the test runner with captured output."""
with capture() as (stdout, stderr):
sys_args = ['--command-prefix', sys.executable] + list(args)
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
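# Hedged illustration (the arguments below mirror the test cases further down;
# `basedir` comes from temp_base()):
#
#   result = run_tests(basedir, '--mode=Release', '--progress=verbose',
#                      'sweet/bananas')
#   # inspect result.stdout, result.stderr, result.returncode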
def override_build_config(basedir, **kwargs):
"""Override the build config with new values provided as kwargs."""
path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
with open(path) as f:
config = json.load(f)
config.update(kwargs)
with open(path, 'w') as f:
json.dump(config, f)
class SystemTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Try to set up python coverage and run without it if not available.
cls._cov = None
try:
import coverage
if int(coverage.__version__.split('.')[0]) < 4:
cls._cov = None
print('Python coverage version >= 4 required.')
raise ImportError()
cls._cov = coverage.Coverage(
source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
omit=['*unittest*', '*__init__.py'],
)
cls._cov.exclude('raise NotImplementedError')
cls._cov.exclude('if __name__ == .__main__.:')
cls._cov.exclude('except TestRunnerError:')
cls._cov.exclude('except KeyboardInterrupt:')
cls._cov.exclude('if options.verbose:')
cls._cov.exclude('if verbose:')
cls._cov.exclude('pass')
cls._cov.exclude('assert False')
cls._cov.start()
except ImportError:
print('Running without python coverage.')
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
global num_fuzzer
from testrunner import num_fuzzer
from testrunner.local import command
from testrunner.local import pool
command.setup_testing()
pool.setup_testing()
@classmethod
def tearDownClass(cls):
if cls._cov:
cls._cov.stop()
print('')
print(cls._cov.report(show_missing=True))
def testPass(self):
"""Test running only passing tests in two variants.
Also test printing durations.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--time',
'sweet/bananas',
'sweet/raspberries',
)
self.assertIn('Done running sweet/bananas default: pass', result.stdout, result)
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
# self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
def testShardedProc(self):
with temp_base() as basedir:
for shard in [1, 2]:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
'sweet/blackberries',
'sweet/raspberries',
infra_staging=False,
)
# One of the shards gets one variant of each test.
self.assertIn('2 tests ran', result.stdout, result)
if shard == 1:
self.assertIn(
'Done running sweet/raspberries default', result.stdout, result)
self.assertIn(
'Done running sweet/raspberries stress', result.stdout, result)
self.assertEqual(0, result.returncode, result)
else:
self.assertIn(
'sweet/blackberries default: FAIL', result.stdout, result)
self.assertIn(
'sweet/blackberries stress: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
with temp_base() as basedir:
for shard in [1, 2]:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
'sweet/bananas',
'sweet/raspberries',
)
# One of the shards gets one variant of each test.
self.assertIn('Running 2 tests', result.stdout, result)
self.assertIn('Done running sweet/bananas', result.stdout, result)
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testFail(self):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def check_cleaned_json_output(
self, expected_results_name, actual_json, basedir):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
# Replace duration in actual output as it's non-deterministic. Also
# replace the python executable prefix as it has a different absolute
# path dependent on where this runs.
def replace_variable_data(data):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
replace_variable_data(data)
json_output['duration_mean'] = 1
with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
expected_test_results = json.load(f)
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg)
def testFailWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
'--random-seed=123',
'--json-test-results', json_path,
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
# With test processors we don't count reruns as separated failures.
# TODO(majeski): fix it?
self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(majeski): Previously we only reported the variant flags in the
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.maxDiff = None
self.check_cleaned_json_output(
'expected_test_results1.json', json_path, basedir)
def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
'--random-seed=123',
'--json-test-results', json_path,
'sweet',
infra_staging=False,
)
self.assertIn(
'Done running sweet/bananaflakes default: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
self.check_cleaned_json_output(
'expected_test_results2.json', json_path, basedir)
def testAutoDetect(self):
"""Fake a build with several auto-detected options.
Using all those options at once doesn't really make much sense. This is
merely for getting coverage.
"""
with temp_base() as basedir:
override_build_config(
basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
v8_use_snapshot=False, v8_enable_embedded_builtins=False,
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
v8_enable_pointer_compression=False)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
expect_text = (
'>>> Autodetected:\n'
'asan\n'
'cfi_vptr\n'
'dcheck_always_on\n'
'msan\n'
'no_i18n\n'
'no_snap\n'
'tsan\n'
'ubsan_vptr\n'
'>>> Running tests for ia32.release')
self.assertIn(expect_text, result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
def testSkips(self):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=nooptimization',
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testRunSkips(self):
"""Inverse the above. Test parameter to keep running skipped tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=nooptimization',
'--run-skipped',
'sweet/strawberries',
)
self.assertIn('1 tests failed', result.stdout, result)
self.assertIn('1 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testDefault(self):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
infra_staging=False,
)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_base() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testInconsistentMode(self):
"""Test failing run when attempting to wrongly override the mode."""
with temp_base() as basedir:
override_build_config(basedir, is_debug=True)
result = run_tests(basedir, '--mode=Release')
self.assertIn('execution mode (release) for release is inconsistent '
'with build config (debug)', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--arch=ia32')
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--variants=meh')
self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
with temp_base() as basedir:
result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
This also exercises various paths in statusfile logic.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default',
'sweet',
'--report',
)
self.assertIn(
'3 tests are expected to fail that we should fix',
result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default,nooptimization',
'sweet',
'--warn-unused',
)
self.assertIn( 'Unused rule: carrots', result.stdout, result)
self.assertIn( 'Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default',
'sweet/bananas',
'--cat',
)
self.assertIn('begin source: sweet/bananas', result.stdout, result)
self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testPredictable(self):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
that the predictable flags are passed and printed after failure.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_enable_verify_predictable=True)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn(
'Done running sweet/bananas default: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSlowArch(self):
"""Test timeout factor manipulation on slow architecture."""
with temp_base() as basedir:
override_build_config(basedir, v8_target_cpu='arm64')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
# TODO(machenbach): We don't have a way for testing if the correct
# timeout was used.
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefault(self):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
"""Test using random-seed-stress feature passing a random seed."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'--random-seed=123',
'sweet/strawberries',
)
self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSpecificVariants(self):
"""Test using NO_VARIANTS modifiers in status files skips the desire tests.
The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
But the status file applies a modifier to each skipping one of the
variants.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_use_snapshot=False)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'sweet/bananas',
'sweet/raspberries',
)
# Both tests are either marked as running in only default or only
# slow variant.
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
"""Test that the fake status file is well-formed."""
with temp_base() as basedir:
from testrunner.local import statusfile
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
def testDotsProgress(self):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=dots',
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testMonoProgress(self):
self._testCompactProgress('mono')
def testColorProgress(self):
self._testCompactProgress('color')
def _testCompactProgress(self, name):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=%s' % name,
'sweet/cherries',
'sweet/bananas',
infra_staging=False,
)
if name == 'color':
expected = ('\033[34m% 28\033[0m|'
'\033[32m+ 1\033[0m|'
'\033[31m- 1\033[0m]: Done')
else:
expected = '% 28|+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
self.assertEqual(1, result.returncode, result)
def testExitAfterNFailures(self):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--exit-after-n-failures=2',
'-j1',
'sweet/mangoes', # PASS
'sweet/strawberries', # FAIL
'sweet/blackberries', # FAIL
'sweet/raspberries', # should not run
)
self.assertIn('sweet/mangoes default: pass', result.stdout, result)
self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
self.assertIn('Too many failures, exiting...', result.stdout, result)
self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
self.assertNotIn('Done running sweet/raspberries', result.stdout, result)
self.assertIn('2 tests failed', result.stdout, result)
self.assertIn('3 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testNumFuzzer(self):
sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release']
with temp_base() as basedir:
with capture() as (stdout, stderr):
code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
result = Result(stdout.getvalue(), stderr.getvalue(), code)
self.assertEqual(0, result.returncode, result)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,105,457,174,386,922,500 | 34.495536 | 91 | 0.627049 | false |
TE-ToshiakiTanaka/alize | alize/library/slack/module.py | 1 | 1735 | import os
import sys
from alize.log import Log
from alize.exception import *
try :
from slacker import Slacker
except Exception as e:
print(str(e))
L = Log("Slack.Library.ALIZE")
class Slack(object):
def __init__(self, token):
try:
self.slack = Slacker(token)
except Exception as e:
L.warning(str(e))
            raise SlackError("%s is not a valid token." % token)
def message(self, message, channels):
try:
result = self.slack.chat.post_message(
channels,
message,
as_user=True)
if result.successful:
return result.body
else:
L.warning("Slack Error : %s" % result.error)
raise SlackError(result.error)
except Exception as e:
L.warning(str(e))
            raise SlackError("%s does not exist." % channels)
def upload(self, filepath, channels,
content=None,
filetype=None,
filename=None,
title=None,
initial_comment=None):
try:
result = self.slack.files.upload(
filepath,
content=content,
filetype=filetype,
filename=filename,
title=title,
initial_comment=initial_comment,
channels=channels)
if result.successful:
return result.body
else:
L.warning("Slack Error : %s" % result.error)
raise SlackError(result.error)
except Exception as e:
L.warning(str(e))
            raise SlackError("%s does not exist." % channels)
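# Hedged usage sketch: the token, channel and file path below are placeholders
# (assumptions, not values from this project) and must be replaced with real
# ones before this does anything useful.
if __name__ == '__main__':
    client = Slack("xoxb-placeholder-token")
    client.message("alize run finished", "#general")
    client.upload("report.png", "#general", title="alize report")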
| mit | 3,606,521,463,389,347,000 | 27.916667 | 60 | 0.511816 | false |
ua-snap/downscale | snap_scripts/epscor_sc/downscale_cmip5_epscor_sc.py | 1 | 6401 | # downscale the prepped cmip5 data downloaded using SYNDA for EPSCoR SC project
# author: Michael Lindgren -- June 09, 2016 (UPDATED: September 21, 2016 -- [ML])
if __name__ == '__main__':
import glob, os, rasterio, itertools
from functools import partial
import downscale
from downscale import preprocess, Mask, utils
import argparse
import numpy as np
# # parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-b", "--base_dir", action='store', dest='base_dir', type=str, help="base directory where data is stored in structured folders" )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
parser.add_argument( "-u", "--units", action='store', dest='units', type=str, help="cmip5 units name (exact)" )
parser.add_argument( "-met", "--metric", action='store', dest='metric', type=str, help="cmip5 metric name (exact)" )
args = parser.parse_args()
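	# hedged example invocation (model/variable/scenario values mirror the testing
	# block below; the base directory path is only an example):
	#   python downscale_cmip5_epscor_sc.py \
	#     -b /workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data \
	#     -m GFDL-CM3 -v pr -s rcp60 -u mm -met total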
# unpack the args
variable = args.variable
scenario = args.scenario
model = args.model
units = args.units
metric = args.metric
base_dir = args.base_dir
# AOI MASK -- HARDWIRE -- PCLL for CMIP5
aoi_mask_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/akcan_aoi_mask_PCLL.shp'
project = 'ar5'
# # # # FOR TESTING # # #
# base_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data'
# variable = 'pr'
# scenario = 'rcp60'
# model = 'GFDL-CM3'
# units = 'mm'
# metric = 'total'
# some setup args
base_path = os.path.join( base_dir,'cmip5','prepped' )
output_dir = os.path.join( base_dir, 'downscaled' )
variables = [ variable ]
scenarios = [ scenario ]
models = [ model ]
anom = True # write out anoms (True) or not (False)
interp = False # interpolate across space -- Low Res
find_bounds = False
	# modelnames maps each model name used to query the files (the `models` list)
	# to the string used in the output file naming, when the two differ
all_models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ]
modelnames = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'NCAR-CCSM4' ]
modelnames = dict( zip( all_models, modelnames ) )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
os.chdir( output_dir )
for variable, model, scenario in itertools.product( variables, models, scenarios ):
# fix the climatology -- precip only
if variable == 'pr':
fix_clim = True
else:
fix_clim = False
modelname = modelnames[ model ]
# SETUP BASELINE
clim_path = os.path.join( base_dir, 'prism', variable )
filelist = glob.glob( os.path.join( clim_path, '*.tif' ) )
filelist = [ i for i in filelist if '_14_' not in i ] # remove the GD ANNUAL _14_ file.
baseline = downscale.Baseline( filelist )
input_path = os.path.join( base_path, model, scenario, variable )
output_path = os.path.join( output_dir, model, scenario, variable )
if not os.path.exists( output_path ):
os.makedirs( output_path )
print( input_path )
# list files for this set of downscaling -- one per folder
fn, = glob.glob( os.path.join( input_path, '*.nc' ) )
if 'historical' in scenario:
historical = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric, begin=1860, end=2005 )
future = None # no need for futures here....
else:
# get the historical data for anomalies
historical_fn, = glob.glob( os.path.join( os.path.dirname( fn ).replace( scenario, 'historical' ), '*.nc' ) )
historical = downscale.Dataset( historical_fn, variable, model, scenario, project=project, units=units, metric=metric, begin=1860, end=2005 )
future = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric, begin=2006, end=2100 )
# convert from Kelvin to Celcius
if variable != 'pr':
if historical:
historical.ds[ variable ] = historical.ds[ variable ] - 273.15
historical.ds[ variable ][ 'units' ] = units
if future:
future.ds[ variable ] = future.ds[ variable ] - 273.15
future.ds[ variable ][ 'units' ] = units
if variable == 'pr':
# convert to mm/month
if historical:
timesteps, = historical.ds.time.shape # this assumes time begins in January
days = [31,28,31,30,31,30,31,31,30,31,30,31] * (timesteps / 12)
for index, days_in_month in zip(range( len( days ) ), days ):
historical.ds[ variable ][index, ...] = historical.ds[ variable ][index, ...].data * 86400 * days_in_month
historical.ds[ variable ][ 'units' ] = units
if future:
timesteps, = future.ds.time.shape # this assumes time begins in January
days = [31,28,31,30,31,30,31,31,30,31,30,31] * (timesteps / 12)
for index, days_in_month in enumerate( days ):
future.ds[ variable ][index, ...] = future.ds[ variable ][index, ...] * 86400 * days_in_month
future.ds[ variable ][ 'units' ] = units
# DOWNSCALE
mask = rasterio.open( baseline.filelist[0] ).read_masks( 1 )
clim_begin = '1961'
clim_end = '1990'
if variable == 'pr':
# truncate to whole number
rounder = np.rint
downscaling_operation = 'mult'
aoi_mask = aoi_mask_fn
# make AOI_Mask input resolution for computing 95th percentiles...
if aoi_mask_fn is not None:
aoi_mask = Mask( aoi_mask_fn, historical, 1, 0 )
else:
aoi_mask = None
else:
# round to 2 decimals
rounder = partial( np.round, decimals=1 )
downscaling_operation = 'add'
aoi_mask = None
def round_it( x, mask ):
arr = np.ma.masked_array( data=x, mask=mask )
return rounder( arr )
round_data = partial( round_it, mask=( mask==0 ) )
ar5 = downscale.DeltaDownscale( baseline, clim_begin, clim_end, historical, future,
downscaling_operation=downscaling_operation, mask=mask, mask_value=0, ncpus=32,
src_crs={'init':'epsg:4326'}, src_nodata=None, dst_nodata=None,
post_downscale_function=round_data, varname=variable, modelname=modelname, anom=anom,
fix_clim=fix_clim, aoi_mask=aoi_mask )
ar5.downscale( output_dir=output_path )
| mit | 3,454,023,624,196,908,500 | 39.00625 | 151 | 0.673176 | false |
kmspriyatham/symath | scipy/scipy/interpolate/benchmarks/bench_memusage.py | 3 | 3412 | # Posix-only benchmark
from __future__ import division, absolute_import, print_function
import os
import sys
import re
import subprocess
import time
import textwrap
from numpy.testing import dec
from scipy.stats import spearmanr
import numpy as np
@dec.skipif(not sys.platform.startswith('linux'), "Memory benchmark works only on Linux")
def bench_leaks():
mem_info = get_mem_info()
set_mem_rlimit(int(mem_info['memtotal'] * 0.7))
    # Benchmark: repeat the interpolation with growing repeat counts and track peak memory
print_table_row(['repeats', 'peak memory (MB)'])
repeats = [2, 5, 10, 50, 200]
peak_mems = []
for repeat in repeats:
code = """
import numpy as np
from scipy.interpolate import griddata
def func(x, y):
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range(%(repeat)d):
for method in ['nearest', 'linear', 'cubic']:
griddata(points, values, (grid_x, grid_y), method=method)
""" % dict(repeat=repeat)
_, peak_mem = run_monitored(code)
peak_mems.append(peak_mem)
print_table_row(["%d" % repeat, "%.1f" % (peak_mem/1e6,)])
print("")
corr, p = spearmanr(repeats, peak_mems)
if p < 0.05:
print("*"*79)
print("PROBABLE MEMORY LEAK")
print("*"*79)
raise AssertionError("Probable memory leak")
else:
print("PROBABLY NO MEMORY LEAK")
def print_table_row(columns):
print(" | ".join("%-20s" % x for x in columns))
def run_monitored(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : float
Peak memory usage (rough estimate only) in bytes
"""
code = textwrap.dedent(code)
process = subprocess.Popen([sys.executable, '-c', code],
cwd=os.path.dirname(__file__))
peak_memusage = -1
start = time.time()
while True:
ret = process.poll()
if ret is not None:
break
with open('/proc/%d/status' % process.pid, 'r') as f:
procdata = f.read()
        m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
if m is not None:
memusage = float(m.group(1)) * 1e3
peak_memusage = max(memusage, peak_memusage)
time.sleep(0.01)
process.wait()
duration = time.time() - start
if process.returncode != 0:
raise AssertionError("Running failed:\n%s" % code)
return duration, peak_memusage
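# Illustrative usage of run_monitored (editorial sketch; the snippet and numbers
# are hypothetical):
#
#     duration, peak = run_monitored("import numpy as np; x = np.zeros(10**7)")
#     print("took %.2f s, peak RSS %.1f MB" % (duration, peak / 1e6))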
def get_mem_info():
"""Get information about available memory"""
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
return info
def set_mem_rlimit(max_mem):
"""
    Set an address-space rlimit of max_mem bytes, to avoid grinding the
    machine to a halt because of swapping.
"""
import resource
cur_limit = resource.getrlimit(resource.RLIMIT_AS)
if cur_limit[0] > 0:
max_mem = min(max_mem, cur_limit[0])
resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
if __name__ == "__main__":
    bench_leaks()
| apache-2.0 | -7,070,426,059,619,381,000 | 24.654135 | 89 | 0.578546 | false |
dand-oss/yaml-cpp | test/gtest-1.10.0/googlemock/scripts/generator/cpp/ast.py | 69 | 62925 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = '[email protected] (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
  __contains__ = lambda self, item: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
if name_tokens:
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter(end):
if default:
del default[0] # Remove flag.
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter(s.start)
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter(tokens[-1].end)
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'override':
modifiers |= FUNCTION_OVERRIDE
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
if token.name == 'default' or token.name == 'delete':
# Ignore explicitly defaulted and deleted special members
# in C++11.
token = self._GetNextToken()
else:
# Handle pure-virtual declarations.
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
# TODO(nnorwitz): if there is only One name like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if exists so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
    # value. This is required because GetName has to have some
    # terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special case the handling typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
token = self._GetNextToken()
if not (token.token_type == tokenize.NAME and token.name == 'class'):
self._AddBackToken(token)
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # )
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
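# Illustrative usage (editorial sketch; the source string and filename are hypothetical):
#
#   builder = BuilderFromSource('class Foo : public Bar { public: void Baz(); };', 'foo.h')
#   for node in builder.Generate():
#     print(node)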
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
| mit | 7,997,252,435,162,168,000 | 35.24712 | 82 | 0.541613 | false |
SCSSoftware/BlenderTools | addon/io_scs_tools/utils/info.py | 1 | 4434 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2014: SCS Software
import bpy
from io_scs_tools import bl_info
def __get_bl_info_version__(key):
"""Gets version string from bl_info dictonary for given key.
:param key: key in bl_info contaning version tuple (X, X, X, ..) where X is int number
:type key: str
:return: string representation of bl_info dictionary value for given key
:rtype: str
"""
ver = ""
for ver_num in bl_info[key]:
ver += str(ver_num) + "."
return ver[:-1]
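# (illustrative: a bl_info value of (2, 0, 3) is rendered as "2.0.3" by the helper above)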
def get_tools_version():
"""Returns Blender Tools version as string from bl_info["version"] dictonary value.
:return: string representation of bl_info["version"] tuple
:rtype: str
"""
return __get_bl_info_version__("version")
def get_required_blender_version():
"""Returns required Blender version as string from bl_info["blender"] dictonary value.
:return: string representation of bl_info["blender"] tuple
:rtype: str
"""
return __get_bl_info_version__("blender")
def get_blender_version():
"""Returns Blender's version and the build identifications as strings.
:return: Blender's version number and its build identification as two formated strings
:rtype: tuple(str, str)
"""
b_ver = bpy.app.version
b_ver_str = str(str(b_ver[0]) + "." + str(b_ver[1]) + "." + str(b_ver[2]))
if b_ver[0] == 2 and b_ver[1] <= 69:
build_str = str(" (r" + str(bpy.app.build_revision)[2:-1] + ")")
else:
build_str = str(" (hash: " + str(bpy.app.build_hash)[2:-1] + ")")
return b_ver_str, build_str
def get_combined_ver_str(only_version_numbers=False):
"""Returns combined version string from Blender version and Blender Tools version.
:param only_version_numbers: True to return only versions without "Blender" and "SCS Blender Tools" strings
:type only_version_numbers: bool
:return: combined version string
:rtype: str
"""
(version, build) = get_blender_version()
if only_version_numbers:
return version + build + ", " + get_tools_version()
else:
return "Blender " + version + build + ", SCS Blender Tools: " + get_tools_version()
def is_blender_able_to_run_tools():
"""Tells if Blender version is good enough to run Blender Tools.
:return: True if current blender version meets required version for Blender Tools; False otherwise
:rtype: bool
"""
return cmp_ver_str(get_blender_version()[0], get_required_blender_version()) >= 0
def cmp_ver_str(version_str, version_str2):
"""Compares two version string of format "X.X.X..." where X is number.
:param version_str: version string to check (should be in format: "X.Y" where X and Y are version numbers)
:type version_str: str
:param version_str2: version string to check (should be in format: "X.Y" where X and Y are version numbers)
:type version_str2: str
:return: -1 if first is smaller; 0 if equal; 1 if first is greater;
:rtype: int
"""
version_str = version_str.split(".")
version_str2 = version_str2.split(".")
ver_cmp = []
for ver_i in range(0, 2):
if int(version_str[ver_i]) < int(version_str2[ver_i]):
ver_cmp.append(-1)
elif int(version_str[ver_i]) == int(version_str2[ver_i]):
ver_cmp.append(0)
else:
ver_cmp.append(1)
ver_i += 1
# first version smaller than second
if ver_cmp[0] < 0 or (ver_cmp[0] == 0 and ver_cmp[1] < 0):
return -1
# equal versions
if ver_cmp[0] == 0 and ver_cmp[1] == 0:
return 0
# otherwise we directly assume that first is greater
return 1
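# Illustrative results (editorial sketch):
#   cmp_ver_str("2.75", "2.69")   -> 1   (first is newer)
#   cmp_ver_str("2.69", "2.69")   -> 0
#   cmp_ver_str("2.63", "2.69.1") -> -1  (only the first two components are compared)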
| gpl-2.0 | 8,198,539,691,296,012,000 | 33.107692 | 111 | 0.651105 | false |
ameuret/mongrel2 | examples/zcov/zcov/GCovGroup.py | 96 | 6378 | #!/usr/bin/python
from __future__ import division
import pprint
import cPickle
import os
import warnings
from zcov import GCovParser
class GCovGroup:
@staticmethod
def fromfile(path):
f = open(path)
try:
res = cPickle.load(f)
header,version = res[0],res[1]
except:
raise ValueError,'invalid zcov input'
if header != 'zcov-data':
raise ValueError,'input is not in zcov format'
elif version != 1:
raise ValueError,'unrecognized zcov version'
return res[2]
def tofile(self, path):
f = open(path,'wb')
cPickle.dump(('zcov-data',1,self),f,-1)
f.close()
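    # Illustrative round trip (editorial sketch; paths are hypothetical):
    #   group = GCovGroup()
    #   group.addGCDA(GCovParser.parseGCDA('module.cc.gcov'))
    #   group.tofile('coverage.zcov')
    #   restored = GCovGroup.fromfile('coverage.zcov')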
def __init__(self):
self.entryMap = {}
def addEntry(self, path, entry):
record = self.entryMap.get(path)
if record is None:
self.entryMap[path] = entry
else:
self.entryMap[path] = self.mergeData(record,entry)
def addGCDA(self, data):
for path,entry in data.entries:
self.addEntry(path, entry)
def merge(self, b):
for path,entry in b.entryMap.items():
self.addEntry(path, entry)
def mergeData(self, a, b):
keys = self.mergeKeys(a.keys, b.keys)
lines = self.mergeLines(a.lines, b.lines)
calls = self.mergeCalls(a.calls, b.calls)
branches = self.mergeBranches(a.branches, b.branches)
functions = self.mergeFunctions(a.functions, b.functions)
return GCovParser.GCovFileData(keys, lines, calls, branches, functions)
def mergeKeys(self, aKeys, bKeys):
if set(aKeys) != set(bKeys):
            raise ValueError,'Keys differ: %s, %s'%(pprint.pformat(aKeys),
                                                    pprint.pformat(bKeys))
keys = {}
for key,aValue in aKeys.items():
bValue = bKeys[key]
if key=='Source':
if aValue != bValue:
raise ValueError,'Key ("%s") differs: %s %s'%(key,
aValue,
bValue)
value = aValue
elif key in ('Runs','Programs'):
value = str(int(aValue) + int(bValue))
elif key in ('Data','Graph'):
value = aValue+','+bValue
else:
raise ValueError,'Unrecognized key: "%s"'%(key,)
keys[key] = value
return keys
def mergeLines(self, aLines, bLines):
if len(aLines) != len(bLines):
raise ValueError,'Entry mismatch (number of lines)'
lines = [None]*len(aLines)
for i,(a,b) in enumerate(zip(aLines,bLines)):
if a is None or b is None:
# Executability can change across tests (conditional
# code), take the non-None one if it exists.
lines[i] = (a,b)[a is None]
else:
lines[i] = a + b
return lines
def mergeLineList(self, aList, bList, merge):
        if not aList:
            for bItem in bList:
                yield bItem
            return
        elif not bList:
            for aItem in aList:
                yield aItem
            return
aIter,bIter = iter(aList),iter(bList)
aItem,bItem = aIter.next(),bIter.next()
while 1:
if aItem[0]==bItem[0]:
yield merge(aItem,bItem)
try:
aItem = aIter.next()
except StopIteration:
for bItem in bIter:
yield bItem
break
try:
bItem = bIter.next()
except StopIteration:
for aItem in aIter:
yield aItem
break
elif aItem[0]<bItem[0]:
yield aItem
try:
aItem = aIter.next()
except StopIteration:
yield bItem
for bItem in bIter:
yield bItem
break
else:
yield bItem
try:
bItem = bIter.next()
except StopIteration:
yield aItem
                    for aItem in aIter:
yield aItem
break
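    # Illustrative behaviour of the merge above (example data, not from zcov):
    # both inputs are sorted lists of (line_number, ...) tuples; entries that
    # share a line number are combined with the supplied merge callback, e.g.
    #   list(group.mergeLineList([(1, 'a'), (3, 'c')],
    #                            [(2, 'b'), (3, 'd')],
    #                            lambda a, b: a))
    #   -> [(1, 'a'), (2, 'b'), (3, 'c')]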
def mergeCalls(self, aCalls, bCalls):
def merge(a,b):
if a[1] != b[1]:
warnings.warn('Call mismatch (numbers differ)')
# raise ValueError,'Call mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.CallNotExecuted
if GCovParser.GCovFileData.CallReturned in (a[2],b[2]):
code = GCovParser.GCovFileData.CallReturned
return (a[0],a[1],code,count)
return list(self.mergeLineList(aCalls,bCalls,merge))
def mergeBranches(self, aBranches, bBranches):
def merge(a,b):
# XXX This is really wrong
if a[1] != b[1]:
warnings.warn('Branch mismatch (numbers differ)')
# raise ValueError,'Branch mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.BranchNotTaken
if GCovParser.GCovFileData.BranchTaken in (a[2],b[2]):
code = GCovParser.GCovFileData.BranchTaken
return (a[0],a[1],code,count)
return list(self.mergeLineList(aBranches,bBranches,merge))
def mergeFunctions(self, aFunctions, bFunctions):
def merge(a,b):
if a[0] != b[0]:
warnings.warn('Function mismatch (names differ)')
# raise ValueError,'Function mismatch (names differ)'
return (a[0],a[1]+b[1])
return list(self.mergeLineList(aFunctions,bFunctions,merge))
###
def main():
from optparse import OptionParser
op = OptionParser("usage: %prog [options] files")
opts,args = op.parse_args()
group = GCovGroup()
for f in args:
res = GCovParser.parseGCDA(f)
group.addGCDA(res)
print '%d total files'%(len(group.entryMap),)
if __name__=='__main__':
main()
| bsd-3-clause | 1,305,681,132,966,070,500 | 32.746032 | 79 | 0.499216 | false |
yongshengwang/builthue | desktop/core/ext-py/PyYAML-3.09/tests/lib3/test_constructor.py | 57 | 8744 |
import yaml
import pprint
import datetime
import yaml.tokens
def execute(code):
global value
exec(code)
return value
def _make_objects():
global MyLoader, MyDumper, MyTestClass1, MyTestClass2, MyTestClass3, YAMLObject1, YAMLObject2, \
AnObject, AnInstance, AState, ACustomState, InitArgs, InitArgsWithState, \
NewArgs, NewArgsWithState, Reduce, ReduceWithState, MyInt, MyList, MyDict, \
FixedOffset, today, execute
class MyLoader(yaml.Loader):
pass
class MyDumper(yaml.Dumper):
pass
class MyTestClass1:
def __init__(self, x, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
if isinstance(other, MyTestClass1):
                return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
def construct1(constructor, node):
mapping = constructor.construct_mapping(node)
return MyTestClass1(**mapping)
def represent1(representer, native):
return representer.represent_mapping("!tag1", native.__dict__)
yaml.add_constructor("!tag1", construct1, Loader=MyLoader)
yaml.add_representer(MyTestClass1, represent1, Dumper=MyDumper)
class MyTestClass2(MyTestClass1, yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = "!tag2"
def from_yaml(cls, constructor, node):
x = constructor.construct_yaml_int(node)
return cls(x=x)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_scalar(cls.yaml_tag, str(native.x))
to_yaml = classmethod(to_yaml)
class MyTestClass3(MyTestClass2):
yaml_tag = "!tag3"
def from_yaml(cls, constructor, node):
mapping = constructor.construct_mapping(node)
if '=' in mapping:
x = mapping['=']
del mapping['=']
mapping['x'] = x
return cls(**mapping)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_mapping(cls.yaml_tag, native.__dict__)
to_yaml = classmethod(to_yaml)
class YAMLObject1(yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = '!foo'
def __init__(self, my_parameter=None, my_another_parameter=None):
self.my_parameter = my_parameter
self.my_another_parameter = my_another_parameter
def __eq__(self, other):
if isinstance(other, YAMLObject1):
                return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
class YAMLObject2(yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = '!bar'
def __init__(self, foo=1, bar=2, baz=3):
self.foo = foo
self.bar = bar
self.baz = baz
def __getstate__(self):
return {1: self.foo, 2: self.bar, 3: self.baz}
def __setstate__(self, state):
self.foo = state[1]
self.bar = state[2]
self.baz = state[3]
def __eq__(self, other):
if isinstance(other, YAMLObject2):
                return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
class AnObject:
def __new__(cls, foo=None, bar=None, baz=None):
self = object.__new__(cls)
self.foo = foo
self.bar = bar
self.baz = baz
return self
def __cmp__(self, other):
return cmp((type(self), self.foo, self.bar, self.baz),
(type(other), other.foo, other.bar, other.baz))
def __eq__(self, other):
return type(self) is type(other) and \
(self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
class AnInstance:
def __init__(self, foo=None, bar=None, baz=None):
self.foo = foo
self.bar = bar
self.baz = baz
def __cmp__(self, other):
return cmp((type(self), self.foo, self.bar, self.baz),
(type(other), other.foo, other.bar, other.baz))
def __eq__(self, other):
return type(self) is type(other) and \
(self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
class AState(AnInstance):
def __getstate__(self):
return {
'_foo': self.foo,
'_bar': self.bar,
'_baz': self.baz,
}
def __setstate__(self, state):
self.foo = state['_foo']
self.bar = state['_bar']
self.baz = state['_baz']
class ACustomState(AnInstance):
def __getstate__(self):
return (self.foo, self.bar, self.baz)
def __setstate__(self, state):
self.foo, self.bar, self.baz = state
class NewArgs(AnObject):
def __getnewargs__(self):
return (self.foo, self.bar, self.baz)
def __getstate__(self):
return {}
class NewArgsWithState(AnObject):
def __getnewargs__(self):
return (self.foo, self.bar)
def __getstate__(self):
return self.baz
def __setstate__(self, state):
self.baz = state
InitArgs = NewArgs
InitArgsWithState = NewArgsWithState
class Reduce(AnObject):
def __reduce__(self):
return self.__class__, (self.foo, self.bar, self.baz)
class ReduceWithState(AnObject):
def __reduce__(self):
return self.__class__, (self.foo, self.bar), self.baz
def __setstate__(self, state):
self.baz = state
class MyInt(int):
def __eq__(self, other):
return type(self) is type(other) and int(self) == int(other)
class MyList(list):
def __init__(self, n=1):
self.extend([None]*n)
def __eq__(self, other):
return type(self) is type(other) and list(self) == list(other)
class MyDict(dict):
def __init__(self, n=1):
for k in range(n):
self[k] = None
def __eq__(self, other):
return type(self) is type(other) and dict(self) == dict(other)
class FixedOffset(datetime.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return datetime.timedelta(0)
today = datetime.date.today()
def _load_code(expression):
return eval(expression)
def _serialize_value(data):
if isinstance(data, list):
return '[%s]' % ', '.join(map(_serialize_value, data))
elif isinstance(data, dict):
items = []
for key, value in data.items():
key = _serialize_value(key)
value = _serialize_value(value)
items.append("%s: %s" % (key, value))
items.sort()
return '{%s}' % ', '.join(items)
elif isinstance(data, datetime.datetime):
return repr(data.utctimetuple())
elif isinstance(data, float) and data != data:
return '?'
else:
return str(data)
def test_constructor_types(data_filename, code_filename, verbose=False):
_make_objects()
native1 = None
native2 = None
try:
native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
if len(native1) == 1:
native1 = native1[0]
native2 = _load_code(open(code_filename, 'rb').read())
try:
if native1 == native2:
return
except TypeError:
pass
if verbose:
print("SERIALIZED NATIVE1:")
print(_serialize_value(native1))
print("SERIALIZED NATIVE2:")
print(_serialize_value(native2))
assert _serialize_value(native1) == _serialize_value(native2), (native1, native2)
finally:
if verbose:
print("NATIVE1:")
pprint.pprint(native1)
print("NATIVE2:")
pprint.pprint(native2)
test_constructor_types.unittest = ['.data', '.code']
if __name__ == '__main__':
import sys, test_constructor
sys.modules['test_constructor'] = sys.modules['__main__']
import test_appliance
test_appliance.run(globals())
| apache-2.0 | -7,207,332,789,654,987,000 | 32.630769 | 101 | 0.543573 | false |
a-doumoulakis/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset.py | 163 | 2444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources
(2) a Reader class that can read those sources and returns possibly encoded
samples of data (3) a decoder that decodes each sample of data provided by the
reader (4) the total number of samples and (5) an optional dictionary mapping
the list of items returns to a description of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
"""Represents a Dataset specification."""
def __init__(self, data_sources, reader, decoder, num_samples,
items_to_descriptions, **kwargs):
"""Initializes the dataset.
Args:
data_sources: A list of files that make up the dataset.
reader: The reader class, a subclass of BaseReader such as TextLineReader
or TFRecordReader.
decoder: An instance of a data_decoder.
num_samples: The number of samples in the dataset.
items_to_descriptions: A map from the items that the dataset provides to
the descriptions of those items.
**kwargs: Any remaining dataset-specific fields.
"""
kwargs['data_sources'] = data_sources
kwargs['reader'] = reader
kwargs['decoder'] = decoder
kwargs['num_samples'] = num_samples
kwargs['items_to_descriptions'] = items_to_descriptions
self.__dict__.update(kwargs)
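# A minimal construction sketch (illustrative only; the reader, decoder and
# file pattern below are placeholders, not part of this module):
#
#   dataset = Dataset(
#       data_sources=['/tmp/train-*.tfrecord'],
#       reader=tf.TFRecordReader,
#       decoder=my_tfexample_decoder,
#       num_samples=10000,
#       items_to_descriptions={'image': 'A color image.',
#                              'label': 'An integer class label.'})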
| apache-2.0 | 3,396,705,484,659,443,000 | 39.733333 | 80 | 0.703764 | false |
jianglu/mojo | tools/clang/scripts/run_tool.py | 34 | 11858 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to help run clang tools across Chromium code.
How to use this tool:
If you want to run the tool across all Chromium code:
run_tool.py <tool> <path/to/compiledb>
If you want to include all files mentioned in the compilation database:
run_tool.py <tool> <path/to/compiledb> --all
If you only want to run the tool across just chrome/browser and content/browser:
run_tool.py <tool> <path/to/compiledb> chrome/browser content/browser
Please see https://code.google.com/p/chromium/wiki/ClangToolRefactoring for more
information, which documents the entire automated refactoring flow in Chromium.
Why use this tool:
The clang tool implementation doesn't take advantage of multiple cores, and if
it fails mysteriously in the middle, all the generated replacements will be
lost.
Unfortunately, if the work is simply sharded across multiple cores by running
multiple RefactoringTools, problems arise when they attempt to rewrite a file at
the same time. To work around that, clang tools that are run using this tool
should output edits to stdout in the following format:
==== BEGIN EDITS ====
r:::<file path>:::<offset>:::<length>:::<replacement text>
r:::<file path>:::<offset>:::<length>:::<replacement text>
...etc...
==== END EDITS ====
Any generated edits are applied once the clang tool has finished running
across Chromium, regardless of whether some instances failed or not.
"""
import collections
import functools
import json
import multiprocessing
import os.path
import pipes
import subprocess
import sys
Edit = collections.namedtuple(
'Edit', ('edit_type', 'offset', 'length', 'replacement'))
def _GetFilesFromGit(paths = None):
"""Gets the list of files in the git repository.
Args:
paths: Prefix filter for the returned paths. May contain multiple entries.
"""
args = []
if sys.platform == 'win32':
args.append('git.bat')
else:
args.append('git')
args.append('ls-files')
if paths:
args.extend(paths)
command = subprocess.Popen(args, stdout=subprocess.PIPE)
output, _ = command.communicate()
return [os.path.realpath(p) for p in output.splitlines()]
def _GetFilesFromCompileDB(build_directory):
""" Gets the list of files mentioned in the compilation database.
Args:
build_directory: Directory that contains the compile database.
"""
compiledb_path = os.path.join(build_directory, 'compile_commands.json')
with open(compiledb_path, 'rb') as compiledb_file:
json_commands = json.load(compiledb_file)
return [os.path.join(entry['directory'], entry['file'])
for entry in json_commands]
def _ExtractEditsFromStdout(build_directory, stdout):
"""Extracts generated list of edits from the tool's stdout.
The expected format is documented at the top of this file.
Args:
build_directory: Directory that contains the compile database. Used to
normalize the filenames.
stdout: The stdout from running the clang tool.
Returns:
A dictionary mapping filenames to the associated edits.
"""
lines = stdout.splitlines()
start_index = lines.index('==== BEGIN EDITS ====')
end_index = lines.index('==== END EDITS ====')
edits = collections.defaultdict(list)
for line in lines[start_index + 1:end_index]:
try:
edit_type, path, offset, length, replacement = line.split(':::', 4)
      replacement = replacement.replace("\0", "\n")
# Normalize the file path emitted by the clang tool.
path = os.path.realpath(os.path.join(build_directory, path))
edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
except ValueError:
print 'Unable to parse edit: %s' % line
return edits
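# Illustrative example (assumed tool output, not from a real run): for stdout
# containing
#   ==== BEGIN EDITS ====
#   r:::foo/bar.cc:::12:::3:::baz
#   ==== END EDITS ====
# and build_directory '/out', this returns roughly
#   {'/out/foo/bar.cc': [Edit(edit_type='r', offset=12, length=3,
#                             replacement='baz')]}
# with the path canonicalized through os.path.realpath.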
def _ExecuteTool(toolname, build_directory, filename):
"""Executes the tool.
This is defined outside the class so it can be pickled for the multiprocessing
module.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filename: The file to run the tool over.
Returns:
A dictionary that must contain the key "status" and a boolean value
associated with it.
If status is True, then the generated edits are stored with the key "edits"
in the dictionary.
Otherwise, the filename and the output from stderr are associated with the
keys "filename" and "stderr" respectively.
"""
command = subprocess.Popen((toolname, '-p', build_directory, filename),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = command.communicate()
if command.returncode != 0:
return {'status': False, 'filename': filename, 'stderr': stderr}
else:
return {'status': True,
'edits': _ExtractEditsFromStdout(build_directory, stdout)}
class _CompilerDispatcher(object):
"""Multiprocessing controller for running clang tools in parallel."""
def __init__(self, toolname, build_directory, filenames):
"""Initializer method.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filenames: The files to run the tool over.
"""
self.__toolname = toolname
self.__build_directory = build_directory
self.__filenames = filenames
self.__success_count = 0
self.__failed_count = 0
self.__edit_count = 0
self.__edits = collections.defaultdict(list)
@property
def edits(self):
return self.__edits
@property
def failed_count(self):
return self.__failed_count
def Run(self):
"""Does the grunt work."""
pool = multiprocessing.Pool()
result_iterator = pool.imap_unordered(
functools.partial(_ExecuteTool, self.__toolname,
self.__build_directory),
self.__filenames)
for result in result_iterator:
self.__ProcessResult(result)
sys.stdout.write('\n')
sys.stdout.flush()
def __ProcessResult(self, result):
"""Handles result processing.
Args:
result: The result dictionary returned by _ExecuteTool.
"""
if result['status']:
self.__success_count += 1
for k, v in result['edits'].iteritems():
self.__edits[k].extend(v)
self.__edit_count += len(v)
else:
self.__failed_count += 1
sys.stdout.write('\nFailed to process %s\n' % result['filename'])
sys.stdout.write(result['stderr'])
sys.stdout.write('\n')
percentage = (
float(self.__success_count + self.__failed_count) /
len(self.__filenames)) * 100
sys.stdout.write('Succeeded: %d, Failed: %d, Edits: %d [%.2f%%]\r' % (
self.__success_count, self.__failed_count, self.__edit_count,
percentage))
sys.stdout.flush()
def _ApplyEdits(edits, clang_format_diff_path):
"""Apply the generated edits.
Args:
edits: A dict mapping filenames to Edit instances that apply to that file.
clang_format_diff_path: Path to the clang-format-diff.py helper to help
automatically reformat diffs to avoid style violations. Pass None if the
clang-format step should be skipped.
"""
edit_count = 0
for k, v in edits.iteritems():
# Sort the edits and iterate through them in reverse order. Sorting allows
# duplicate edits to be quickly skipped, while reversing means that
# subsequent edits don't need to have their offsets updated with each edit
# applied.
v.sort()
last_edit = None
with open(k, 'rb+') as f:
contents = bytearray(f.read())
for edit in reversed(v):
if edit == last_edit:
continue
last_edit = edit
contents[edit.offset:edit.offset + edit.length] = edit.replacement
if not edit.replacement:
_ExtendDeletionIfElementIsInList(contents, edit.offset)
edit_count += 1
f.seek(0)
f.truncate()
f.write(contents)
if clang_format_diff_path:
# TODO(dcheng): python3.3 exposes this publicly as shlex.quote, but Chrome
# uses python2.7. Use the deprecated interface until Chrome uses a newer
# Python.
if subprocess.call('git diff -U0 %s | python %s -i -p1 -style=file ' % (
pipes.quote(k), clang_format_diff_path), shell=True) != 0:
print 'clang-format failed for %s' % k
print 'Applied %d edits to %d files' % (edit_count, len(edits))
_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
def _ExtendDeletionIfElementIsInList(contents, offset):
"""Extends the range of a deletion if the deleted element was part of a list.
This rewriter helper makes it easy for refactoring tools to remove elements
from a list. Even if a matcher callback knows that it is removing an element
from a list, it may not have enough information to accurately remove the list
element; for example, another matcher callback may end up removing an adjacent
list element, or all the list elements may end up being removed.
With this helper, refactoring tools can simply remove the list element and not
worry about having to include the comma in the replacement.
Args:
contents: A bytearray with the deletion already applied.
offset: The offset in the bytearray where the deleted range used to be.
"""
char_before = char_after = None
left_trim_count = 0
for byte in reversed(contents[:offset]):
left_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte in (ord(','), ord(':'), ord('('), ord('{')):
char_before = chr(byte)
break
right_trim_count = 0
for byte in contents[offset:]:
right_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte == ord(','):
char_after = chr(byte)
break
if char_before:
if char_after:
del contents[offset:offset + right_trim_count]
elif char_before in (',', ':'):
del contents[offset - left_trim_count:offset]
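# Illustrative before/after (assumed input): removing the last argument "baz"
# from "f(foo, bar, baz)" leaves "f(foo, bar, )" after the raw edit; this
# helper then also strips the dangling ", " so the result is "f(foo, bar)".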
def main(argv):
if len(argv) < 2:
print 'Usage: run_tool.py <clang tool> <compile DB> <path 1> <path 2> ...'
print ' <clang tool> is the clang tool that should be run.'
print ' <compile db> is the directory that contains the compile database'
print ' <path 1> <path2> ... can be used to filter what files are edited'
return 1
clang_format_diff_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../third_party/llvm/tools/clang/tools/clang-format',
'clang-format-diff.py')
# TODO(dcheng): Allow this to be controlled with a flag as well.
# TODO(dcheng): Shell escaping of args to git diff to clang-format is broken
# on Windows.
if not os.path.isfile(clang_format_diff_path) or sys.platform == 'win32':
clang_format_diff_path = None
if len(argv) == 3 and argv[2] == '--all':
filenames = set(_GetFilesFromCompileDB(argv[1]))
source_filenames = filenames
else:
filenames = set(_GetFilesFromGit(argv[2:]))
# Filter out files that aren't C/C++/Obj-C/Obj-C++.
extensions = frozenset(('.c', '.cc', '.m', '.mm'))
source_filenames = [f for f in filenames
if os.path.splitext(f)[1] in extensions]
dispatcher = _CompilerDispatcher(argv[0], argv[1], source_filenames)
dispatcher.Run()
# Filter out edits to files that aren't in the git repository, since it's not
# useful to modify files that aren't under source control--typically, these
# are generated files or files in a git submodule that's not part of Chromium.
_ApplyEdits({k : v for k, v in dispatcher.edits.iteritems()
if os.path.realpath(k) in filenames},
clang_format_diff_path)
if dispatcher.failed_count != 0:
return 2
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -343,072,775,630,543,700 | 33.979351 | 80 | 0.675831 | false |
DVegaCapital/zipline | zipline/utils/control_flow.py | 25 | 1295 | """
Control flow utilities.
"""
from warnings import (
catch_warnings,
filterwarnings,
)
class nullctx(object):
"""
Null context manager. Useful for conditionally adding a contextmanager in
a single line, e.g.:
with SomeContextManager() if some_expr else nullctx():
do_stuff()
"""
def __enter__(self):
return self
def __exit__(*args):
return False
class WarningContext(object):
"""
Re-entrant contextmanager for contextually managing warnings.
"""
def __init__(self, *warning_specs):
self._warning_specs = warning_specs
self._catchers = []
def __enter__(self):
catcher = catch_warnings()
catcher.__enter__()
self._catchers.append(catcher)
for args, kwargs in self._warning_specs:
filterwarnings(*args, **kwargs)
return catcher
def __exit__(self, *exc_info):
catcher = self._catchers.pop()
return catcher.__exit__(*exc_info)
def ignore_nanwarnings():
"""
Helper for building a WarningContext that ignores warnings from numpy's
nanfunctions.
"""
return WarningContext(
(
('ignore',),
{'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'},
)
)
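# Illustrative usage (not part of the original module): each warning spec is an
# (args, kwargs) pair forwarded to warnings.filterwarnings, e.g.
#
#   with WarningContext((('ignore',), {'category': DeprecationWarning})):
#       call_deprecated_api()
#
# or, for the helper above:
#
#   with ignore_nanwarnings():
#       numpy.nanmean(data, axis=0)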
| apache-2.0 | -291,267,422,713,577,200 | 22.125 | 78 | 0.589189 | false |
pdellaert/ansible | lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py | 9 | 16634 | #!/usr/bin/python
# Copyright (c) 2017 Jon Meran <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_compute_environment
short_description: Manage AWS Batch Compute Environments
description:
- This module allows the management of AWS Batch Compute Environments.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
compute_environment_name:
description:
- The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores
are allowed.
required: true
type:
description:
- The type of the compute environment.
required: true
choices: ["MANAGED", "UNMANAGED"]
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
compute_environment_state:
description:
- The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs
from a queue and can scale out automatically based on queues.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
service_role:
description:
- The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
services on your behalf.
required: true
compute_resource_type:
description:
- The type of compute resource.
required: true
choices: ["EC2", "SPOT"]
minv_cpus:
description:
- The minimum number of EC2 vCPUs that an environment should maintain.
required: true
maxv_cpus:
description:
- The maximum number of EC2 vCPUs that an environment can reach.
required: true
desiredv_cpus:
description:
- The desired number of EC2 vCPUS in the compute environment.
instance_types:
description:
- The instance types that may be launched.
required: true
image_id:
description:
- The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
subnets:
description:
- The VPC subnets into which the compute resources are launched.
required: true
security_group_ids:
description:
- The EC2 security groups that are associated with instances launched in the compute environment.
required: true
ec2_key_pair:
description:
- The EC2 key pair that is used for instances launched in the compute environment.
instance_role:
description:
- The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
required: true
tags:
description:
- Key-value pair tags to be applied to resources that are launched in the compute environment.
bid_percentage:
description:
- The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price
must be below 20% of the current On-Demand price for that EC2 instance.
spot_iam_fleet_role:
description:
- The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Compute Environment
aws_batch_compute_environment:
compute_environment_name: computeEnvironmentName
state: present
region: us-east-1
compute_environment_state: ENABLED
type: MANAGED
compute_resource_type: EC2
minv_cpus: 0
maxv_cpus: 2
desiredv_cpus: 1
instance_types:
- optimal
subnets:
- my-subnet1
- my-subnet2
security_group_ids:
- my-sg1
- my-sg2
instance_role: arn:aws:iam::<account>:instance-profile/<role>
tags:
tag1: value1
tag2: value2
service_role: arn:aws:iam::<account>:role/service-role/<role>
- name: show results
debug: var=aws_batch_compute_environment_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_compute_environment_action: none
changed: false
invocation:
module_args:
aws_access_key: ~
aws_secret_key: ~
bid_percentage: ~
compute_environment_name: <name>
compute_environment_state: ENABLED
compute_resource_type: EC2
desiredv_cpus: 0
ec2_key_pair: ~
ec2_url: ~
image_id: ~
instance_role: "arn:aws:iam::..."
instance_types:
- optimal
maxv_cpus: 8
minv_cpus: 0
profile: ~
region: us-east-1
security_group_ids:
- "*******"
security_token: ~
service_role: "arn:aws:iam::...."
spot_iam_fleet_role: ~
state: present
subnets:
- "******"
tags:
Environment: <name>
Name: <name>
type: MANAGED
validate_certs: true
response:
computeEnvironmentArn: "arn:aws:batch:...."
computeEnvironmentName: <name>
computeResources:
desiredvCpus: 0
instanceRole: "arn:aws:iam::..."
instanceTypes:
- optimal
maxvCpus: 8
minvCpus: 0
securityGroupIds:
- "******"
subnets:
- "*******"
tags:
Environment: <name>
Name: <name>
type: EC2
ecsClusterArn: "arn:aws:ecs:....."
serviceRole: "arn:aws:iam::..."
state: ENABLED
status: VALID
statusReason: "ComputeEnvironment Healthy"
type: MANAGED
type: dict
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.batch import AWSConnection
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, HAS_BOTO3
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
import re
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
except ImportError:
pass # Handled by HAS_BOTO3
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
return snake_dict_to_camel_dict(api_params)
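# Illustrative example (hypothetical values, not AWS output): with
#   module.params == {'compute_environment_name': 'my-env', 'type': 'MANAGED'}
# and module_params == ('compute_environment_name', 'type'), this returns
#   {'computeEnvironmentName': 'my-env', 'type': 'MANAGED'}.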
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
compute_environment_name = module.params['compute_environment_name']
# validate compute environment name
if not re.search(r'^[\w\_:]+$', compute_environment_name):
module.fail_json(
msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
"and underscores.".format(compute_environment_name)
)
if not compute_environment_name.startswith('arn:aws:batch:'):
if len(compute_environment_name) > 128:
module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
.format(compute_environment_name))
return
# ---------------------------------------------------------------------------------------------------
#
# Batch Compute Environment functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_compute_environment(module, connection):
try:
environments = connection.client().describe_compute_environments(
computeEnvironments=[module.params['compute_environment_name']]
)
if len(environments['computeEnvironments']) > 0:
return environments['computeEnvironments'][0]
else:
return None
except ClientError:
return None
def create_compute_environment(module, aws):
"""
Adds a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
params = (
'compute_environment_name', 'type', 'service_role')
api_params = set_api_params(module, params)
if module.params['compute_environment_state'] is not None:
api_params['state'] = module.params['compute_environment_state']
compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
'spot_iam_fleet_role')
compute_resources_params = set_api_params(module, compute_resources_param_list)
if module.params['compute_resource_type'] is not None:
compute_resources_params['type'] = module.params['compute_resource_type']
# if module.params['minv_cpus'] is not None:
# compute_resources_params['minvCpus'] = module.params['minv_cpus']
api_params['computeResources'] = compute_resources_params
try:
if not module.check_mode:
client.create_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def remove_compute_environment(module, aws):
"""
Remove a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
api_params = {'computeEnvironment': module.params['compute_environment_name']}
try:
if not module.check_mode:
client.delete_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def manage_state(module, aws):
changed = False
current_state = 'absent'
state = module.params['state']
compute_environment_state = module.params['compute_environment_state']
compute_environment_name = module.params['compute_environment_name']
service_role = module.params['service_role']
minv_cpus = module.params['minv_cpus']
maxv_cpus = module.params['maxv_cpus']
desiredv_cpus = module.params['desiredv_cpus']
action_taken = 'none'
update_env_response = ''
check_mode = module.check_mode
# check if the compute environment exists
current_compute_environment = get_current_compute_environment(module, aws)
response = current_compute_environment
if current_compute_environment:
current_state = 'present'
if state == 'present':
if current_state == 'present':
updates = False
# Update Batch Compute Environment configuration
compute_kwargs = {'computeEnvironment': compute_environment_name}
# Update configuration if needed
compute_resources = {}
if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
compute_kwargs.update({'state': compute_environment_state})
updates = True
if service_role and current_compute_environment['serviceRole'] != service_role:
compute_kwargs.update({'serviceRole': service_role})
updates = True
if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
compute_resources['minvCpus'] = minv_cpus
if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
compute_resources['maxvCpus'] = maxv_cpus
if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
compute_resources['desiredvCpus'] = desiredv_cpus
if len(compute_resources) > 0:
compute_kwargs['computeResources'] = compute_resources
updates = True
if updates:
try:
if not check_mode:
update_env_response = aws.client().update_compute_environment(**compute_kwargs)
if not update_env_response:
module.fail_json(msg='Unable to get compute environment information after creating')
changed = True
action_taken = "updated"
except (ParamValidationError, ClientError) as e:
module.fail_json(msg="Unable to update environment: {0}".format(to_native(e)),
exception=traceback.format_exc())
else:
# Create Batch Compute Environment
changed = create_compute_environment(module, aws)
# Describe compute environment
action_taken = 'added'
response = get_current_compute_environment(module, aws)
if not response:
module.fail_json(msg='Unable to get compute environment information after creating')
else:
if current_state == 'present':
# remove the compute environment
changed = remove_compute_environment(module, aws)
action_taken = 'deleted'
return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
"""
Main entry point.
:return dict: changed, batch_compute_environment_action, response
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
compute_environment_name=dict(required=True),
type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
service_role=dict(required=True),
compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
minv_cpus=dict(type='int', required=True),
maxv_cpus=dict(type='int', required=True),
desiredv_cpus=dict(type='int'),
instance_types=dict(type='list', required=True),
image_id=dict(),
subnets=dict(type='list', required=True),
security_group_ids=dict(type='list', required=True),
ec2_key_pair=dict(),
instance_role=dict(required=True),
tags=dict(type='dict'),
bid_percentage=dict(type='int'),
spot_iam_fleet_role=dict(),
region=dict(aliases=['aws_region', 'ec2_region'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['batch'])
validate_params(module, aws)
results = manage_state(module, aws)
module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
if __name__ == '__main__':
main()
| gpl-3.0 | 723,079,245,279,754,100 | 32.268 | 126 | 0.608212 | false |
vsajip/django | tests/regressiontests/localflavor/generic/tests.py | 57 | 4361 | from __future__ import unicode_literals
import datetime
from django.contrib.localflavor.generic.forms import DateField, DateTimeField
from django.test import SimpleTestCase
class GenericLocalFlavorTests(SimpleTestCase):
def test_GenericDateField(self):
error_invalid = ['Enter a valid date.']
valid = {
datetime.date(2006, 10, 25): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30, 59): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30, 59, 200): datetime.date(2006, 10, 25),
'2006-10-25': datetime.date(2006, 10, 25),
'25/10/2006': datetime.date(2006, 10, 25),
'25/10/06': datetime.date(2006, 10, 25),
'Oct 25 2006': datetime.date(2006, 10, 25),
'October 25 2006': datetime.date(2006, 10, 25),
'October 25, 2006': datetime.date(2006, 10, 25),
'25 October 2006': datetime.date(2006, 10, 25),
'25 October, 2006': datetime.date(2006, 10, 25),
}
invalid = {
'2006-4-31': error_invalid,
'200a-10-25': error_invalid,
'10/25/06': error_invalid,
}
self.assertFieldOutput(DateField, valid, invalid, empty_value=None)
# DateField with optional input_formats parameter
valid = {
datetime.date(2006, 10, 25): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30): datetime.date(2006, 10, 25),
'2006 10 25': datetime.date(2006, 10, 25),
}
invalid = {
'2006-10-25': error_invalid,
'25/10/2006': error_invalid,
'25/10/06': error_invalid,
}
kwargs = {'input_formats':['%Y %m %d'],}
self.assertFieldOutput(DateField,
valid, invalid, field_kwargs=kwargs, empty_value=None
)
def test_GenericDateTimeField(self):
error_invalid = ['Enter a valid date/time.']
valid = {
datetime.date(2006, 10, 25): datetime.datetime(2006, 10, 25, 0, 0),
datetime.datetime(2006, 10, 25, 14, 30): datetime.datetime(2006, 10, 25, 14, 30),
datetime.datetime(2006, 10, 25, 14, 30, 59): datetime.datetime(2006, 10, 25, 14, 30, 59),
datetime.datetime(2006, 10, 25, 14, 30, 59, 200): datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
'2006-10-25 14:30:45': datetime.datetime(2006, 10, 25, 14, 30, 45),
'2006-10-25 14:30:00': datetime.datetime(2006, 10, 25, 14, 30),
'2006-10-25 14:30': datetime.datetime(2006, 10, 25, 14, 30),
'2006-10-25': datetime.datetime(2006, 10, 25, 0, 0),
'25/10/2006 14:30:45': datetime.datetime(2006, 10, 25, 14, 30, 45),
'25/10/2006 14:30:00': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/2006 14:30': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/2006': datetime.datetime(2006, 10, 25, 0, 0),
'25/10/06 14:30:45': datetime.datetime(2006, 10, 25, 14, 30, 45),
'25/10/06 14:30:00': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/06 14:30': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/06': datetime.datetime(2006, 10, 25, 0, 0),
}
invalid = {
'hello': error_invalid,
'2006-10-25 4:30 p.m.': error_invalid,
}
self.assertFieldOutput(DateTimeField, valid, invalid, empty_value=None)
        # DateTimeField with optional input_formats parameter
valid = {
datetime.date(2006, 10, 25): datetime.datetime(2006, 10, 25, 0, 0),
datetime.datetime(2006, 10, 25, 14, 30): datetime.datetime(2006, 10, 25, 14, 30),
datetime.datetime(2006, 10, 25, 14, 30, 59): datetime.datetime(2006, 10, 25, 14, 30, 59),
datetime.datetime(2006, 10, 25, 14, 30, 59, 200): datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
'2006 10 25 2:30 PM': datetime.datetime(2006, 10, 25, 14, 30),
}
invalid = {
'2006-10-25 14:30:45': error_invalid,
}
kwargs = {'input_formats':['%Y %m %d %I:%M %p'],}
self.assertFieldOutput(DateTimeField,
valid, invalid, field_kwargs=kwargs, empty_value=None
)
| bsd-3-clause | -2,639,744,777,833,078,000 | 47.455556 | 111 | 0.562486 | false |
Andreasdahlberg/sillycat | scripts/check_style.py | 1 | 1234 | #!/usr/bin/env python3
# -*- coding: utf-8 -*
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
def execute_format_command(cmd='scons -C firmware format'):
"""Execute the format command and return the result."""
output = subprocess.check_output(cmd, shell=True)
return output.decode('utf-8')
def check_command_output(output):
"""Check if the output contains 'Formatted'."""
if 'Formatted' in output:
print(output)
return 1
return 0
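# Illustrative behaviour (example strings, not real scons output):
#   check_command_output('Formatted firmware/src/main.c') -> 1
#   check_command_output('scons: ... done building targets.') -> 0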
def main():
output = execute_format_command()
return check_command_output(output)
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 7,128,739,670,354,792,000 | 28.380952 | 71 | 0.707455 | false |
ukgovdatascience/classifyintentsapp | app/auth/forms.py | 1 | 5291 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms.widgets import PasswordInput
from wtforms import ValidationError
import safe
from ..models import User
class PasswordWidget(PasswordInput):
'''
Custom password field widget, but with autocomplete="off" by default.
'''
def __call__(self, field, **kwargs):
if "autocomplete" not in kwargs:
kwargs['autocomplete'] = 'off'
return super(PasswordWidget, self).__call__(field, **kwargs)
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()],
widget=PasswordWidget())
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class PasswordStrength(object):
'''
Custom password strength validator using safe.
'''
def __init__(self, message=None):
if not message:
message = ('Your password is too easy to guess. Please try again '
'with a harder to guess password.')
self.message = message
def __call__(self, form, field):
password = field.data
strength = safe.check(password)
if strength.strength not in ['medium', 'strong']:
raise ValidationError(self.message)
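# Illustrative use of the validator on a form field (field name shown here is
# an example only, not taken from this application):
#   password = PasswordField('Password',
#                            validators=[DataRequired(), PasswordStrength()],
#                            widget=PasswordWidget())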
class RegistrationForm(FlaskForm):
email = StringField(
'Email address',
validators=[
DataRequired(),
Length(1, 64),
Email(),
Regexp(
                regex=r'.*\@digital\.cabinet\-office\.gov\.uk',
message='Must be a valid @digital.cabinet-office.gov.uk address')
]
)
username = StringField('Username', validators=[
DataRequired(),
Length(1, 64),
Regexp(
'^[A-Za-z][A-Za-z0-9_.]*$',
0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
password = PasswordField('Password', validators=[
DataRequired(),
EqualTo('password2', message='Passwords must match.'),
Length(
min=8,
message='Password must be at least 8 characters in length.'),
PasswordStrength()],
widget=PasswordWidget())
password2 = PasswordField('Confirm password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Old password', validators=[DataRequired()],
widget=PasswordWidget())
password = PasswordField('New password',
widget=PasswordWidget(),
validators=[
DataRequired(),
EqualTo('password2', message='Passwords must match.'),
Length(
min=8, message='Password must be at least 8 characters in length.'),
PasswordStrength()
])
password2 = PasswordField(
'Confirm new password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(FlaskForm):
email = StringField('Email', validators=[
DataRequired(), Length(1, 64), Email()])
password = PasswordField('New Password', widget=PasswordWidget(),
validators=[
DataRequired(), EqualTo('password2', message='Passwords must match'),
PasswordStrength()])
password2 = PasswordField('Confirm password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(FlaskForm):
email = StringField(
'Email address',
validators=[
DataRequired(),
Length(1, 64),
Email(),
Regexp(regex=r'.*\@digital\.cabinet\-office\.gov\.uk',
message='Must be a valid @digital.cabinet-office.gov.uk address')
])
password = PasswordField('Password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
| mit | 1,401,283,324,015,495,700 | 35.239726 | 84 | 0.596863 | false |
mrnamingo/vix4-34-enigma2-bcm | lib/python/Plugins/SystemPlugins/CommonInterfaceAssignment/plugin.py | 20 | 26382 | from xml.etree.cElementTree import parse as ci_parse
from boxbranding import getMachineBrand, getMachineName
import os
from os import path as os_path
from enigma import eDVBCI_UI, eDVBCIInterfaces
from Screens.ChannelSelection import *
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import ConfigNothing
from Components.ConfigList import ConfigList
from Components.SelectionList import SelectionList
from ServiceReference import ServiceReference
from Plugins.Plugin import PluginDescriptor
class CIselectMainMenu(Screen):
skin = """
<screen name="CIselectMainMenu" position="center,center" size="500,250" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="CiList" position="5,50" size="490,200" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.close,
"ok": self.greenPressed,
"cancel": self.close
}, -1)
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Wizzard] FOUND %d CI Slots " % NUM_CI
self.state = { }
self.list = [ ]
if NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
if state == 0:
appname = _("Slot %d") %(slot+1) + " - " + _("no module found")
elif state == 1:
appname = _("Slot %d") %(slot+1) + " - " + _("init modules")
elif state == 2:
appname = _("Slot %d") %(slot+1) + " - " + eDVBCI_UI.getInstance().getAppName(slot)
self.list.append( (appname, ConfigNothing(), 0, slot) )
else:
self.list.append( (_("Slot %d") %(slot+1) + " - " + _("no module found") , ConfigNothing(), 1, -1) )
else:
self.list.append( (_("no CI slots found") , ConfigNothing(), 1, -1) )
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["CiList"] = menuList
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def greenPressed(self):
cur = self["CiList"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 1:
print "[CI_Wizzard] there is no CI Slot in your %s %s" % (getMachineBrand(), getMachineName())
else:
print "[CI_Wizzard] selected CI Slot : %d" % slot
if config.usage.setup_level.index > 1: # advanced
self.session.open(CIconfigMenu, slot)
else:
self.session.open(easyCIconfigMenu, slot)
# def yellowPressed(self): # unused
# NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
# print "[CI_Check] FOUND %d CI Slots " % NUM_CI
# if NUM_CI > 0:
# for ci in range(NUM_CI):
# print eDVBCIInterfaces.getInstance().getDescrambleRules(ci)
class CIconfigMenu(Screen):
skin = """
<screen name="CIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="CAidList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget source="CAidList" render="Label" position="5,80" size="550,45" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<ePixmap pixmap="div-h.png" position="0,125" zPosition="1" size="560,2" />
<widget source="ServiceList_desc" render="Label" position="5,130" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,160" size="550,250" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,160" size="550,250" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.__init__(self, session)
self.ci_slot=ci_slot
self.filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(self.ci_slot) + ".xml"
self["key_red"] = StaticText(_("Delete"))
self["key_green"] = StaticText(_("Add service"))
self["key_yellow"] = StaticText(_("Add provider"))
self["key_blue"] = StaticText(_("Select CAId"))
self["CAidList_desc"] = StaticText(_("Assigned CAIds:"))
self["CAidList"] = StaticText()
self["ServiceList_desc"] = StaticText(_("Assigned services/provider:"))
self["ServiceList_info"] = StaticText()
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"blue": self.bluePressed,
"cancel": self.cancel
}, -1)
print "[CI_Wizzard_Config] Configuring CI Slots : %d " % self.ci_slot
i=0
self.caidlist=[]
print eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot)
for caid in eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot):
i+=1
self.caidlist.append((str(hex(int(caid))),str(caid),i))
# noinspection PyStringFormat
print "[CI_Wizzard_Config_CI%d] read following CAIds from CI: %s" %(self.ci_slot, self.caidlist)
self.selectedcaid = []
self.servicelist = []
self.caids = ""
serviceList = ConfigList(self.servicelist)
serviceList.list = self.servicelist
serviceList.l.setList(self.servicelist)
self["ServiceList"] = serviceList
self.loadXML()
# if config mode !=advanced autoselect any caid
if config.usage.setup_level.index <= 1: # advanced
self.selectedcaid=self.caidlist
self.finishedCAidSelection(self.selectedcaid)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def redPressed(self):
self.delete()
def greenPressed(self):
self.session.openWithCallback( self.finishedChannelSelection, myChannelSelection, None)
def yellowPressed(self):
self.session.openWithCallback( self.finishedProviderSelection, myProviderSelection, None)
def bluePressed(self):
self.session.openWithCallback(self.finishedCAidSelection, CAidSelect, self.caidlist, self.selectedcaid)
def cancel(self):
self.saveXML()
activate_all(self)
self.close()
def setServiceListInfo(self):
if len(self.servicelist):
self["ServiceList_info"].setText("")
else:
self["ServiceList_info"].setText(_("No services/providers selected"))
def delete(self):
cur = self["ServiceList"].getCurrent()
if cur and len(cur) > 2:
self.servicelist.remove(cur)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedChannelSelection(self, *args):
if len(args):
ref=args[0]
service_ref = ServiceReference(ref)
service_name = service_ref.getServiceName()
if not find_in_list(self.servicelist, service_name, 0):
split_ref=service_ref.ref.toString().split(":")
if split_ref[0] == "1":#== dvb service und nicht muell von None
self.servicelist.append( (service_name , ConfigNothing(), 0, service_ref.ref.toString()) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedProviderSelection(self, *args):
		if len(args)>1: # when nothing is selected only one arg (==None) comes back
name=args[0]
dvbnamespace=args[1]
if not find_in_list(self.servicelist, name, 0):
self.servicelist.append( (name , ConfigNothing(), 1, dvbnamespace) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedCAidSelection(self, *args):
if len(args):
self.selectedcaid=args[0]
self.caids=""
if len(self.selectedcaid):
for item in self.selectedcaid:
if len(self.caids):
self.caids+= ", " + item[0]
else:
self.caids=item[0]
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
self["CAidList"].setText(self.caids)
def saveXML(self):
try:
fp = open(self.filename, 'w')
fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
fp.write("<ci>\n")
fp.write("\t<slot>\n")
fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
for item in self.selectedcaid:
if len(self.selectedcaid):
fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
for item in self.servicelist:
if len(self.servicelist):
if item[2]==1:
fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (item[0], item[3]))
else:
fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (item[0], item[3]))
fp.write("\t</slot>\n")
fp.write("</ci>\n")
fp.close()
except:
print "[CI_Config_CI%d] xml not written" %self.ci_slot
os.unlink(self.filename)
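	# Example of the file written above for slot 0 (all values illustrative):
	#   <?xml version="1.0" encoding="utf-8" ?>
	#   <ci>
	#       <slot>
	#           <id>0</id>
	#           <caid id="0x100" />
	#           <service name="Example TV" ref="1:0:1:1:1:1:C00000:0:0:0:" />
	#           <provider name="Example Provider" dvbnamespace="c00000" />
	#       </slot>
	#   </ci>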
def loadXML(self):
if not os_path.exists(self.filename):
return
def getValue(definitions, default):
ret = ""
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
self.read_services=[]
self.read_providers=[]
self.usingcaid=[]
self.ci_config=[]
try:
fp = open(self.filename, 'r')
tree = ci_parse(fp).getroot()
fp.close()
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
print "ci " + read_slot
i=0
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
self.selectedcaid.append((str(read_caid),str(read_caid),i))
self.usingcaid.append(long(read_caid,16))
i+=1
for service in slot.findall("service"):
read_service_name = service.get("name").encode("UTF-8")
read_service_ref = service.get("ref").encode("UTF-8")
self.read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
self.read_providers.append((read_provider_name,read_provider_dvbname))
self.ci_config.append((int(read_slot), (self.read_services, self.read_providers, self.usingcaid)))
except:
print "[CI_Config_CI%d] error parsing xml..." %self.ci_slot
for item in self.read_services:
if len(item):
self.finishedChannelSelection(item)
for item in self.read_providers:
if len(item):
self.finishedProviderSelection(item[0],item[1])
print self.ci_config
self.finishedCAidSelection(self.selectedcaid)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
class easyCIconfigMenu(CIconfigMenu):
skin = """
<screen name="easyCIconfigMenu" position="center,center" size="560,440" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="ServiceList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,80" size="550,300" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,80" size="550,300" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.setTitle(self, _("CI assignment"))
ci=ci_slot
CIconfigMenu.__init__(self, session, ci_slot)
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"cancel": self.cancel
})
class CAidSelect(Screen):
skin = """
<screen name="CAidSelect" position="center,center" size="450,440" title="select CAId's" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="list" position="5,50" size="440,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="450,2" />
<widget source="introduction" render="Label" position="0,400" size="450,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, list, selected_caids):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
for listindex in range(len(list)):
if find_in_list(selected_caids,list[listindex][0],0):
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, True)
else:
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, False)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["introduction"] = StaticText(_("Press OK to select/deselect a CAId."))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.cancel,
"green": self.greenPressed,
"red": self.cancel
}, -1)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("select CAId's"))
def greenPressed(self):
list = self.list.getSelectionsList()
print list
self.close(list)
def cancel(self):
self.close()
class myProviderSelection(ChannelSelectionBase):
skin = """
<screen name="myProviderSelection" position="center,center" size="560,440" title="Select provider to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self.bouquet_mark_edit = EDIT_BOUQUET
self["actions"] = ActionMap(["OkCancelActions", "ChannelSelectBaseActions"],
{
"showFavourites": self.doNothing,
"showAllServices": self.cancel,
"showProviders": self.doNothing,
"showSatellites": self.doNothing,
"cancel": self.cancel,
"ok": self.channelSelected
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText(_("Press OK to select a provider."))
def doNothing(self):
pass
def __onExecCallback(self):
self.showSatellites()
self.setTitle(_("Select provider to add..."))
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
splited_ref=ref.toString().split(":")
if ref.flags == 7 and splited_ref[6] != "0":
self.dvbnamespace=splited_ref[6]
self.enterPath(ref)
else:
self.close(ref.getName(), self.dvbnamespace)
def showSatellites(self):
if not self.pathChangeDisabled:
refstr = '%s FROM SATELLITES ORDER BY satellitePosition'% self.service_types
if not self.preEnterPath(refstr):
ref = eServiceReference(refstr)
justSet=False
prev = None
if self.isBasePathEqual(ref):
if self.isPrevPathEqual(ref):
justSet=True
prev = self.pathUp(justSet)
else:
currentRoot = self.getRoot()
if currentRoot is None or currentRoot != ref:
justSet=True
self.clearPath()
self.enterPath(ref, True)
if justSet:
serviceHandler = eServiceCenter.getInstance()
servicelist = serviceHandler.list(ref)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
unsigned_orbpos = service.getUnsignedData(4) >> 16
orbpos = service.getData(4) >> 16
if orbpos < 0:
orbpos += 3600
if service.getPath().find("FROM PROVIDER") != -1:
service_type = _("Providers")
try:
									# why do we need this cast?
service_name = str(nimmanager.getSatDescription(orbpos))
except:
if unsigned_orbpos == 0xFFFF: #Cable
service_name = _("Cable")
elif unsigned_orbpos == 0xEEEE: #Terrestrial
service_name = _("Terrestrial")
else:
if orbpos > 1800: # west
orbpos = 3600 - orbpos
h = _("W")
else:
h = _("E")
service_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
service.setName("%s - %s" % (service_name, service_type))
self.servicelist.addService(service)
self.servicelist.finishFill()
if prev is not None:
self.setCurrentSelection(prev)
def cancel(self):
self.close(None)
class myChannelSelection(ChannelSelectionBase):
skin = """
<screen name="myChannelSelection" position="center,center" size="560,440" title="Select service to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self.bouquet_mark_edit = OFF
self["actions"] = ActionMap(["OkCancelActions", "TvRadioActions", "ChannelSelectBaseActions"],
{
"showProviders": self.doNothing,
"showSatellites": self.showAllServices,
"showAllServices": self.cancel,
"cancel": self.cancel,
"ok": self.channelSelected,
"keyRadio": self.setModeRadio,
"keyTV": self.setModeTv
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("All"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Favourites"))
		self["introduction"] = StaticText(_("Press OK to select a service."))
def __onExecCallback(self):
self.setModeTv()
self.setTitle(_("Select service to add..."))
def doNothing(self):
pass
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
if (ref.flags & 7) == 7:
self.enterPath(ref)
elif not (ref.flags & eServiceReference.isMarker):
ref = self.getCurrentSelection()
self.close(ref)
def setModeTv(self):
self.setTvMode()
self.showFavourites()
def setModeRadio(self):
self.setRadioMode()
self.showFavourites()
def cancel(self):
self.close(None)
def activate_all(session):
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Activate] FOUND %d CI Slots " % NUM_CI
if NUM_CI > 0:
ci_config=[]
def getValue(definitions, default):
# Initialize Output
ret = ""
# How many definitions are present
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
for ci in range(NUM_CI):
filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(ci) + ".xml"
			if not os_path.exists(filename):
				print "[CI_Activate_Config_CI%d] no config file found" %ci
				continue
			try:
				fp = open(filename, 'r')
tree = ci_parse(fp).getroot()
fp.close()
read_services=[]
read_providers=[]
usingcaid=[]
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
usingcaid.append(long(read_caid,16))
for service in slot.findall("service"):
read_service_ref = service.get("ref").encode("UTF-8")
read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
read_providers.append((read_provider_name,long(read_provider_dvbname,16)))
ci_config.append((int(read_slot), (read_services, read_providers, usingcaid)))
except:
print "[CI_Activate_Config_CI%d] error parsing xml..." %ci
for item in ci_config:
print "[CI_Activate] activate CI%d with following settings:" %item[0]
print item[0]
print item[1]
try:
eDVBCIInterfaces.getInstance().setDescrambleRules(item[0],item[1])
except:
print "[CI_Activate_Config_CI%d] error setting DescrambleRules..." %item[0]
def find_in_list(list, search, listpos=0):
for item in list:
if item[listpos]==search:
return True
return False
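# Illustrative note (not part of the plugin): find_in_list() reports whether any
# tuple in the list carries the search value at position listpos, e.g.
#   find_in_list(self.servicelist, service_name, 0)
# returns True when a service with that name was already added, which is how
# finishedChannelSelection() and finishedProviderSelection() avoid duplicates.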
global_session = None
def sessionstart(reason, session):
global global_session
global_session = session
def autostart(reason, **kwargs):
global global_session
if reason == 0:
print "[CI_Assignment] activating ci configs:"
activate_all(global_session)
elif reason == 1:
global_session = None
def main(session, **kwargs):
session.open(CIselectMainMenu)
def menu(menuid, **kwargs):
if menuid == "cam" and eDVBCIInterfaces.getInstance().getNumOfSlots():
return [(_("Common Interface Assignment"), main, "ci_assign", 11)]
return [ ]
def Plugins(**kwargs):
if config.usage.setup_level.index > 1:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart ),
PluginDescriptor( name = _("Common Interface assignment"), description = _("a gui to assign services/providers/caids to common interface modules"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu )]
else:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart ),
PluginDescriptor( name = _("Common Interface assignment"), description = _("a gui to assign services/providers to common interface modules"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu )]
| gpl-2.0 | 6,294,768,001,270,203,000 | 39.216463 | 224 | 0.679251 | false |
YanyangChen/OpenNI2 | Wrappers/java/OpenNI.jni/CreateMethods.py | 32 | 2398 | #/****************************************************************************
#* *
#* OpenNI 1.x Alpha *
#* Copyright (C) 2012 PrimeSense Ltd. *
#* *
#* This file is part of OpenNI. *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#****************************************************************************/
import os
import re
# ----------------------- MAIN -------------------------
java_header = open("org_openni_NativeMethods.h")
cont = java_header.read()
java_header.close()
result = open("methods.inl", "w")
result.write("static JNINativeMethod methods[] = {\n")
while True:
match = re.search("Method:\s*(\w*)", cont)
if match is None:
break
method_name = match.group(1)
match = re.search("Signature:\s*([\w\(\)\[;/]*)", cont)
if match is None:
break
signature = match.group(1)
match = re.search("JNIEXPORT.*JNICALL (\w*)", cont)
if match is None:
break
method = match.group(1)
result.write('\t{ "' + method_name + '", "' + signature + '", (void*)&' + method + ' },\n')
cont = cont[match.end():];
result.write('};\n');
result.close()
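# Illustrative sketch (not part of the original script): a typical javah-generated
# stanza in org_openni_NativeMethods.h looks like
#
#   /*
#    * Class:     org_openni_NativeMethods
#    * Method:    oniInitialize
#    * Signature: (I)I
#    */
#   JNIEXPORT jint JNICALL Java_org_openni_NativeMethods_oniInitialize
#     (JNIEnv *, jclass, jint);
#
# and the loop above turns each such stanza into one registration entry in
# methods.inl:
#
#   { "oniInitialize", "(I)I", (void*)&Java_org_openni_NativeMethods_oniInitialize },
#
# The method name and signature here are only assumed examples; any stanza that
# matches the three regular expressions is handled the same way.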
| apache-2.0 | 13,528,437,951,845,220 | 46.019608 | 95 | 0.382819 | false |
chitr/neutron | neutron/db/quota/models.py | 22 | 2526 | # Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
class ResourceDelta(model_base.BASEV2):
resource = sa.Column(sa.String(255), primary_key=True)
reservation_id = sa.Column(sa.String(36),
sa.ForeignKey('reservations.id',
ondelete='CASCADE'),
primary_key=True,
nullable=False)
# Requested amount of resource
amount = sa.Column(sa.Integer)
class Reservation(model_base.BASEV2, models_v2.HasId):
tenant_id = sa.Column(sa.String(255))
expiration = sa.Column(sa.DateTime())
resource_deltas = orm.relationship(ResourceDelta,
backref='reservation',
lazy="joined",
cascade='all, delete-orphan')
class Quota(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represent a single quota override for a tenant.
If there is no row for a given tenant id and resource, then the
default for the deployment is used.
"""
resource = sa.Column(sa.String(255))
limit = sa.Column(sa.Integer)
class QuotaUsage(model_base.BASEV2):
"""Represents the current usage for a given resource."""
resource = sa.Column(sa.String(255), nullable=False,
primary_key=True, index=True)
tenant_id = sa.Column(sa.String(255), nullable=False,
primary_key=True, index=True)
dirty = sa.Column(sa.Boolean, nullable=False, server_default=sql.false())
in_use = sa.Column(sa.Integer, nullable=False,
server_default="0")
reserved = sa.Column(sa.Integer, nullable=False,
server_default="0")
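# Illustrative example (made-up values, not part of the model definitions):
# a QuotaUsage row is keyed by (resource, tenant_id), e.g.
#   resource='port', tenant_id='tenant-a', in_use=5, reserved=2, dirty=False
# The dirty flag marks usage data that should be recounted from the actual
# resources before being trusted again, while Reservation rows and their
# ResourceDelta children track amounts that are requested but not yet committed.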
| apache-2.0 | 5,400,663,112,791,167,000 | 37.861538 | 78 | 0.623515 | false |
richardcs/ansible | lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py | 6 | 7683 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_ssm_parameter_store
short_description: Manage key-value pairs in AWS SSM Parameter Store.
description:
  - Manage key-value pairs in AWS SSM Parameter Store.
version_added: "2.5"
options:
name:
description:
- parameter key name.
required: true
description:
description:
- parameter key description.
required: false
value:
description:
- Parameter value.
required: false
state:
description:
- Creates or modifies an existing parameter
- Deletes a parameter
required: false
choices: ['present', 'absent']
default: present
string_type:
description:
- Parameter String type
required: false
choices: ['String', 'StringList', 'SecureString']
default: String
decryption:
description:
- Work with SecureString type to get plain text secrets
type: bool
required: false
default: True
key_id:
description:
- aws KMS key to decrypt the secrets.
required: false
    default: aws/ssm (this key is automatically generated when the first parameter is created).
overwrite_value:
description:
- Option to overwrite an existing value if it already exists.
- String
required: false
version_added: "2.6"
choices: ['never', 'changed', 'always']
default: changed
region:
description:
- region.
required: false
author:
- Nathan Webster (@nathanwebsterdotme)
- Bill Wang (@ozbillwang) <[email protected]>
- Michael De La Rue (@mikedlr)
extends_documentation_fragment: aws
requirements: [ botocore, boto3 ]
'''
EXAMPLES = '''
- name: Create or update key/value pair in aws parameter store
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
value: "World"
- name: Delete the key
aws_ssm_parameter_store:
name: "Hello"
state: absent
- name: Create or update secure key/value pair with default kms key (aws/ssm)
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
value: "World"
- name: Create or update secure key/value pair with nominated kms key
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
key_id: "alias/demo"
value: "World"
- name: Always update a parameter store value and create a new version
aws_ssm_parameter_store:
name: "overwrite_example"
description: "This example will always overwrite the value"
string_type: "String"
value: "Test1234"
overwrite_value: "always"
- name: recommend to use with aws_ssm lookup plugin
debug: msg="{{ lookup('aws_ssm', 'hello') }}"
'''
RETURN = '''
put_parameter:
description: Add one or more parameters to the system.
returned: success
type: dictionary
delete_parameter:
description: Delete a parameter from the system.
returned: success
type: dictionary
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info
try:
from botocore.exceptions import ClientError
except ImportError:
    pass  # a missing boto3/botocore is reported by AnsibleAWSModule
def update_parameter(client, module, args):
changed = False
response = {}
try:
response = client.put_parameter(**args)
changed = True
except ClientError as e:
module.fail_json_aws(e, msg="setting parameter")
return changed, response
def create_update_parameter(client, module):
changed = False
existing_parameter = None
response = {}
args = dict(
Name=module.params.get('name'),
Value=module.params.get('value'),
Type=module.params.get('string_type')
)
if (module.params.get('overwrite_value') in ("always", "changed")):
args.update(Overwrite=True)
else:
args.update(Overwrite=False)
if module.params.get('description'):
args.update(Description=module.params.get('description'))
if module.params.get('string_type') == 'SecureString':
args.update(KeyId=module.params.get('key_id'))
try:
existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
    except:
        # the parameter does not exist yet (or could not be read); it will be
        # created/updated below
        pass
if existing_parameter:
if (module.params.get('overwrite_value') == 'always'):
(changed, response) = update_parameter(client, module, args)
elif (module.params.get('overwrite_value') == 'changed'):
if existing_parameter['Parameter']['Type'] != args['Type']:
(changed, response) = update_parameter(client, module, args)
if existing_parameter['Parameter']['Value'] != args['Value']:
(changed, response) = update_parameter(client, module, args)
if args.get('Description'):
# Description field not available from get_parameter function so get it from describe_parameters
describe_existing_parameter = None
try:
describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
describe_existing_parameter = describe_existing_parameter_paginator.paginate(
Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
except ClientError as e:
module.fail_json_aws(e, msg="getting description value")
if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
(changed, response) = update_parameter(client, module, args)
else:
(changed, response) = update_parameter(client, module, args)
return changed, response
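# Illustrative summary (not part of the module): how overwrite_value is applied
# by create_update_parameter() above:
#   'never'   - an existing parameter is never overwritten (Overwrite=False)
#   'changed' - put_parameter is called only when the existing Type, Value or
#               Description differs from the requested one
#   'always'  - put_parameter is always called, creating a new parameter version
# A parameter that does not exist yet is created regardless of this setting.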
def delete_parameter(client, module):
response = {}
try:
response = client.delete_parameter(
Name=module.params.get('name')
)
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
return False, {}
module.fail_json_aws(e, msg="deleting parameter")
return True, response
def setup_client(module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ssm', region=region, endpoint=ec2_url, **aws_connect_params)
return connection
def setup_module_object():
argument_spec = dict(
name=dict(required=True),
description=dict(),
value=dict(required=False, no_log=True),
state=dict(default='present', choices=['present', 'absent']),
string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
decryption=dict(default=True, type='bool'),
key_id=dict(default="alias/aws/ssm"),
overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
region=dict(required=False),
)
return AnsibleAWSModule(
argument_spec=argument_spec,
)
def main():
module = setup_module_object()
state = module.params.get('state')
client = setup_client(module)
invocations = {
"present": create_update_parameter,
"absent": delete_parameter,
}
(changed, response) = invocations[state](client, module)
module.exit_json(changed=changed, response=response)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,917,926,273,472,367,000 | 29.011719 | 126 | 0.645451 | false |
bswartz/cinder | cinder/volume/drivers/solidfire.py | 1 | 86895 | # All Rights Reserved.
# Copyright 2013 SolidFire Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import math
import random
import re
import socket
import string
import time
import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
from requests.packages.urllib3 import exceptions
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
from cinder import interface
from cinder.objects import fields
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
sf_opts = [
cfg.BoolOpt('sf_emulate_512',
default=True,
help='Set 512 byte emulation on volume creation; '),
cfg.BoolOpt('sf_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('sf_account_prefix',
help='Create SolidFire accounts with this prefix. Any string '
'can be used here, but the string \"hostname\" is special '
'and will create a prefix using the cinder node hostname '
'(previous default behavior). The default is NO prefix.'),
cfg.StrOpt('sf_volume_prefix',
default='UUID-',
help='Create SolidFire volumes with this prefix. Volume names '
'are of the form <sf_volume_prefix><cinder-volume-id>. '
'The default is to use a prefix of \'UUID-\'.'),
cfg.StrOpt('sf_template_account_name',
default='openstack-vtemplate',
help='Account name on the SolidFire Cluster to use as owner of '
'template/cache volumes (created if does not exist).'),
cfg.BoolOpt('sf_allow_template_caching',
default=True,
help='Create an internal cache of copy of images when '
'a bootable volume is created to eliminate fetch from '
'glance and qemu-conversion on subsequent calls.'),
cfg.StrOpt('sf_svip',
help='Overrides default cluster SVIP with the one specified. '
'This is required or deployments that have implemented '
'the use of VLANs for iSCSI networks in their cloud.'),
cfg.BoolOpt('sf_enable_volume_mapping',
default=True,
help='Create an internal mapping of volume IDs and account. '
'Optimizes lookups and performance at the expense of '
'memory, very large deployments may want to consider '
'setting to False.'),
cfg.PortOpt('sf_api_port',
default=443,
help='SolidFire API port. Useful if the device api is behind '
'a proxy on a different port.'),
cfg.BoolOpt('sf_enable_vag',
default=False,
help='Utilize volume access groups on a per-tenant basis.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts)
# SolidFire API Error Constants
xExceededLimit = 'xExceededLimit'
xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup'
xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
try:
return f(*args, **kwargs)
except exc_tuple:
time.sleep(_delay)
_tries -= 1
_delay *= backoff
LOG.debug('Retrying %(args)s, %(tries)s attempts '
'remaining...',
{'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
# which should be the Method
msg = (_('Retry count exceeded for command: %s') %
(args[1],))
LOG.error(msg)
raise exception.SolidFireAPIException(message=msg)
return func_retry
return retry_dec
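# Illustrative note (not part of the driver): the decorator above retries with
# exponential backoff. As used further down,
#
#     @retry(retry_exc_tuple, tries=6)
#     def _issue_api_request(self, method, params, ...):
#
# a call that keeps raising a retryable exception is attempted 5 times with
# waits of 1, 2, 4, 8 and 16 seconds between attempts, after which
# SolidFireAPIException('Retry count exceeded for command: <method>') is raised.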
@interface.volumedriver
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
Version history:
1.0 - Initial driver
1.1 - Refactor, clone support, qos by type and minor bug fixes
1.2 - Add xfr and retype support
1.2.1 - Add export/import support
1.2.2 - Catch VolumeNotFound on accept xfr
2.0.0 - Move from httplib to requests
2.0.1 - Implement SolidFire Snapshots
2.0.2 - Implement secondary account
2.0.3 - Implement cluster pairing
2.0.4 - Implement volume replication
2.0.5 - Try and deal with the stupid retry/clear issues from objects
and tflow
"""
VERSION = '2.0.2'
sf_qos_dict = {'slow': {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 200},
'medium': {'minIOPS': 200,
'maxIOPS': 400,
'burstIOPS': 400},
'fast': {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000},
'performant': {'minIOPS': 2000,
'maxIOPS': 4000,
'burstIOPS': 4000},
'off': None}
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
cluster_stats = {}
retry_exc_tuple = (exception.SolidFireRetryableException,
requests.exceptions.ConnectionError)
retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
'xMaxClonesPerNodeExceeded',
'xNotReadyForIO']
def __init__(self, *args, **kwargs):
super(SolidFireDriver, self).__init__(*args, **kwargs)
self.failed_over_id = kwargs.get('active_backend_id', None)
self.active_cluster_info = {}
self.configuration.append_config_values(sf_opts)
self.template_account_id = None
self.max_volumes_per_account = 1990
self.volume_map = {}
self.cluster_pairs = []
self.replication_enabled = False
self.failed_over = False
self.target_driver = SolidFireISCSI(solidfire_driver=self,
configuration=self.configuration)
if self.failed_over_id:
remote_info = self._get_remote_info_by_id(self.failed_over_id)
if remote_info:
self._set_active_cluster_info(remote_info['endpoint'])
else:
LOG.error(_LE('Failed to initialize SolidFire driver to '
'a remote cluster specified at id: %s'),
self.failed_over_id)
else:
self._set_active_cluster_info()
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
if self.configuration.sf_allow_template_caching:
account = self.configuration.sf_template_account_name
self.template_account_id = self._create_template_account(account)
if not self.failed_over_id:
self._set_cluster_pairs()
def __getattr__(self, attr):
if hasattr(self.target_driver, attr):
return getattr(self.target_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _get_remote_info_by_id(self, backend_id):
remote_info = None
for rd in self.configuration.get('replication_device', []):
if rd.get('backend_id', None) == backend_id:
remote_endpoint = self._build_endpoint_info(**rd)
remote_info = self._get_remote_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = (
remote_info['svip'] + ':3260')
return remote_info
def _create_remote_pairing(self, remote_device):
try:
pairing_info = self._issue_api_request('StartClusterPairing',
{}, version='8.0')['result']
pair_id = self._issue_api_request(
'CompleteClusterPairing',
{'clusterPairingKey': pairing_info['clusterPairingKey']},
version='8.0',
endpoint=remote_device['endpoint'])['result']['clusterPairID']
except exception.SolidFireAPIException as ex:
if 'xPairingAlreadExists' in ex.msg:
LOG.debug('Pairing already exists during init.')
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Cluster pairing failed: %s'), ex.msg)
LOG.debug(('Initialized Cluster pair with ID: %s'), pair_id)
remote_device['clusterPairID'] = pair_id
return pair_id
def _get_remote_cluster_info(self, remote_endpoint):
return self._issue_api_request(
'GetClusterInfo',
{},
endpoint=remote_endpoint)['result']['clusterInfo']
def _set_cluster_pairs(self):
if not self.configuration.get('replication_device', None):
self.replication = False
return
existing_pairs = self._issue_api_request(
'ListClusterPairs',
{},
version='8.0')['result']['clusterPairs']
remote_pair = {}
for rd in self.configuration.get('replication_device', []):
remote_endpoint = self._build_endpoint_info(**rd)
remote_info = self._get_remote_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = remote_info['svip'] + ':3260'
for ep in existing_pairs:
if rd['backend_id'] == ep['mvip']:
remote_pair = ep
LOG.debug("Found remote pair: %s", remote_pair)
remote_info['clusterPairID'] = ep['clusterPairID']
break
if not remote_pair:
# NOTE(jdg): create_remote_pairing sets the
# clusterPairID in remote_info for us
self._create_remote_pairing(remote_info)
self.cluster_pairs.append(remote_info)
LOG.debug("Setting replication_enabled to True.")
self.replication_enabled = True
def _set_active_cluster_info(self, endpoint=None):
if not endpoint:
self.active_cluster_info['endpoint'] = self._build_endpoint_info()
else:
self.active_cluster_info['endpoint'] = endpoint
for k, v in self._issue_api_request(
'GetClusterInfo',
{})['result']['clusterInfo'].items():
self.active_cluster_info[k] = v
# Add a couple extra things that are handy for us
self.active_cluster_info['clusterAPIVersion'] = (
self._issue_api_request('GetClusterVersionInfo',
{})['result']['clusterAPIVersion'])
if self.configuration.get('sf_svip', None):
self.active_cluster_info['svip'] = (
self.configuration.get('sf_svip'))
def _create_provider_id_string(self,
resource_id,
account_or_vol_id):
# NOTE(jdg): We use the same format, but in the case
# of snapshots, we don't have an account id, we instead
# swap that with the parent volume id
return "%s %s %s" % (resource_id,
account_or_vol_id,
self.active_cluster_info['uuid'])
def _init_snapshot_mappings(self, srefs):
updates = []
sf_snaps = self._issue_api_request(
'ListSnapshots', {}, version='6.0')['result']['snapshots']
for s in srefs:
seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id'])
sfsnap = next(
(ss for ss in sf_snaps if ss['name'] == seek_name), None)
if sfsnap:
id_string = self._create_provider_id_string(
sfsnap['snapshotID'],
sfsnap['volumeID'])
if s.get('provider_id') != id_string:
updates.append(
{'id': s['id'],
'provider_id': id_string})
return updates
def _init_volume_mappings(self, vrefs):
updates = []
sf_vols = self._issue_api_request('ListActiveVolumes',
{})['result']['volumes']
self.volume_map = {}
for v in vrefs:
seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id'])
sfvol = next(
(sv for sv in sf_vols if sv['name'] == seek_name), None)
if sfvol:
if v.get('provider_id', 'nil') != sfvol['volumeID']:
updates.append(
{'id': v['id'],
'provider_id': self._create_provider_id_string(
sfvol['volumeID'], sfvol['accountID'])})
return updates
def update_provider_info(self, vrefs, snaprefs):
volume_updates = self._init_volume_mappings(vrefs)
snapshot_updates = self._init_snapshot_mappings(snaprefs)
return (volume_updates, snapshot_updates)
def _create_template_account(self, account_name):
# We raise an API exception if the account doesn't exist
# We need to take account_prefix settings into consideration
# This just uses the same method to do template account create
# as we use for any other OpenStack account
account_name = self._get_sf_account_name(account_name)
try:
id = self._issue_api_request(
'GetAccountByName',
{'username': account_name})['result']['account']['accountID']
except exception.SolidFireAPIException:
chap_secret = self._generate_random_string(12)
params = {'username': account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
id = self._issue_api_request('AddAccount',
params)['result']['accountID']
return id
def _build_endpoint_info(self, **kwargs):
endpoint = {}
endpoint['mvip'] = (
kwargs.get('mvip', self.configuration.san_ip))
endpoint['login'] = (
kwargs.get('login', self.configuration.san_login))
endpoint['passwd'] = (
kwargs.get('passwd', self.configuration.san_password))
endpoint['port'] = (
kwargs.get('port', self.configuration.sf_api_port))
endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
endpoint['port'])
endpoint['svip'] = kwargs.get('svip', self.configuration.sf_svip)
if not endpoint.get('mvip', None) and kwargs.get('backend_id', None):
endpoint['mvip'] = kwargs.get('backend_id')
return endpoint
@retry(retry_exc_tuple, tries=6)
def _issue_api_request(self, method, params, version='1.0', endpoint=None):
if params is None:
params = {}
if endpoint is None:
endpoint = self.active_cluster_info['endpoint']
payload = {'method': method, 'params': params}
url = '%s/json-rpc/%s/' % (endpoint['url'], version)
with warnings.catch_warnings():
warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
req = requests.post(url,
data=json.dumps(payload),
auth=(endpoint['login'], endpoint['passwd']),
verify=False,
timeout=30)
response = req.json()
req.close()
if (('error' in response) and
(response['error']['name'] in self.retryable_errors)):
msg = ('Retryable error (%s) encountered during '
'SolidFire API call.' % response['error']['name'])
LOG.debug(msg)
raise exception.SolidFireRetryableException(message=msg)
if 'error' in response:
msg = _('API response: %s') % response
raise exception.SolidFireAPIException(msg)
return response
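    # Illustrative note (not part of the driver): with the default sf_api_port
    # the call above amounts to an HTTP basic-auth POST such as
    #
    #   POST https://<san_ip>:443/json-rpc/1.0/
    #   {"method": "GetClusterInfo", "params": {}}
    #
    # sent with certificate verification disabled and retried (see the retry
    # decorator above) whenever the cluster reports a retryable error.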
def _get_volumes_by_sfaccount(self, account_id, endpoint=None):
"""Get all volumes on cluster for specified account."""
params = {'accountID': account_id}
return self._issue_api_request(
'ListVolumesForAccount',
params,
endpoint=endpoint)['result']['volumes']
def _get_sfaccount_by_name(self, sf_account_name, endpoint=None):
"""Get SolidFire account object by name."""
sfaccount = None
params = {'username': sf_account_name}
try:
data = self._issue_api_request('GetAccountByName',
params,
endpoint=endpoint)
if 'result' in data and 'account' in data['result']:
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
except exception.SolidFireAPIException as ex:
if 'xUnknownAccount' in ex.msg:
return sfaccount
else:
raise
return sfaccount
def _get_sf_account_name(self, project_id):
"""Build the SolidFire account name to use."""
prefix = self.configuration.sf_account_prefix or ''
if prefix == 'hostname':
prefix = socket.gethostname()
return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
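    # Illustrative examples (assumed project id and hostname): with no
    # sf_account_prefix the SolidFire account name is just the project id;
    # with sf_account_prefix = 'hostname' on a node called 'cinder-1' it
    # becomes 'cinder-1-<project_id>'; with sf_account_prefix = 'os' it
    # becomes 'os-<project_id>'.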
def _get_sfaccount(self, project_id):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
raise exception.SolidFireAccountNotFound(
account_name=sf_account_name)
return sfaccount
def _create_sfaccount(self, project_id):
"""Create account on SolidFire device if it doesn't already exist.
        We first check whether the account already exists; if it does we
        simply return it, otherwise we create it.
"""
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
self._issue_api_request('AddAccount', params)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
return sfaccount
def _generate_random_string(self, length):
"""Generates random_string to use for CHAP password."""
char_set = string.ascii_uppercase + string.digits
return ''.join(random.sample(char_set, length))
def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
"""Gets the connection info for specified account and volume."""
if endpoint:
iscsi_portal = endpoint['svip']
else:
iscsi_portal = self.active_cluster_info['svip']
if ':' not in iscsi_portal:
iscsi_portal += ':3260'
chap_secret = sfaccount['targetSecret']
found_volume = False
iteration_count = 0
while not found_volume and iteration_count < 600:
volume_list = self._get_volumes_by_sfaccount(
sfaccount['accountID'], endpoint=endpoint)
iqn = None
for v in volume_list:
if v['volumeID'] == sf_volume_id:
iqn = v['iqn']
found_volume = True
break
if not found_volume:
time.sleep(2)
iteration_count += 1
if not found_volume:
LOG.error(_LE('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!'), sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
# NOTE(john-griffith): SF volumes are always at lun 0
model_update['provider_location'] = ('%s %s %s'
% (iscsi_portal, iqn, 0))
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
chap_secret))
if not self.configuration.sf_emulate_512:
model_update['provider_geometry'] = ('%s %s' % (4096, 4096))
model_update['provider_id'] = (
self._create_provider_id_string(sf_volume_id,
sfaccount['accountID']))
return model_update
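    # Illustrative example (made-up values): the resulting model update looks
    # roughly like
    #   provider_location = '<svip>:3260 <volume iqn> 0'
    #   provider_auth     = 'CHAP <account_name> <chap_secret>'
    #   provider_id       = '<sf_volume_id> <sf_account_id> <cluster_uuid>'
    # with provider_geometry = '4096 4096' added when 512e emulation is off.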
def _snapshot_discovery(self, src_uuid, params, vref):
# NOTE(jdg): First check the SF snapshots
# if we don't find a snap by the given name, just move on to check
# volumes. This may be a running system that was updated from
# before we did snapshots, so need to check both
is_clone = False
sf_vol = None
snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid)
snaps = self._get_sf_snapshots()
snap = next((s for s in snaps if s["name"] == snap_name), None)
if snap:
params['snapshotID'] = int(snap['snapshotID'])
params['volumeID'] = int(snap['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
else:
sf_vol = self._get_sf_volume(src_uuid)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=src_uuid)
params['volumeID'] = int(sf_vol['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
is_clone = True
return params, is_clone, sf_vol
def _do_clone_volume(self, src_uuid,
vref, sf_src_snap=None):
"""Create a clone of an existing volume or snapshot."""
attributes = {}
sf_account = self._get_create_account(vref['project_id'])
params = {'name': '%(prefix)s%(id)s' %
{'prefix': self.configuration.sf_volume_prefix,
'id': vref['id']},
'newAccountID': sf_account['accountID']}
is_clone = False
sf_vol = None
if sf_src_snap:
# In some scenarios we are passed the snapshot information that we
# are supposed to clone.
params['snapshotID'] = sf_src_snap['snapshotID']
params['volumeID'] = sf_src_snap['volumeID']
params['newSize'] = int(vref['size'] * units.Gi)
else:
params, is_clone, sf_vol = self._snapshot_discovery(src_uuid,
params,
vref)
data = self._issue_api_request('CloneVolume', params, version='6.0')
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
sf_volume_id = data['result']['volumeID']
# NOTE(jdg): all attributes are copied via clone, need to do an update
# to set any that were provided
qos = self._retrieve_qos_setting(vref)
params = {'volumeID': sf_volume_id}
if qos:
params['qos'] = qos
create_time = vref['created_at'].isoformat()
attributes = {'uuid': vref['id'],
'is_clone': 'True',
'src_uuid': src_uuid,
'created_at': create_time}
params['attributes'] = attributes
data = self._issue_api_request('ModifyVolume', params)
model_update = self._get_model_info(sf_account, sf_volume_id)
if model_update is None:
mesg = _('Failed to get model update from clone')
raise exception.SolidFireAPIException(mesg)
# Increment the usage count, just for data collection
# We're only doing this for clones, not create_from snaps
if is_clone:
data = self._update_attributes(sf_vol)
return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
cloned_count = sf_vol['attributes'].get('cloned_count', 0)
cloned_count += 1
attributes = sf_vol['attributes']
attributes['cloned_count'] = cloned_count
params = {'volumeID': int(sf_vol['volumeID'])}
params['attributes'] = attributes
return self._issue_api_request('ModifyVolume', params)
def _do_volume_create(self, sf_account, params, endpoint=None):
params['accountID'] = sf_account['accountID']
sf_volid = self._issue_api_request(
'CreateVolume', params, endpoint=endpoint)['result']['volumeID']
return self._get_model_info(sf_account, sf_volid, endpoint=endpoint)
def _do_snapshot_create(self, params):
model_update = {}
snapshot_id = self._issue_api_request(
'CreateSnapshot', params, version='6.0')['result']['snapshotID']
snaps = self._get_sf_snapshots()
snap = (
next((s for s in snaps if int(s["snapshotID"]) ==
int(snapshot_id)), None))
model_update['provider_id'] = (
self._create_provider_id_string(snap['snapshotID'],
snap['volumeID']))
return model_update
def _set_qos_presets(self, volume):
qos = {}
valid_presets = self.sf_qos_dict.keys()
# First look to see if they included a preset
presets = [i.value for i in volume.get('volume_metadata')
if i.key == 'sf-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
LOG.warning(_LW('More than one valid preset was '
'detected, using %s'), presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
for i in volume.get('volume_metadata'):
if i.key in self.sf_qos_keys:
qos[i.key] = int(i.value)
return qos
def _set_qos_by_volume_type(self, ctxt, type_id):
qos = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(jdg): We prefer the qos_specs association
# and over-ride any existing
# extra-specs settings if present
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
else:
kvs = specs
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.sf_qos_keys:
qos[key] = int(value)
return qos
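    # Illustrative example (assumed spec values): qos-specs or extra-specs such as
    #   {'qos:minIOPS': '500', 'qos:maxIOPS': '1000', 'burstIOPS': '1500'}
    # are reduced by the loop above to
    #   {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1500}
    # because any scope prefix before ':' is stripped and only keys listed in
    # self.sf_qos_keys are kept, with values cast to int.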
def _get_sf_volume(self, uuid, params=None):
if params:
vols = self._issue_api_request(
'ListVolumesForAccount', params)['result']['volumes']
else:
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
found_count = 0
sf_volref = None
for v in vols:
# NOTE(jdg): In the case of "name" we can't
# update that on manage/import, so we use
# the uuid attribute
meta = v.get('attributes')
alt_id = ''
if meta:
alt_id = meta.get('uuid', '')
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
"to cinder ID %(uuid)s.",
{'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
# where this might be a cleanup for a failed delete.
# Until we get better states we'll just log an error
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
return sf_volref
def _get_sf_snapshots(self, sf_volid=None):
params = {}
if sf_volid:
params = {'volumeID': sf_volid}
return self._issue_api_request(
'ListSnapshots', params, version='6.0')['result']['snapshots']
def _create_image_volume(self, context,
image_meta, image_service,
image_id):
with image_utils.TemporaryImages.fetch(image_service,
context,
image_id) as tmp_image:
data = image_utils.qemu_img_info(tmp_image)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi))
attributes = {}
attributes['image_info'] = {}
attributes['image_info']['image_updated_at'] = (
image_meta['updated_at'].isoformat())
attributes['image_info']['image_name'] = (
image_meta['name'])
attributes['image_info']['image_created_at'] = (
image_meta['created_at'].isoformat())
attributes['image_info']['image_id'] = image_meta['id']
params = {'name': 'OpenStackIMG-%s' % image_id,
'accountID': self.template_account_id,
'sliceCount': 1,
'totalSize': int(virtual_size * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': {}}
sf_account = self._issue_api_request(
'GetAccountByID',
{'accountID': self.template_account_id})['result']['account']
template_vol = self._do_volume_create(sf_account, params)
tvol = {}
tvol['id'] = image_id
tvol['provider_location'] = template_vol['provider_location']
tvol['provider_auth'] = template_vol['provider_auth']
connector = {'multipath': False}
conn = self.initialize_connection(tvol, connector)
attach_info = super(SolidFireDriver, self)._connect_device(conn)
properties = 'na'
try:
image_utils.convert_image(tmp_image,
attach_info['device']['path'],
'raw',
run_as_root=True)
data = image_utils.qemu_img_info(attach_info['device']['path'],
run_as_root=True)
if data.file_format != 'raw':
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(vol_format)s, but format is "
"now %(file_format)s") % {'vol_format': 'raw',
'file_format': data.
file_format})
except Exception as exc:
vol = self._get_sf_volume(image_id)
LOG.error(_LE('Failed image conversion during '
'cache creation: %s'),
exc)
LOG.debug('Removing SolidFire Cache Volume (SF ID): %s',
vol['volumeID'])
self._detach_volume(context, attach_info, tvol, properties)
self._issue_api_request('DeleteVolume', params)
return
self._detach_volume(context, attach_info, tvol, properties)
sf_vol = self._get_sf_volume(image_id, params)
LOG.debug('Successfully created SolidFire Image Template '
'for image-id: %s', image_id)
return sf_vol
def _verify_image_volume(self, context, image_meta, image_service):
# This method just verifies that IF we have a cache volume that
# it's still up to date and current WRT the image in Glance
# ie an image-update hasn't occurred since we grabbed it
# If it's out of date, just delete it and we'll create a new one
# Any other case we don't care and just return without doing anything
params = {'accountID': self.template_account_id}
sf_vol = self._get_sf_volume(image_meta['id'], params)
if sf_vol is None:
return
# Check updated_at field, delete copy and update if needed
if sf_vol['attributes']['image_info']['image_updated_at'] == (
image_meta['updated_at'].isoformat()):
return
else:
# Bummer, it's been updated, delete it
params = {'accountID': self.template_account_id}
params['volumeID'] = sf_vol['volumeID']
self._issue_api_request('DeleteVolume', params)
if not self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']):
msg = _("Failed to create SolidFire Image-Volume")
raise exception.SolidFireAPIException(msg)
def _get_sfaccounts_for_tenant(self, cinder_project_id):
accounts = self._issue_api_request(
'ListAccounts', {})['result']['accounts']
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in accounts if
cinder_project_id in acc['username']])
def _get_all_active_volumes(self, cinder_uuid=None):
params = {}
volumes = self._issue_api_request('ListActiveVolumes',
params)['result']['volumes']
if cinder_uuid:
vols = ([v for v in volumes if
cinder_uuid in v.name])
else:
vols = [v for v in volumes]
return vols
def _get_all_deleted_volumes(self, cinder_uuid=None):
params = {}
vols = self._issue_api_request('ListDeletedVolumes',
params)['result']['volumes']
if cinder_uuid:
deleted_vols = ([v for v in vols if
cinder_uuid in v['name']])
else:
deleted_vols = [v for v in vols]
return deleted_vols
def _get_account_create_availability(self, accounts):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
for acc in accounts:
if self._get_volumes_for_account(
acc['accountID']) > self.max_volumes_per_account:
return acc
if len(accounts) == 1:
sfaccount = self._create_sfaccount(accounts[0]['name'] + '_')
return sfaccount
return None
def _get_create_account(self, proj_id):
# Retrieve SolidFire accountID to be used for creating volumes.
sf_accounts = self._get_sfaccounts_for_tenant(proj_id)
if not sf_accounts:
sf_account = self._create_sfaccount(proj_id)
else:
# Check availability for creates
sf_account = self._get_account_create_availability(sf_accounts)
if not sf_account:
msg = _('Volumes/account exceeded on both primary and '
'secondary SolidFire accounts.')
raise exception.SolidFireDriverException(msg)
return sf_account
def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
params = {'accountID': sf_account_id}
vols = self._issue_api_request('ListVolumesForAccount',
params)['result']['volumes']
if cinder_uuid:
vlist = [v for v in vols if
cinder_uuid in v['name']]
else:
vlist = [v for v in vols]
vlist = sorted(vlist, key=lambda k: k['volumeID'])
return vlist
    def _create_vag(self, iqn, vol_id=None):
        """Create a volume access group (VAG).
Returns the vag_id.
"""
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
params = {'name': vag_name,
'initiators': [iqn],
'volumes': [vol_id],
'attributes': {'openstack': True}}
try:
result = self._issue_api_request('CreateVolumeAccessGroup',
params,
version='7.0')
return result['result']['volumeAccessGroupID']
except exception.SolidFireAPIException as error:
if xExceededLimit in error.msg:
if iqn in error.msg:
# Initiator double registered.
return self._safe_create_vag(iqn, vol_id)
else:
# VAG limit reached. Purge and start over.
self._purge_vags()
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _safe_create_vag(self, iqn, vol_id=None):
# Potential race condition with simultaneous volume attaches to the
# same host. To help avoid this, VAG creation makes a best attempt at
# finding and using an existing VAG.
vags = self._get_vags_by_name(iqn)
if vags:
# Filter through the vags and find the one with matching initiator
vag = next((v for v in vags if iqn in v['initiators']), None)
if vag:
return vag['volumeAccessGroupID']
else:
# No matches, use the first result, add initiator IQN.
vag_id = vags[0]['volumeAccessGroupID']
return self._add_initiator_to_vag(iqn, vag_id)
return self._create_vag(iqn, vol_id)
def _base_get_vags(self):
params = {}
vags = self._issue_api_request(
'ListVolumeAccessGroups',
params,
version='7.0')['result']['volumeAccessGroups']
return vags
def _get_vags_by_name(self, iqn):
"""Retrieve SolidFire volume access group objects by name.
Returns an array of vags with a matching name value.
Returns an empty array if there are no matches.
"""
vags = self._base_get_vags()
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
matching_vags = [vag for vag in vags if vag['name'] == vag_name]
return matching_vags
def _add_initiator_to_vag(self, iqn, vag_id):
# Added a vag_id return as there is a chance that we might have to
# create a new VAG if our target VAG is deleted underneath us.
params = {"initiators": [iqn],
"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('AddInitiatorsToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except exception.SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
# No locking means sometimes a VAG can be removed by a parallel
# volume detach against the same host.
return self._safe_create_vag(iqn)
else:
raise
def _add_volume_to_vag(self, vol_id, iqn, vag_id):
# Added a vag_id return to be consistent with add_initiator_to_vag. It
# isn't necessary but may be helpful in the future.
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('AddVolumesToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except exception.SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _remove_volume_from_vag(self, vol_id, vag_id):
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('RemoveVolumesFromVolumeAccessGroup',
params,
version='7.0')
except exception.SolidFireAPIException as error:
if xNotInVolumeAccessGroup in error.msg:
pass
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
pass
else:
raise
def _remove_volume_from_vags(self, vol_id):
# Due to all sorts of uncertainty around multiattach, on volume
# deletion we make a best attempt at removing the vol_id from VAGs.
vags = self._base_get_vags()
targets = [v for v in vags if vol_id in v['volumes']]
for vag in targets:
self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID'])
def _remove_vag(self, vag_id):
params = {"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('DeleteVolumeAccessGroup',
params,
version='7.0')
except exception.SolidFireAPIException as error:
if xVolumeAccessGroupIDDoesNotExist not in error.msg:
raise
def _purge_vags(self, limit=10):
        # Purge up to `limit` VAGs that have no active volumes and no
        # initiators, and that carry an OpenStack attribute. Purge oldest VAGs first.
vags = self._base_get_vags()
targets = [v for v in vags if v['volumes'] == [] and
v['initiators'] == [] and
v['deletedVolumes'] == [] and
v['attributes'].get('openstack')]
sorted_targets = sorted(targets,
key=lambda k: k['volumeAccessGroupID'])
for vag in sorted_targets[:limit]:
self._remove_vag(vag['volumeAccessGroupID'])
def clone_image(self, context,
volume, image_location,
image_meta, image_service):
public = False
# Check out pre-requisites:
# Is template caching enabled?
if not self.configuration.sf_allow_template_caching:
return None, False
# NOTE(jdg): Glance V2 moved from is_public to visibility
# so we check both, as we don't necessarily know or want
# to care which we're using. Will need to look at
# future handling of things like shared and community
# but for now, it's owner or public and that's it
visibility = image_meta.get('visibility', None)
if visibility and visibility == 'public':
public = True
elif image_meta.get('is_public', False):
public = True
else:
if image_meta['owner'] == volume['project_id']:
public = True
if not public:
LOG.warning(_LW("Requested image is not "
"accessible by current Tenant."))
return None, False
try:
self._verify_image_volume(context,
image_meta,
image_service)
except exception.SolidFireAPIException:
return None, False
try:
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
volume)
except exception.VolumeNotFound:
if self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']) is None:
# We failed, dump out
return None, False
# Ok, should be good to go now, try it again
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
volume)
return model, True
def _retrieve_qos_setting(self, volume):
qos = {}
if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
return qos
def create_volume(self, volume):
"""Create volume on SolidFire device.
The account is where CHAP settings are derived from, volume is
created and exported. Note that the new volume is immediately ready
for use.
One caveat here is that an existing user account must be specified
in the API call to create a new volume. We use a set algorithm to
determine account info based on passed in cinder volume object. First
we check to see if the account already exists (and use it), or if it
does not already exist, we'll go ahead and create it.
"""
slice_count = 1
attributes = {}
sf_account = self._get_create_account(volume['project_id'])
qos = self._retrieve_qos_setting(volume)
create_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'created_at': create_time}
vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id'])
params = {'name': vname,
'accountID': sf_account['accountID'],
'sliceCount': slice_count,
'totalSize': int(volume['size'] * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
migration_status = volume.get('migration_status', None)
if migration_status and 'target' in migration_status:
k, v = migration_status.split(':')
vname = '%s%s' % (self.configuration.sf_volume_prefix, v)
params['name'] = vname
params['attributes']['migration_uuid'] = volume['id']
params['attributes']['uuid'] = v
model_update = self._do_volume_create(sf_account, params)
try:
rep_settings = self._retrieve_replication_settings(volume)
if self.replication_enabled and rep_settings:
volume['volumeID'] = (
int(model_update['provider_id'].split()[0]))
self._replicate_volume(volume, params,
sf_account, rep_settings)
except exception.SolidFireAPIException:
# NOTE(jdg): Something went wrong after the source create, due to
            # the way TFLOW works and its insistence on retrying the same
# command over and over coupled with the fact that the introduction
# of objects now sets host to None on failures we'll end up with an
# orphaned volume on the backend for every one of these segments
# that fail, for n-retries. Sad Sad Panda!! We'll just do it
# ourselves until we can get a general fix in Cinder further up the
# line
with excutils.save_and_reraise_exception():
sf_volid = int(model_update['provider_id'].split()[0])
self._issue_api_request('DeleteVolume', {'volumeID': sf_volid})
return model_update
def _retrieve_replication_settings(self, volume):
rep_data = {}
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
rep_data = self._set_rep_by_volume_type(ctxt, type_id)
return rep_data
def _set_rep_by_volume_type(self, ctxt, type_id):
rep_opts = {}
type_ref = volume_types.get_volume_type(ctxt, type_id)
specs = type_ref.get('extra_specs')
if specs.get('replication', 'disabled').lower() == 'enabled':
rep_opts['targets'] = specs.get(
'solidfire:replication_targets', self.cluster_pairs[0])
return rep_opts
def _replicate_volume(self, volume, src_params,
parent_sfaccount, rep_info):
params = {}
# TODO(jdg): Right now we just go to first pair,
# need to add parsing of rep_info eventually
# in other words "rep_info" is not used yet!
tgt_endpoint = self.cluster_pairs[0]['endpoint']
LOG.debug("Replicating volume on remote cluster: %s", tgt_endpoint)
params['attributes'] = src_params['attributes']
params['username'] = self._get_sf_account_name(volume['project_id'])
try:
params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
params['targetSecret'] = parent_sfaccount['targetSecret']
self._issue_api_request(
'AddAccount',
params,
endpoint=tgt_endpoint)['result']['accountID']
except exception.SolidFireAPIException as ex:
if 'xDuplicateUsername' not in ex.msg:
raise
remote_account = (
self._get_sfaccount_by_name(params['username'],
endpoint=tgt_endpoint))
# Create the volume on the remote cluster w/same params as original
params = src_params
params['accountID'] = remote_account['accountID']
LOG.debug("Create remote volume on: %(endpoint)s with account: "
"%(account)s",
{'endpoint': tgt_endpoint['url'], 'account': remote_account})
model_update = self._do_volume_create(
remote_account, params, endpoint=tgt_endpoint)
tgt_sfid = int(model_update['provider_id'].split()[0])
params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=tgt_endpoint)
# Enable volume pairing
LOG.debug("Start volume pairing on volume ID: %s",
volume['volumeID'])
params = {'volumeID': volume['volumeID']}
rep_key = self._issue_api_request('StartVolumePairing',
params,
'8.0')['result']['volumePairingKey']
params = {'volumeID': tgt_sfid,
'volumePairingKey': rep_key}
LOG.debug("Issue CompleteVolumePairing request on remote: "
"%(endpoint)s, %(parameters)s",
{'endpoint': tgt_endpoint['url'], 'parameters': params})
self._issue_api_request('CompleteVolumePairing',
params,
'8.0',
endpoint=tgt_endpoint)
LOG.debug("Completed volume pairing.")
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of an existing volume."""
(_data, _sfaccount, model) = self._do_clone_volume(
src_vref['id'],
volume)
return model
def delete_volume(self, volume):
"""Delete SolidFire Volume from device.
        SolidFire allows multiple volumes with the same name;
        volumeID is what's guaranteed unique.
"""
sf_vol = None
accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
if accounts is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
LOG.error(_LE("This usually means the volume was never "
"successfully created."))
return
for acc in accounts:
vols = self._get_volumes_for_account(acc['accountID'],
volume['id'])
if vols:
sf_vol = vols[0]
break
if sf_vol is not None:
for vp in sf_vol.get('volumePairs', []):
LOG.debug("Deleting paired volume on remote cluster...")
pair_id = vp['clusterPairID']
for cluster in self.cluster_pairs:
if cluster['clusterPairID'] == pair_id:
params = {'volumeID': vp['remoteVolumeID']}
LOG.debug("Issue Delete request on cluster: "
"%(remote)s with params: %(parameters)s",
{'remote': cluster['endpoint']['url'],
'parameters': params})
self._issue_api_request('DeleteVolume', params,
endpoint=cluster['endpoint'])
if sf_vol['status'] == 'active':
params = {'volumeID': sf_vol['volumeID']}
self._issue_api_request('DeleteVolume', params)
if volume.get('multiattach'):
self._remove_volume_from_vags(sf_vol['volumeID'])
else:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
def delete_snapshot(self, snapshot):
"""Delete the specified snapshot from the SolidFire cluster."""
sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])
accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
snap = None
for acct in accounts:
params = {'accountID': acct['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol:
sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
None)
if snap:
params = {'snapshotID': snap['snapshotID']}
self._issue_api_request('DeleteSnapshot',
params,
version='6.0')
return
# Make sure it's not "old style" using clones as snaps
LOG.debug("Snapshot not found, checking old style clones.")
self.delete_volume(snapshot)
def create_snapshot(self, snapshot):
sfaccount = self._get_sfaccount(snapshot['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"create_snapshot operation!"), snapshot['volume_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
params = {'volumeID': sf_vol['volumeID'],
'name': '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])}
return self._do_snapshot_create(params)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from the specified snapshot."""
(_data, _sfaccount, model) = self._do_clone_volume(
snapshot['id'],
volume)
return model
# Consistency group helpers
def _create_group_snapshot(self, name, sf_volumes):
# Group snapshot is our version of a consistency group snapshot.
vol_ids = [vol['volumeID'] for vol in sf_volumes]
params = {'name': name,
'volumes': vol_ids}
snapshot_id = self._issue_api_request('CreateGroupSnapshot',
params,
version='7.0')
return snapshot_id['result']
def _group_snapshot_creator(self, gsnap_name, src_vol_ids):
# Common helper that takes in an array of OpenStack Volume UUIDs and
# creates a SolidFire group snapshot with them.
vol_names = [self.configuration.sf_volume_prefix + vol_id
for vol_id in src_vol_ids]
active_sf_vols = self._get_all_active_volumes()
target_vols = [vol for vol in active_sf_vols
if vol['name'] in vol_names]
if len(src_vol_ids) != len(target_vols):
msg = (_("Retrieved a different amount of SolidFire volumes for "
"the provided Cinder volumes. Retrieved: %(ret)s "
"Desired: %(des)s") % {"ret": len(target_vols),
"des": len(src_vol_ids)})
raise exception.SolidFireDriverException(msg)
result = self._create_group_snapshot(gsnap_name, target_vols)
return result
def _create_temp_group_snapshot(self, source_cg, source_vols):
# Take a temporary snapshot to create the volumes for a new
# consistency group.
gsnap_name = ("%(prefix)s%(id)s-tmp" %
{"prefix": self.configuration.sf_volume_prefix,
"id": source_cg['id']})
vol_ids = [vol['id'] for vol in source_vols]
self._group_snapshot_creator(gsnap_name, vol_ids)
return gsnap_name
def _list_group_snapshots(self):
result = self._issue_api_request('ListGroupSnapshots',
{},
version='7.0')
return result['result']['groupSnapshots']
def _get_group_snapshot_by_name(self, name):
target_snaps = self._list_group_snapshots()
target = next((snap for snap in target_snaps
if snap['name'] == name), None)
return target
def _delete_group_snapshot(self, gsnapid):
params = {'groupSnapshotID': gsnapid}
self._issue_api_request('DeleteGroupSnapshot',
params,
version='7.0')
def _delete_cgsnapshot_by_name(self, snap_name):
# Common function used to find and delete a snapshot.
target = self._get_group_snapshot_by_name(snap_name)
if not target:
msg = _("Failed to find group snapshot named: %s") % snap_name
raise exception.SolidFireDriverException(msg)
self._delete_group_snapshot(target['groupSnapshotID'])
def _find_linked_snapshot(self, target_uuid, group_snap):
# Because group snapshots name each individual snapshot the group
# snapshot name, we have to trawl through the SolidFire snapshots to
# find the SolidFire snapshot from the group that is linked with the
# SolidFire volumeID that is linked to the Cinder snapshot source
# volume.
source_vol = self._get_sf_volume(target_uuid)
target_snap = next((sn for sn in group_snap['members']
if sn['volumeID'] == source_vol['volumeID']), None)
return target_snap
def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid,
sf_group_snap, vol):
# Find the correct SolidFire backing snapshot.
sf_src_snap = self._find_linked_snapshot(target_uuid,
sf_group_snap)
_data, _sfaccount, model = self._do_clone_volume(src_uuid,
vol,
sf_src_snap)
model['id'] = vol['id']
model['status'] = 'available'
return model
def _map_sf_volumes(self, cinder_volumes, endpoint=None):
"""Get a list of SolidFire volumes.
Creates a list of SolidFire volumes based
        on matching a list of cinder volume IDs,
        also adds a 'cinder_id' key to match cinder.
"""
vols = self._issue_api_request(
'ListActiveVolumes', {},
endpoint=endpoint)['result']['volumes']
vlist = (
[sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in
sfvol['name']])
for v in vlist:
v['cinder_id'] = v['name'].split(
self.configuration.sf_volume_prefix)[1]
return vlist
# Required consistency group functions
def create_consistencygroup(self, ctxt, group):
# SolidFire does not have a viable means for storing consistency group
# volume associations. So, we're just going to play along with the
# consistency group song and dance. There will be a lot of no-ops
# because of this.
return {'status': fields.ConsistencyGroupStatus.AVAILABLE}
def create_consistencygroup_from_src(self, ctxt, group, volumes,
cgsnapshot, snapshots,
source_cg, source_vols):
if cgsnapshot and snapshots:
sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
sf_group_snap = self._get_group_snapshot_by_name(sf_name)
# Go about creating volumes from provided snaps.
vol_models = []
for vol, snap in zip(volumes, snapshots):
vol_models.append(self._create_clone_from_sf_snapshot(
snap['volume_id'],
snap['id'],
sf_group_snap,
vol))
return ({'status': fields.ConsistencyGroupStatus.AVAILABLE},
vol_models)
elif source_cg and source_vols:
# Create temporary group snapshot.
gsnap_name = self._create_temp_group_snapshot(source_cg,
source_vols)
try:
sf_group_snap = self._get_group_snapshot_by_name(gsnap_name)
# For each temporary snapshot clone the volume.
vol_models = []
for vol in volumes:
vol_models.append(self._create_clone_from_sf_snapshot(
vol['source_volid'],
vol['source_volid'],
sf_group_snap,
vol))
finally:
self._delete_cgsnapshot_by_name(gsnap_name)
return {'status': 'available'}, vol_models
def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
vol_ids = [snapshot['volume_id'] for snapshot in snapshots]
vol_names = [self.configuration.sf_volume_prefix + vol_id
for vol_id in vol_ids]
active_sf_vols = self._get_all_active_volumes()
target_vols = [vol for vol in active_sf_vols
if vol['name'] in vol_names]
if len(snapshots) != len(target_vols):
msg = (_("Retrieved a different amount of SolidFire volumes for "
"the provided Cinder snapshots. Retrieved: %(ret)s "
"Desired: %(des)s") % {"ret": len(target_vols),
"des": len(snapshots)})
raise exception.SolidFireDriverException(msg)
snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
self._create_group_snapshot(snap_name, target_vols)
return None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
# Similar to create_consistencygroup, SolidFire's lack of a consistency
# group object means there is nothing to update on the cluster.
return None, None, None
def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
self._delete_cgsnapshot_by_name(snap_name)
return None, None
def delete_consistencygroup(self, ctxt, group, volumes):
for vol in volumes:
self.delete_volume(vol)
return None, None
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data
"""
if refresh:
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
return self.cluster_stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"extend_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
params = {
'volumeID': sf_vol['volumeID'],
'totalSize': int(new_size * units.Gi)
}
self._issue_api_request('ModifyVolume',
params, version='5.0')
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
# NOTE(jdg): The SF api provides an UNBELIEVABLE amount
# of stats data, this is just one of the calls
results = self._issue_api_request('GetClusterCapacity', params)
results = results['result']['clusterCapacity']
free_capacity = (
results['maxProvisionedSpace'] - results['usedSpace'])
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'SolidFire Inc'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['consistencygroup_support'] = True
# TODO(jdg): should we have a "replication_status" that includes
# enabled, disabled, failed-over, error ?
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication'] = 'enabled'
data['active_cluster_mvip'] = self.active_cluster_info['mvip']
data['total_capacity_gb'] = (
float(results['maxProvisionedSpace'] / units.Gi))
data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['compression_percent'] = (
results['compressionPercent'])
data['deduplicaton_percent'] = (
results['deDuplicationPercent'])
data['thin_provision_percent'] = (
results['thinProvisioningPercent'])
self.cluster_stats = data
def initialize_connection(self, volume, connector):
"""Initialize the connection and return connection info.
Optionally checks and utilizes volume access groups.
"""
properties = self._sf_initialize_connection(volume, connector)
properties['data']['discard'] = True
return properties
def attach_volume(self, context, volume,
instance_uuid, host_name,
mountpoint):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"attach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = volume.get('attach_time', None)
attributes['attached_to'] = instance_uuid
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
self._issue_api_request('ModifyVolume', params)
def terminate_connection(self, volume, properties, force):
return self._sf_terminate_connection(volume,
properties,
force)
def detach_volume(self, context, volume, attachment=None):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"detach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = None
attributes['attached_to'] = None
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
self._issue_api_request('ModifyVolume', params)
def accept_transfer(self, context, volume,
new_user, new_project):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"accept_transfer operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
if new_project != volume['project_id']:
# do a create_sfaccount here as this tenant
# may not exist on the cluster yet
sfaccount = self._create_sfaccount(new_project)
params = {
'volumeID': sf_vol['volumeID'],
'accountID': sfaccount['accountID']
}
self._issue_api_request('ModifyVolume',
params, version='5.0')
volume['project_id'] = new_project
volume['user_id'] = new_user
return self.target_driver.ensure_export(context, volume, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
qos = {}
attributes = {}
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['retyped_at'] = timeutils.utcnow().isoformat()
params = {'volumeID': sf_vol['volumeID']}
qos = self._set_qos_by_volume_type(ctxt, new_type['id'])
if qos:
params['qos'] = qos
self._issue_api_request('ModifyVolume', params)
return True
def manage_existing(self, volume, external_ref):
"""Manages an existing SolidFire Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
"""
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing volume "
"requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
sf_ref = vols[0]
sfaccount = self._create_sfaccount(volume['project_id'])
attributes = {}
qos = self._retrieve_qos_setting(volume)
import_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'os_imported_at': import_time,
'old_name': sfname}
params = {'name': volume['name'],
'volumeID': sf_ref['volumeID'],
'accountID': sfaccount['accountID'],
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
self._issue_api_request('ModifyVolume',
params, version='5.0')
return self._get_model_info(sfaccount, sf_ref['volumeID'])
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'name': <name of existing volume on SF Cluster>}
"""
sfid = external_ref.get('source-id', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing get size "
"requires 'id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))
def unmanage(self, volume):
"""Mark SolidFire Volume as unmanaged (export from Cinder)."""
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"unmanage operation!"), volume['id'])
raise exception.SolidFireAPIException(_("Failed to find account "
"for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
export_time = timeutils.utcnow().isoformat()
attributes = sf_vol['attributes']
attributes['os_exported_at'] = export_time
params = {'volumeID': int(sf_vol['volumeID']),
'attributes': attributes}
self._issue_api_request('ModifyVolume',
params, version='5.0')
def _failover_volume(self, remote_vol, remote):
"""Modify remote volume to R/W mode."""
self._issue_api_request(
'RemoveVolumePair',
{'volumeID': remote_vol['volumeID']},
endpoint=remote['endpoint'], version='7.0')
params = {'volumeID': remote_vol['volumeID'],
'access': 'readWrite'}
self._issue_api_request('ModifyVolume', params,
endpoint=remote['endpoint'])
def failover_host(self, context, volumes, secondary_id=None):
"""Failover to replication target."""
volume_updates = []
remote = None
if secondary_id:
for rc in self.cluster_pairs:
if rc['mvip'] == secondary_id:
remote = rc
break
if not remote:
LOG.error(_LE("SolidFire driver received failover_host "
"but was unable to find specified replication "
"pair with id: %s."), secondary_id)
raise exception.InvalidReplicationTarget
else:
remote = self.cluster_pairs[0]
if not remote or not self.replication_enabled:
LOG.error(_LE("SolidFire driver received failover_host "
"request, however replication is NOT "
"enabled, or there are no available "
"targets to fail-over to."))
raise exception.UnableToFailOver(reason=_("Failover requested "
"on non replicated "
"backend."))
remote_vols = self._map_sf_volumes(volumes,
endpoint=remote['endpoint'])
primary_vols = self._map_sf_volumes(volumes)
for v in volumes:
remote_vlist = filter(lambda sfv: sfv['cinder_id'] == v['id'],
remote_vols)
if len(remote_vlist) > 0:
remote_vol = remote_vlist[0]
self._failover_volume(remote_vol, remote)
primary_vol = filter(lambda sfv: sfv['cinder_id'] == v['id'],
primary_vols)[0]
if len(primary_vol['volumePairs']) > 0:
self._issue_api_request(
'RemoveVolumePair',
{'volumeID': primary_vol['volumeID']},
version='7.0')
iqn = remote_vol['iqn']
volume_updates.append(
{'volume_id': v['id'],
'updates': {
'provider_location': ('%s %s %s' %
(remote['endpoint']['svip'],
iqn,
0)),
'replication_status': 'failed-over'}})
else:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
# FIXME(jdg): This introduces a problem for us, up until now our driver
# has been pretty much stateless and has allowed customers to run
# active/active HA c-vol services with SolidFire. The introduction of
# the active_cluster and failed_over attributes is going to break that
        # but for now that's going to be the trade-off of using replication
self.active_cluster_info = remote
self.failed_over = True
return remote['mvip'], volume_updates
def freeze_backend(self, context):
"""Freeze backend notification."""
pass
def thaw_backend(self, context):
"""Thaw backend notification."""
pass
class SolidFireISCSI(iscsi_driver.SanISCSITarget):
def __init__(self, *args, **kwargs):
super(SolidFireISCSI, self).__init__(*args, **kwargs)
self.sf_driver = kwargs.get('solidfire_driver')
def __getattr__(self, attr):
if hasattr(self.sf_driver, attr):
return getattr(self.sf_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _do_iscsi_export(self, volume):
sfaccount = self._get_sfaccount(volume['project_id'])
model_update = {}
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
sfaccount['targetSecret']))
return model_update
def create_export(self, context, volume, volume_path):
return self._do_iscsi_export(volume)
def ensure_export(self, context, volume, volume_path):
try:
return self._do_iscsi_export(volume)
except exception.SolidFireAPIException:
return None
# Following are abc's that we make sure are caught and
# paid attention to. In our case we don't use them
# so just stub them out here.
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
pass
def _sf_initialize_connection(self, volume, connector):
"""Initialize the connection and return connection info.
Optionally checks and utilizes volume access groups.
"""
if self.configuration.sf_enable_vag:
iqn = connector['initiator']
provider_id = volume['provider_id']
vol_id = int(provider_id.split()[0])
# safe_create_vag may opt to reuse vs create a vag, so we need to
# add our vol_id.
vag_id = self._safe_create_vag(iqn, vol_id)
self._add_volume_to_vag(vol_id, iqn, vag_id)
# Continue along with default behavior
return super(SolidFireISCSI, self).initialize_connection(volume,
connector)
def _sf_terminate_connection(self, volume, properties, force):
"""Terminate the volume connection.
Optionally remove volume from volume access group.
If the VAG is empty then the VAG is also removed.
"""
if self.configuration.sf_enable_vag:
iqn = properties['initiator']
vag = self._get_vags_by_name(iqn)
provider_id = volume['provider_id']
vol_id = int(provider_id.split()[0])
if vag and not volume['multiattach']:
# Multiattach causes problems with removing volumes from VAGs.
# Compromise solution for now is to remove multiattach volumes
# from VAGs during volume deletion.
vag = vag[0]
vag_id = vag['volumeAccessGroupID']
if [vol_id] == vag['volumes']:
self._remove_vag(vag_id)
elif vol_id in vag['volumes']:
self._remove_volume_from_vag(vol_id, vag_id)
return super(SolidFireISCSI, self).terminate_connection(volume,
properties,
force=force)
| apache-2.0 | -3,546,503,090,121,789,000 | 41.805419 | 79 | 0.53804 | false |
josschne/BabyWomp | cocos2d/tools/cocos2d-console/console/cocos2d_jscompile.py | 17 | 11265 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos2d "jscompile" plugin
#
# Copyright 2013 (C) Intel
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"jscompile" plugin for cocos2d command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import subprocess
import os
import json
import inspect
import cocos2d
class CCPluginJSCompile(cocos2d.CCPlugin):
"""
compiles (encodes) and minifies JS files
"""
@staticmethod
def brief_description():
# returns a short description of this module
return "jscompile\tminifies and/or compiles js files"
    # This is not the constructor, just an initializer
def init(self, options, workingdir):
"""
Arguments:
- `options`:
"""
self._current_src_dir = None
self._src_dir_arr = self.normalize_path_in_list(options.src_dir_arr)
self._dst_dir = options.dst_dir
self._use_closure_compiler = options.use_closure_compiler
self._config = None
self._workingdir = workingdir
if options.compiler_config != None:
f = open(options.compiler_config)
self._config = json.load(f)
f.close()
self.normalize_path_in_list(self._config["pre_order"])
self.normalize_path_in_list(self._config["post_order"])
self.normalize_path_in_list(self._config["skip"])
self._success = []
self._failure = []
self._js_files = {}
self._compressed_js_path = os.path.join(self._dst_dir, options.compressed_filename)
self._compressed_jsc_path = os.path.join(self._dst_dir, options.compressed_filename+"c")
def normalize_path_in_list(self, list):
for i in list:
tmp = os.path.normpath(i)
list[list.index(i)] = tmp
return list
def get_relative_path(self, jsfile):
try:
# print "current src dir: "+self._current_src_dir
pos = jsfile.index(self._current_src_dir)
if pos != 0:
raise Exception("cannot find src directory in file path.")
# print "origin js path: "+ jsfile
# print "relative path: "+jsfile[len(self._current_src_dir)+1:]
return jsfile[len(self._current_src_dir)+1:]
except ValueError:
raise Exception("cannot find src directory in file path.")
def get_output_file_path(self, jsfile):
"""
Gets output file path by source js file
"""
# create folder for generated file
jsc_filepath = ""
relative_path = self.get_relative_path(jsfile)+"c"
jsc_filepath = os.path.join(self._dst_dir, relative_path)
dst_rootpath = os.path.split(jsc_filepath)[0]
try:
# print "creating dir (%s)" % (dst_rootpath)
os.makedirs(dst_rootpath)
except OSError:
if os.path.exists(dst_rootpath) == False:
# There was an error on creation, so make sure we know about it
raise Exception("Error: cannot create folder in "+dst_rootpath)
# print "return jsc path: "+jsc_filepath
return jsc_filepath
def compile_js(self, jsfile, output_file):
"""
Compiles js file
"""
print "compiling js (%s) to bytecode..." % (jsfile)
        jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
ret = subprocess.call(jsbcc_exe_path + " " + jsfile+" "+output_file, shell=True)
if ret == 0:
self._success.append(jsfile)
else:
self._failure.append(jsfile)
print "----------------------------------------"
def compress_js(self):
"""
Compress all js files into one big file.
"""
jsfiles = ""
for src_dir in self._src_dir_arr:
# print "\n----------src:"+src_dir
jsfiles = jsfiles + " --js ".join(self._js_files[src_dir]) + " "
compiler_jar_path = os.path.join(self._workingdir, "bin", "compiler.jar")
command = "java -jar %s --js %s --js_output_file %s" % (compiler_jar_path, jsfiles, self._compressed_js_path)
print "\ncommand:"+command+"\n"
ret = subprocess.call(command, shell=True)
if ret == 0:
print "js files were compressed successfully..."
else:
print "js files were compressed unsuccessfully..."
def deep_iterate_dir(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.deep_iterate_dir(path)
elif os.path.isfile(path):
if os.path.splitext(path)[1] == ".js":
self._js_files[self._current_src_dir].append(path)
def index_in_list(self, jsfile, l):
"""
Arguments:
- `self`:
- `jsfile`:
- `l`:
"""
index = -1
for el in l:
if jsfile.rfind(el) != -1:
# print "index:"+str(index+1)+", el:"+el
return index+1
index = index + 1
return -1
def js_filename_pre_order_compare(self, a, b):
"""
"""
pre_order = self._config["pre_order"]
index_a = self.index_in_list(a, pre_order)
index_b = self.index_in_list(b, pre_order)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return -1
elif not is_a_in_list and is_b_in_list:
return 1
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def js_filename_post_order_compare(self, a, b):
"""
"""
post_order = self._config["post_order"]
index_a = self.index_in_list(a, post_order)
index_b = self.index_in_list(b, post_order)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return 1
elif not is_a_in_list and is_b_in_list:
return -1
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def reorder_js_files(self):
if self._config == None:
return
# print "before:"+str(self._js_files)
for src_dir in self._js_files:
# Remove file in exclude list
need_remove_arr = []
for jsfile in self._js_files[src_dir]:
for exclude_file in self._config["skip"]:
if jsfile.rfind(exclude_file) != -1:
# print "remove:" + jsfile
need_remove_arr.append(jsfile)
for need_remove in need_remove_arr:
self._js_files[src_dir].remove(need_remove)
if (self._config != None):
pre_order = self._config["pre_order"]
self._js_files[src_dir].sort(cmp=self.js_filename_pre_order_compare)
self._js_files[src_dir].sort(cmp=self.js_filename_post_order_compare)
# print '-------------------'
# print "after:" + str(self._js_files)
def handle_all_js_files(self):
"""
Arguments:
- `self`:
"""
if self._use_closure_compiler == True:
self.compress_js()
self.compile_js(self._compressed_js_path, self._compressed_jsc_path)
# remove tmp compressed file
os.remove(self._compressed_js_path)
else:
for src_dir in self._src_dir_arr:
for jsfile in self._js_files[src_dir]:
self._current_src_dir = src_dir
self.compile_js(jsfile, self.get_output_file_path(jsfile))
# will be called from the cocos2d.py script
def run(self, argv):
"""
"""
self.parse_args(argv)
# create output directory
try:
os.makedirs(self._dst_dir)
except OSError:
if os.path.exists(self._dst_dir) == False:
raise Exception("Error: cannot create folder in "+self._dst_dir)
# deep iterate the src directory
for src_dir in self._src_dir_arr:
self._current_src_dir = src_dir
self._js_files[self._current_src_dir] = []
self.deep_iterate_dir(src_dir)
self.reorder_js_files()
self.handle_all_js_files()
print "\nCompilation finished, (%d) files succeed, (%d) files fail." % (len(self._success), len(self._failure))
if len(self._failure) > 0:
print "Failure files are:"
print self._failure
print "------------------------------"
def parse_args(self, argv):
"""
"""
from optparse import OptionParser
parser = OptionParser("usage: %prog jscompile -s src_dir -d dst_dir [-c -o COMPRESSED_FILENAME -j COMPILER_CONFIG]")
parser.add_option("-s", "--src",
action="append", type="string", dest="src_dir_arr",
help="source directory of js files needed to be compiled, supports mutiple source directory")
parser.add_option("-d", "--dst",
action="store", type="string", dest="dst_dir",
help="destination directory of js bytecode files to be stored")
parser.add_option("-c", "--use_closure_compiler",
action="store_true", dest="use_closure_compiler", default=False,
help="Whether to use closure compiler to compress all js files into just a big file")
parser.add_option("-o", "--output_compressed_filename",
action="store", dest="compressed_filename", default="game.min.js",
help="Only available when '-c' option was True")
parser.add_option("-j", "--compiler_config",
action="store", dest="compiler_config",
help="The configuration for closure compiler by using JSON, please refer to compiler_config_sample.json")
(options, args) = parser.parse_args(argv)
# print options
if options.src_dir_arr == None:
raise Exception("Please set source folder by \"-s\" or \"-src\", run ./jscompile.py -h for the usage ")
elif options.dst_dir == None:
raise Exception("Please set destination folder by \"-d\" or \"-dst\", run ./jscompile.py -h for the usage ")
else:
for src_dir in options.src_dir_arr:
if os.path.exists(src_dir) == False:
raise Exception("Error: dir (%s) doesn't exist..." % (src_dir))
# script directory
workingdir = os.path.dirname(inspect.getfile(inspect.currentframe()))
self.init(options, workingdir)
| mit | 2,244,671,237,458,321,000 | 34.424528 | 131 | 0.528362 | false |
Manexware/medical | oemedical_data/__openerp__.py | 1 | 1120 | {
'name': 'OeMedical : Module Data',
'version': '1.0',
'author': "OeMEdical Team",
'category': 'Generic Modules/Others',
'depends': ['oemedical'],
'application': True,
'description': """
About OeMedical Data
---------------------
    Core data for oemedical is kept as a separate module to simplify
    localization of the core data.
""",
"website": "http://launchpad.net/oemedical",
"licence": "AGPL v3",
"data": [
'data/recreational_drugs.xml',
'data/disease_genes.xml',
'data/medicament_categories.xml',
'data/WHO_products.xml',
'data/WHO_list_of_essential_medicines.xml',
'data/health_specialties.xml',
'data/ethnic_groups.xml',
'data/occupations.xml',
'data/dose_units.xml',
'data/drug_routes.xml',
'data/medicament_form.xml',
'data/medication_frequencies.xml',
'data/disease_categories.xml',
'data/diseases.xml',
],
"demo": [
],
'test':[
],
'css': [
],
'js': [
],
'qweb': [
],
"active": False
}
| gpl-2.0 | -2,622,202,775,317,327,400 | 20.132075 | 73 | 0.533036 | false |
CiscoSystems/tempest | tempest/stress/driver.py | 3 | 8594 | # Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import signal
import time
from six import moves
from tempest import auth
from tempest import clients
from tempest.common import ssh
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
from tempest.stress import cleanup
CONF = config.CONF
LOG = logging.getLogger(__name__)
processes = []
def do_ssh(command, host, ssh_user, ssh_key=None):
ssh_client = ssh.Client(host, ssh_user, key_filename=ssh_key)
try:
return ssh_client.exec_command(command)
except exceptions.SSHExecCommandFailed:
        LOG.error('do_ssh raised an exception. command:%s, host:%s.'
% (command, host))
return None
def _get_compute_nodes(controller, ssh_user, ssh_key=None):
"""
Returns a list of active compute nodes. List is generated by running
nova-manage on the controller.
"""
nodes = []
cmd = "nova-manage service list | grep ^nova-compute"
output = do_ssh(cmd, controller, ssh_user, ssh_key)
if not output:
return nodes
# For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
# This is fragile but there is, at present, no other way to get this info.
for line in output.split('\n'):
words = line.split()
if len(words) > 0 and words[4] == ":-)":
nodes.append(words[1])
return nodes
def _has_error_in_logs(logfiles, nodes, ssh_user, ssh_key=None,
stop_on_error=False):
"""
Detect errors in the nova log files on the controller and compute nodes.
"""
grep = 'egrep "ERROR|TRACE" %s' % logfiles
ret = False
for node in nodes:
errors = do_ssh(grep, node, ssh_user, ssh_key)
if len(errors) > 0:
LOG.error('%s: %s' % (node, errors))
ret = True
if stop_on_error:
break
return ret
def sigchld_handler(signalnum, frame):
"""
Signal handler (only active if stop_on_error is True).
"""
for process in processes:
if (not process['process'].is_alive() and
process['process'].exitcode != 0):
signal.signal(signalnum, signal.SIG_DFL)
terminate_all_processes()
break
def terminate_all_processes(check_interval=20):
"""
Goes through the process list and terminates all child processes.
"""
LOG.info("Stopping all processes.")
for process in processes:
if process['process'].is_alive():
try:
process['process'].terminate()
except Exception:
pass
time.sleep(check_interval)
for process in processes:
if process['process'].is_alive():
try:
pid = process['process'].pid
LOG.warn("Process %d hangs. Send SIGKILL." % pid)
os.kill(pid, signal.SIGKILL)
except Exception:
pass
process['process'].join()
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
"""
Workload driver. Executes an action function against a nova-cluster.
"""
admin_manager = clients.AdminManager()
ssh_user = CONF.stress.target_ssh_user
ssh_key = CONF.stress.target_private_key_path
logfiles = CONF.stress.target_logfiles
log_check_interval = int(CONF.stress.log_check_interval)
default_thread_num = int(CONF.stress.default_thread_number_per_action)
if logfiles:
controller = CONF.stress.target_controller
computes = _get_compute_nodes(controller, ssh_user, ssh_key)
for node in computes:
do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
for test in tests:
if test.get('use_admin', False):
manager = admin_manager
else:
manager = clients.Manager()
for p_number in moves.xrange(test.get('threads', default_thread_num)):
if test.get('use_isolated_tenants', False):
username = data_utils.rand_name("stress_user")
tenant_name = data_utils.rand_name("stress_tenant")
password = "pass"
identity_client = admin_manager.identity_client
tenant = identity_client.create_tenant(name=tenant_name)
identity_client.create_user(username,
password,
tenant['id'],
"email")
creds = auth.get_credentials(username=username,
password=password,
tenant_name=tenant_name)
manager = clients.Manager(credentials=creds)
test_obj = importutils.import_class(test['action'])
test_run = test_obj(manager, max_runs, stop_on_error)
kwargs = test.get('kwargs', {})
test_run.setUp(**dict(kwargs.iteritems()))
LOG.debug("calling Target Object %s" %
test_run.__class__.__name__)
mp_manager = multiprocessing.Manager()
shared_statistic = mp_manager.dict()
shared_statistic['runs'] = 0
shared_statistic['fails'] = 0
p = multiprocessing.Process(target=test_run.execute,
args=(shared_statistic,))
process = {'process': p,
'p_number': p_number,
'action': test_run.action,
'statistic': shared_statistic}
processes.append(process)
p.start()
if stop_on_error:
# NOTE(mkoderer): only the parent should register the handler
signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
try:
while True:
if max_runs is None:
remaining = end_time - time.time()
if remaining <= 0:
break
else:
remaining = log_check_interval
all_proc_term = True
for process in processes:
if process['process'].is_alive():
all_proc_term = False
break
if all_proc_term:
break
time.sleep(min(remaining, log_check_interval))
if stop_on_error:
if any([True for proc in processes
if proc['statistic']['fails'] > 0]):
break
if not logfiles:
continue
if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
stop_on_error):
had_errors = True
break
except KeyboardInterrupt:
LOG.warning("Interrupted, going to print statistics and exit ...")
if stop_on_error:
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
terminate_all_processes()
sum_fails = 0
sum_runs = 0
LOG.info("Statistics (per process):")
for process in processes:
if process['statistic']['fails'] > 0:
had_errors = True
sum_runs += process['statistic']['runs']
sum_fails += process['statistic']['fails']
LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
(process['p_number'],
process['action'],
process['statistic']['runs'],
process['statistic']['fails']))
LOG.info("Summary:")
LOG.info("Run %d actions (%d failed)" %
(sum_runs, sum_fails))
if not had_errors and CONF.stress.full_clean_stack:
LOG.info("cleaning up")
cleanup.cleanup()
if had_errors:
return 1
else:
return 0
| apache-2.0 | -1,070,183,983,445,930,800 | 34.221311 | 78 | 0.56586 | false |
vipulroxx/sympy | sympy/plotting/experimental_lambdify.py | 22 | 25077 | """ rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self contained. Especially it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything else than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function can not be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it wont need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care of by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourselves by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
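# Illustrative example of the intended translation (assumed behavior): with
# use_np=True an expression string like 'sqrt(x) + Integral(sin(x), (x, 0, 1))'
# becomes roughly 'np.sqrt(x) + Integral(sin(x), (x, 0, 1))'; the Integral
# cannot be translated, so its inner nodes are deliberately left untouched.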
from sympy.external import import_module
import warnings
# TODO: debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
    This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python cmath or
even to evalf.
The following translations are tried:
      only numpy complex
      - on errors raised by sympy trying to work with ndarray:
          only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
            # left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a failback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
class lambdify(object):
"""Returns the lambdified function.
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to lambdify the expression. If the function
is not implemented in python cmath, python cmath calls evalf on those
functions.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
use_python_cmath=True)
self.failure = False
def __call__(self, args):
args = complex(args)
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
except Exception as e:
# The exceptions raised by sympy, cmath are not consistent and
# hence it is not possible to specify all the exceptions that
# are to be caught. Presently there are no cases for which the code
# reaches this block other than ZeroDivisionError and complex
            # comparison. Also the exception is caught only once. If the
# exception repeats itself,
# then it is not caught and the corresponding error is raised.
# XXX: Remove catching all exceptions once the plotting module
# is heavily tested.
if isinstance(e, ZeroDivisionError):
return None
elif isinstance(e, TypeError) and ('no ordering relation is'
' defined for complex numbers'
in str(e)):
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
use_python_math=True)
result = self.lambda_func(args.real)
return result
else:
if self.failure:
raise e
#Failure
#Try wrapping it with complex(..).evalf()
self.failure = True
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
complex_wrap_evalf=True)
result = self.lambda_func(args)
warnings.warn('The evaluation of the expression is'
                              ' problematic. We are trying a fallback method'
                              ' that may still work. Please report this as a bug.')
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
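    # Illustrative sketch (not part of the original source): the scalar wrapper
    # returns the real part only when the imaginary part is negligible, e.g.
    #
    #     from sympy.abc import x
    #     from sympy import sqrt
    #     f = lambdify([x], sqrt(x))
    #     f(4)    # -> 2.0
    #     f(-4)   # -> None, because cmath.sqrt(-4) == 2j is essentially imaginary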
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l.lambda_func
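# Illustrative usage sketch (not part of the original source); the symbol name
# and expression below are assumptions:
#
#     from sympy.abc import x
#     from sympy import sin
#     f = experimental_lambdify([x], sin(x) + x**2, use_np=True)
#     f(0.5)   # roughly np.sin(0.5) + 0.25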
class Lambdifier(object):
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all([isinstance(a, Symbol) for a in args]):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for i in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy import sqrt
namespace.update({'sqrt': sqrt})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
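        # Illustrative sketch (not part of the original source): for
        # args=[x] and expr=sin(x) with use_np=True the argument is renamed
        # to x0 and the constructed source is roughly
        #
        #     eval_str == 'lambda x0 : ( np.sin(x0) )'
        #
        # which is exec'd into `namespace` and stored as self.lambda_func.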
##############################################################################
# Dicts for translating from sympy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v
return dict_fun
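    # Illustrative sketch (not part of the original source): with use_np=True
    # the merged dictionary maps sympy names to their numpy spellings, e.g.
    #
    #     dict_fun['sin']  == 'np.sin'      # same name in numpy
    #     dict_fun['acos'] == 'np.arccos'   # different name in numpy
    #     dict_fun['Abs']  == 'np.abs'      # builtin entry overridden by numpy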
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples:
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples:
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
        If the function name is not in the dictionary dict_fun then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
return template % (func_name, self.tree2str(argtree))
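    # Illustrative sketch (not part of the original source): a translated
    # function keeps its opening parenthesis and relies on the tree tail for
    # the closing one, e.g. with use_np=True
    #
    #     translate_func('sin', 'x0')   # -> 'np.sin(x0'
    #
    # while an unknown function with use_evalf=True is wrapped for numerical
    # evaluation, roughly '(Integral(x0, (x0, 1, 2))).evalf('.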
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a sympy
namespace. All other modules are imported only as a module name. That way
        the namespace is not polluted and stays quite small. It probably causes
        many more variable lookups and so takes more time, but there are no tests on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d
| bsd-3-clause | -6,618,289,287,147,524,000 | 37.639445 | 91 | 0.551023 | false |
glwu/python-for-android | python3-alpha/extra_modules/pyxmpp2/utils.py | 46 | 3459 | #
# (C) Copyright 2003-2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Utility functions for the pyxmpp package."""
__docformat__ = "restructuredtext en"
def xml_elements_equal(element1, element2, ignore_level1_cdata = False):
"""Check if two XML elements are equal.
:Parameters:
- `element1`: the first element to compare
- `element2`: the other element to compare
- `ignore_level1_cdata`: if direct text children of the elements
          should be ignored for the comparison
:Types:
- `element1`: :etree:`ElementTree.Element`
- `element2`: :etree:`ElementTree.Element`
- `ignore_level1_cdata`: `bool`
:Returntype: `bool`
"""
# pylint: disable-msg=R0911
if None in (element1, element2) or element1.tag != element2.tag:
return False
attrs1 = list(element1.items())
attrs1.sort()
attrs2 = list(element2.items())
attrs2.sort()
if not ignore_level1_cdata:
if element1.text != element2.text:
return False
if attrs1 != attrs2:
return False
if len(element1) != len(element2):
return False
for child1, child2 in zip(element1, element2):
if child1.tag != child2.tag:
return False
if not ignore_level1_cdata:
            if child1.text != child2.text:
return False
if not xml_elements_equal(child1, child2):
return False
return True
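# Illustrative usage sketch (not part of the original module):
#
#     from xml.etree import ElementTree as ET
#     e1 = ET.XML('<a x="1">text<b/></a>')
#     e2 = ET.XML('<a x="1">other<b/></a>')
#     xml_elements_equal(e1, e2)                             # False: text differs
#     xml_elements_equal(e1, e2, ignore_level1_cdata = True) # True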
import time
import datetime
_MINUTE = datetime.timedelta(minutes = 1)
_NULLDELTA = datetime.timedelta()
def datetime_utc_to_local(utc):
"""
    An ugly hack to convert a naive :std:`datetime.datetime` object containing
    UTC time to a naive :std:`datetime.datetime` object with local time.
    It seems the standard Python 2.3 library doesn't provide any better way to
do that.
"""
# pylint: disable-msg=C0103
ts = time.time()
cur = datetime.datetime.fromtimestamp(ts)
cur_utc = datetime.datetime.utcfromtimestamp(ts)
offset = cur - cur_utc
t = utc
d = datetime.timedelta(hours = 2)
while d > _MINUTE:
local = t + offset
tm = local.timetuple()
tm = tm[0:8] + (0, )
ts = time.mktime(tm)
u = datetime.datetime.utcfromtimestamp(ts)
diff = u - utc
if diff < _MINUTE and diff > -_MINUTE:
break
if diff > _NULLDELTA:
offset -= d
else:
offset += d
d //= 2
return local
def datetime_local_to_utc(local):
"""
Simple function to convert naive :std:`datetime.datetime` object containing
local time to a naive :std:`datetime.datetime` object with UTC time.
"""
timestamp = time.mktime(local.timetuple())
return datetime.datetime.utcfromtimestamp(timestamp)
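# Illustrative round trip (not part of the original module): converting a UTC
# timestamp to local time and back should agree to within a minute.
#
#     utc_now = datetime.datetime.utcnow()
#     local = datetime_utc_to_local(utc_now)
#     assert abs(datetime_local_to_utc(local) - utc_now) < _MINUTE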
# vi: sts=4 et sw=4
| apache-2.0 | -2,787,798,304,812,369,000 | 29.883929 | 79 | 0.646719 | false |
Lambdanaut/crits | crits/raw_data/api.py | 21 | 4479 | from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from crits.raw_data.raw_data import RawData
from crits.raw_data.handlers import handle_raw_data_file
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class RawDataResource(CRITsAPIResource):
"""
Class to handle everything related to the RawData API.
    Currently supports GET, POST, and PATCH.
"""
class Meta:
object_class = RawData
allowed_methods = ('get', 'post', 'patch')
resource_name = "raw_data"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(RawDataResource, self).get_object_list(request, RawData)
def obj_create(self, bundle, **kwargs):
"""
Handles creating RawData through the API.
:param bundle: Bundle containing the information to create the RawData.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
type_ = bundle.data.get('upload_type', None)
content = {'return_code': 1,
'type': 'RawData'}
if not type_:
content['message'] = 'Must provide an upload type.'
self.crits_response(content)
if type_ not in ('metadata', 'file'):
content['message'] = 'Not a valid upload type.'
self.crits_response(content)
if type_ == 'metadata':
data = bundle.data.get('data', None)
elif type_ == 'file':
file_ = bundle.data.get('filedata', None)
if not file_:
content['message'] = "Upload type of 'file' but no file uploaded."
self.crits_response(content)
data = file_.read()
source = bundle.data.get('source', None)
description = bundle.data.get('description', '')
title = bundle.data.get('title', None)
data_type = bundle.data.get('data_type', None)
tool_name = bundle.data.get('tool_name', '')
tool_version = bundle.data.get('tool_version', '')
tool_details = bundle.data.get('tool_details', '')
link_id = bundle.data.get('link_id', None)
copy_rels = bundle.data.get('copy_relationships', False)
method = bundle.data.get('method', None) or 'Upload'
reference = bundle.data.get('reference', None)
bucket_list = bundle.data.get('bucket_list', None)
ticket = bundle.data.get('ticket', None)
if not title:
content['message'] = "Must provide a title."
self.crits_response(content)
if not data_type:
content['message'] = "Must provide a data type."
self.crits_response(content)
result = handle_raw_data_file(data, source, analyst,
description, title, data_type,
tool_name, tool_version, tool_details,
link_id,
method=method,
reference=reference,
copy_rels=copy_rels,
bucket_list=bucket_list,
ticket=ticket)
if result.get('message'):
content['message'] = result.get('message')
if result.get('_id'):
url = reverse('api_dispatch_detail',
kwargs={'resource_name': 'raw_data',
'api_name': 'v1',
'pk': str(result.get('_id'))})
content['url'] = url
content['id'] = str(result.get('_id'))
if result['success']:
content['return_code'] = 0
self.crits_response(content)
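    # Illustrative request payload (an assumption, not part of the original
    # module): a 'metadata' upload supplies the raw content in 'data' together
    # with the required 'title' and 'data_type' fields, e.g.
    #
    #     payload = {
    #         'upload_type': 'metadata',
    #         'title': 'example raw data',        # hypothetical title
    #         'data_type': 'Text',                # hypothetical data type
    #         'source': 'example-source',         # hypothetical source name
    #         'data': 'raw file contents here',
    #     }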
| mit | -6,177,120,377,050,833,000 | 38.289474 | 82 | 0.558607 | false |
jmhsi/justin_tinker | data_science/courses/learning_dl_packages/models/research/attention_ocr/python/utils.py | 16 | 2879 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to support building models for StreetView text transcription."""
import tensorflow as tf
from tensorflow.contrib import slim
def logits_to_log_prob(logits):
"""Computes log probabilities using numerically stable trick.
This uses two numerical stability tricks:
1) softmax(x) = softmax(x - c) where c is a constant applied to all
arguments. If we set c = max(x) then the softmax is more numerically
stable.
2) log softmax(x) is not numerically stable, but we can stabilize it
by using the identity log softmax(x) = x - log sum exp(x)
Args:
logits: Tensor of arbitrary shape whose last dimension contains logits.
Returns:
A tensor of the same shape as the input, but with corresponding log
probabilities.
"""
with tf.variable_scope('log_probabilities'):
reduction_indices = len(logits.shape.as_list()) - 1
max_logits = tf.reduce_max(
logits, reduction_indices=reduction_indices, keep_dims=True)
safe_logits = tf.subtract(logits, max_logits)
sum_exp = tf.reduce_sum(
tf.exp(safe_logits),
reduction_indices=reduction_indices,
keep_dims=True)
log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
return log_probs
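# Illustrative check (not part of the original module): for a single row of
# logits the result matches an ordinary log-softmax on the last dimension.
#
#     logits = tf.constant([[2.0, 1.0, 0.1]])
#     log_probs = logits_to_log_prob(logits)
#     # approximately [[-0.417, -1.417, -2.317]], the same values as
#     # tf.nn.log_softmax(logits)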
def variables_to_restore(scope=None, strip_scope=False):
"""Returns a list of variables to restore for the specified list of methods.
It is supposed that variable name starts with the method's scope (a prefix
returned by _method_scope function).
Args:
    scope: a scope (name prefix) of the method whose variables should be
      restored. If None, all restorable variables are returned.
    strip_scope: if True will return variable names without the method's scope.
      If scope is None, names are returned unchanged.
Returns:
a dictionary mapping variable names to variables for restore.
"""
if scope:
variable_map = {}
method_variables = slim.get_variables_to_restore(include=[scope])
for var in method_variables:
if strip_scope:
var_name = var.op.name[len(scope) + 1:]
else:
var_name = var.op.name
variable_map[var_name] = var
return variable_map
else:
return {v.op.name: v for v in slim.get_variables_to_restore()}
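# Illustrative usage sketch (not part of the original module); the scope name
# below is an assumption:
#
#     var_map = variables_to_restore(scope='AttentionOcr_v1', strip_scope=True)
#     saver = tf.train.Saver(var_map)
#     # saver.restore(sess, checkpoint_path)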
| apache-2.0 | -5,329,140,344,653,455,000 | 34.9875 | 80 | 0.69538 | false |
madphysicist/numpy | numpy/core/_add_newdocs.py | 2 | 184288 | """
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from numpy.core.function_base import add_newdoc
from numpy.core.overrides import array_function_like_doc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> next(fl)
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> next(fl)
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
        provides access to the index. Raises a ValueError if accessed
        and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lamdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lamdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the
operands may be views into the original data with the
`WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
context manager or the `nditer.close` method must be called before
using the result. The temporary data will be written back to the
original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
Context management and the `close` method appeared in version 1.15.0.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while pattern. For an
    example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
.. versionadded:: 1.15.0
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
broadcast_shapes
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
like=None)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for 'A', see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='', *, like=None)
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format. Most builtin numeric types are
supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
Passing ``sep=''``, the default, is deprecated since it will
trigger the deprecated binary mode of this function. This mode
interprets `string` as binary bytes, rather than ASCII text with
decimal numbers, an operation which is better spelt
``frombuffer(string, dtype, count)``. If `string` contains unicode
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a, b, cmp_op, rstrip)
Performs element-wise comparison of two string arrays using the
comparison operator specified by `cmp_op`.
Parameters
----------
a, b : array_like
Arrays to be compared.
cmp_op : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
rstrip : Boolean
If True, the spaces at the end of Strings are removed before the comparison.
Returns
-------
out : ndarray
The output array of type Boolean with the same shape as a and b.
Raises
------
ValueError
If `cmp_op` is not valid.
TypeError
If at least one of `a` or `b` is a non-string array
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iter, dtype, count=-1, *, like=None)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iter : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str or Path
Open file object or filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
Most builtin numeric types are supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
offset : int
The offset (in bytes) from the file's current position. Defaults to 0.
Only permitted for binary files.
.. versionadded:: 1.17.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None, *, like=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : integer or real, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : integer or real
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : integer or real, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
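As a sketch of the recommendation above, `numpy.linspace` gives
predictable endpoints where a non-integer step would otherwise be used:
>>> np.linspace(0, 1, 4)
array([0.        , 0.33333333, 0.66666667, 1.        ])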
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False; if True, the 'greedy' algorithm is used by default.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
[2, 3]], dtype=int32)
>>> x.ctypes.data
31962608 # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
<__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
c_uint(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
c_ulong(4294967296)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
[2, 3]], dtype=int32)
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<class 'numpy.dtype'> # may vary
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
this array; the base array will then be updated with the contents of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays.
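Examples
--------
A short illustrative sketch of the two access styles (flags shown are for
a freshly created array that owns its data):
>>> a = np.zeros(3)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable
True
>>> a.flags.writeable = False
>>> a.flags['WRITEABLE']
False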
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
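A minimal sketch of the interaction with :func:`copy.copy` (array values
chosen arbitrarily):
>>> import copy
>>> a = np.array([1, 2, 3])
>>> b = copy.copy(a)
>>> b is a
False
>>> np.array_equal(a, b)
True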
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False, *, where=True)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False, *, where=True)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the max
integer/float value converted.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
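A further sketch: when ``copy=False`` and the dtype, order and subok
requirements are already satisfied, the input array itself may be returned:
>>> y = x.astype(np.float64, copy=False)
>>> y is x
True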
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Arrays of byte-strings are not swapped. The real and imaginary
parts of a complex number are swapped individually.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of byte-strings are not swapped
>>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
array([b'ceg', b'fac'], dtype='|S3')
``A.newbyteorder().byteswap()`` produces an array with the same values
but different representation in memory
>>> A = np.array([1, 2, 3])
>>> A.view(np.uint8)
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0], dtype=uint8)
>>> A.newbyteorder().byteswap(inplace=True)
array([1, 2, 3])
>>> A.view(np.uint8)
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
0, 3], dtype=uint8)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
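Examples
--------
A brief illustrative sketch:
>>> a = np.arange(5)
>>> a.clip(1, 3)
array([1, 1, 2, 3, 3])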
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[2., 2.],
[2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[8., 8.],
[8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str or Path
A string naming the dump file.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads will convert the string back to an array.
Parameters
----------
None
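Examples
--------
A minimal round-trip sketch:
>>> import pickle
>>> a = np.array([1, 2, 3])
>>> pickle.loads(a.dumps())
array([1, 2, 3])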
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is defined
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S', /)
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* '=' - native order, equivalent to `sys.byteorder`
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
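Examples
--------
A minimal sketch (dtype reprs shown for a little-endian machine):
>>> A = np.array([1, 256], dtype='<i2')
>>> B = A.newbyteorder()
>>> B.dtype
dtype('>i2')
>>> A.tobytes() == B.tobytes()
True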
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
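Examples
--------
A brief illustrative sketch:
>>> a = np.array([0, 3, 0, 5])
>>> a.nonzero()
(array([1, 3]),)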
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
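Examples
--------
A brief illustrative sketch:
>>> a = np.array([1, 2])
>>> a.repeat(2)
array([1, 1, 2, 2])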
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
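Examples
--------
A short sketch of the two equivalent calling styles:
>>> a = np.arange(6)
>>> a.reshape(2, 3)
array([[0, 1, 2],
[3, 4, 5]])
>>> a.reshape((2, 3))
array([[0, 1, 2],
[3, 4, 5]])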
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
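Examples
--------
A brief illustrative sketch:
>>> a = np.array([1, 2, 4, 7])
>>> a.searchsorted(3)
2
>>> a.searchsorted(4, side='right')
3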
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
numpy.argsort : Indirect sort.
numpy.lexsort : Indirect stable sort on multiple keys.
numpy.searchsorted : Find elements in sorted array.
numpy.partition: Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove axes of length one from `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
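Examples
--------
A minimal illustrative example (assuming the usual ``import numpy as np``):
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> x.squeeze().shape
(3,)
>>> x.squeeze(axis=0).shape
(3, 1)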
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
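Examples
--------
A small sketch of per-axis standard deviations (default ``ddof=0``):
>>> a = np.array([[1, 2], [3, 4]])
>>> a.std(axis=0)
array([1., 1.])
>>> a.std(axis=1)
array([0.5, 0.5])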
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
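Examples
--------
A minimal illustrative example:
>>> a = np.array([[1, 2], [3, 4]])
>>> a.sum(axis=0)
array([4, 6])
>>> a.sum(axis=1)
array([3, 7])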
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
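Examples
--------
A minimal illustrative example:
>>> x = np.array([[1, 2, 3]])
>>> x.swapaxes(0, 1)
array([[1],
       [2],
       [3]])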
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
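Examples
--------
A minimal illustrative example:
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> a.take([0, 1, 4])
array([4, 3, 6])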
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str or Path
An open file object, or a string containing a filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
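Examples
--------
A minimal round-trip sketch; the temporary file path used here is purely
illustrative:
>>> import tempfile, os
>>> a = np.arange(4, dtype=np.int32)
>>> fname = os.path.join(tempfile.mkdtemp(), 'a.bin')
>>> a.tofile(fname)
>>> np.fromfile(fname, dtype=np.int32)
array([0, 1, 2, 3], dtype=int32)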
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
except that ``tolist`` changes numpy scalars to Python scalars:
>>> a = np.uint32([1, 2])
>>> a_list = list(a)
>>> a_list
[1, 2]
>>> type(a_list[0])
<class 'numpy.uint32'>
>>> a_tolist = a.tolist()
>>> a_tolist
[1, 2]
>>> type(a_tolist[0])
<class 'int'>
Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
a.tobytes(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object is produced in C-order by default.
This behavior is controlled by the ``order`` parameter.
.. versionadded:: 1.9.0
Parameters
----------
order : {'C', 'F', 'A'}, optional
Controls the memory layout of the bytes object. 'C' means C-order,
'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
Fortran contiguous, 'C' otherwise. Default is 'C'.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
a.tostring(order='C')
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`\ s.
.. deprecated:: 1.19.0
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
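Examples
--------
A minimal illustrative example on a 3-D array (diagonals are taken over
`axis1` and `axis2`, the remaining axis is kept):
>>> a = np.arange(8).reshape((2, 2, 2))
>>> a.trace()
array([6, 8])
>>> a.trace(offset=1)
array([2, 3])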
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
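Examples
--------
A small sketch of per-axis variances (default ``ddof=0``):
>>> a = np.array([[1, 2], [3, 4]])
>>> a.var(axis=0)
array([1., 1.])
>>> a.var(axis=1)
array([0.25, 0.25])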
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view([dtype][, type])
New view of array with the same data.
.. note::
Passing None for ``dtype`` is different from omitting the parameter,
since the former invokes ``dtype(None)`` which is an alias for
``dtype('float_')``.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
Omitting it results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, omission
of the parameter results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout, *[, identity])
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
identity : object, optional
The value to use for the `~numpy.ufunc.identity` attribute of the resulting
object. If specified, this is equivalent to setting the underlying
C ``identity`` field to ``PyUFunc_IdentityValue``.
If omitted, the identity is set to ``PyUFunc_None``. Note that this is
_not_ equivalent to setting the identity to ``None``, which implies the
operation is reorderable.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, a RuntimeError is raised.
If this routine does not know how to add a docstring to the object,
a TypeError is raised.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
"""
_set_madvise_hugepage(enabled: bool) -> bool
Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
allocating the array data. Returns the previously set value.
See `global_state` for more information.
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
**Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)``
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
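Examples
--------
An illustrative call showing broadcasting and the ``where`` keyword with a
pre-initialized output array:
>>> x = np.arange(3)
>>> np.add(x, 10)
array([10, 11, 12])
>>> out = np.zeros(3, dtype=int)
>>> np.add(x, 10, out=out, where=[True, False, True])
array([10,  0, 12])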
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `array`'s dimension by one, by applying ufunc along one axis.
Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
array : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `array`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `array`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(array, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = array.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``array[indices[i]]``.
* if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
Parameters
----------
array : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
``ufunc.reduceat(array, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(array) - 1)``,
``indices[1::2] = range(1, len(array))``.
Don't be fooled by this attribute's name: `reduceat(array)` is not
necessarily smaller than `array`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
r"""
outer(A, B, /, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty(len(A),len(B))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer : A less powerful version of ``np.multiply.outer``
that `ravel`\ s all inputs to 1D. This exists
primarily for compatibility with old code.
tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
``np.multiply.outer(a, b)`` behave same for all
dimensions of a and b.
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None, /)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(dtype, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
dtype
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `dtype` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned 64-bit
integer, the second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
Examples
--------
>>> x = np.dtype('i4')
>>> x.alignment
4
>>> x = np.dtype(float)
>>> x.alignment
8
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types.
Examples
--------
>>> x = np.dtype(float)
>>> x.char
'd'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and passing it directly to `np.dtype` will not accurately reconstruct
some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
>>> x = np.dtype(float)
>>> x.descr
[('', '<f8')]
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.descr
[('name', '<U16'), ('grades', '<f8', (2,))]
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
The following example demonstrates that operations on this particular
dtype requires Python C-API.
Examples
--------
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.flags
16
>>> np.core.multiarray.NEEDS_PYAPI
16
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
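Examples
--------
A minimal illustrative example:
>>> np.dtype(object).hasobject
True
>>> np.dtype([('a', np.int32), ('b', object)]).hasobject
True
>>> np.dtype(np.float64).hasobject
False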
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
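Examples
--------
A minimal illustrative example; the second check only confirms the result
against the interpreter's reported byte order:
>>> import sys
>>> np.dtype('=i4').isnative
True
>>> np.dtype('>i4').isnative == (sys.byteorder == 'big')
True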
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
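Examples
--------
A minimal illustrative example:
>>> np.dtype([('a', np.int8), ('b', np.int64)], align=True).isalignedstruct
True
>>> np.dtype([('a', np.int8), ('b', np.int64)]).isalignedstruct
False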
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
Examples
--------
>>> arr = np.array([[1, 2], [3, 4]])
>>> arr.dtype
dtype('int64')
>>> arr.itemsize
8
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.itemsize
80
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
Examples
--------
>>> dt = np.dtype('i4')
>>> dt.kind
'i'
>>> dt = np.dtype('f8')
>>> dt.kind
'f'
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.kind
'V'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
"""
Either ``None`` or a readonly dictionary of metadata (mappingproxy).
The metadata field can be set using any dictionary at data-type
creation. NumPy currently has no uniform approach to propagating
metadata; although some array operations preserve it, there is no
guarantee that others will.
.. warning::
Although used in certain projects, this feature was long undocumented
and is not well supported. Some aspects of metadata propagation
are expected to change in the future.
Examples
--------
>>> dt = np.dtype(float, metadata={"key": "value"})
>>> dt.metadata["key"]
'value'
>>> arr = np.array([1, 2, 3], dtype=dt)
>>> arr.dtype.metadata
mappingproxy({'key': 'value'})
Adding arrays with identical datatypes currently preserves the metadata:
>>> (arr + arr).dtype.metadata
mappingproxy({'key': 'value'})
But if the arrays have different dtype metadata, the metadata may be
dropped:
>>> dt2 = np.dtype(float, metadata={"key2": "value2"})
>>> arr2 = np.array([3, 2, 1], dtype=dt2)
>>> (arr + arr2).dtype.metadata is None
True # The metadata field is cleared so None is returned
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
Examples
--------
>>> x = np.dtype(float)
>>> x.name
'float64'
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.name
'void640'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
Examples
--------
>>> dt = np.dtype(str)
>>> dt.num
19
>>> dt = np.dtype(float)
>>> dt.num
12
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
Examples
--------
>>> dt = np.dtype(('i4', 4))
>>> dt.shape
(4,)
>>> dt = np.dtype(('i4', (2, 3)))
>>> dt.shape
(2, 3)
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
Examples
--------
>>> x = np.dtype(float)
>>> x.ndim
0
>>> x = np.dtype((float, 8))
>>> x.ndim
1
>>> x = np.dtype(('i4', (3, 4)))
>>> x.ndim
2
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
See Also
--------
dtype.base
Examples
--------
>>> x = np.dtype('8f')
>>> x.subdtype
(dtype('float32'), (8,))
>>> x = np.dtype('i2')
>>> x.subdtype
>>>
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('base',
"""
Returns dtype for the base element of the subarrays,
regardless of their dimension or shape.
See Also
--------
dtype.subdtype
Examples
--------
>>> x = np.dtype('8f')
>>> x.base
dtype('float32')
>>> x = np.dtype('i2')
>>> x.base
dtype('int16')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S', /)
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* '=' - native order
* {'|', 'I'} - ignore (no change to byte order)
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
def refer_to_array_attribute(attr, method=True):
docstring = """
Scalar {} identical to the corresponding array attribute.
Please see `ndarray.{}`.
"""
return attr, docstring.format("method" if method else "attribute", attr)
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('T', method=False))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('base', method=False))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('all'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('any'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argmax'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argmin'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argsort'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('astype'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('byteswap'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('choose'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('clip'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('compress'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('conjugate'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('copy'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('cumprod'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('cumsum'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('diagonal'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('dump'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('dumps'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('fill'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('flatten'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('getfield'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('item'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('itemset'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('max'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('mean'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('min'))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S', /)
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* '=' - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('nonzero'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('prod'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('ptp'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('put'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('ravel'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('repeat'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('reshape'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('resize'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('round'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('searchsorted'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('setfield'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('setflags'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('sort'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('squeeze'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('std'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('sum'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('swapaxes'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('take'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tofile'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tolist'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tostring'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('trace'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('transpose'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('var'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('view'))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
| bsd-3-clause | 8,496,472,388,934,332,000 | 28.326544 | 128 | 0.582252 | false |
agry/NGECore2 | scripts/mobiles/corellia/corsec_special_ops_inspector.py | 2 | 2171 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('corsec_inspector_sergeant_aggro')
mobileTemplate.setLevel(81)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("aggro corsec")
mobileTemplate.setAssistRange(10)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_dressed_corsec_captain_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_01.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_02.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_03.iff')
templates.add('object/mobile/shared_dressed_corsec_officer_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_officer_human_male_01.iff')
templates.add('object/mobile/shared_dressed_corsec_detective_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_detective_human_male_01.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('corsec_special_ops_inspector', mobileTemplate)
return | lgpl-3.0 | 7,402,483,569,024,474,000 | 39.788462 | 127 | 0.803316 | false |
darren-wang/gl | glance/tests/unit/test_cached_images.py | 20 | 4719 | # Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import webob
from glance.api import cached_images
from glance.api import policy
from glance.common import exception
from glance import image_cache
class FakePolicyEnforcer(policy.Enforcer):
def __init__(self):
self.default_rule = ''
self.policy_path = ''
self.policy_file_mtime = None
self.policy_file_contents = None
def enforce(self, context, action, target):
return 'pass'
def check(rule, target, creds, exc=None, *args, **kwargs):
return 'pass'
def _check(self, context, rule, target, *args, **kwargs):
return 'pass'
class FakeCache(image_cache.ImageCache):
def __init__(self):
self.init_driver()
self.deleted_images = []
def init_driver(self):
pass
def get_cached_images(self):
return {'id': 'test'}
def delete_cached_image(self, image_id):
self.deleted_images.append(image_id)
def delete_all_cached_images(self):
self.delete_cached_image(self.get_cached_images().get('id'))
return 1
def get_queued_images(self):
return {'test': 'passed'}
def queue_image(self, image_id):
return 'pass'
def delete_queued_image(self, image_id):
self.deleted_images.append(image_id)
def delete_all_queued_images(self):
self.delete_queued_image('deleted_img')
return 1
class FakeController(cached_images.Controller):
def __init__(self):
self.cache = FakeCache()
self.policy = FakePolicyEnforcer()
class TestController(testtools.TestCase):
def test_initialization_without_conf(self):
self.assertRaises(exception.BadDriverConfiguration,
cached_images.Controller)
class TestCachedImages(testtools.TestCase):
def setUp(self):
super(TestCachedImages, self).setUp()
test_controller = FakeController()
self.controller = test_controller
def test_get_cached_images(self):
req = webob.Request.blank('')
req.context = 'test'
result = self.controller.get_cached_images(req)
self.assertEqual({'cached_images': {'id': 'test'}}, result)
def test_delete_cached_image(self):
req = webob.Request.blank('')
req.context = 'test'
self.controller.delete_cached_image(req, image_id='test')
self.assertEqual(['test'], self.controller.cache.deleted_images)
def test_delete_cached_images(self):
req = webob.Request.blank('')
req.context = 'test'
self.assertEqual({'num_deleted': 1},
self.controller.delete_cached_images(req))
self.assertEqual(['test'], self.controller.cache.deleted_images)
def test_policy_enforce_forbidden(self):
def fake_enforce(context, action, target):
raise exception.Forbidden()
self.controller.policy.enforce = fake_enforce
req = webob.Request.blank('')
req.context = 'test'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.get_cached_images, req)
def test_get_queued_images(self):
req = webob.Request.blank('')
req.context = 'test'
result = self.controller.get_queued_images(req)
self.assertEqual({'queued_images': {'test': 'passed'}}, result)
def test_queue_image(self):
req = webob.Request.blank('')
req.context = 'test'
self.controller.queue_image(req, image_id='test1')
def test_delete_queued_image(self):
req = webob.Request.blank('')
req.context = 'test'
self.controller.delete_queued_image(req, 'deleted_img')
self.assertEqual(['deleted_img'],
self.controller.cache.deleted_images)
def test_delete_queued_images(self):
req = webob.Request.blank('')
req.context = 'test'
self.assertEqual({'num_deleted': 1},
self.controller.delete_queued_images(req))
self.assertEqual(['deleted_img'],
self.controller.cache.deleted_images)
| apache-2.0 | -934,260,007,954,881,900 | 31.544828 | 78 | 0.632973 | false |
azunite/gyp_20150930 | test/actions-multiple-outputs-with-dependencies/gyptest-action.py | 34 | 1172 | #!/usr/bin/env python
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies actions with multiple outputs & dependencies will correctly rebuild.
This is a regression test for crrev.com/1177163002.
"""
import TestGyp
import os
import sys
import time
if sys.platform in ('darwin', 'win32'):
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test = TestGyp.TestGyp()
TESTDIR='relocate/src'
test.run_gyp('action.gyp', chdir='src')
test.relocate('src', TESTDIR)
def build_and_check(content):
test.write(TESTDIR + '/input.txt', content)
test.build('action.gyp', 'upper', chdir=TESTDIR)
test.built_file_must_match('result.txt', content, chdir=TESTDIR)
build_and_check('Content for first build.')
# Ninja works with timestamps and the test above is fast enough that the
# 'updated' file may end up with the same timestamp as the original, meaning
# that ninja may not always recognize the input file has changed.
if test.format == 'ninja':
time.sleep(1)
build_and_check('An updated input file.')
test.pass_test()
| bsd-3-clause | -1,888,932,165,144,181,000 | 26.255814 | 76 | 0.733788 | false |
vikatory/kbengine | kbe/res/scripts/common/Lib/test/test_codecencodings_jp.py | 88 | 4981 | #
# test_codecencodings_jp.py
# Codec encoding tests for Japanese encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_CP932(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'cp932'
tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = (
# invalid bytes
(b"abc\x81\x00\x81\x00\x82\x84", "strict", None),
(b"abc\xf8", "strict", None),
(b"abc\x81\x00\x82\x84", "replace", "abc\ufffd\x00\uff44"),
(b"abc\x81\x00\x82\x84\x88", "replace", "abc\ufffd\x00\uff44\ufffd"),
(b"abc\x81\x00\x82\x84", "ignore", "abc\x00\uff44"),
(b"ab\xEBxy", "replace", "ab\uFFFDxy"),
(b"ab\xF0\x39xy", "replace", "ab\uFFFD9xy"),
(b"ab\xEA\xF0xy", "replace", 'ab\ufffd\ue038y'),
# sjis vs cp932
(b"\\\x7e", "replace", "\\\x7e"),
(b"\x81\x5f\x81\x61\x81\x7c", "replace", "\uff3c\u2225\uff0d"),
)
euc_commontests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u7956"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u7956\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u7956"),
(b"abc\xc8", "strict", None),
(b"abc\x8f\x83\x83", "replace", "abc\ufffd\ufffd\ufffd"),
(b"\x82\xFCxy", "replace", "\ufffd\ufffdxy"),
(b"\xc1\x64", "strict", None),
(b"\xa1\xc0", "strict", "\uff3c"),
(b"\xa1\xc0\\", "strict", "\uff3c\\"),
(b"\x8eXY", "replace", "\ufffdXY"),
)
class Test_EUC_JIS_2004(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jis_2004'
tstring = multibytecodec_support.load_teststring('euc_jisx0213')
codectests = euc_commontests
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\xa9\xa8ℜ\xa9\xb2 = ⟨ሴ⟩"
)
class Test_EUC_JISX0213(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jisx0213'
tstring = multibytecodec_support.load_teststring('euc_jisx0213')
codectests = euc_commontests
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\xa9\xa8ℜ\xa9\xb2 = ⟨ሴ⟩"
)
class Test_EUC_JP_COMPAT(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jp'
tstring = multibytecodec_support.load_teststring('euc_jp')
codectests = euc_commontests + (
("\xa5", "strict", b"\x5c"),
("\u203e", "strict", b"\x7e"),
)
shiftjis_commonenctests = (
(b"abc\x80\x80\x82\x84", "strict", None),
(b"abc\xf8", "strict", None),
(b"abc\x80\x80\x82\x84def", "ignore", "abc\uff44def"),
)
class Test_SJIS_COMPAT(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis'
tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
(b"abc\x80\x80\x82\x84", "replace", "abc\ufffd\ufffd\uff44"),
(b"abc\x80\x80\x82\x84\x88", "replace", "abc\ufffd\ufffd\uff44\ufffd"),
(b"\\\x7e", "strict", "\\\x7e"),
(b"\x81\x5f\x81\x61\x81\x7c", "strict", "\uff3c\u2016\u2212"),
(b"abc\x81\x39", "replace", "abc\ufffd9"),
(b"abc\xEA\xFC", "replace", "abc\ufffd\ufffd"),
(b"abc\xFF\x58", "replace", "abc\ufffdX"),
)
class Test_SJIS_2004(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis_2004'
tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
(b"\\\x7e", "strict", "\xa5\u203e"),
(b"\x81\x5f\x81\x61\x81\x7c", "strict", "\\\u2016\u2212"),
(b"abc\xEA\xFC", "strict", "abc\u64bf"),
(b"\x81\x39xy", "replace", "\ufffd9xy"),
(b"\xFF\x58xy", "replace", "\ufffdXxy"),
(b"\x80\x80\x82\x84xy", "replace", "\ufffd\ufffd\uff44xy"),
(b"\x80\x80\x82\x84\x88xy", "replace", "\ufffd\ufffd\uff44\u5864y"),
(b"\xFC\xFBxy", "replace", '\ufffd\u95b4y'),
)
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\x85Gℜ\x85Q = ⟨ሴ⟩"
)
class Test_SJISX0213(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jisx0213'
tstring = multibytecodec_support.load_teststring('shift_jisx0213')
codectests = shiftjis_commonenctests + (
(b"abc\x80\x80\x82\x84", "replace", "abc\ufffd\ufffd\uff44"),
(b"abc\x80\x80\x82\x84\x88", "replace", "abc\ufffd\ufffd\uff44\ufffd"),
# sjis vs cp932
(b"\\\x7e", "replace", "\xa5\u203e"),
(b"\x81\x5f\x81\x61\x81\x7c", "replace", "\x5c\u2016\u2212"),
)
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\x85Gℜ\x85Q = ⟨ሴ⟩"
)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| lgpl-3.0 | -3,785,268,423,428,863,500 | 37.315385 | 79 | 0.593857 | false |
flavoi/diventi | diventi/ebooks/views.py | 1 | 3556 | from django.shortcuts import get_object_or_404
from django.views.generic.detail import DetailView
from django.views import View
from django.utils.translation import (
get_language,
gettext_lazy as _
)
from django.contrib.auth.mixins import (
LoginRequiredMixin,
UserPassesTestMixin
)
from django.http import Http404
from django.conf import settings
from diventi.products.models import Product
from .models import Book
from .utils import (
parse_dropbox_paper_soup,
make_paper_toc,
)
class UserHasProductMixin(UserPassesTestMixin):
"""
This view checks if the user has bought the product
related to the requested book.
    It assumes the slug of the book object is available in the
    ``book_slug`` URL keyword argument.
"""
permission_denied_message = _('This book is not in your collection, please check your profile.')
def test_func(self):
book_slug = self.kwargs.get('book_slug', None)
book = get_object_or_404(Book, slug=book_slug)
product = book.book_product
user_has_bought_test = product.user_has_already_bought(self.request.user) or product.user_has_authored(self.request.user)
if not user_has_bought_test:
self.permission_denied_message = _('This book is not in your collection, please check your profile.')
book_is_published_test = book.is_published()
if not book_is_published_test:
self.permission_denied_message = _('This book is not yet available, please check back later.')
return user_has_bought_test and book_is_published_test
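# A minimal sketch of the URLconf entry this mixin expects; the actual route,
# converter and view wiring in the project may differ:
#
#     path('books/<slug:book_slug>/', BookDetailView.as_view(), name='book-detail')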
class EbookView(View):
"""
Generic view that manages context data for ebooks.
    It assumes the slug of the book object is available in the
    ``book_slug`` URL keyword argument.
"""
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
book_slug = self.kwargs.get('book_slug', None)
book = get_object_or_404(Book, slug=book_slug)
context['book'] = book
return context
class BookDetailView(LoginRequiredMixin, UserHasProductMixin,
EbookView, DetailView):
""" Returns the digital content of a product. """
model = Book
slug_url_kwarg = 'book_slug'
def get_queryset(self, **kwargs):
queryset = super().get_queryset(**kwargs)
return queryset.published().product()
def get_template_names(self):
return ['ebooks/book_detail_%s.html' % self.object.template]
from django.utils.safestring import mark_safe
class PaperEbookView(BookDetailView):
""" Renders an ebook from a paper document """
def get_object(self, queryset=None):
obj = super(PaperEbookView, self).get_object(queryset)
if not obj.paper_id:
raise Http404(_('This book is not linked to a paper, please contact the authors.'))
return obj
def get_template_names(self):
return ['ebooks/book_detail_quick.html', ]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_lan = get_language()
paper_filename = 'ebooks/partials/book_paper_{}_{}.html'.format(self.object.id, current_lan)
paper_soup = parse_dropbox_paper_soup(paper_filename)
# context['paper_title'] = paper_soup.select_one('.ace-line').extract().get_text()
context['paper_title'] = self.object.title
context['paper_toc'] = make_paper_toc(paper_soup)
context['book_paper'] = paper_filename
return context | apache-2.0 | -4,396,833,859,665,564,000 | 34.929293 | 129 | 0.665917 | false |
GitHublong/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/tests/uuid_field.py | 42 | 2384 | import re
import uuid
import six
from django_extensions.db.fields import PostgreSQLUUIDField
from django_extensions.tests.fields import FieldTestCase
from django_extensions.tests.testapp.models import UUIDTestModel_field, UUIDTestModel_pk, UUIDTestAgregateModel, UUIDTestManyToManyModel
class UUIDFieldTest(FieldTestCase):
def testUUIDFieldCreate(self):
j = UUIDTestModel_field.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
def testUUIDField_pkCreate(self):
j = UUIDTestModel_pk.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440000'))
def testUUIDField_pkAgregateCreate(self):
j = UUIDTestAgregateModel.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440001'))
self.assertEqual(j.a, 6)
self.assertIsInstance(j.pk, six.string_types)
self.assertEqual(len(j.pk), 36)
def testUUIDFieldManyToManyCreate(self):
j = UUIDTestManyToManyModel.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440010'))
self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440010'))
self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440010'))
class PostgreSQLUUIDFieldTest(FieldTestCase):
def test_uuid_casting(self):
# As explain by postgres documentation
# http://www.postgresql.org/docs/9.1/static/datatype-uuid.html
# an uuid needs to be a sequence of lower-case hexadecimal digits, in
# several groups separated by hyphens, specifically a group of 8 digits
# followed by three groups of 4 digits followed by a group of 12 digits
matcher = re.compile('^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}'
'-[\da-f]{12}$')
field = PostgreSQLUUIDField()
for value in (str(uuid.uuid4()), uuid.uuid4().urn, uuid.uuid4().hex,
uuid.uuid4().int, uuid.uuid4().bytes):
prepared_value = field.get_db_prep_value(value, None)
self.assertTrue(matcher.match(prepared_value) is not None,
prepared_value)
| apache-2.0 | -5,227,417,608,219,450,000 | 49.723404 | 136 | 0.687919 | false |
40023256/2015cdag1man | static/Brython3.1.0-20150301-090019/Lib/unittest/test/support.py | 770 | 3379 | import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class LoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super().__init__()
def startTest(self, test):
self._events.append('startTest')
super().startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super(LoggingResult, self).startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super().stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super(LoggingResult, self).stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super().addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super(LoggingResult, self).addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super().addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super(LoggingResult, self).addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super(LoggingResult, self).addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super(LoggingResult, self).addUnexpectedSuccess(*args)
class ResultWithNoStartTestRunStopTestRun(object):
"""An object honouring TestResult before startTestRun/stopTestRun."""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test):
pass
def addFailure(self, test):
pass
def addSuccess(self, test):
pass
def wasSuccessful(self):
return True
| gpl-3.0 | 8,713,266,870,411,440,000 | 27.635593 | 78 | 0.576206 | false |
mahak/cinder | cinder/volume/drivers/datera/datera_common.py | 2 | 11881 | # Copyright 2020 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
import re
import string
import time
import types
import uuid
from glanceclient import exc as glance_exc
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.image import glance
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
dfs_sdk = importutils.try_import('dfs_sdk')
OS_PREFIX = "OS"
UNMANAGE_PREFIX = "UNMANAGED"
# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because of the nature of the regex
# conflicting with new-style curly braces
UUID4_STR_RE = ("%s.*([a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
"[a-f0-9]{3}-?[a-f0-9]{12})")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)
SNAP_RE = re.compile(r"\d{10,}\.\d+")
# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these
DEFAULT_SI_SLEEP = 1
DEFAULT_SI_SLEEP_API_2 = 5
DEFAULT_SNAP_SLEEP = 1
API_VERSIONS = ["2.1", "2.2"]
API_TIMEOUT = 20
VALID_CHARS = set(string.ascii_letters + string.digits + "-_.")
class DateraAPIException(exception.VolumeBackendAPIException):
message = _("Bad response from Datera API")
def get_name(resource):
dn = resource.get('display_name')
cid = resource.get('id')
if dn:
dn = filter_chars(dn)
# Check to ensure the name is short enough to fit. Prioritize
# the prefix and Cinder ID, strip all invalid characters
nl = len(OS_PREFIX) + len(dn) + len(cid) + 2
if nl >= 64:
dn = dn[:-(nl - 63)]
return "-".join((OS_PREFIX, dn, cid))
return "-".join((OS_PREFIX, cid))
def get_unmanaged(name):
return "-".join((UNMANAGE_PREFIX, name))
def filter_chars(s):
if s:
return ''.join([c for c in s if c in VALID_CHARS])
return s
def lookup(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
obj = args[0]
name = "_" + func.__name__ + "_" + obj.apiv.replace(".", "_")
LOG.debug("Trying method: %s", name)
call_id = uuid.uuid4()
if obj.do_profile:
LOG.debug("Profiling method: %s, id %s", name, call_id)
t1 = time.time()
obj.thread_local.trace_id = call_id
result = getattr(obj, name)(*args[1:], **kwargs)
if obj.do_profile:
t2 = time.time()
timedelta = round(t2 - t1, 3)
LOG.debug("Profile for method %s, id %s: %ss",
name, call_id, timedelta)
return result
return wrapper
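# A sketch of how the decorator above dispatches; the driver class below is
# hypothetical (the real drivers define the versioned methods across mixins):
#
#     class ExampleDriver(object):
#         apiv = "2.2"
#         do_profile = False
#
#         @lookup
#         def create_volume(self, volume):
#             pass    # never executed; dispatch goes to the versioned method
#
#         def _create_volume_2_1(self, volume):
#             return "2.1"
#
#         def _create_volume_2_2(self, volume):
#             return "2.2"
#
# ExampleDriver().create_volume(vol) resolves to _create_volume_2_2 because
# apiv is "2.2".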
def _parse_vol_ref(ref):
if ref.count(":") not in (2, 3):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"tenant:app_inst_name:storage_inst_name:vol_name or "
"app_inst_name:storage_inst_name:vol_name"))
try:
(tenant, app_inst_name, storage_inst_name,
vol_name) = ref.split(":")
if tenant == "root":
tenant = None
except (TypeError, ValueError):
app_inst_name, storage_inst_name, vol_name = ref.split(
":")
tenant = None
return app_inst_name, storage_inst_name, vol_name, tenant
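# Illustrative parses (tenant and instance names are made up):
#
#     _parse_vol_ref("mytenant:app1:storage1:vol1") -> ("app1", "storage1", "vol1", "mytenant")
#     _parse_vol_ref("app1:storage1:vol1")          -> ("app1", "storage1", "vol1", None)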
def _check_snap_ref(ref):
if not SNAP_RE.match(ref):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"1234567890.12345678"))
return True
def _get_size(app_inst):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
if 'data' in app_inst:
app_inst = app_inst['data']
sis = app_inst['storage_instances']
found_si = sis[0]
found_vol = found_si['volumes'][0]
return found_vol['size']
def _get_volume_type_obj(driver, resource):
type_id = resource.get('volume_type_id', None)
# Handle case of volume with no type. We still want the
# specified defaults from above
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
else:
volume_type = None
return volume_type
def _get_policies_for_resource(driver, resource):
volume_type = driver._get_volume_type_obj(resource)
return driver._get_policies_for_volume_type(volume_type)
def _get_policies_for_volume_type(driver, volume_type):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
# Handle case of volume with no type. We still want the
# specified defaults from above
if volume_type:
specs = volume_type.get('extra_specs', {})
else:
specs = {}
# Set defaults:
policies = {k.lstrip('DF:'): str(v['default']) for (k, v)
in driver._init_vendor_properties()[0].items()}
if volume_type:
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
ctxt = context.get_admin_context()
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
specs.update(qos_kvs)
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
# Cast everything except booleans int that can be cast
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
def _image_accessible(driver, context, volume, image_meta):
# Determine if image is accessible by current project
pid = volume.get('project_id', '')
public = False
visibility = image_meta.get('visibility', None)
LOG.debug("Image %(image)s visibility: %(vis)s",
{"image": image_meta['id'], "vis": visibility})
if visibility and visibility in ['public', 'community']:
public = True
elif visibility and visibility in ['shared', 'private']:
# Do membership check. Newton and before didn't have a 'shared'
# visibility option, so we have to do this check for 'private'
# as well
gclient = glance.get_default_image_service()
members = []
# list_members is only available in Rocky+
try:
members = gclient.list_members(context, image_meta['id'])
except AttributeError:
# This is the fallback method for the same query
try:
members = gclient._client.call(context,
'list',
controller='image_members',
image_id=image_meta['id'])
except glance_exc.HTTPForbidden as e:
LOG.warning(e)
except glance_exc.HTTPForbidden as e:
LOG.warning(e)
members = list(members)
LOG.debug("Shared image %(image)s members: %(members)s",
{"image": image_meta['id'], "members": members})
for member in members:
if (member['member_id'] == pid and
member['status'] == 'accepted'):
public = True
break
if image_meta.get('is_public', False):
public = True
else:
if image_meta.get('owner', '') == pid:
public = True
if not public:
LOG.warning("Requested image is not "
"accessible by current Project.")
return public
def _format_tenant(tenant):
if tenant == "all" or (tenant and ('/root' in tenant or 'root' in tenant)):
return '/root'
elif tenant and ('/root' not in tenant and 'root' not in tenant):
return "/" + "/".join(('root', tenant)).strip('/')
return tenant
def get_ip_pool(policies):
ip_pool = policies['ip_pool']
if ',' in ip_pool:
ip_pools = ip_pool.split(',')
ip_pool = random.choice(ip_pools)
return ip_pool
def create_tenant(driver, project_id):
if driver.tenant_id.lower() == 'map':
name = get_name({'id': project_id})
elif driver.tenant_id:
name = driver.tenant_id.replace('root', '').strip('/')
else:
name = 'root'
if name:
try:
driver.api.tenants.create(name=name)
except dfs_sdk.exceptions.ApiConflictError:
LOG.debug("Tenant %s already exists", name)
return _format_tenant(name)
def get_tenant(driver, project_id):
if driver.tenant_id.lower() == 'map':
return _format_tenant(get_name({'id': project_id}))
elif not driver.tenant_id:
return _format_tenant('root')
return _format_tenant(driver.tenant_id)
def cvol_to_ai(driver, resource, tenant=None):
if not tenant:
tenant = get_tenant(driver, resource['project_id'])
try:
# api.tenants.get needs a non '/'-prefixed tenant id
driver.api.tenants.get(tenant.strip('/'))
except dfs_sdk.exceptions.ApiNotFoundError:
create_tenant(driver, resource['project_id'])
cid = resource.get('id', None)
if not cid:
raise ValueError('Unsure what id key to use for object', resource)
ais = driver.api.app_instances.list(
filter='match(name,.*{}.*)'.format(cid),
tenant=tenant)
if not ais:
raise exception.VolumeNotFound(volume_id=cid)
return ais[0]
def cvol_to_dvol(driver, resource, tenant=None):
if not tenant:
tenant = get_tenant(driver, resource['project_id'])
ai = cvol_to_ai(driver, resource, tenant=tenant)
si = ai.storage_instances.list(tenant=tenant)[0]
vol = si.volumes.list(tenant=tenant)[0]
return vol
def _version_to_int(ver):
# Using a factor of 100 per digit so up to 100 versions are supported
# per major/minor/patch/subpatch digit in this calculation
# Example:
# In [2]: _version_to_int("3.3.0.0")
# Out[2]: 303000000
# In [3]: _version_to_int("2.2.7.1")
# Out[3]: 202070100
VERSION_DIGITS = 4
factor = pow(10, VERSION_DIGITS * 2)
div = pow(10, 2)
val = 0
for c in ver.split("."):
val += int(int(c) * factor)
factor /= div
return val
def dat_version_gte(version_a, version_b):
return _version_to_int(version_a) >= _version_to_int(version_b)
def register_driver(driver):
for func in [_get_volume_type_obj,
_get_policies_for_resource,
_get_policies_for_volume_type,
_image_accessible,
get_tenant,
create_tenant,
cvol_to_ai,
cvol_to_dvol]:
f = types.MethodType(func, driver)
setattr(driver, func.__name__, f)
| apache-2.0 | -4,897,800,041,688,824,000 | 31.911357 | 79 | 0.595404 | false |
mgax/aleph | aleph/model/selector.py | 3 | 1189 | import logging
from datetime import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from normality import normalize
from aleph.core import db
log = logging.getLogger(__name__)
class Selector(db.Model):
id = db.Column(db.Integer, primary_key=True)
_text = db.Column('text', db.Unicode, index=True)
normalized = db.Column(db.Unicode, index=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow)
entity_id = db.Column(db.Unicode(50), db.ForeignKey('entity.id'))
entity = db.relationship('Entity', backref=db.backref('selectors',
lazy='dynamic', cascade='all, delete-orphan')) # noqa
@hybrid_property
def text(self):
return self._text
@text.setter
def text(self, text):
self._text = text
self.normalized = self.normalize(text)
@classmethod
def normalize(cls, text):
return normalize(text)
def __repr__(self):
return '<Selector(%r, %r)>' % (self.entity_id, self.text)
def __unicode__(self):
return self.text
| mit | -7,067,419,229,274,877,000 | 27.309524 | 82 | 0.640034 | false |
edc-ecuador/edc | qa/rpc-tests/test_framework/netutil.py | 328 | 4562 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
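# Example usage (illustrative; the port number is made up): check that the
# current process has a listener bound to 127.0.0.1:8332.
#
#     binds = get_bind_addrs(os.getpid())
#     assert (addr_to_hex('127.0.0.1'), 8332) in binds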
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return binascii.hexlify(bytearray(addr))
| mit | -2,936,193,231,578,985,500 | 31.820144 | 113 | 0.583735 | false |
AthinaB/synnefo | snf-admin-app/synnefo_admin/admin/resources/networks/actions.py | 7 | 3433 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from collections import OrderedDict
from synnefo.logic.networks import validate_network_action
from synnefo.logic import networks
from synnefo_admin.admin.actions import AdminAction, noop
from synnefo_admin.admin.utils import update_actions_rbac, send_admin_email
class NetworkAction(AdminAction):
"""Class for actions on networks. Derived from AdminAction.
Pre-determined Attributes:
target: network
"""
def __init__(self, name, f, **kwargs):
"""Initialize the class with provided values."""
AdminAction.__init__(self, name=name, target='network', f=f, **kwargs)
def drain_network(network):
network.drained = True
network.save()
def undrain_network(network):
network.drained = False
network.save()
def check_network_action(action):
if action == "CONTACT":
# Contact action is allowed only on private networks. However, this
# function may get called with an AstakosUser as a target. In this
# case, we always confirm the action.
return lambda n: not getattr(n, 'public', False)
elif action == "DRAIN":
return lambda n: not n.drained and not n.deleted and n.public
elif action == "UNDRAIN":
return lambda n: n.drained and not n.deleted and n.public
else:
return lambda n: validate_network_action(n, action)
def generate_actions():
"""Create a list of actions on networks."""
actions = OrderedDict()
actions['drain'] = NetworkAction(name='Drain', f=drain_network,
c=check_network_action('DRAIN'),
caution_level=True,)
actions['undrain'] = NetworkAction(name='Undrain', f=undrain_network,
c=check_network_action('UNDRAIN'),
karma='neutral',)
actions['destroy'] = NetworkAction(name='Destroy', f=networks.delete,
c=check_network_action('DESTROY'),
karma='bad', caution_level='dangerous',)
actions['reassign'] = NetworkAction(name='Reassign to project', f=noop,
karma='neutral',
caution_level='dangerous',)
actions['change_owner'] = NetworkAction(name='Change owner', f=noop,
karma='neutral',
caution_level='dangerous',)
actions['contact'] = NetworkAction(name='Send e-mail', f=send_admin_email,
c=check_network_action("CONTACT"),)
update_actions_rbac(actions)
return actions
cached_actions = generate_actions()
| gpl-3.0 | 1,157,283,897,701,558,000 | 35.521277 | 79 | 0.618118 | false |
xapi-project/sm | drivers/LVHDoISCSISR.py | 1 | 28450 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# LVHDoISCSISR: LVHD over ISCSI software initiator SR driver
#
from __future__ import print_function
import SR
import LVHDSR
import BaseISCSI
import SRCommand
import util
import scsiutil
import lvutil
import time
import os
import sys
import xs_errors
import xmlrpclib
import mpath_cli
import iscsilib
import glob
import copy
import xml.dom.minidom
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "SR_TRIM",
"VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
"VDI_GENERATE_CONFIG", "VDI_CLONE", "VDI_SNAPSHOT",
"VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2",
"VDI_UPDATE", "VDI_MIRROR", "VDI_CONFIG_CBT",
"VDI_ACTIVATE", "VDI_DEACTIVATE"]
CONFIGURATION = [['SCSIid', 'The scsi_id of the destination LUN'], \
['target', 'IP address or hostname of the iSCSI target'], \
['targetIQN', 'The IQN of the target LUN group to be attached'], \
['chapuser', 'The username to be used during CHAP authentication'], \
['chappassword', 'The password to be used during CHAP authentication'], \
['incoming_chapuser', 'The incoming username to be used during bi-directional CHAP authentication (optional)'], \
['incoming_chappassword', 'The incoming password to be used during bi-directional CHAP authentication (optional)'], \
['port', 'The network port number on which to query the target'], \
['multihomed', 'Enable multi-homing to this target, true or false (optional, defaults to same value as host.other_config:multipathing)'], \
['usediscoverynumber', 'The specific iscsi record index to use. (optional)'], \
['allocation', 'Valid values are thick or thin (optional, defaults to thick)']]
DRIVER_INFO = {
'name': 'LVHD over iSCSI',
'description': 'SR plugin which represents disks as Logical Volumes within a Volume Group created on an iSCSI LUN',
'vendor': 'Citrix Systems Inc',
'copyright': '(C) 2008 Citrix Systems Inc',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
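# For reference, a typical invocation that supplies the device-config keys
# listed in CONFIGURATION above might look like the following (the target
# address, IQN and SCSIid are placeholders, not real values):
#
#   xe sr-create type=lvmoiscsi shared=true name-label="iSCSI SR" \
#       device-config:target=10.0.0.10 \
#       device-config:targetIQN=iqn.2009-01.example.com:storage \
#       device-config:SCSIid=360a98000503033585f5d4b6c68373845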
class LVHDoISCSISR(LVHDSR.LVHDSR):
"""LVHD over ISCSI storage repository"""
def handles(type):
if __name__ == '__main__':
name = sys.argv[0]
else:
name = __name__
if name.endswith("LVMoISCSISR"):
return type == "lvmoiscsi"
if type == "lvhdoiscsi":
return True
return False
handles = staticmethod(handles)
def load(self, sr_uuid):
if not sr_uuid:
# This is a probe call, generate a temp sr_uuid
sr_uuid = util.gen_uuid()
# If this is a vdi command, don't initialise SR
if util.isVDICommand(self.original_srcmd.cmd):
self.SCSIid = self.dconf['SCSIid']
else:
if 'target' in self.original_srcmd.dconf:
self.original_srcmd.dconf['targetlist'] = self.original_srcmd.dconf['target']
iscsi = BaseISCSI.BaseISCSISR(self.original_srcmd, sr_uuid)
self.iscsiSRs = []
self.iscsiSRs.append(iscsi)
saved_exc = None
if self.dconf['target'].find(',') == 0 or self.dconf['targetIQN'] == "*":
# Instantiate multiple sessions
self.iscsiSRs = []
if self.dconf['targetIQN'] == "*":
IQN = "any"
else:
IQN = self.dconf['targetIQN']
dict = {}
IQNstring = ""
IQNs = []
try:
if 'multiSession' in self.dconf:
IQNs = self.dconf['multiSession'].split("|")
for IQN in IQNs:
if IQN:
dict[IQN] = ""
else:
try:
IQNs.remove(IQN)
except:
# Exceptions are not expected but just in case
pass
# Order in multiSession must be preserved. It is important for dual-controllers.
# IQNstring cannot be built with a dictionary iteration because of this
IQNstring = self.dconf['multiSession']
else:
for tgt in self.dconf['target'].split(','):
try:
tgt_ip = util._convertDNS(tgt)
except:
raise xs_errors.XenError('DNSError')
iscsilib.ensure_daemon_running_ok(iscsi.localIQN)
map = iscsilib.discovery(tgt_ip, iscsi.port, iscsi.chapuser, iscsi.chappassword, targetIQN=IQN)
util.SMlog("Discovery for IP %s returned %s" % (tgt, map))
for i in range(0, len(map)):
(portal, tpgt, iqn) = map[i]
(ipaddr, port) = iscsilib.parse_IP_port(portal)
try:
util._testHost(ipaddr, long(port), 'ISCSITarget')
except:
util.SMlog("Target Not reachable: (%s:%s)" % (ipaddr, port))
continue
key = "%s,%s,%s" % (ipaddr, port, iqn)
dict[key] = ""
# Again, do not mess up with IQNs order. Dual controllers will benefit from that
if IQNstring == "":
# Compose the IQNstring first
for key in dict.iterkeys():
IQNstring += "%s|" % key
# Reinitialize and store iterator
key_iterator = dict.iterkeys()
else:
key_iterator = IQNs
# Now load the individual iSCSI base classes
for key in key_iterator:
(ipaddr, port, iqn) = key.split(',')
srcmd_copy = copy.deepcopy(self.original_srcmd)
srcmd_copy.dconf['target'] = ipaddr
srcmd_copy.dconf['targetIQN'] = iqn
srcmd_copy.dconf['multiSession'] = IQNstring
util.SMlog("Setting targetlist: %s" % srcmd_copy.dconf['targetlist'])
self.iscsiSRs.append(BaseISCSI.BaseISCSISR(srcmd_copy, sr_uuid))
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
if pbd != None and 'multiSession' not in self.dconf:
dconf = self.session.xenapi.PBD.get_device_config(pbd)
dconf['multiSession'] = IQNstring
self.session.xenapi.PBD.set_device_config(pbd, dconf)
except Exception as exc:
util.logException("LVHDoISCSISR.load")
saved_exc = exc
try:
self.iscsi = self.iscsiSRs[0]
except IndexError as exc:
if isinstance(saved_exc, SR.SROSError):
raise saved_exc # pylint: disable-msg=E0702
elif isinstance(saved_exc, Exception):
raise xs_errors.XenError('SMGeneral', str(saved_exc))
else:
raise xs_errors.XenError('SMGeneral', str(exc))
# Be extremely careful not to throw exceptions here since this function
# is the main one used by all operations including probing and creating
pbd = None
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
except:
pass
# Apart from the upgrade case, user must specify a SCSIid
if 'SCSIid' not in self.dconf:
# Dual controller issue
self.LUNs = {} # Dict for LUNs from all the iscsi objects
for ii in range(0, len(self.iscsiSRs)):
self.iscsi = self.iscsiSRs[ii]
self._LUNprint(sr_uuid)
for key in self.iscsi.LUNs:
self.LUNs[key] = self.iscsi.LUNs[key]
self.print_LUNs_XML()
self.iscsi = self.iscsiSRs[0] # back to original value
raise xs_errors.XenError('ConfigSCSIid')
self.SCSIid = self.dconf['SCSIid']
# This block checks if the first iscsi target contains the right SCSIid.
# If not it scans the other iscsi targets because chances are that more
# than one controller is present
dev_match = False
forced_login = False
# No need to check if only one iscsi target is present
if len(self.iscsiSRs) == 1:
pass
else:
target_success = False
attempt_discovery = False
for iii in range(0, len(self.iscsiSRs)):
# Check we didn't leave any iscsi session open
# If exceptions happened before, the cleanup function has worked on the right target.
if forced_login == True:
try:
iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
forced_login = False
except:
raise xs_errors.XenError('ISCSILogout')
self.iscsi = self.iscsiSRs[iii]
util.SMlog("path %s" % self.iscsi.path)
util.SMlog("iscsci data: targetIQN %s, portal %s" % (self.iscsi.targetIQN, self.iscsi.target))
iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
if not iscsilib._checkTGT(self.iscsi.targetIQN):
attempt_discovery = True
try:
# Ensure iscsi db has been populated
map = iscsilib.discovery(
self.iscsi.target,
self.iscsi.port,
self.iscsi.chapuser,
self.iscsi.chappassword,
targetIQN=self.iscsi.targetIQN)
if len(map) == 0:
util.SMlog("Discovery for iscsi data targetIQN %s,"
" portal %s returned empty list"
" Trying another path if available" %
(self.iscsi.targetIQN,
self.iscsi.target))
continue
except:
util.SMlog("Discovery failed for iscsi data targetIQN"
" %s, portal %s. Trying another path if"
" available" %
(self.iscsi.targetIQN, self.iscsi.target))
continue
try:
iscsilib.login(self.iscsi.target,
self.iscsi.targetIQN,
self.iscsi.chapuser,
self.iscsi.chappassword,
self.iscsi.incoming_chapuser,
self.iscsi.incoming_chappassword,
self.mpath == "true")
except:
util.SMlog("Login failed for iscsi data targetIQN %s,"
" portal %s. Trying another path"
" if available" %
(self.iscsi.targetIQN, self.iscsi.target))
continue
target_success = True
forced_login = True
# A session should be active.
if not util.wait_for_path(self.iscsi.path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("%s has no associated LUNs" % self.iscsi.targetIQN)
continue
scsiid_path = "/dev/disk/by-id/scsi-" + self.SCSIid
if not util.wait_for_path(scsiid_path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("%s not found" % scsiid_path)
continue
for file in filter(self.iscsi.match_lun, util.listdir(self.iscsi.path)):
lun_path = os.path.join(self.iscsi.path, file)
lun_dev = scsiutil.getdev(lun_path)
try:
lun_scsiid = scsiutil.getSCSIid(lun_dev)
except:
util.SMlog("getSCSIid failed on %s in iscsi %s: LUN"
" offline or iscsi path down" %
(lun_dev, self.iscsi.path))
continue
util.SMlog("dev from lun %s %s" % (lun_dev, lun_scsiid))
if lun_scsiid == self.SCSIid:
util.SMlog("lun match in %s" % self.iscsi.path)
dev_match = True
# No more need to raise ISCSITarget exception.
# Resetting attempt_discovery
attempt_discovery = False
break
if dev_match:
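                    # iii == 0 means the matching LUN was found on the first
                    # target, i.e. the current IQN order is already correct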
if iii == 0:
break
util.SMlog("IQN reordering needed")
new_iscsiSRs = []
IQNs = {}
IQNstring = ""
# iscsiSRs can be seen as a circular buffer: the head now is the matching one
                    for kkk in list(range(iii, len(self.iscsiSRs))) + list(range(0, iii)):
new_iscsiSRs.append(self.iscsiSRs[kkk])
ipaddr = self.iscsiSRs[kkk].target
port = self.iscsiSRs[kkk].port
iqn = self.iscsiSRs[kkk].targetIQN
key = "%s,%s,%s" % (ipaddr, port, iqn)
# The final string must preserve the order without repetition
if key not in IQNs:
IQNs[key] = ""
IQNstring += "%s|" % key
util.SMlog("IQNstring is now %s" % IQNstring)
self.iscsiSRs = new_iscsiSRs
util.SMlog("iqn %s is leading now" % self.iscsiSRs[0].targetIQN)
# Updating pbd entry, if any
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
if pbd != None and 'multiSession' in self.dconf:
util.SMlog("Updating multiSession in PBD")
dconf = self.session.xenapi.PBD.get_device_config(pbd)
dconf['multiSession'] = IQNstring
self.session.xenapi.PBD.set_device_config(pbd, dconf)
except:
pass
break
if not target_success and attempt_discovery:
raise xs_errors.XenError('ISCSITarget')
# Check for any unneeded open iscsi sessions
if forced_login == True:
try:
iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
forced_login = False
except:
raise xs_errors.XenError('ISCSILogout')
LVHDSR.LVHDSR.load(self, sr_uuid)
def print_LUNs_XML(self):
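        """Dump the discovered LUNs as an XML document (one <LUN> element per LUN) to stderr."""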
dom = xml.dom.minidom.Document()
element = dom.createElement("iscsi-target")
dom.appendChild(element)
for uuid in self.LUNs:
val = self.LUNs[uuid]
entry = dom.createElement('LUN')
element.appendChild(entry)
for attr in ('vendor', 'serial', 'LUNid', \
'size', 'SCSIid'):
try:
aval = getattr(val, attr)
except AttributeError:
continue
if aval:
subentry = dom.createElement(attr)
entry.appendChild(subentry)
textnode = dom.createTextNode(str(aval))
subentry.appendChild(textnode)
print(dom.toprettyxml(), file=sys.stderr)
def _getSCSIid_from_LUN(self, sr_uuid):
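        """Attach the target, resolve the single configured LUNid to its SCSI id and detach again."""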
was_attached = True
self.iscsi.attach(sr_uuid)
dev = self.dconf['LUNid'].split(',')
if len(dev) > 1:
raise xs_errors.XenError('LVMOneLUN')
path = os.path.join(self.iscsi.path, "LUN%s" % dev[0])
if not util.wait_for_path(path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("Unable to detect LUN attached to host [%s]" % path)
try:
SCSIid = scsiutil.getSCSIid(path)
except:
raise xs_errors.XenError('InvalidDev')
self.iscsi.detach(sr_uuid)
return SCSIid
def _LUNprint(self, sr_uuid):
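        """Refresh/attach the iSCSI session, wait for the expected LUNs to appear and print them."""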
if self.iscsi.attached:
# Force a rescan on the bus.
self.iscsi.refresh()
# time.sleep(5)
        # Now call attach (handles the refcounting + session activation)
self.iscsi.attach(sr_uuid)
util.SMlog("LUNprint: waiting for path: %s" % self.iscsi.path)
if util.wait_for_path("%s/LUN*" % self.iscsi.path, BaseISCSI.MAX_TIMEOUT):
try:
adapter = self.iscsi.adapter[self.iscsi.address]
util.SMlog("adapter=%s" % adapter)
# find a scsi device on which to issue a report luns command:
devs = glob.glob("%s/LUN*" % self.iscsi.path)
sgdevs = []
for i in devs:
sgdevs.append(int(i.split("LUN")[1]))
sgdevs.sort()
sgdev = "%s/LUN%d" % (self.iscsi.path, sgdevs[0])
# issue a report luns:
luns = util.pread2(["/usr/bin/sg_luns", "-q", sgdev]).split('\n')
nluns = len(luns) - 1 # remove the line relating to the final \n
# check if the LUNs are MPP-RDAC Luns
scsi_id = scsiutil.getSCSIid(sgdev)
# make sure we've got that many sg devices present
for i in range(0, 30):
luns = scsiutil._dosgscan()
                    sgdevs = list(filter(lambda r: r[1] == adapter, luns))
if len(sgdevs) >= nluns:
util.SMlog("Got all %d sg devices" % nluns)
break
else:
util.SMlog("Got %d sg devices - expecting %d" % (len(sgdevs), nluns))
time.sleep(1)
if os.path.exists("/sbin/udevsettle"):
util.pread2(["/sbin/udevsettle"])
else:
util.pread2(["/sbin/udevadm", "settle"])
except:
util.SMlog("Generic exception caught. Pass")
pass # Make sure we don't break the probe...
self.iscsi.print_LUNs()
self.iscsi.detach(sr_uuid)
def create(self, sr_uuid, size):
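        """Verify the SCSIid is free, attach the LUN (handling the George-era SCSIid upgrade) and create the LVHD SR."""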
# Check SCSIid not already in use by other PBDs
if util.test_SCSIid(self.session, sr_uuid, self.SCSIid):
raise xs_errors.XenError('SRInUse')
self.iscsi.attach(sr_uuid)
try:
if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
# UPGRADE FROM GEORGE: take care of ill-formed SCSIid
upgraded = False
matchSCSIid = False
for file in filter(self.iscsi.match_lun, util.listdir(self.iscsi.path)):
path = os.path.join(self.iscsi.path, file)
if not util.wait_for_path(path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("Unable to detect LUN attached to host [%s]" % path)
continue
try:
SCSIid = scsiutil.getSCSIid(path)
except:
continue
try:
matchSCSIid = scsiutil.compareSCSIid_2_6_18(self.SCSIid, path)
except:
continue
if (matchSCSIid):
util.SMlog("Performing upgrade from George")
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
device_config = self.session.xenapi.PBD.get_device_config(pbd)
device_config['SCSIid'] = SCSIid
self.session.xenapi.PBD.set_device_config(pbd, device_config)
self.dconf['SCSIid'] = SCSIid
self.SCSIid = self.dconf['SCSIid']
except:
continue
if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
raise xs_errors.XenError('InvalidDev')
else:
upgraded = True
break
else:
util.SMlog("Not a matching LUN, skip ... scsi_id is: %s" % SCSIid)
continue
if not upgraded:
raise xs_errors.XenError('InvalidDev')
self._pathrefresh(LVHDoISCSISR)
LVHDSR.LVHDSR.create(self, sr_uuid, size)
except Exception as inst:
self.iscsi.detach(sr_uuid)
raise xs_errors.XenError("SRUnavailable", opterr=inst)
self.iscsi.detach(sr_uuid)
def delete(self, sr_uuid):
self._pathrefresh(LVHDoISCSISR)
LVHDSR.LVHDSR.delete(self, sr_uuid)
for i in self.iscsiSRs:
i.detach(sr_uuid)
def attach(self, sr_uuid):
try:
connected = False
for i in self.iscsiSRs:
try:
i.attach(sr_uuid)
except SR.SROSError as inst:
# Some iscsi objects can fail login but not all. Storing exception
if inst.errno == 141:
util.SMlog("Connection failed for target %s, continuing.." % i.target)
stored_exception = inst
continue
else:
raise
else:
connected = True
if not i._attach_LUN_bySCSIid(self.SCSIid):
raise xs_errors.XenError('InvalidDev')
# Check if at least one iscsi succeeded
if not connected:
raise stored_exception
if 'multiSession' in self.dconf:
# Force a manual bus refresh
for a in self.iscsi.adapter:
scsiutil.rescan([self.iscsi.adapter[a]])
self._pathrefresh(LVHDoISCSISR)
LVHDSR.LVHDSR.attach(self, sr_uuid)
except Exception as inst:
for i in self.iscsiSRs:
i.detach(sr_uuid)
raise xs_errors.XenError("SRUnavailable", opterr=inst)
self._setMultipathableFlag(SCSIid=self.SCSIid)
def detach(self, sr_uuid):
LVHDSR.LVHDSR.detach(self, sr_uuid)
for i in self.iscsiSRs:
i.detach(sr_uuid)
def scan(self, sr_uuid):
self._pathrefresh(LVHDoISCSISR)
if self.mpath == "true":
for i in self.iscsiSRs:
try:
i.attach(sr_uuid)
except SR.SROSError:
util.SMlog("Connection failed for target %s, continuing.." % i.target)
LVHDSR.LVHDSR.scan(self, sr_uuid)
def probe(self):
self.uuid = util.gen_uuid()
# When multipathing is enabled, since we don't refcount the multipath maps,
# we should not attempt to do the iscsi.attach/detach when the map is already present,
# as this will remove it (which may well be in use).
if self.mpath == 'true' and 'SCSIid' in self.dconf:
maps = []
try:
maps = mpath_cli.list_maps()
except:
pass
if self.dconf['SCSIid'] in maps:
raise xs_errors.XenError('SRInUse')
self.iscsi.attach(self.uuid)
if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
util.SMlog("Unable to detect LUN")
raise xs_errors.XenError('InvalidDev')
self._pathrefresh(LVHDoISCSISR)
out = LVHDSR.LVHDSR.probe(self)
self.iscsi.detach(self.uuid)
return out
def vdi(self, uuid):
return LVHDoISCSIVDI(self, uuid)
class LVHDoISCSIVDI(LVHDSR.LVHDVDI):
def generate_config(self, sr_uuid, vdi_uuid):
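        """Return an XML-RPC encoded 'vdi_attach_from_config' call carrying the device_config needed to re-attach this VDI."""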
util.SMlog("LVHDoISCSIVDI.generate_config")
if not lvutil._checkLV(self.path):
raise xs_errors.XenError('VDIUnavailable')
dict = {}
self.sr.dconf['localIQN'] = self.sr.iscsi.localIQN
self.sr.dconf['multipathing'] = self.sr.mpath
self.sr.dconf['multipathhandle'] = self.sr.mpathhandle
dict['device_config'] = self.sr.dconf
if 'chappassword_secret' in dict['device_config']:
s = util.get_secret(self.session, dict['device_config']['chappassword_secret'])
del dict['device_config']['chappassword_secret']
dict['device_config']['chappassword'] = s
dict['sr_uuid'] = sr_uuid
dict['vdi_uuid'] = vdi_uuid
dict['command'] = 'vdi_attach_from_config'
# Return the 'config' encoded within a normal XMLRPC response so that
# we can use the regular response/error parsing code.
config = xmlrpclib.dumps(tuple([dict]), "vdi_attach_from_config")
return xmlrpclib.dumps((config, ), "", True)
def attach_from_config(self, sr_uuid, vdi_uuid):
util.SMlog("LVHDoISCSIVDI.attach_from_config")
try:
self.sr.iscsi.attach(sr_uuid)
if not self.sr.iscsi._attach_LUN_bySCSIid(self.sr.SCSIid):
raise xs_errors.XenError('InvalidDev')
return LVHDSR.LVHDVDI.attach(self, sr_uuid, vdi_uuid)
except:
util.logException("LVHDoISCSIVDI.attach_from_config")
raise xs_errors.XenError('SRUnavailable', \
opterr='Unable to attach the heartbeat disk')
if __name__ == '__main__':
SRCommand.run(LVHDoISCSISR, DRIVER_INFO)
else:
SR.registerSR(LVHDoISCSISR)
| lgpl-2.1 | 489,732,788,315,582,140 | 45.486928 | 157 | 0.492865 | false |
porduna/weblabdeusto | server/src/test/unit/weblab/core/test_data_retriever.py | 3 | 14161 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
from __future__ import print_function, unicode_literals
import unittest
import time
import datetime
from voodoo.override import Override
from voodoo.gen import CoordAddress
from weblab.data.experiments import ExperimentId
from weblab.data.command import Command
import weblab.core.data_retriever as TemporalInformationRetriever
import weblab.core.coordinator.store as TemporalInformationStore
from weblab.core.db import DatabaseGateway
import test.unit.configuration as configuration
import voodoo.configuration as ConfigurationManager
RESERVATION1 = 'reservation_id1'
RESERVATION2 = 'reservation_id2'
RESERVATION3 = 'reservation_id3'
RESERVATION4 = 'reservation_id4'
DATA1 = "{'data' : 1 }"
DATA2 = "{'data' : 2 }"
DATA3 = "{'data' : 3 }"
DATA4 = "{'data' : 4 }"
DATA_REQUEST1 = "{'foo' : 1}"
DATA_REQUEST2 = "{'foo' : 1}"
DATA_REQUEST3 = "{'foo' : 1}"
DATA_REQUEST4 = "{'foo' : 1}"
def wait_for(retriever, iterations = 5, max_wait = 10):
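    """ Wait until the retriever has run `iterations` more loops, failing after max_wait seconds """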
initial_time = time.time()
initial_iterations = retriever.iterations
while retriever.iterations - initial_iterations < iterations:
time.sleep(0.01)
if time.time() - initial_time >= max_wait:
raise AssertionError("Maximum time waiting reached")
def coord_addr(coord_addr_str):
return CoordAddress.translate( coord_addr_str )
class TemporalInformationRetrieverTestCase(unittest.TestCase):
def setUp(self):
cfg_manager = ConfigurationManager.ConfigurationManager()
cfg_manager.append_module(configuration)
self.dbmanager = DatabaseGateway(cfg_manager)
self.dbmanager._delete_all_uses()
session = self.dbmanager.Session()
try:
student1 = self.dbmanager._get_user(session, 'student1')
finally:
session.close()
self.initial_store = TemporalInformationStore.InitialTemporalInformationStore()
self.finished_store = TemporalInformationStore.FinishTemporalInformationStore()
self.commands_store = TemporalInformationStore.CommandsTemporalInformationStore()
self.completed_store = TemporalInformationStore.CompletedInformationStore()
self.retriever = TemporalInformationRetriever.TemporalInformationRetriever(cfg_manager, self.initial_store, self.finished_store, self.commands_store, self.completed_store, self.dbmanager)
self.retriever.timeout = 0.001 # Be quicker instead of waiting for half a second
self.initial_time = self.end_time = datetime.datetime.now()
self.initial_timestamp = self.end_timestamp = time.time()
request_info = {'username':'student1','role':'student','permission_scope' : 'user', 'permission_id' : student1.id}
exp_id = ExperimentId('ud-dummy','Dummy Experiments')
self.entry1 = TemporalInformationStore.InitialInformationEntry(
RESERVATION1, exp_id, coord_addr('ser:inst@mach'),
DATA1, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST1)
self.entry2 = TemporalInformationStore.InitialInformationEntry(
RESERVATION2, exp_id, coord_addr('ser:inst@mach'),
DATA2, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST2)
self.entry3 = TemporalInformationStore.InitialInformationEntry(
RESERVATION3, exp_id, coord_addr('ser:inst@mach'),
DATA3, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST3)
self.entry4 = TemporalInformationStore.InitialInformationEntry(
RESERVATION4, exp_id, coord_addr('ser:inst@mach'),
DATA4, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST3)
def test_initial_finish(self):
self.retriever.start()
try:
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(0, len(usages))
self.initial_store.put(self.entry1)
self.initial_store.put(self.entry2)
self.initial_store.put(self.entry3)
self.finished_store.put(RESERVATION4, DATA4, self.initial_time, self.end_time)
            # Wait for the retriever to store the queued entries (up to RESERVATION3, the last one in the queue)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
# There are 3, and RESERVATION4 is waiting
self.assertEquals(3, len(usages))
# Check that it has been stored
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals("@@@initial::request@@@", full_usage1.commands[-2].command.commandstring)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-2].response.commandstring)
self.assertEquals("@@@initial::response@@@", full_usage1.commands[-1].command.commandstring)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
self.assertEquals(None, full_usage1.end_date)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA2, full_usage2.commands[-1].response.commandstring)
self.assertEquals(None, full_usage2.end_date)
full_usage3 = self.dbmanager.retrieve_usage(usages[2].experiment_use_id)
self.assertEquals(DATA3, full_usage3.commands[-1].response.commandstring)
self.assertEquals(None, full_usage3.end_date)
wait_for(self.retriever)
self.initial_store.put(self.entry4)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
# RESERVATION4 achieved
self.assertEquals(4, len(usages))
# And end_date is filled for 4
full_usage4 = self.dbmanager.retrieve_usage(usages[3].experiment_use_id)
self.assertEquals(DATA4, full_usage4.commands[-1].response.commandstring)
self.assertNotEqual(None, full_usage4.end_date)
# While in the rest it's not yet filled
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
self.assertEquals(None, full_usage1.end_date)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA2, full_usage2.commands[-1].response.commandstring)
self.assertEquals(None, full_usage2.end_date)
full_usage3 = self.dbmanager.retrieve_usage(usages[2].experiment_use_id)
self.assertEquals(DATA3, full_usage3.commands[-1].response.commandstring)
self.assertEquals(None, full_usage3.end_date)
# But if we add to the finish store, and we wait:
self.finished_store.put(RESERVATION1, DATA1, self.initial_time, self.end_time)
wait_for(self.retriever)
# Then it is filled
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
self.assertNotEqual(None, full_usage1.end_date)
finally:
self.retriever.stop()
self.retriever.join(1)
self.assertFalse(self.retriever.isAlive())
def test_commands(self):
self.retriever.start()
try:
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(0, len(usages))
self.initial_store.put(self.entry1)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(1, len(usages))
entry_id1 = 58131
entry_id2 = 14214
entry_id3 = 84123
pre_command1 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION1, True, True, entry_id1, Command(DATA_REQUEST1), self.initial_timestamp)
post_command1 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION1, False, True, entry_id1, Command(DATA1), self.initial_timestamp)
pre_command2 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION2, True, True, entry_id2, Command(DATA_REQUEST2), self.initial_timestamp)
post_command2 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION2, False, True, entry_id2, Command(DATA2), self.initial_timestamp)
pre_command3 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION3, True, True, entry_id3, Command(DATA_REQUEST3), self.initial_timestamp)
post_command3 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION3, False, True, entry_id3, Command(DATA3), self.initial_timestamp)
# The reservation is stored, therefore this command will
# also be stored
self.commands_store.put(pre_command1)
# This reservation has not been stored, therefore this command
# will not be stored yet
self.commands_store.put(pre_command2)
            # Neither this reservation nor the pre_command3 has been stored,
# therefore this command will not be stored
self.commands_store.put(post_command3)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(1, len(usages))
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-1].command.commandstring)
self.assertEquals(None, full_usage1.commands[-1].response.commandstring)
# So we add the post_command1, to avoid the "None"
self.commands_store.put(post_command1)
# And the pre_command3, to see if it is correctly enqueued
self.commands_store.put(pre_command3)
# And the entry 2, to let pre_command2 enter
self.initial_store.put(self.entry2)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(2, len(usages))
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-1].command.commandstring)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA_REQUEST2, full_usage2.commands[-1].command.commandstring)
self.assertEquals(None, full_usage2.commands[-1].response.commandstring)
# So now we add the rest
self.commands_store.put(post_command2)
self.initial_store.put(self.entry3)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(3, len(usages))
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-1].command.commandstring)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA_REQUEST2, full_usage2.commands[-1].command.commandstring)
self.assertEquals(DATA2, full_usage2.commands[-1].response.commandstring)
full_usage3 = self.dbmanager.retrieve_usage(usages[2].experiment_use_id)
self.assertEquals(DATA_REQUEST3, full_usage3.commands[-1].command.commandstring)
self.assertEquals(DATA3, full_usage3.commands[-1].response.commandstring)
finally:
self.retriever.stop()
self.retriever.join(1)
self.assertFalse(self.retriever.isAlive())
class FakeTemporalInformationRetriever(TemporalInformationRetriever.TemporalInformationRetriever):
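    """ Fake retriever whose iterate() always raises, used to check that failures are counted """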
PRINT_ERRORS = False
@Override(TemporalInformationRetriever.TemporalInformationRetriever)
def iterate(self):
failures = getattr(self, 'failures', 0)
self.failures = failures + 1
return 10 / 0 # cause an error
class IterationFailerTemporalInformationRetrieverTestCase(unittest.TestCase):
def test_fail(self):
initial_store = TemporalInformationStore.InitialTemporalInformationStore()
finished_store = TemporalInformationStore.FinishTemporalInformationStore()
commands_store = TemporalInformationStore.CommandsTemporalInformationStore()
completed_store = TemporalInformationStore.CompletedInformationStore()
cfg_manager = ConfigurationManager.ConfigurationManager()
cfg_manager.append_module(configuration)
fake = FakeTemporalInformationRetriever(cfg_manager, initial_store, finished_store, commands_store, completed_store, None)
fake.start()
try:
initial_time = time.time()
while not hasattr( fake, 'failures') or fake.failures < 1:
time.sleep(0.01)
if time.time() - initial_time > 5:
raise AssertionError("Too long time passed waiting for failures to increase")
finally:
fake.stop()
fake.join(1)
self.assertFalse( fake.isAlive() )
def suite():
return unittest.TestSuite((
unittest.makeSuite(TemporalInformationRetrieverTestCase),
unittest.makeSuite(IterationFailerTemporalInformationRetrieverTestCase),
))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -2,806,713,701,249,388,500 | 42.839009 | 195 | 0.671257 | false |
switchboardOp/ansible | lib/ansible/modules/network/avi/avi_cloud.py | 46 | 9205 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloud
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Cloud Avi RESTful Object
description:
- This module is used to configure Cloud object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
apic_configuration:
description:
- Apicconfiguration settings for cloud.
apic_mode:
description:
- Boolean flag to set apic_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
aws_configuration:
description:
- Awsconfiguration settings for cloud.
cloudstack_configuration:
description:
- Cloudstackconfiguration settings for cloud.
dhcp_enabled:
description:
- Select the ip address management scheme.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
dns_provider_ref:
description:
- Dns profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
docker_configuration:
description:
- Dockerconfiguration settings for cloud.
east_west_dns_provider_ref:
description:
- Dns profile for east-west services.
- It is a reference to an object of type ipamdnsproviderprofile.
east_west_ipam_provider_ref:
description:
- Ipam profile for east-west services.
- Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
- For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
- Hence, it should not be used in this profile.
- It is a reference to an object of type ipamdnsproviderprofile.
enable_vip_static_routes:
description:
- Use static routes for vip side network resolution during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ipam_provider_ref:
description:
- Ipam profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
- The default mappings are container cloud is max ses, openstack and vmware is cores and linux it is sockets.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS.
linuxserver_configuration:
description:
- Linuxserverconfiguration settings for cloud.
mesos_configuration:
description:
- Mesosconfiguration settings for cloud.
mtu:
description:
- Mtu setting for the cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
name:
description:
- Name of the object.
required: true
nsx_configuration:
description:
- Configuration parameters for nsx manager.
- Field introduced in 17.1.1.
obj_name_prefix:
description:
- Default prefix for all automatically created objects in this cloud.
- This prefix can be overridden by the se-group template.
openstack_configuration:
description:
- Openstackconfiguration settings for cloud.
oshiftk8s_configuration:
description:
- Oshiftk8sconfiguration settings for cloud.
prefer_static_routes:
description:
- Prefer static routes over interface routes during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
proxy_configuration:
description:
- Proxyconfiguration settings for cloud.
rancher_configuration:
description:
- Rancherconfiguration settings for cloud.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vca_configuration:
description:
- Vcloudairconfiguration settings for cloud.
vcenter_configuration:
description:
- Vcenterconfiguration settings for cloud.
vtype:
description:
- Cloud type.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a VMware cloud with write access mode
avi_cloud:
username: ''
controller: ''
password: ''
apic_mode: false
dhcp_enabled: true
enable_vip_static_routes: false
license_type: LIC_CORES
mtu: 1500
name: VCenter Cloud
prefer_static_routes: false
tenant_ref: admin
vcenter_configuration:
datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100
management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100
password: password
privilege: WRITE_ACCESS
username: user
vcenter_url: 10.10.20.100
vtype: CLOUD_VCENTER
'''
RETURN = '''
obj:
description: Cloud (api/cloud) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
apic_configuration=dict(type='dict',),
apic_mode=dict(type='bool',),
aws_configuration=dict(type='dict',),
cloudstack_configuration=dict(type='dict',),
dhcp_enabled=dict(type='bool',),
dns_provider_ref=dict(type='str',),
docker_configuration=dict(type='dict',),
east_west_dns_provider_ref=dict(type='str',),
east_west_ipam_provider_ref=dict(type='str',),
enable_vip_static_routes=dict(type='bool',),
ipam_provider_ref=dict(type='str',),
license_type=dict(type='str',),
linuxserver_configuration=dict(type='dict',),
mesos_configuration=dict(type='dict',),
mtu=dict(type='int',),
name=dict(type='str', required=True),
nsx_configuration=dict(type='dict',),
obj_name_prefix=dict(type='str',),
openstack_configuration=dict(type='dict',),
oshiftk8s_configuration=dict(type='dict',),
prefer_static_routes=dict(type='bool',),
proxy_configuration=dict(type='dict',),
rancher_configuration=dict(type='dict',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vca_configuration=dict(type='dict',),
vcenter_configuration=dict(type='dict',),
vtype=dict(type='str', required=True),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloud',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | 3,615,036,202,447,534,000 | 36.880658 | 159 | 0.641065 | false |