import csv
import random
import StringIO
from django.core.mail import send_mass_mail
from django.db.models import Q
from django.http import HttpResponse
from django.http import HttpResponseBadRequest, HttpResponseNotAllowed
from django.shortcuts import render, redirect, get_object_or_404
from django.template import Context, Template
from django.views.decorators.http import require_POST
from account.decorators import login_required
# @@@ switch to pinax-teams
from symposion.teams.models import Team
from symposion.conf import settings
from symposion.proposals.models import ProposalBase, ProposalSection
from symposion.utils.mail import send_email
from symposion.reviews.forms import ReviewForm, SpeakerCommentForm
from symposion.reviews.forms import BulkPresentationForm
from symposion.reviews.models import (
ReviewAssignment, Review, LatestVote, ProposalResult, NotificationTemplate,
ResultNotification, promote_proposal
)
def access_not_permitted(request):
return render(request, "symposion/reviews/access_not_permitted.html")
def proposals_generator(request, queryset, user_pk=None, check_speaker=True):
for obj in queryset:
# @@@ this sucks; we can do better
if check_speaker:
if request.user in [s.user for s in obj.speakers()]:
continue
try:
obj.result
except ProposalResult.DoesNotExist:
ProposalResult.objects.get_or_create(proposal=obj)
obj.comment_count = obj.result.comment_count
obj.score = obj.result.score
obj.total_votes = obj.result.vote_count
obj.plus_two = obj.result.plus_two
obj.plus_one = obj.result.plus_one
obj.minus_one = obj.result.minus_one
obj.minus_two = obj.result.minus_two
lookup_params = dict(proposal=obj)
if user_pk:
lookup_params["user__pk"] = user_pk
else:
lookup_params["user"] = request.user
try:
obj.user_vote = LatestVote.objects.get(**lookup_params).vote
obj.user_vote_css = LatestVote.objects.get(**lookup_params).css_class()
except LatestVote.DoesNotExist:
obj.user_vote = None
obj.user_vote_css = "no-vote"
yield obj
VOTE_THRESHOLD = settings.SYMPOSION_VOTE_THRESHOLD
POSITIVE = "positive"
NEGATIVE = "negative"
INDIFFERENT = "indifferent"
CONTROVERSIAL = "controversial"
TOO_FEW = "too_few"
REVIEW_STATUS_FILTERS = {
# proposals with at least VOTE_THRESHOLD reviews and at least one +2 and no -2s, sorted by
# the 'score'
POSITIVE: lambda qs: qs.filter(result__vote_count__gte=VOTE_THRESHOLD, result__plus_two__gt=0,
result__minus_two=0).order_by("-result__score"),
# proposals with at least VOTE_THRESHOLD reviews and at least one -2 and no +2s, reverse
# sorted by the 'score'
NEGATIVE: lambda qs: qs.filter(result__vote_count__gte=VOTE_THRESHOLD, result__minus_two__gt=0,
result__plus_two=0).order_by("result__score"),
    # proposals with at least VOTE_THRESHOLD reviews and neither a +2 nor a -2, sorted by total
# votes (lowest first)
INDIFFERENT: lambda qs: qs.filter(result__vote_count__gte=VOTE_THRESHOLD, result__minus_two=0,
result__plus_two=0).order_by("result__vote_count"),
# proposals with at least VOTE_THRESHOLD reviews and both a +2 and -2, sorted by total
# votes (highest first)
CONTROVERSIAL: lambda qs: qs.filter(result__vote_count__gte=VOTE_THRESHOLD,
result__plus_two__gt=0, result__minus_two__gt=0)
.order_by("-result__vote_count"),
# proposals with fewer than VOTE_THRESHOLD reviews
TOO_FEW: lambda qs: qs.filter(result__vote_count__lt=VOTE_THRESHOLD)
.order_by("result__vote_count"),
}
# Returns a list of all proposals, the proposals reviewed by the user, or the proposals the user
# has yet to review, depending on the link the user clicks in the dashboard
@login_required
def review_section(request, section_slug, assigned=False, reviewed="all"):
if not request.user.has_perm("reviews.can_review_%s" % section_slug):
return access_not_permitted(request)
section = get_object_or_404(ProposalSection, section__slug=section_slug)
queryset = ProposalBase.objects.filter(kind__section=section.section)
if assigned:
assignments = ReviewAssignment.objects.filter(user=request.user)\
.values_list("proposal__id")
queryset = queryset.filter(id__in=assignments)
# passing reviewed in from reviews.urls and out to review_list for
# appropriate template header rendering
if reviewed == "all":
queryset = queryset.select_related("result").select_subclasses()
reviewed = "all_reviews"
elif reviewed == "reviewed":
queryset = queryset.filter(reviews__user=request.user)
reviewed = "user_reviewed"
else:
queryset = queryset.exclude(reviews__user=request.user).exclude(
speaker__user=request.user)
reviewed = "user_not_reviewed"
# lca2017 #21 -- chairs want to be able to see their own proposals in the list
check_speaker = not request.user.has_perm("reviews.can_manage_%s" % section_slug)
proposals = proposals_generator(request, queryset, check_speaker=check_speaker)
ctx = {
"proposals": proposals,
"section": section,
"reviewed": reviewed,
}
return render(request, "symposion/reviews/review_list.html", ctx)
@login_required
def review_all_proposals_csv(request):
    ''' Returns a CSV representation of all of the proposals this user has
    permission to review. '''
    response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename="proposals.csv"'
writer = csv.writer(response, quoting=csv.QUOTE_NONNUMERIC)
queryset = ProposalBase.objects.filter()
# The fields from each proposal object to report in the csv
fields = [
"id", "proposal_type", "speaker_name","speaker_email", "title",
"submitted", "other_speakers", "speaker_travel",
"speaker_accommodation", "cancelled", "status", "score", "total_votes",
"minus_two", "minus_one", "plus_one", "plus_two",
]
# Fields are the heading
writer.writerow(fields)
for proposal in proposals_generator(request, queryset, check_speaker=False):
proposal.speaker_name = proposal.speaker.name
section_slug = proposal.kind.section.slug
kind_slug = proposal.kind.slug
proposal.proposal_type = kind_slug
proposal.other_speakers = ", ".join(
speaker.name
for speaker in proposal.additional_speakers.all()
)
proposal.speaker_travel = ", ".join(
str(bool(speaker.travel_assistance))
for speaker in proposal.speakers()
)
proposal.speaker_accommodation = ", ".join(
str(bool(speaker.accommodation_assistance))
for speaker in proposal.speakers()
)
if not request.user.has_perm("reviews.can_review_%s" % section_slug):
continue
csv_line = [getattr(proposal, field) for field in fields]
        # Ensure that unicode items are handled properly.
for i, item in enumerate(csv_line):
if isinstance(item, unicode):
csv_line[i] = item.encode("utf8")
writer.writerow(csv_line)
return response
@login_required
def review_random_proposal(request, section_slug):
# lca2017 #16 view for random proposal
if not request.user.has_perm("reviews.can_review_%s" % section_slug):
return access_not_permitted(request)
section = get_object_or_404(ProposalSection, section__slug=section_slug)
queryset = ProposalBase.objects.filter(kind__section=section.section)
# Remove ones already reviewed
queryset = queryset.exclude(reviews__user=request.user)
# Remove talks the reviewer can't vote on -- their own.
queryset = queryset.exclude(speaker__user=request.user)
queryset = queryset.exclude(additional_speakers__user=request.user)
if len(queryset) == 0:
return redirect("review_section", section_slug=section_slug, reviewed="all")
# Direct reviewers to underreviewed proposals
too_few_set = REVIEW_STATUS_FILTERS[TOO_FEW](queryset)
controversial_set = REVIEW_STATUS_FILTERS[CONTROVERSIAL](queryset)
if len(too_few_set) > 0:
proposals = too_few_set.all()
elif len(controversial_set) > 0 and random.random() < 0.2:
proposals = controversial_set.all()
else:
# Select a proposal with less than the median number of total votes
proposals = proposals_generator(request, queryset, check_speaker=False)
proposals = list(proposals)
        proposals.sort(key=lambda proposal: proposal.total_votes)
# The first half is the median or less.
# The +1 means we round _up_.
proposals = proposals[:(len(proposals) + 1) / 2]
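        # e.g. with 5 proposals, (5 + 1) / 2 == 3 under integer division,
        # so the three proposals with the fewest votes are kept.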
# Realistically, there shouldn't be all that many proposals to choose
# from, so this should be cheap.
chosen = random.choice(proposals)
return redirect("review_detail", pk=chosen.pk)
@login_required
def review_list(request, section_slug, user_pk):
# if they're not a reviewer admin and they aren't the person whose
# review list is being asked for, don't let them in
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
if not request.user.pk == user_pk:
return access_not_permitted(request)
queryset = ProposalBase.objects.select_related("speaker__user", "result")
reviewed = LatestVote.objects.filter(user__pk=user_pk).values_list("proposal", flat=True)
queryset = queryset.filter(kind__section__slug=section_slug)
queryset = queryset.filter(pk__in=reviewed)
proposals = queryset.order_by("submitted")
admin = request.user.has_perm("reviews.can_manage_%s" % section_slug)
proposals = proposals_generator(request, proposals, user_pk=user_pk, check_speaker=not admin)
ctx = {
"proposals": proposals,
}
return render(request, "symposion/reviews/review_list.html", ctx)
@login_required
def review_admin(request, section_slug):
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
return access_not_permitted(request)
def reviewers():
already_seen = set()
for team in Team.objects.filter(permissions__codename="can_review_%s" % section_slug):
for membership in team.memberships.filter(Q(state="member") | Q(state="manager")):
user = membership.user
if user.pk in already_seen:
continue
already_seen.add(user.pk)
user.comment_count = Review.objects.filter(
user=user,
proposal__kind__section__slug=section_slug,
).count()
user_votes = LatestVote.objects.filter(
user=user,
proposal__kind__section__slug=section_slug,
)
user.total_votes = user_votes.exclude(
vote=LatestVote.VOTES.ABSTAIN,
).count()
user.plus_two = user_votes.filter(
vote=LatestVote.VOTES.PLUS_TWO,
).count()
user.plus_one = user_votes.filter(
vote=LatestVote.VOTES.PLUS_ONE,
).count()
user.minus_one = user_votes.filter(
vote=LatestVote.VOTES.MINUS_ONE,
).count()
user.minus_two = user_votes.filter(
vote=LatestVote.VOTES.MINUS_TWO,
).count()
user.abstain = user_votes.filter(
vote=LatestVote.VOTES.ABSTAIN,
).count()
if user.total_votes == 0:
user.average = "-"
else:
user.average = (
((user.plus_two * 2) + user.plus_one) -
((user.minus_two * 2) + user.minus_one)
) / (user.total_votes * 1.0)
yield user
reviewers_sorted = list(reviewers())
    reviewers_sorted.sort(key=lambda reviewer: -reviewer.total_votes)
ctx = {
"section_slug": section_slug,
"reviewers": reviewers_sorted,
}
return render(request, "symposion/reviews/review_admin.html", ctx)
# FIXME: This view is too complex according to flake8
@login_required
def review_detail(request, pk):
proposals = ProposalBase.objects.select_related("result").select_subclasses()
proposal = get_object_or_404(proposals, pk=pk)
if not request.user.has_perm("reviews.can_review_%s" % proposal.kind.section.slug):
return access_not_permitted(request)
speakers = [s.user for s in proposal.speakers()]
if not request.user.is_superuser and request.user in speakers:
return access_not_permitted(request)
admin = request.user.has_perm("reviews.can_manage_%s" % proposal.kind.section.slug)
try:
latest_vote = LatestVote.objects.get(proposal=proposal, user=request.user)
except LatestVote.DoesNotExist:
latest_vote = None
if request.method == "POST":
if request.user in speakers:
return access_not_permitted(request)
if "vote_submit" in request.POST or "vote_submit_and_random" in request.POST:
review_form = ReviewForm(request.POST)
if review_form.is_valid():
review = review_form.save(commit=False)
review.user = request.user
review.proposal = proposal
review.save()
if "vote_submit_and_random" in request.POST:
next_page = redirect("user_random", proposal.kind.section.slug)
next_page["Location"] += "#invalid_fragment" # Hack.
else:
next_page = redirect(request.path)
return next_page
else:
message_form = SpeakerCommentForm()
elif "message_submit" in request.POST and admin:
message_form = SpeakerCommentForm(request.POST)
if message_form.is_valid():
message = message_form.save(commit=False)
message.user = request.user
message.proposal = proposal
message.save()
for speaker in speakers:
if speaker and speaker.email:
ctx = {
"proposal": proposal,
"message": message,
"reviewer": False,
}
send_email(
[speaker.email], "proposal_new_message",
context=ctx
)
return redirect(request.path)
else:
initial = {}
if latest_vote:
initial["vote"] = latest_vote.vote
if request.user in speakers:
review_form = None
else:
review_form = ReviewForm(initial=initial)
elif "result_submit" in request.POST:
if admin:
result = request.POST["result_submit"]
if result == "accept":
proposal.result.status = "accepted"
proposal.result.save()
elif result == "reject":
proposal.result.status = "rejected"
proposal.result.save()
elif result == "undecide":
proposal.result.status = "undecided"
proposal.result.save()
elif result == "standby":
proposal.result.status = "standby"
proposal.result.save()
return redirect(request.path)
elif "publish_changes" in request.POST:
if admin and proposal.result.status == "accepted":
promote_proposal(proposal)
return redirect(request.path)
else:
initial = {}
if latest_vote:
initial["vote"] = latest_vote.vote
if request.user in speakers:
review_form = None
else:
review_form = ReviewForm(initial=initial)
message_form = SpeakerCommentForm()
proposal.comment_count = proposal.result.comment_count
proposal.total_votes = proposal.result.vote_count
proposal.plus_two = proposal.result.plus_two
proposal.plus_one = proposal.result.plus_one
proposal.minus_one = proposal.result.minus_one
proposal.minus_two = proposal.result.minus_two
reviews = Review.objects.filter(proposal=proposal).order_by("-submitted_at")
messages = proposal.messages.order_by("submitted_at")
return render(request, "symposion/reviews/review_detail.html", {
"proposal": proposal,
"latest_vote": latest_vote,
"reviews": reviews,
"review_messages": messages,
"review_form": review_form,
"message_form": message_form,
"is_manager": admin
})
@login_required
@require_POST
def review_delete(request, pk):
review = get_object_or_404(Review, pk=pk)
section_slug = review.section
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
return access_not_permitted(request)
review.delete()
return redirect("review_detail", pk=review.proposal.pk)
@login_required
def review_status(request, section_slug=None, key=None):
if not request.user.has_perm("reviews.can_review_%s" % section_slug):
return access_not_permitted(request)
ctx = {
"section_slug": section_slug,
"vote_threshold": VOTE_THRESHOLD,
}
queryset = ProposalBase.objects.select_related("speaker__user", "result").select_subclasses()
if section_slug:
queryset = queryset.filter(kind__section__slug=section_slug)
proposals = dict((key, filt(queryset)) for key, filt in REVIEW_STATUS_FILTERS.items())
admin = request.user.has_perm("reviews.can_manage_%s" % section_slug)
for status in proposals:
if key and key != status:
continue
proposals[status] = list(proposals_generator(request, proposals[status], check_speaker=not admin))
if key:
ctx.update({
"key": key,
"proposals": proposals[key],
})
else:
ctx["proposals"] = proposals
return render(request, "symposion/reviews/review_stats.html", ctx)
@login_required
def review_assignments(request):
if not request.user.groups.filter(name="reviewers").exists():
return access_not_permitted(request)
assignments = ReviewAssignment.objects.filter(
user=request.user,
opted_out=False
)
return render(request, "symposion/reviews/review_assignment.html", {
"assignments": assignments,
})
@login_required
@require_POST
def review_assignment_opt_out(request, pk):
review_assignment = get_object_or_404(
ReviewAssignment, pk=pk, user=request.user)
if not review_assignment.opted_out:
review_assignment.opted_out = True
review_assignment.save()
ReviewAssignment.create_assignments(
review_assignment.proposal, origin=ReviewAssignment.AUTO_ASSIGNED_LATER)
return redirect("review_assignments")
@login_required
def review_bulk_accept(request, section_slug):
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
return access_not_permitted(request)
if request.method == "POST":
form = BulkPresentationForm(request.POST)
if form.is_valid():
talk_ids = form.cleaned_data["talk_ids"].split(",")
talks = ProposalBase.objects.filter(id__in=talk_ids).select_related("result")
for talk in talks:
talk.result.status = "accepted"
talk.result.save()
return redirect("review_section", section_slug=section_slug)
else:
form = BulkPresentationForm()
return render(request, "symposion/reviews/review_bulk_accept.html", {
"form": form,
})
@login_required
def result_notification(request, section_slug, status):
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
return access_not_permitted(request)
    proposals = ProposalBase.objects.filter(
        kind__section__slug=section_slug,
        result__status=status,
    ).select_related("speaker__user", "result").select_subclasses()
notification_templates = NotificationTemplate.objects.all()
ctx = {
"section_slug": section_slug,
"status": status,
"proposals": proposals,
"notification_templates": notification_templates,
}
return render(request, "symposion/reviews/result_notification.html", ctx)
@login_required
def result_notification_prepare(request, section_slug, status):
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
return access_not_permitted(request)
proposal_pks = []
try:
for pk in request.POST.getlist("_selected_action"):
proposal_pks.append(int(pk))
except ValueError:
return HttpResponseBadRequest()
proposals = ProposalBase.objects.filter(
kind__section__slug=section_slug,
result__status=status,
)
proposals = proposals.filter(pk__in=proposal_pks)
proposals = proposals.select_related("speaker__user", "result")
proposals = proposals.select_subclasses()
notification_template_pk = request.POST.get("notification_template", "")
if notification_template_pk:
notification_template = NotificationTemplate.objects.get(pk=notification_template_pk)
else:
notification_template = None
ctx = {
"section_slug": section_slug,
"status": status,
"notification_template": notification_template,
"proposals": proposals,
"proposal_pks": ",".join([str(pk) for pk in proposal_pks]),
}
return render(request, "symposion/reviews/result_notification_prepare.html", ctx)
@login_required
def result_notification_send(request, section_slug, status):
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
return access_not_permitted(request)
if not all([k in request.POST for k in ["proposal_pks", "from_address", "subject", "body"]]):
return HttpResponseBadRequest()
try:
proposal_pks = [int(pk) for pk in request.POST["proposal_pks"].split(",")]
except ValueError:
return HttpResponseBadRequest()
proposals = ProposalBase.objects.filter(
kind__section__slug=section_slug,
result__status=status,
)
proposals = proposals.filter(pk__in=proposal_pks)
proposals = proposals.select_related("speaker__user", "result")
proposals = proposals.select_subclasses()
notification_template_pk = request.POST.get("notification_template", "")
if notification_template_pk:
notification_template = NotificationTemplate.objects.get(pk=notification_template_pk)
else:
notification_template = None
emails = []
for proposal in proposals:
rn = ResultNotification()
rn.proposal = proposal
rn.template = notification_template
rn.to_address = proposal.speaker_email
rn.from_address = request.POST["from_address"]
proposal_context = proposal.notification_email_context()
rn.subject = Template(request.POST["subject"]).render(
Context({
"proposal": proposal_context
})
)
rn.body = Template(request.POST["body"]).render(
Context({
"proposal": proposal_context
})
)
rn.save()
emails.append(rn.email_args)
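    # Each tuple appended above is assumed to be in the datatuple form that
    # Django's send_mass_mail() expects: (subject, message, from_email,
    # recipient_list).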
send_mass_mail(emails)
return redirect("result_notification", section_slug=section_slug, status=status)
"""
Unified interfaces to root finding algorithms.
Functions
---------
- root : find a root of a vector function.
"""
__all__ = ['root']
import numpy as np
ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov',
'df-sane']
from warnings import warn
from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options
from .minpack import _root_hybr, leastsq
from ._spectral import _root_df_sane
from . import nonlin
def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
options=None):
"""
Find a root of a vector function.
Parameters
----------
fun : callable
A vector function to find a root of.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to the objective function and its Jacobian.
method : str, optional
Type of solver. Should be one of
- 'hybr' :ref:`(see here) <optimize.root-hybr>`
- 'lm' :ref:`(see here) <optimize.root-lm>`
- 'broyden1' :ref:`(see here) <optimize.root-broyden1>`
- 'broyden2' :ref:`(see here) <optimize.root-broyden2>`
- 'anderson' :ref:`(see here) <optimize.root-anderson>`
- 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>`
- 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>`
- 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>`
- 'krylov' :ref:`(see here) <optimize.root-krylov>`
- 'df-sane' :ref:`(see here) <optimize.root-dfsane>`
jac : bool or callable, optional
If `jac` is a Boolean and is True, `fun` is assumed to return the
value of Jacobian along with the objective function. If False, the
Jacobian will be estimated numerically.
`jac` can also be a callable returning the Jacobian of `fun`. In
this case, it must accept the same arguments as `fun`.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual. For all methods but 'hybr' and 'lm'.
options : dict, optional
A dictionary of solver options. E.g., `xtol` or `maxiter`, see
:obj:`show_options()` for details.
Returns
-------
sol : OptimizeResult
        The solution represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the algorithm exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *hybr*.
Method *hybr* uses a modification of the Powell hybrid method as
implemented in MINPACK [1]_.
Method *lm* solves the system of nonlinear equations in a least squares
sense using a modification of the Levenberg-Marquardt algorithm as
implemented in MINPACK [1]_.
Method *df-sane* is a derivative-free spectral method. [3]_
Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
*diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
with backtracking or full line searches [2]_. Each method corresponds
    to a particular Jacobian approximation. See `nonlin` for details.
    - Method *broyden1* uses Broyden's first Jacobian approximation; it is
      known as Broyden's good method.
    - Method *broyden2* uses Broyden's second Jacobian approximation; it
      is known as Broyden's bad method.
    - Method *anderson* uses (extended) Anderson mixing.
    - Method *krylov* uses a Krylov approximation for the inverse Jacobian.
      It is suitable for large-scale problems.
- Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
- Method *linearmixing* uses a scalar Jacobian approximation.
- Method *excitingmixing* uses a tuned diagonal Jacobian
approximation.
.. warning::
The algorithms implemented for methods *diagbroyden*,
*linearmixing* and *excitingmixing* may be useful for specific
problems, but whether they will work may depend strongly on the
problem.
.. versionadded:: 0.11.0
References
----------
.. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
1980. User Guide for MINPACK-1.
.. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
Equations. Society for Industrial and Applied Mathematics.
<https://archive.siam.org/books/kelley/fr16/>
.. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).
Examples
--------
The following functions define a system of nonlinear equations and its
jacobian.
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
>>> def jac(x):
... return np.array([[1 + 1.5 * (x[0] - x[1])**2,
... -1.5 * (x[0] - x[1])**2],
... [-1.5 * (x[1] - x[0])**2,
... 1 + 1.5 * (x[1] - x[0])**2]])
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
>>> sol.x
array([ 0.8411639, 0.1588361])
"""
if not isinstance(args, tuple):
args = (args,)
meth = method.lower()
if options is None:
options = {}
if callback is not None and meth in ('hybr', 'lm'):
warn('Method %s does not accept callback.' % method,
RuntimeWarning)
# fun also returns the Jacobian
if not callable(jac) and meth in ('hybr', 'lm'):
if bool(jac):
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth in ('hybr', 'lm'):
options.setdefault('xtol', tol)
elif meth in ('df-sane',):
options.setdefault('ftol', tol)
elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'krylov'):
options.setdefault('xtol', tol)
options.setdefault('xatol', np.inf)
options.setdefault('ftol', np.inf)
options.setdefault('fatol', np.inf)
if meth == 'hybr':
sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
elif meth == 'lm':
sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
elif meth == 'df-sane':
_warn_jac_unused(jac, method)
sol = _root_df_sane(fun, x0, args=args, callback=callback,
**options)
elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'krylov'):
_warn_jac_unused(jac, method)
sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
_method=meth, _callback=callback,
**options)
else:
raise ValueError('Unknown solver %s' % method)
return sol
def _warn_jac_unused(jac, method):
if jac is not None:
warn('Method %s does not use the jacobian (jac).' % (method,),
RuntimeWarning)
def _root_leastsq(fun, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
**unknown_options):
"""
Solve for least squares with Levenberg-Marquardt
Options
-------
col_deriv : bool
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float
Relative error desired in the sum of squares.
xtol : float
Relative error desired in the approximate solution.
gtol : float
Orthogonality desired between the function vector and the columns
of the Jacobian.
maxiter : int
The maximum number of calls to the function. If zero, then
100*(N+1) is the maximum where N is the number of elements in x0.
    eps : float
A suitable step length for the forward-difference approximation of
the Jacobian (for Dfun=None). If epsfcn is less than the machine
precision, it is assumed that the relative errors in the functions
are of the order of the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the variables.
"""
_check_unknown_options(unknown_options)
x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac,
full_output=True,
col_deriv=col_deriv, xtol=xtol,
ftol=ftol, gtol=gtol,
maxfev=maxiter, epsfcn=eps,
factor=factor, diag=diag)
sol = OptimizeResult(x=x, message=msg, status=ier,
success=ier in (1, 2, 3, 4), cov_x=cov_x,
fun=info.pop('fvec'))
sol.update(info)
return sol
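# A minimal sketch of reaching _root_leastsq through the public `root`
# interface; the residual function, starting point and tolerance below are
# illustrative assumptions, not values used elsewhere in this module.
#
# >>> from scipy.optimize import root
# >>> sol = root(lambda x: [x[0]**2 - 1.0, x[1] - 2.0], [2.0, 0.0],
# ...            method='lm', options={'xtol': 1e-10})
# >>> sol.success
# True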
def _root_nonlin_solve(fun, x0, args=(), jac=None,
_callback=None, _method=None,
nit=None, disp=False, maxiter=None,
ftol=None, fatol=None, xtol=None, xatol=None,
tol_norm=None, line_search='armijo', jac_options=None,
**unknown_options):
_check_unknown_options(unknown_options)
f_tol = fatol
f_rtol = ftol
x_tol = xatol
x_rtol = xtol
verbose = disp
if jac_options is None:
jac_options = dict()
jacobian = {'broyden1': nonlin.BroydenFirst,
'broyden2': nonlin.BroydenSecond,
'anderson': nonlin.Anderson,
'linearmixing': nonlin.LinearMixing,
'diagbroyden': nonlin.DiagBroyden,
'excitingmixing': nonlin.ExcitingMixing,
'krylov': nonlin.KrylovJacobian
}[_method]
if args:
if jac:
def f(x):
return fun(x, *args)[0]
else:
def f(x):
return fun(x, *args)
else:
f = fun
x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
iter=nit, verbose=verbose,
maxiter=maxiter, f_tol=f_tol,
f_rtol=f_rtol, x_tol=x_tol,
x_rtol=x_rtol, tol_norm=tol_norm,
line_search=line_search,
callback=_callback, full_output=True,
raise_exception=False)
sol = OptimizeResult(x=x)
sol.update(info)
return sol
def _root_broyden1_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``
Drop all matrix columns. Has no
extra parameters.
- ``simple``
Drop oldest matrix column. Has no
extra parameters.
- ``svd``
Keep only the most significant SVD
components.
Extra parameters:
- ``to_retain``
Number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
"""
pass
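# A minimal sketch of how the options documented above would be passed to
# `root` for the 'broyden1' method; the function, initial guess and option
# values are illustrative assumptions.
#
# >>> import numpy as np
# >>> from scipy.optimize import root
# >>> sol = root(lambda x: np.cos(x) - x, [0.5], method='broyden1',
# ...            options={'maxiter': 200, 'line_search': 'wolfe',
# ...                     'jac_options': {'reduction_method': 'simple'}})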
def _root_broyden2_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``
Drop all matrix columns. Has no
extra parameters.
- ``simple``
Drop oldest matrix column. Has no
extra parameters.
- ``svd``
Keep only the most significant SVD
components.
Extra parameters:
- ``to_retain``
Number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
"""
pass
def _root_anderson_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
"""
pass
def _root_linearmixing_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, ``NoConvergence`` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
"""
pass
def _root_diagbroyden_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
"""
pass
def _root_excitingmixing_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
"""
pass
def _root_krylov_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same
interface as the iterative solvers in
`scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.inverse).
If the preconditioner has a method named 'update', it will
be called as ``update(x, f)`` after each nonlinear step,
with ``x`` giving the current point, and ``f`` the current
function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the "inner" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear
iterations.
See `scipy.sparse.linalg.lgmres` for details.
"""
pass
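# A minimal sketch of selecting the Krylov-based solver and forwarding
# `jac_options` to it via `root`; the target function, initial guess and
# option values are illustrative assumptions.
#
# >>> import numpy as np
# >>> from scipy.optimize import root
# >>> def F(x):
# ...     return np.array([x[0] + 0.5 * x[1] - 1.0,
# ...                      0.5 * x[0] + x[1] + 1.0])
# >>> sol = root(F, np.zeros(2), method='krylov',
# ...            options={'jac_options': {'method': 'lgmres', 'rdiff': 1e-8}})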
# -*- coding: utf-8 -*-
"""
Standard Markov Chain Monte Carlo updates.
"""
__authors__ = 'Matt Graham'
__copyright__ = 'Copyright 2015, Matt Graham'
__license__ = 'MIT'
import numpy as np
import warnings
def metropolis_step(x_curr, log_f_curr, log_f_func, prng, prop_sampler,
prop_scales):
""" Performs single Metropolis Markov chain update.
Proposed state update is sampled from proposal sampler given current state
(with proposal density assumed to be symmetric for Metropolis variant) and
then proposed update accepted with probability
.. math::
p_a = \min(1, \exp(\log f(x_p) - \log f(x_c)))
where :math:`f(\cdot)` is the (unnormalised) target density.
Parameters
----------
x_curr : ndarray
Current Markov chain state.
log_f_curr : float
Logarithm of target invariant density for Markov chain evaluated at
current state.
log_f_func : function or callable object
Function which calculates the logarithm of the (potentially
unnormalised target density) at a specified state. Should have call
signature::
log_f = log_f_func(x)
where ``x`` is the state to evaluate the density at and ``log_f`` the
calculated log density.
prng : RandomState
Pseudo-random number generator object (either an instance of a
``numpy`` ``RandomState`` or an object with an equivalent
interface) used to randomly sample accept decisions in MH accept
step.
    prop_sampler : function or callable object
        Function which returns a proposed new parameter state drawn from the
        proposal distribution given a current parameter state. Should have
        a call signature::
            x_prop = prop_sampler(x_curr, prop_scales)
        where ``x_curr`` is the current parameter state vector (as a ndarray)
        which the proposal should be conditioned on, ``prop_scales`` is a
        ndarray of scale parameters for the proposal distribution (e.g.
        standard deviations for Gaussian proposals, may also be ``None`` if
        the proposal distribution has no free parameters) and ``x_prop`` is
        the returned random proposal distribution draw, again an ndarray.
    prop_scales : ndarray
        Array of values to set the scale parameters of the proposal
        distribution to.
Returns
-------
x_next : ndarray
Markov chain state after performing update - if previous state was
distributed according to target density this state will be too.
log_f_next : float
Logarithm of target density at updated state.
rejection : boolean
Whether the proposed update was rejected (True) or accepted (False).
"""
x_prop = prop_sampler(x_curr, prop_scales)
log_f_prop = log_f_func(x_prop)
if prng.uniform() < np.exp(log_f_prop - log_f_curr):
return x_prop, log_f_prop, False
else:
return x_curr, log_f_curr, True
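# A minimal sketch of driving `metropolis_step` with a Gaussian random-walk
# proposal on a standard normal target; the target, proposal and scale below
# are illustrative assumptions, not part of this module.
#
# >>> prng = np.random.RandomState(1234)
# >>> log_f = lambda x: -0.5 * np.sum(x ** 2)
# >>> prop = lambda x, scales: x + scales * prng.normal(size=x.shape)
# >>> x, log_f_x = np.zeros(2), log_f(np.zeros(2))
# >>> for _ in range(100):
# ...     x, log_f_x, rejected = metropolis_step(
# ...         x, log_f_x, log_f, prng, prop, prop_scales=0.5)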
def met_hastings_step(x_curr, log_f_curr, log_f_func, prng, prop_sampler,
prop_params, log_prop_density):
""" Performs single Metropolis-Hastings Markov chain update.
Proposed state update is sampled from proposal sampler given current state
and then proposed update accepted with probability
.. math::
p_a = \min(1, \exp(\log f(x_p) + \log q(x_c | x_p) -
\log f(x_c) - \log q(x_p | x_c)))
where :math:`f(\cdot)` is the (unnormalised) target density and
:math:`q(\cdot | \cdot)` is the potentially asymmetric proposal density.
Parameters
----------
x_curr : ndarray
Current Markov chain state.
log_f_curr : float
Logarithm of target invariant density for Markov chain evaluated at
current state.
log_f_func : function or callable object
Function which calculates the logarithm of the (potentially
unnormalised target density) at a specified state. Should have call
signature::
log_f = log_f_func(x)
where ``x`` is the state to evaluate the density at and ``log_f`` the
calculated log density.
prng : RandomState
Pseudo-random number generator object (either an instance of a
``numpy`` ``RandomState`` or an object with an equivalent
interface) used to randomly sample accept decisions in MH accept
step.
prop_sampler : function or callable object
Function which returns a proposed new parameter state drawn from
proposal distribution given a current parameter state. Should have
a call signature::
x_prop = prop_sampler(x_curr, prop_params)
where ``x_curr`` is the current parameter state vector (as a ndarray)
which the proposal should be conditioned on, ``prop_params`` is a
ndarray of parameters for the proposal distribution (e.g. standard
deviation for Gaussian proposals, may also be ``None`` if no free
parameters in proposal distribution) and ``x_prop`` is the returned
        random proposal distribution draw, again an ndarray.
prop_params : ndarray
Array of values to set the parameters of the proposal distribution to.
log_prop_density : function or callable object
Function which returns the logarithm of the proposal distribution
density at a specified state given a current state and set of
parameters. Should have a call signature::
log_prop_dens = log_prop_density(x_prop, x_curr, prop_params)
where ``x_prop`` is the proposed state to evaluate the log density at,
``x_curr`` is the state to condition the proposal distribution on,
``prop_params`` are the values of any free parameters in the proposal
distribution density function and ``log_prop_dens`` is the calculated
log proposal density value.
Returns
-------
x_next : ndarray
Markov chain state after performing update - if previous state was
distributed according to target density this state will be too.
log_f_next : float
Logarithm of target density at updated state.
rejection : boolean
Whether the proposed update was rejected (True) or accepted (False).
"""
x_prop = prop_sampler(x_curr, prop_params)
log_f_prop = log_f_func(x_prop)
log_prop_dens_fwd = log_prop_density(x_prop, x_curr, prop_params)
log_prop_dens_bwd = log_prop_density(x_curr, x_prop, prop_params)
accept_prob = np.exp(log_f_prop + log_prop_dens_bwd -
log_f_curr - log_prop_dens_fwd)
if prng.uniform() < accept_prob:
return x_prop, log_f_prop, False
else:
return x_curr, log_f_curr, True
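# A minimal sketch of `met_hastings_step` with an asymmetric multiplicative
# log-normal proposal and its log density; the Exponential(1) target and all
# names below are illustrative assumptions, not part of this module.
#
# >>> prng = np.random.RandomState(0)
# >>> log_f = lambda x: -np.sum(x)                     # target on x > 0
# >>> prop = lambda x, s: x * np.exp(s * prng.normal(size=x.shape))
# >>> def log_q(x_to, x_from, s):
# ...     return np.sum(-0.5 * (np.log(x_to / x_from) / s) ** 2
# ...                   - np.log(x_to))
# >>> x = np.ones(3)
# >>> x, log_f_x, rejected = met_hastings_step(
# ...     x, log_f(x), log_f, prng, prop, 0.5, log_q)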
def metropolis_indepedence_step(x_curr, log_f_curr, log_f_func, prng,
prop_sampler, prop_params=None,
log_prop_density=None):
""" Performs single Metropolis indepedence sampler Markov chain update.
Two modes of operation. If ``log_prop_density`` is specified it is assumed
the target density is of the form
.. math::
\pi(x) \propto f(x)
The proposed state update is then sampled independently from the proposal
distribution :math:`q(x)` and the proposed update is accepted with
probability
.. math::
p_a = \min(1, \exp(\log f(x_p) + \log q(x_c) -
\log f(x_c) - \log q(x_p)))
where :math:`f(\cdot)` is the (unnormalised) target density and
:math:`q(\cdot)` is the proposal density.
Alternatively if ``log_prop_density`` is not specified it is assumed
the target density is of the form
.. math::
\pi(x) \propto f(x) q(x)
where :math:`q(x)` is a 'prior' density which the ``prop_sampler`` object
returns a sample from. In this case the acceptance probability has the form
.. math::
        p_a = \min(1, \exp(\log f(x_p) + \log q(x_p) + \log q(x_c) -
                   \log f(x_c) - \log q(x_c) - \log q(x_p)))
which simplifies to
.. math::
p_a = \min(1, \exp(\log f(x_p) - \log f(x_c)))
i.e. the :math:`q` terms cancel.
Parameters
----------
x_curr : ndarray
Current Markov chain state.
log_f_curr : float
One of
- Logarithm of the (potentially unnormalised) target invariant
density for Markov chain evaluated at current state if
``log_prop_density != None``.
- Logarithm of 'non-prior' factor in target invariant density
evaluated at the current state if ``log_prop_density == None``,
i.e. the factor which the density which ``prop_sampler`` returns a
sample from is multiplied by in the target density.
log_f_func : function or callable object
One of
- Function which calculates the logarithm of the (potentially
unnormalised) target invariant density for Markov chain at a
specified state if ``log_prop_density != None``.
- Function which calculates the logarithm of 'non-prior' factor in
the (potentially unnormalised) target invariant density at a
specified state if ``log_prop_density == None``.
Should have call signature::
log_f = log_f_func(x)
where ``x`` is the state to evaluate the density at and ``log_f`` the
calculated log density.
prng : RandomState
Pseudo-random number generator object (either an instance of a
``numpy`` ``RandomState`` or an object with an equivalent
interface) used to randomly sample accept decisions in MH accept
step.
prop_sampler : function or callable object
Function which returns a proposed new parameter state drawn
independently from the proposal distribution. Should have a call
signature::
x_prop = prop_sampler(prop_params)
if ``prop_params`` are specified or::
x_prop = prop_sampler()
if ``prop_params == None``.
prop_params : ndarray
Array of values to set the parameters of the proposal distribution to.
May also be set to ``None`` if proposal distribution has no free
parameters to set.
log_prop_density : function or callable object or None
Function which returns the logarithm of the proposal distribution
density at a specified state and optionally a set of parameters.
Should have a call signature if ``prop_params != None``::
log_prop_dens = log_prop_density(x_prop, prop_params)
or if ``prop_params == None``::
log_prop_dens = log_prop_density(x_prop)
where ``x_prop`` is the proposed state to evaluate the log density at,
and ``log_prop_dens`` is the calculated log proposal density value.
May also be set to ``None`` if second mode of operation is being used
as described above.
Returns
-------
x_next : ndarray
Markov chain state after performing update - if previous state was
distributed according to target density this state will be too.
log_f_next : float
Logarithm of target density at updated state.
rejection : boolean
Whether the proposed update was rejected (True) or accepted (False).
"""
if prop_params:
x_prop = prop_sampler(prop_params)
else:
x_prop = prop_sampler()
log_f_prop = log_f_func(x_prop)
if log_prop_density:
if prop_params:
log_prop_dens_fwd = log_prop_density(x_prop, prop_params)
log_prop_dens_bwd = log_prop_density(x_curr, prop_params)
else:
log_prop_dens_fwd = log_prop_density(x_prop)
log_prop_dens_bwd = log_prop_density(x_curr)
accept_prob = np.exp(log_f_prop + log_prop_dens_bwd -
log_f_curr - log_prop_dens_fwd)
else:
accept_prob = np.exp(log_f_prop - log_f_curr)
if prng.uniform() < accept_prob:
return x_prop, log_f_prop, False
else:
return x_curr, log_f_curr, True
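# A minimal sketch of the second mode of operation described above, where
# `prop_sampler` draws from a N(0, I) 'prior' factor and `log_f_func`
# supplies only the non-prior (likelihood-like) factor; all names below are
# illustrative assumptions.
#
# >>> prng = np.random.RandomState(0)
# >>> prior_sample = lambda: prng.normal(size=3)
# >>> log_lik = lambda x: -0.5 * np.sum((x - 1.) ** 2)
# >>> x = prior_sample()
# >>> x, log_f_x, rejected = metropolis_indepedence_step(
# ...     x, log_lik(x), log_lik, prng, prior_sample)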
class MaximumIterationsExceededError(Exception):
""" Error raised when iterations of a loop exceeds a predefined limit. """
pass
def elliptical_slice_step(x_curr, log_f_curr, log_f_func, prng,
gaussian_sample, max_slice_iters=1000):
""" Performs single elliptical slice sampling update.
Markov chain update for a target density of the form
.. math::
\pi(x) \propto N(x | 0, \Sigma) f(x)
where :math:`N(x | 0, \Sigma)` represents a zero-mean multivariate
Gaussian density with covariance matrix :math:`\Sigma` and :math:`f(x)`
is some non-Gaussian factor in the target density (e.g. a likelihood).
**Reference:**
`Elliptical slice sampling`, Murray, Adams and Mackay (2010)
Parameters
----------
x_curr : ndarray
Current Markov chain state.
log_f_curr : float
Logarithm of the non-Gaussian target density factor evaluated at
current state.
log_f_func : function or callable object
Function which calculates the logarithm of the non-Gaussian target
density factor at a specified state. Should have call signature::
log_f = log_f_func(x)
where ``x`` is the state to evaluate the density at and ``log_f`` the
calculated log density.
prng : RandomState
Pseudo-random number generator object (either an instance of a
``numpy`` ``RandomState`` or an object with an equivalent
interface).
gaussian_sample : ndarray
Independent sample from the Gaussian factor in the target density
with zero mean and covariance matrix :math:`\Sigma`.
    max_slice_iters : integer
        Maximum number of elliptical slice shrinking iterations to perform
        before terminating and raising a ``MaximumIterationsExceededError``
        exception. This should be set to a relatively large value (e.g. the
        default is 1000) which is significantly larger than the expected
        number of slice shrinking iterations, so that this exception is only
        raised when there is some error condition, e.g. a bug in the
        implementation of ``log_f_func`` which would otherwise cause the
        shrinking loop to never terminate.
Returns
-------
x_next : ndarray
Markov chain state after performing update - if previous state was
distributed according to target density this state will be too.
log_f_next : float
Logarithm of non-Gaussian factor in target density at updated state.
Raises
------
MaximumIterationsExceededError
Raised when slice shrinking loop does not terminate within the
specified limit.
"""
# draw random log slice height between -infinity and log_f_curr
log_y = log_f_curr + np.log(prng.uniform())
    # draw first proposed slice angle and use to define initial bracket
phi = prng.uniform() * 2. * np.pi
phi_min = phi - 2. * np.pi
phi_max = phi
i = 0
while i < max_slice_iters:
# calculate proposed state on ellipse defined by Gaussian sample and
# slice angle and calculate logarithm of non-Gaussian factor
x_prop = x_curr * np.cos(phi) + gaussian_sample * np.sin(phi)
log_f_prop = log_f_func(x_prop)
        # check if proposed state is on the slice; if not, shrink the bracket
if log_f_prop > log_y:
return x_prop, log_f_prop
elif phi < 0:
phi_min = phi
elif phi > 0:
phi_max = phi
else:
warnings.warn('Slice collapsed to current value')
return x_curr, log_f_curr
# draw new slice angle from updated bracket
phi = phi_min + prng.uniform() * (phi_max - phi_min)
i += 1
raise MaximumIterationsExceededError(
'Exceed maximum slice iterations: '
'i={0}, phi_min={1}, phi_max={2}, log_f_prop={3}, log_f_curr={4}'
.format(i, phi_min, phi_max, log_f_prop, log_f_curr))
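# A minimal sketch of one elliptical slice update for a target of the form
# N(x | 0, I) * f(x), using an identity covariance so the required Gaussian
# sample is a standard normal draw; the likelihood factor below is an
# illustrative assumption.
#
# >>> prng = np.random.RandomState(42)
# >>> log_lik = lambda x: -0.5 * np.sum((x - 2.) ** 2)
# >>> x = prng.normal(size=4)
# >>> x, log_f_x = elliptical_slice_step(
# ...     x, log_lik(x), log_lik, prng, gaussian_sample=prng.normal(size=4))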
def linear_slice_step(x_curr, log_f_curr, log_f_func, slice_width, prng,
max_steps_out=0, max_slice_iters=1000):
""" Performs single linear slice sampling update.
Performs slice sampling along some line in the target distribution state
space. This line might be axis-aligned corresponding to sampling along
only one of the dimensions of the target distribution or some arbitrary
linear combination of the dimensions.
    The first step in a slice sampling update is to randomly sample a slice
    height between 0 and the (potentially unnormalised) density at the current
    Markov chain state. The set of all points on the line with a density above
    this slice height is defined as the current slice, and moving to a state
    corresponding to a point drawn uniformly from this slice leaves the target
    distribution invariant. To achieve this, a bracket of a specified width is
    first randomly positioned around the current point on the line, optionally
    stepping out this bracket until its ends lie outside the slice. Points are
    then repeatedly drawn uniformly from the current bracket and tested to see
    if they are in the slice (i.e. have a density above the slice height); if
    a point is in the slice it is returned as the new state, otherwise the
    rejected point is used to adaptively shrink the slice bracket while
    maintaining the reversibility of the algorithm.
**Reference:**
`Slice sampling`, Neal (2003)
Parameters
----------
x_curr : ndarray
Point on line corresponding to current Markov chain state.
log_f_curr : float
Logarithm of the potentially unnormalised target density evaluated at
current state.
log_f_func : function or callable object
Function which calculates the logarithm of the potentially unnormalised
target density at a point on the line. Should have call signature::
log_f = log_f_func(x)
where ``x`` is the position on the line to evaluate the density at and
``log_f`` the calculated log density.
slice_width : float
Initial slice bracket width with bracket of this width being randomly
positioned around current point.
prng : RandomState
Pseudo-random number generator object (either an instance of a
``numpy`` ``RandomState`` or an object with an equivalent
interface).
max_steps_out : integer
Maximum number of stepping out iterations to perform (default 0). If
non-zero then the initial slice bracket is linearly 'stepped-out' in
positive and negative directions by ``slice_width`` each time, until
either the slice bracket ends are outside the slice or the maximum
number of steps has been reached.
max_slice_iters : integer
Maximum number of slice bracket shrinking iterations to perform
        before terminating and raising a ``MaximumIterationsExceededError``
exception. This should be set to a relatively large value (e.g. the
default is 1000) which is significantly larger than the expected number
of slice shrinking iterations so that this exception is only raised
        when there is some error condition, e.g. a bug in the implementation
        of ``log_f_func`` which would otherwise cause the shrinking loop to
        never terminate.
Returns
-------
x_next : ndarray
Point on line corresponding to new Markov chain state after performing
update - if previous state was distributed according to target density
this state will be too.
log_f_next : float
Logarithm of target density at updated state.
Raises
------
MaximumIterationsExceededError
Raised when slice shrinking loop does not terminate within the
specified limit.
"""
# draw random log slice height between -infinity and log_f_curr
log_y = np.log(prng.uniform()) + log_f_curr
    # randomly position initial slice bracket of width slice_width around current point
x_min = x_curr - slice_width * prng.uniform()
x_max = x_min + slice_width
# step out bracket if non-zero maximum steps out
if max_steps_out > 0:
# randomly split maximum number of steps between up and down steps
# to ensure reversibility
steps_down = np.round(prng.uniform() * max_steps_out)
steps_up = max_steps_out - steps_down
s = 0
while s < steps_down and log_y < log_f_func(x_min):
x_min -= slice_width
s += 1
s = 0
while s < steps_up and log_y < log_f_func(x_max):
x_max += slice_width
s += 1
i = 0
while i < max_slice_iters:
# draw new proposed point randomly on current slice bracket and
# calculate log density at proposed point
x_prop = x_min + (x_max - x_min) * prng.uniform()
log_f_prop = log_f_func(x_prop)
        # check if proposed state is on the slice; if not, shrink the bracket
if log_f_prop > log_y:
return x_prop, log_f_prop
elif x_prop < x_curr:
x_min = x_prop
elif x_prop > x_curr:
x_max = x_prop
else:
warnings.warn('Slice collapsed to current value')
return x_curr, log_f_curr
i += 1
raise MaximumIterationsExceededError(
        'Exceeded maximum slice iterations: '
'i={0}, x_min={1}, x_max={2}, log_f_prop={3}, log_f_curr={4}'
.format(i, x_min, x_max, log_f_prop, log_f_curr))
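# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): draws a short
# chain from a one-dimensional standard normal target with
# ``linear_slice_step``. The helper name ``_demo_linear_slice_chain`` and the
# chosen slice width / seed are assumptions made purely for illustration;
# ``np`` is the module-level numpy import used throughout this module.
# ---------------------------------------------------------------------------
def _demo_linear_slice_chain(n_samples=1000, seed=0):
    prng = np.random.RandomState(seed)
    def log_f(x):
        # standard normal log density up to an additive constant
        return -0.5 * float(np.sum(x ** 2))
    x = np.zeros(1)
    log_f_x = log_f(x)
    samples = np.empty((n_samples, 1))
    for s in range(n_samples):
        x, log_f_x = linear_slice_step(
            x, log_f_x, log_f, slice_width=2., prng=prng, max_steps_out=5)
        samples[s] = x
    return samples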
|
|
"""views for the :mod:`grid` app"""
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import HttpResponseRedirect, Http404, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render
from grid.forms import ElementForm, FeatureForm, GridForm, GridPackageForm
from grid.models import Element, Feature, Grid, GridPackage
from package.models import Package
from package.forms import PackageForm
from package.views import repo_data_for_js
def build_element_map(elements):
# Horrifying two-level dict due to needing to use hash() function later
element_map = {}
for element in elements:
element_map.setdefault(element.feature_id, {})
element_map[element.feature_id][element.grid_package_id] = element
return element_map
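# Illustrative sketch (an assumption, not part of the original app): how the
# two-level dict built by ``build_element_map`` is typically consumed when
# rendering one grid cell per (feature, package) pair; an empty cell is just
# a missing key.
def _demo_element_lookup(element_map, feature, grid_package):
    """Return the Element for this grid cell, or None when the cell is empty."""
    return element_map.get(feature.pk, {}).get(grid_package.pk)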
def grids(request, template_name="grid/grids.html"):
"""lists grids
Template context:
* ``grids`` - all grid objects
"""
# annotations providing bad counts
#grids = Grid.objects.annotate(gridpackage_count=Count('gridpackage'), feature_count=Count('feature'))
return render(request, template_name, {'grids': Grid.objects.all(),})
def grid_detail_landscape(request, slug, template_name="grid/grid_detail2.html"):
"""displays a grid in detail
Template context:
* ``grid`` - the grid object
* ``elements`` - elements of the grid
* ``features`` - feature set used in the grid
* ``grid_packages`` - packages involved in the current grid
"""
grid = get_object_or_404(Grid, slug=slug)
features = grid.feature_set.all()
grid_packages = grid.grid_packages
elements = Element.objects.all() \
.filter(feature__in=features,
grid_package__in=grid_packages)
element_map = build_element_map(elements)
# These attributes are how we determine what is displayed in the grid
    default_attributes = [
        ('repo_description', 'Description'),
        ('category', 'Category'),
        ('pypi_downloads', 'Downloads'),
        ('last_updated', 'Last Updated'),
        ('pypi_version', 'Version'),
        ('repo', 'Repo'),
        ('commits_over_52', 'Commits'),
        ('repo_watchers', 'Repo watchers'),
        ('repo_forks', 'Forks'),
        ('participant_list', 'Participants'),
        ('license_latest', 'License'),
    ]
return render(request, template_name, {
'grid': grid,
'features': features,
'grid_packages': grid_packages,
'attributes': default_attributes,
'elements': element_map,
})
def grid_detail_feature(request, slug, feature_id, bogus_slug, template_name="grid/grid_detail_feature.html"):
"""a slightly more focused view than :func:`grid.views.grid_detail`
shows comparison for only one feature, and does not show the basic
grid parameters
Template context is the same as in :func:`grid.views.grid_detail`
"""
grid = get_object_or_404(Grid, slug=slug)
features = grid.feature_set.filter(id=feature_id)
if not features.count():
raise Http404
grid_packages = grid.gridpackage_set.select_related('gridpackage')
elements = Element.objects.all() \
.filter(feature__in=features,
grid_package__in=grid_packages)
element_map = build_element_map(elements)
return render(
request,
template_name,
{
'grid': grid,
'feature': features[0],
'grid_packages': grid_packages,
'elements': element_map,
}
)
@login_required
def add_grid(request, template_name="grid/add_grid.html"):
"""Creates a new grid, requires user to be logged in.
Works for both GET and POST request methods
Template context:
* ``form`` - an instance of :class:`~app.grid.forms.GridForm`
"""
if not request.user.get_profile().can_add_grid:
return HttpResponseForbidden("permission denied")
new_grid = Grid()
form = GridForm(request.POST or None, instance=new_grid)
if form.is_valid():
new_grid = form.save()
return HttpResponseRedirect(reverse('grid', kwargs={'slug':new_grid.slug}))
return render(request, template_name, { 'form': form })
@login_required
def edit_grid(request, slug, template_name="grid/edit_grid.html"):
"""View to modify the grid, handles GET and POST requests.
This view requires user to be logged in.
Template context:
* ``form`` - instance of :class:`grid.forms.GridForm`
"""
if not request.user.get_profile().can_edit_grid:
return HttpResponseForbidden("permission denied")
grid = get_object_or_404(Grid, slug=slug)
form = GridForm(request.POST or None, instance=grid)
if form.is_valid():
grid = form.save()
message = "Grid has been edited"
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('grid', kwargs={'slug': grid.slug}))
return render(request, template_name, { 'form': form, 'grid': grid } )
@login_required
def add_feature(request, grid_slug, template_name="grid/add_feature.html"):
"""Adds a feature to the grid, accepts GET and POST requests.
Requires user to be logged in
Template context:
* ``form`` - instance of :class:`grid.forms.FeatureForm` form
* ``grid`` - instance of :class:`grid.models.Grid` model
"""
if not request.user.get_profile().can_add_grid_feature:
return HttpResponseForbidden("permission denied")
grid = get_object_or_404(Grid, slug=grid_slug)
feature = Feature()
form = FeatureForm(request.POST or None, instance=feature)
if form.is_valid():
feature = Feature(
grid=grid,
title=request.POST['title'],
            description=request.POST['description']
)
feature.save()
return HttpResponseRedirect(reverse('grid', kwargs={'slug':feature.grid.slug}))
return render(request, template_name, { 'form': form,'grid':grid })
@login_required
def edit_feature(request, id, template_name="grid/edit_feature.html"):
"""edits feature on a grid - this view has the same
semantics as :func:`grid.views.add_feature`.
Requires the user to be logged in.
"""
if not request.user.get_profile().can_edit_grid_feature:
return HttpResponseForbidden("permission denied")
feature = get_object_or_404(Feature, id=id)
form = FeatureForm(request.POST or None, instance=feature)
if form.is_valid():
feature = form.save()
return HttpResponseRedirect(reverse('grid', kwargs={'slug': feature.grid.slug}))
return render(request, template_name, { 'form': form,'grid': feature.grid })
@permission_required('grid.delete_feature')
def delete_feature(request, id, template_name="grid/edit_feature.html"):
# do not need to check permission via profile because
# we default to being strict about deleting
"""deletes a feature from the grid, ``id`` is id of the
:class:`grid.models.Feature` model that is to be deleted
Requires permission `grid.delete_feature`.
Redirects to the parent :func:`grid.views.grid_detail`
"""
feature = get_object_or_404(Feature, id=id)
Element.objects.filter(feature=feature).delete()
feature.delete()
return HttpResponseRedirect(reverse('grid', kwargs={'slug': feature.grid.slug}))
@permission_required('grid.delete_gridpackage')
def delete_grid_package(request, id, template_name="grid/edit_feature.html"):
"""Deletes package from the grid, ``id`` is the id of the
:class:`grid.models.GridPackage` instance
Requires permission ``grid.delete_gridpackage``.
Redirects to :func:`grid.views.grid_detail`.
"""
# do not need to check permission via profile because
# we default to being strict about deleting
package = get_object_or_404(GridPackage, id=id)
Element.objects.filter(grid_package=package).delete()
package.delete()
return HttpResponseRedirect(reverse('grid', kwargs={'slug': package.grid.slug}))
@login_required
def edit_element(request, feature_id, package_id, template_name="grid/edit_element.html"):
if not request.user.get_profile().can_edit_grid_element:
return HttpResponseForbidden("permission denied")
feature = get_object_or_404(Feature, pk=feature_id)
grid_package = get_object_or_404(GridPackage, pk=package_id)
# Sanity check to make sure both the feature and grid_package are related to
# the same grid!
if feature.grid_id != grid_package.grid_id:
raise Http404
element, created = Element.objects.get_or_create(
grid_package=grid_package,
feature=feature
)
form = ElementForm(request.POST or None, instance=element)
if form.is_valid():
element = form.save()
return HttpResponseRedirect(reverse('grid', kwargs={'slug': feature.grid.slug}))
return render(request, template_name, {
'form': form,
'feature':feature,
'package':grid_package.package,
'grid':feature.grid
})
@login_required
def add_grid_package(request, grid_slug, template_name="grid/add_grid_package.html"):
"""Add an existing package to this grid."""
if not request.user.get_profile().can_add_grid_package:
return HttpResponseForbidden("permission denied")
grid = get_object_or_404(Grid, slug=grid_slug)
grid_package = GridPackage()
form = GridPackageForm(request.POST or None, instance=grid_package)
if form.is_valid():
package = get_object_or_404(Package, id=request.POST['package'])
try:
GridPackage.objects.get(grid=grid, package=package)
message = "Sorry, but '%s' is already in this grid." % package.title
messages.add_message(request, messages.ERROR, message)
except GridPackage.DoesNotExist:
grid_package = GridPackage(
grid=grid,
package=package
)
grid_package.save()
redirect = request.POST.get('redirect','')
if redirect:
return HttpResponseRedirect(redirect)
return HttpResponseRedirect(reverse('grid', kwargs={'slug':grid.slug}))
return render(request, template_name, {
'form': form,
'grid': grid
})
@login_required
def add_new_grid_package(request, grid_slug, template_name="package/package_form.html"):
"""Add a package to a grid that isn't yet represented on the site."""
if not request.user.get_profile().can_add_grid_package:
return HttpResponseForbidden("permission denied")
grid = get_object_or_404(Grid, slug=grid_slug)
new_package = Package()
form = PackageForm(request.POST or None, instance=new_package)
if form.is_valid():
new_package = form.save()
GridPackage.objects.create(
grid=grid,
package=new_package
)
return HttpResponseRedirect(reverse("grid", kwargs={"slug":grid_slug}))
return render(request, template_name, {"form": form,"repo_data": repo_data_for_js(),"action": "add",})
def ajax_grid_list(request, template_name="grid/ajax_grid_list.html"):
q = request.GET.get('q','')
grids = []
if q:
grids = Grid.objects.filter(title__istartswith=q)
package_id = request.GET.get('package_id','')
if package_id:
grids = grids.exclude(gridpackage__package__id=package_id)
return render(request, template_name, {'grids': grids})
def grid_detail(request, slug, template_name="grid/grid_detail.html"):
"""displays a grid in detail
Template context:
* ``grid`` - the grid object
* ``elements`` - elements of the grid
* ``features`` - feature set used in the grid
* ``grid_packages`` - packages involved in the current grid
"""
grid = get_object_or_404(Grid, slug=slug)
features = grid.feature_set.all()
grid_packages = grid.grid_packages
elements = Element.objects.all() \
.filter(feature__in=features,
grid_package__in=grid_packages)
element_map = build_element_map(elements)
# These attributes are how we determine what is displayed in the grid
    default_attributes = [
        ('repo_description', 'Description'),
        ('category', 'Category'),
        ('pypi_downloads', 'Downloads'),
        ('last_updated', 'Last Updated'),
        ('pypi_version', 'Version'),
        ('repo', 'Repo'),
        ('commits_over_52', 'Commits'),
        ('repo_watchers', 'Repo watchers'),
        ('repo_forks', 'Forks'),
        ('participant_list', 'Participants'),
        ('license_latest', 'License'),
    ]
return render(request, template_name, {
'grid': grid,
'features': features,
'grid_packages': grid_packages,
'attributes': default_attributes,
'elements': element_map,
})
|
|
import json
import os
import decimal
import bson
import itertools
import datetime
import numpy
from numpy.ma import MaskedArray
from numpy.ma.core import MaskedConstant
from pymongo import ASCENDING
from bdo_platform.settings import DATASET_DIR
from aggregator.models import Dataset as AgDataset, Variable as AgVariable, Dimension as AgDimension
class BaseVariable(object):
name = ''
title = ''
unit = None
def to_json(self):
return {
'name': self.name,
'title': self.title,
'unit': self.unit,
}
class Dimension(BaseVariable):
min = None
max = None
step = 1
values = None
axis = None
def to_json(self):
document = super(Dimension, self).to_json()
document.update({
'min': self.min,
'max': self.max,
'step': self.step,
'values': self.values,
'axis': self.axis,
})
return encode_document(document)
class Variable(BaseVariable):
scale_factor = 1
add_offset = 0
cell_methods = []
dimensions = []
type_of_analysis = None
extra_info = {}
def to_json(self):
document = super(Variable, self).to_json()
document.update({
'scale_factor': self.scale_factor,
'add_offset': self.add_offset,
'cell_methods': self.cell_methods,
'type_of_analysis': self.type_of_analysis,
'extra_info': self.extra_info,
'dimensions': self.dimensions,
})
return encode_document(document)
class DatasetInfo(object):
title = ''
source = ''
description = ''
references = []
def to_json(self):
return encode_document({
'title': self.title,
'source': self.source,
'description': self.description,
'references': self.references,
})
def encode_document(obj):
for key in obj.keys():
if isinstance(obj[key], numpy.integer):
obj[key] = int(obj[key])
elif isinstance(obj[key], numpy.floating):
obj[key] = float(obj[key])
elif isinstance(obj[key], numpy.ndarray):
obj[key] = obj[key].tolist()
return obj
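# Illustrative sketch (an assumption, not part of the original module): how the
# metadata classes above are typically populated before serialisation; the
# attribute values below are made up for the example.
def _demo_dimension_document():
    lat = Dimension()
    lat.name, lat.title, lat.unit = 'lat', 'Latitude', 'degrees_north'
    lat.min, lat.max, lat.step, lat.axis = -90.0, 90.0, 0.5, 'Y'
    # to_json() merges the BaseVariable fields with the dimension-specific
    # ones and passes the result through encode_document()
    return lat.to_json()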
class BaseConverter(object):
name = None
_dataset = None
_variables = []
_dimensions = []
_data = []
MAX_DOCUMENT_SIZE = 16000000 # 16M limit on BSON documents
AVERAGE_ELEMENT_SIZE = 16
@property
def dataset(self):
raise NotImplementedError('`dataset` getter was not implemented')
@property
def variables(self):
raise NotImplementedError('`variables` getter was not implemented')
@property
def dimensions(self):
raise NotImplementedError('`dimensions` getter was not implemented')
def variable_iter(self, v_name):
raise NotImplementedError('`variable_iter` optional method is not implemented')
def data(self, v_name):
raise NotImplementedError('variable data getter was not implemented')
def count(self, v_name):
total = 1
dt = self.data(v_name)
while True:
try:
total *= len(dt)
dt = dt[0]
            except (TypeError, IndexError):
                # reached a scalar element (or an empty dimension)
                break
return total
def get_value(self, v_name, comb):
value = self.data(v_name=v_name)
for idx, _ in enumerate(comb):
try:
value = value[comb[idx][0]]
except IndexError:
raise ValueError('Could not get value for this combination')
if type(value) is MaskedConstant:
return None
if str(value) == '--':
return None
return value
def normalize(self, dimension, value):
# default normalization changes nothing
return value
def get_variable(self, v_name):
try:
return [v for v in self.variables if v.name == v_name][0]
except IndexError:
return None
def get_dimension(self, d_name):
try:
return [d for d in self.dimensions if d.name == d_name][0]
except IndexError:
return None
@staticmethod
def full_input_path(filename=None):
source_path = os.path.join(DATASET_DIR, 'source')
if not os.path.isdir(source_path):
os.mkdir(source_path)
if filename is None:
return source_path
return os.path.join(source_path, filename)
@staticmethod
def full_output_path(filename=None):
dist_path = os.path.join(DATASET_DIR, 'dist')
if not os.path.isdir(dist_path):
os.mkdir(dist_path)
if filename is None:
return dist_path
return os.path.join(dist_path, filename)
def store(self, target, stdout=None, update_dataset=None):
"""
        :param target: Either {'type': 'postgres', 'cursor': <Cursor>, 'with_indices': True|False} or
                       {'type': 'mongo', 'db': <Database>, 'with_indices': True|False}
        :param stdout: Optional output stream (e.g. a management command's ``self.stdout``)
        :param update_dataset: Existing aggregator dataset to update instead of creating a new one
        :return: The dataset record the data was inserted into (an aggregator ``Dataset`` for
                 postgres targets, the inserted dataset document id for mongo targets); a usage
                 sketch follows the class definition
"""
def insert(iv):
if target['type'] == 'postgres':
target['cursor'].execute('INSERT INTO %s VALUES %s;' % (agv.data_table_name, ','.join(iv)))
elif target['type'] == 'mongo':
target['db'][v.name].insert_many(iv)
def postgres_serialize(val):
if type(val) == datetime.datetime:
return "TIMESTAMP '%s'" % val.isoformat().replace('T', ' ')
else:
return str(val)
if 'type' not in target:
raise ValueError('Target type is required')
if target['type'] not in ['postgres', 'mongo']:
raise ValueError('Unsupported store target type')
agd = None
# add datasets, variables & their dimensions
try:
# get or create dataset
if update_dataset is not None:
agd = update_dataset
elif target['type'] == 'postgres':
agd = AgDataset.objects.create(title=self.dataset.title, source=self.dataset.source,
description=self.dataset.description, references=self.dataset.references)
elif target['type'] == 'mongo':
agd = target['db'].datasets.insert(self.dataset.to_json())
for v in self.variables:
                print(v.name)
v_existed = False
# get or create variable
if target['type'] == 'postgres':
try:
agv = AgVariable.objects.get(name=v.name, dataset=agd)
v_existed = True
except AgVariable.DoesNotExist:
agv = AgVariable.objects.create(name=v.name, title=v.title, unit=v.unit,
scale_factor=v.scale_factor, add_offset=v.add_offset,
cell_methods=v.cell_methods, type_of_analysis=v.type_of_analysis,
dataset=agd)
elif target['type'] == 'mongo':
v_doc = v.to_json()
v_doc['dataset_id'] = agd
agv = target['db'].variables.insert(v_doc)
# TODO get variable for updating dataset
dimensions = []
for dimension_name in v.dimensions:
for d in self.dimensions:
if d.name == dimension_name:
if target['type'] == 'postgres':
try:
agdim = AgDimension.objects.get(name=d.name, variable=agv)
except AgDimension.DoesNotExist:
agdim = AgDimension.objects.create(name=d.name, title=d.title, unit=d.unit,
min=decimal.Decimal(str(d.min)) if d.min is not None else None,
max=decimal.Decimal(str(d.max)) if d.max is not None else None,
step=decimal.Decimal(str(d.step))
if d.step is not None else None,
axis=d.axis,
variable=agv)
elif target['type'] == 'mongo':
d_doc = d.to_json()
d_doc['variable_id'] = agv
agdim = target['db'].dimensions.insert(d_doc)
dimensions.append(d)
break
# create data table for variable
if not v_existed:
if target['type'] == 'postgres':
agv.create_data_table(cursor=target['cursor'], with_indices=False)
# add data
try:
_iter = self.variable_iter(v.name)
except NotImplementedError:
dim_values = []
for dimension in dimensions:
if dimension.values:
dim_values.append([(k, x) for k, x in enumerate(dimension.values)])
continue
vv = []
x = dimension.min
idx = 0
while (dimension.step < 0 and x >= dimension.max) or \
((dimension.step >= 0 or dimension.step is None) and x <= dimension.max):
vv.append((idx, self.normalize(dimension, x)))
if dimension.step is None:
break
idx += 1
x += dimension.step
dim_values.append(vv)
_iter = itertools.product(*dim_values)
insert_values = []
progress = 0
total = self.count(v_name=v.name)
for comb in _iter:
error = False
value = None
try:
if comb[-1][1] != 'V':
raise ValueError()
value = comb[-1][0]
comb = comb[:-1]
except ValueError:
try:
value = self.get_value(v_name=v.name, comb=comb)
except ValueError:
error = True
progress += 1
if progress % 1000 == 0:
if stdout:
stdout.write("\r Adding data... %d%%" % (progress * 100 / total), ending='')
stdout.flush()
else:
print('%d%%' % (progress * 100 / total))
if error or (value is None):
continue
if target['type'] == 'postgres':
insert_values.append('(%s)' % ','.join([postgres_serialize(combi[1]) for combi in comb] +
[str(value)]))
elif target['type'] == 'mongo':
val_doc = {}
for idx, combi in enumerate(comb):
val_doc[dimensions[idx].name] = combi[1]
val_doc['value'] = value
insert_values.append(encode_document(val_doc))
if len(insert_values) == 1000:
insert(insert_values)
insert_values = []
if insert_values:
insert(insert_values)
insert_values = []
if target['type'] == 'mongo':
if 'with_indices' in target and target['with_indices']:
for d in dimensions:
target['db'][v.name].create_index([(d.name, ASCENDING)])
elif target['type'] == 'postgres':
# update value distributions
agv.update_distribution(cursor=target['cursor'])
# create indices
if 'with_indices' in target and target['with_indices'] and not v_existed:
agv.create_indices(cursor=target['cursor'])
if stdout:
stdout.write("\r Completed\t\t\t", ending='\n')
stdout.flush()
else:
print('completed')
return agd
except:
# if agd and type(agd) == AgDataset:
# agd.delete()
raise
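# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original module):
# `converter` is an instance of a concrete BaseConverter subclass and
# `mongo_db` a pymongo Database, e.g. MongoClient()['bdo'], mirroring the
# `target` format documented on BaseConverter.store above.
# ---------------------------------------------------------------------------
def _demo_store_to_mongo(converter, mongo_db):
    # insert every variable of the converter's dataset into MongoDB and
    # create one index per dimension on each variable collection
    return converter.store(target={'type': 'mongo', 'db': mongo_db, 'with_indices': True})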
|
|
from __future__ import annotations
from typing import cast
import warnings
import numpy as np
from pandas._libs.lib import (
NoDefault,
no_default,
)
from pandas._libs.missing import is_matching_na
import pandas._libs.testing as _testing
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
)
from pandas.core.algorithms import (
safe_sort,
take_nd,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.core.arrays.string_ import StringDtype
from pandas.io.formats.printing import pprint_thing
def assert_almost_equal(
left,
right,
check_dtype: bool | str = "equiv",
check_less_precise: bool | int | NoDefault = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both left and right are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
# https://github.com/python/mypy/issues/7642
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
# type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
rtol = atol = _get_tol_from_less_precise(
check_less_precise # type: ignore[arg-type]
)
if isinstance(left, Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
# if we have "equiv", this becomes True
check_dtype = bool(check_dtype)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
def _get_tol_from_less_precise(check_less_precise: bool | int) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
    5e-06
    >>> _get_tol_from_less_precise(True)
    0.0005
    >>> # Using check_less_precise as an int representing the decimal
    >>> # tolerance intended:
    >>> _get_tol_from_less_precise(2)
    0.005
    >>> _get_tol_from_less_precise(8)
    5e-09
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
        # Equivalent to setting check_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def assert_index_equal(
left: Index,
right: Index,
exact: bool | str = "equiv",
check_names: bool = True,
check_less_precise: bool | int | NoDefault = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
        Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas import testing as tm
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> tm.assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index") -> None:
if not exact:
return
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal("inferred_type", left, right, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if is_categorical_dtype(left.dtype) and is_categorical_dtype(right.dtype):
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
assert_index_equal(left.categories, right.categories, exact=exact)
return
assert_attr_equal("dtype", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_nd(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
# https://github.com/python/mypy/issues/7642
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
# type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
rtol = atol = _get_tol_from_less_precise(
check_less_precise # type: ignore[arg-type]
)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = Index(safe_sort(left))
right = Index(safe_sort(right))
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
# if we have "equiv", this becomes True
exact_bool = bool(exact)
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact_bool,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: bool | str = True, obj="Input"):
"""
Checks classes are equal.
"""
from pandas.core.indexes.numeric import NumericIndex
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if type(left) == type(right):
return
if exact == "equiv":
# accept equivalence of NumericIndex (sub-)classes
if isinstance(left, NumericIndex) and isinstance(right, NumericIndex):
return
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif is_matching_na(left_attr, right_attr):
# e.g. both np.nan, both NaT, both pd.NA, ...
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if (left_attr is pd.NA) ^ (right_attr is pd.NA):
result = False
elif not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
exact: bool | str
if isinstance(left.categories, RangeIndex) or isinstance(
right.categories, RangeIndex
):
exact = "equiv"
else:
# We still want to require exact matches for NumericIndex
exact = True
if check_category_order:
assert_index_equal(
left.categories, right.categories, obj=f"{obj}.categories", exact=exact
)
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
exact=exact,
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
    assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif (
is_categorical_dtype(left)
or isinstance(left, PandasDtype)
or isinstance(left, StringDtype)
):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif (
is_categorical_dtype(right)
or isinstance(right, PandasDtype)
or isinstance(right, StringDtype)
):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
    Check that the left and right numpy arrays are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
        Check dtype if both left and right are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
        Whether to compare numbers exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas import testing as tm
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> tm.assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
*,
check_index=True,
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
.. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
check_index : bool, default True
Whether to check index equivalence. If False, then compare only values.
.. versionadded:: 1.3.0
Examples
--------
>>> from pandas import testing as tm
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> tm.assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
if check_index:
# GH #38183
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
left_values = left._values
right_values = right._values
# Only check exact if dtype is numeric
if isinstance(left_values, ExtensionArray) and isinstance(
right_values, ExtensionArray
):
assert_extension_array_equal(
left_values,
right_values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
assert_numpy_array_equal(
left_values,
right_values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype_and_needs_i8_conversion(
left.dtype, right.dtype
) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
.. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
# GH #38183
# use check_index=False, because we do not want to run
# assert_index_equal for each column,
# as we already checked it for the whole dataframe before.
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
check_index=False,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (DatetimeIndex, TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
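# Illustrative sketch (not part of the original module): assert_equal dispatches
# on the type of ``left``, so one call covers several comparators. The objects
# below are made up purely for demonstration.
def _demo_assert_equal_dispatch():
    assert_equal(Index([1, 2, 3]), Index([1, 2, 3]))        # -> assert_index_equal
    assert_equal(Series([1.0, 2.0]), Series([1.0, 2.0]))    # -> assert_series_equal
    assert_equal(np.array([1, 2]), np.array([1, 2]))        # -> assert_numpy_array_equal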
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indexes compared equal; nothing further to check here
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
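# Illustrative sketch (not part of the original module): assert_copy passes for
# elements that compare equal but are distinct objects, and fails when the same
# object appears in both iterables.
def _demo_assert_copy():
    original = [[1, 2], [3, 4]]
    copied = [list(item) for item in original]  # equal values, new objects
    assert_copy(original, copied)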
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
"""
    Checks that we have the combination of an ExtensionArray dtype and
    a dtype that should be converted to int64.
Returns
-------
bool
Related to issue #37609
"""
return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
|
|
import warnings
import hashlib
import pymongo
import re
from pymongo.read_preferences import ReadPreference
from bson import ObjectId
from bson.dbref import DBRef
from mongoengine import signals
from mongoengine.common import _import_class
from mongoengine.base import (
DocumentMetaclass,
TopLevelDocumentMetaclass,
BaseDocument,
BaseDict,
BaseList,
EmbeddedDocumentList,
ALLOW_INHERITANCE,
get_document
)
from mongoengine.errors import ValidationError, InvalidQueryError, InvalidDocumentError
from mongoengine.queryset import (OperationError, NotUniqueError,
QuerySet, transform)
from mongoengine.connection import get_db, DEFAULT_CONNECTION_NAME
from mongoengine.context_managers import switch_db, switch_collection
import collections
__all__ = ('Document', 'EmbeddedDocument', 'DynamicDocument',
'DynamicEmbeddedDocument', 'OperationError',
'InvalidCollectionError', 'NotUniqueError', 'MapReduceDocument')
def includes_cls(fields):
""" Helper function used for ensuring and comparing indexes
"""
first_field = None
if len(fields):
if isinstance(fields[0], str):
first_field = fields[0]
elif isinstance(fields[0], (list, tuple)) and len(fields[0]):
first_field = fields[0][0]
return first_field == '_cls'
class InvalidCollectionError(Exception):
pass
class EmbeddedDocument(BaseDocument, metaclass=DocumentMetaclass):
"""A :class:`~mongoengine.Document` that isn't stored in its own
collection. :class:`~mongoengine.EmbeddedDocument`\ s should be used as
fields on :class:`~mongoengine.Document`\ s through the
:class:`~mongoengine.EmbeddedDocumentField` field type.
A :class:`~mongoengine.EmbeddedDocument` subclass may be itself subclassed,
to create a specialised version of the embedded document that will be
stored in the same collection. To facilitate this behaviour a `_cls`
    field is added to documents (hidden through the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of
`_cls` set :attr:`allow_inheritance` to ``False`` in the :attr:`meta`
dictionary.
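
    A minimal, illustrative sketch (the class and field names are examples
    only)::

        class Comment(EmbeddedDocument):
            content = StringField()

        class Page(Document):
            comments = ListField(EmbeddedDocumentField(Comment))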
"""
    __slots__ = ('_instance',)
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
def __init__(self, *args, **kwargs):
super(EmbeddedDocument, self).__init__(*args, **kwargs)
self._instance = None
self._changed_fields = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._data == other._data
return False
def __ne__(self, other):
return not self.__eq__(other)
def save(self, *args, **kwargs):
self._instance.save(*args, **kwargs)
def reload(self, *args, **kwargs):
self._instance.reload(*args, **kwargs)
class Document(BaseDocument, metaclass=TopLevelDocumentMetaclass):
"""The base class used for defining the structure and properties of
collections of documents stored in MongoDB. Inherit from this class, and
add fields as class attributes to define a document's structure.
Individual documents may then be created by making instances of the
:class:`~mongoengine.Document` subclass.
By default, the MongoDB collection used to store documents created using a
:class:`~mongoengine.Document` subclass will be the name of the subclass
converted to lowercase. A different collection may be specified by
providing :attr:`collection` to the :attr:`meta` dictionary in the class
definition.
A :class:`~mongoengine.Document` subclass may be itself subclassed, to
create a specialised version of the document that will be stored in the
same collection. To facilitate this behaviour a `_cls`
    field is added to documents (hidden through the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of
`_cls` set :attr:`allow_inheritance` to ``False`` in the :attr:`meta`
dictionary.
A :class:`~mongoengine.Document` may use a **Capped Collection** by
specifying :attr:`max_documents` and :attr:`max_size` in the :attr:`meta`
dictionary. :attr:`max_documents` is the maximum number of documents that
is allowed to be stored in the collection, and :attr:`max_size` is the
maximum size of the collection in bytes. If :attr:`max_size` is not
specified and :attr:`max_documents` is, :attr:`max_size` defaults to
10000000 bytes (10MB).
Indexes may be created by specifying :attr:`indexes` in the :attr:`meta`
dictionary. The value should be a list of field names or tuples of field
names. Index direction may be specified by prefixing the field names with
a **+** or **-** sign.
Automatic index creation can be disabled by specifying
:attr:`auto_create_index` in the :attr:`meta` dictionary. If this is set to
False then indexes will not be created by MongoEngine. This is useful in
production systems where index creation is performed as part of a
deployment system.
By default, _cls will be added to the start of every index (that
doesn't contain a list) if allow_inheritance is True. This can be
disabled by either setting cls to False on the specific index or
by setting index_cls to False on the meta dictionary for the document.
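
    An illustrative :attr:`meta` configuration (all names and values below are
    examples only)::

        class LogEntry(Document):
            message = StringField()
            meta = {
                'collection': 'log_entries',
                'max_documents': 1000,
                'max_size': 2000000,
                'indexes': ['-message'],
                'auto_create_index': False,
            }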
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
    __slots__ = ('__objects',)
def pk():
"""Primary key alias
"""
def fget(self):
return getattr(self, self._meta['id_field'])
def fset(self, value):
return setattr(self, self._meta['id_field'], value)
return property(fget, fset)
pk = pk()
@classmethod
def _get_db(cls):
"""Some Model using other db_alias"""
return get_db(cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME))
@classmethod
def _get_collection(cls):
"""Returns the collection for the document."""
if not hasattr(cls, '_collection') or cls._collection is None:
db = cls._get_db()
collection_name = cls._get_collection_name()
# Create collection as a capped collection if specified
if cls._meta['max_size'] or cls._meta['max_documents']:
# Get max document limit and max byte size from meta
max_size = cls._meta['max_size'] or 10000000 # 10MB default
max_documents = cls._meta['max_documents']
if collection_name in db.collection_names():
cls._collection = db[collection_name]
# The collection already exists, check if its capped
# options match the specified capped options
options = cls._collection.options()
if options.get('max') != max_documents or \
options.get('size') != max_size:
msg = (('Cannot create collection "%s" as a capped '
'collection as it already exists')
% cls._collection)
raise InvalidCollectionError(msg)
else:
# Create the collection as a capped collection
opts = {'capped': True, 'size': max_size}
if max_documents:
opts['max'] = max_documents
cls._collection = db.create_collection(
collection_name, **opts
)
else:
cls._collection = db[collection_name]
if cls._meta.get('auto_create_index', True):
cls.ensure_indexes()
return cls._collection
def modify(self, query={}, **update):
"""Perform an atomic update of the document in the database and reload
the document object using updated version.
Returns True if the document has been updated or False if the document
in the database doesn't match the query.
        .. note:: All unsaved changes that have been made to the document are
rejected if the method returns True.
:param query: the update will be performed only if the document in the
database matches the query
:param update: Django-style update keyword arguments
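
        For example (field names are illustrative)::

            post.modify(query={'version': 3}, set__title='New', inc__version=1)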
"""
if self.pk is None:
raise InvalidDocumentError("The document does not have a primary key.")
id_field = self._meta["id_field"]
query = query.copy() if isinstance(query, dict) else query.to_query(self)
if id_field not in query:
query[id_field] = self.pk
elif query[id_field] != self.pk:
raise InvalidQueryError("Invalid document modify query: it must modify only this document.")
updated = self._qs(**query).modify(new=True, **update)
if updated is None:
return False
for field in self._fields_ordered:
setattr(self, field, self._reload(field, updated[field]))
self._changed_fields = updated._changed_fields
self._created = False
return True
def save(self, force_insert=False, validate=True, clean=True,
write_concern=None, cascade=None, cascade_kwargs=None,
_refs=None, save_condition=None, **kwargs):
"""Save the :class:`~mongoengine.Document` to the database. If the
document already exists, it will be updated, otherwise it will be
created.
:param force_insert: only try to create a new document, don't allow
updates of existing documents
:param validate: validates the document; set to ``False`` to skip.
:param clean: call the document clean method, requires `validate` to be
True.
:param write_concern: Extra keyword arguments are passed down to
:meth:`~pymongo.collection.Collection.save` OR
:meth:`~pymongo.collection.Collection.insert`
which will be used as options for the resultant
``getLastError`` command. For example,
``save(..., write_concern={w: 2, fsync: True}, ...)`` will
wait until at least two servers have recorded the write and
will force an fsync on the primary server.
:param cascade: Sets the flag for cascading saves. You can set a
default by setting "cascade" in the document __meta__
        :param cascade_kwargs: (optional) kwargs dictionary to be passed through
to cascading saves. Implies ``cascade=True``.
:param _refs: A list of processed references used in cascading saves
:param save_condition: only perform save if matching record in db
satisfies condition(s) (e.g., version number)
.. versionchanged:: 0.5
In existing documents it only saves changed fields using
set / unset. Saves are cascaded and any
:class:`~bson.dbref.DBRef` objects that have changes are
saved as well.
.. versionchanged:: 0.6
Added cascading saves
.. versionchanged:: 0.8
            Cascade saves are optional and default to False. If you want
            fine-grained control you can turn them on by setting
            meta['cascade'] = True on the document. Also you can pass different kwargs to
the cascade save using cascade_kwargs which overwrites the
existing kwargs with custom values.
.. versionchanged:: 0.8.5
Optional save_condition that only overwrites existing documents
if the condition is satisfied in the current db record.
"""
signals.pre_save.send(self.__class__, document=self)
if validate:
self.validate(clean=clean)
if write_concern is None:
write_concern = {"w": 1}
doc = self.to_mongo()
created = ('_id' not in doc or self._created or force_insert)
signals.pre_save_post_validation.send(self.__class__, document=self,
created=created)
try:
collection = self._get_collection()
if self._meta.get('auto_create_index', True):
self.ensure_indexes()
if created:
if force_insert:
object_id = collection.insert(doc, **write_concern)
else:
object_id = collection.save(doc, **write_concern)
else:
object_id = doc['_id']
updates, removals = self._delta()
# Need to add shard key to query, or you get an error
if save_condition is not None:
select_dict = transform.query(self.__class__,
**save_condition)
else:
select_dict = {}
select_dict['_id'] = object_id
shard_key = self.__class__._meta.get('shard_key', tuple())
for k in shard_key:
actual_key = self._db_field_map.get(k, k)
select_dict[actual_key] = doc[actual_key]
def is_new_object(last_error):
if last_error is not None:
updated = last_error.get("updatedExisting")
if updated is not None:
return not updated
return created
update_query = {}
if updates:
update_query["$set"] = updates
if removals:
update_query["$unset"] = removals
if updates or removals:
upsert = save_condition is None
last_error = collection.update(select_dict, update_query,
upsert=upsert, **write_concern)
created = is_new_object(last_error)
if cascade is None:
cascade = self._meta.get(
'cascade', False) or cascade_kwargs is not None
if cascade:
kwargs = {
"force_insert": force_insert,
"validate": validate,
"write_concern": write_concern,
"cascade": cascade
}
if cascade_kwargs: # Allow granular control over cascades
kwargs.update(cascade_kwargs)
kwargs['_refs'] = _refs
self.cascade_save(**kwargs)
except pymongo.errors.DuplicateKeyError as err:
message = 'Tried to save duplicate unique keys (%s)'
raise NotUniqueError(message % str(err))
except pymongo.errors.OperationFailure as err:
message = 'Could not save document (%s)'
if re.match('^E1100[01] duplicate key', str(err)):
# E11000 - duplicate key error index
# E11001 - duplicate key on update
message = 'Tried to save duplicate unique keys (%s)'
raise NotUniqueError(message % str(err))
raise OperationError(message % str(err))
id_field = self._meta['id_field']
if created or id_field not in self._meta.get('shard_key', []):
self[id_field] = self._fields[id_field].to_python(object_id)
signals.post_save.send(self.__class__, document=self, created=created)
self._clear_changed_fields()
self._created = False
return self
def cascade_save(self, *args, **kwargs):
"""Recursively saves any references /
generic references on an objects"""
_refs = kwargs.get('_refs', []) or []
ReferenceField = _import_class('ReferenceField')
GenericReferenceField = _import_class('GenericReferenceField')
for name, cls in list(self._fields.items()):
if not isinstance(cls, (ReferenceField,
GenericReferenceField)):
continue
ref = self._data.get(name)
if not ref or isinstance(ref, DBRef):
continue
if not getattr(ref, '_changed_fields', True):
continue
ref_id = "%s,%s" % (ref.__class__.__name__, str(ref._data))
if ref and ref_id not in _refs:
_refs.append(ref_id)
kwargs["_refs"] = _refs
ref.save(**kwargs)
ref._changed_fields = []
@property
def _qs(self):
"""
Returns the queryset to use for updating / reloading / deletions
"""
if not hasattr(self, '__objects'):
self.__objects = QuerySet(self, self._get_collection())
return self.__objects
@property
def _object_key(self):
"""Dict to identify object in collection
"""
select_dict = {'pk': self.pk}
shard_key = self.__class__._meta.get('shard_key', tuple())
for k in shard_key:
select_dict[k] = getattr(self, k)
return select_dict
def update(self, **kwargs):
"""Performs an update on the :class:`~mongoengine.Document`
A convenience wrapper to :meth:`~mongoengine.QuerySet.update`.
Raises :class:`OperationError` if called on an object that has not yet
been saved.
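
        For example (field names are illustrative)::

            post.update(set__title='New title', inc__view_count=1)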
"""
if not self.pk:
if kwargs.get('upsert', False):
query = self.to_mongo()
if "_cls" in query:
del(query["_cls"])
return self._qs.filter(**query).update_one(**kwargs)
else:
raise OperationError(
'attempt to update a document not yet saved')
# Need to add shard key to query, or you get an error
return self._qs.filter(**self._object_key).update_one(**kwargs)
def delete(self, **write_concern):
"""Delete the :class:`~mongoengine.Document` from the database. This
will only take effect if the document has been previously saved.
:param write_concern: Extra keyword arguments are passed down which
will be used as options for the resultant
``getLastError`` command. For example,
``save(..., write_concern={w: 2, fsync: True}, ...)`` will
wait until at least two servers have recorded the write and
will force an fsync on the primary server.
"""
signals.pre_delete.send(self.__class__, document=self)
try:
self._qs.filter(
**self._object_key).delete(write_concern=write_concern, _from_doc_delete=True)
except pymongo.errors.OperationFailure as err:
message = 'Could not delete document (%s)' % err.message
raise OperationError(message)
signals.post_delete.send(self.__class__, document=self)
def switch_db(self, db_alias):
"""
Temporarily switch the database for a document instance.
Only really useful for archiving off data and calling `save()`::
user = User.objects.get(id=user_id)
user.switch_db('archive-db')
user.save()
:param str db_alias: The database alias to use for saving the document
.. seealso::
Use :class:`~mongoengine.context_managers.switch_collection`
if you need to read from another collection
"""
with switch_db(self.__class__, db_alias) as cls:
collection = cls._get_collection()
db = cls._get_db()
self._get_collection = lambda: collection
self._get_db = lambda: db
self._collection = collection
self._created = True
self.__objects = self._qs
self.__objects._collection_obj = collection
return self
def switch_collection(self, collection_name):
"""
Temporarily switch the collection for a document instance.
Only really useful for archiving off data and calling `save()`::
user = User.objects.get(id=user_id)
user.switch_collection('old-users')
user.save()
:param str collection_name: The database alias to use for saving the
document
.. seealso::
Use :class:`~mongoengine.context_managers.switch_db`
if you need to read from another database
"""
with switch_collection(self.__class__, collection_name) as cls:
collection = cls._get_collection()
self._get_collection = lambda: collection
self._collection = collection
self._created = True
self.__objects = self._qs
self.__objects._collection_obj = collection
return self
def select_related(self, max_depth=1):
"""Handles dereferencing of :class:`~bson.dbref.DBRef` objects to
        a maximum depth in order to cut down the number of queries to MongoDB.
.. versionadded:: 0.5
"""
DeReference = _import_class('DeReference')
DeReference()([self], max_depth + 1)
return self
def reload(self, *fields, **kwargs):
"""Reloads all attributes from the database.
:param fields: (optional) args list of fields to reload
:param max_depth: (optional) depth of dereferencing to follow
.. versionadded:: 0.1.2
.. versionchanged:: 0.6 Now chainable
.. versionchanged:: 0.9 Can provide specific fields to reload
"""
max_depth = 1
if fields and isinstance(fields[0], int):
max_depth = fields[0]
fields = fields[1:]
elif "max_depth" in kwargs:
max_depth = kwargs["max_depth"]
if not self.pk:
raise self.DoesNotExist("Document does not exist")
obj = self._qs.read_preference(ReadPreference.PRIMARY).filter(
**self._object_key).only(*fields).limit(1
).select_related(max_depth=max_depth)
if obj:
obj = obj[0]
else:
raise self.DoesNotExist("Document does not exist")
for field in self._fields_ordered:
if not fields or field in fields:
try:
setattr(self, field, self._reload(field, obj[field]))
except KeyError:
# If field is removed from the database while the object
# is in memory, a reload would cause a KeyError
# i.e. obj.update(unset__field=1) followed by obj.reload()
delattr(self, field)
self._changed_fields = obj._changed_fields
self._created = False
return self
def _reload(self, key, value):
"""Used by :meth:`~mongoengine.Document.reload` to ensure the
correct instance is linked to self.
"""
if isinstance(value, BaseDict):
value = [(k, self._reload(k, v)) for k, v in list(value.items())]
value = BaseDict(value, self, key)
elif isinstance(value, EmbeddedDocumentList):
value = [self._reload(key, v) for v in value]
value = EmbeddedDocumentList(value, self, key)
elif isinstance(value, BaseList):
value = [self._reload(key, v) for v in value]
value = BaseList(value, self, key)
elif isinstance(value, (EmbeddedDocument, DynamicEmbeddedDocument)):
value._instance = None
value._changed_fields = []
return value
def to_dbref(self):
"""Returns an instance of :class:`~bson.dbref.DBRef` useful in
`__raw__` queries."""
if not self.pk:
msg = "Only saved documents can have a valid dbref"
raise OperationError(msg)
return DBRef(self.__class__._get_collection_name(), self.pk)
@classmethod
def register_delete_rule(cls, document_cls, field_name, rule):
"""This method registers the delete rules to apply when removing this
object.
"""
classes = [get_document(class_name)
for class_name in cls._subclasses
if class_name != cls.__name__] + [cls]
documents = [get_document(class_name)
for class_name in document_cls._subclasses
if class_name != document_cls.__name__] + [document_cls]
for cls in classes:
for document_cls in documents:
delete_rules = cls._meta.get('delete_rules') or {}
delete_rules[(document_cls, field_name)] = rule
cls._meta['delete_rules'] = delete_rules
@classmethod
def drop_collection(cls):
"""Drops the entire collection associated with this
:class:`~mongoengine.Document` type from the database.
"""
cls._collection = None
db = cls._get_db()
db.drop_collection(cls._get_collection_name())
@classmethod
def ensure_index(cls, key_or_list, drop_dups=False, background=False,
**kwargs):
"""Ensure that the given indexes are in place.
:param key_or_list: a single index key or a list of index keys (to
construct a multi-field index); keys may be prefixed with a **+**
or a **-** to determine the index ordering
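
        For example (the class and field names are illustrative)::

            BlogPost.ensure_index(['+author', '-created_at'])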
"""
index_spec = cls._build_index_spec(key_or_list)
index_spec = index_spec.copy()
fields = index_spec.pop('fields')
index_spec['drop_dups'] = drop_dups
index_spec['background'] = background
index_spec.update(kwargs)
return cls._get_collection().ensure_index(fields, **index_spec)
@classmethod
def ensure_indexes(cls):
"""Checks the document meta data and ensures all the indexes exist.
Global defaults can be set in the meta - see :doc:`guide/defining-documents`
.. note:: You can disable automatic index creation by setting
            `auto_create_index` to False in the document's meta data
"""
background = cls._meta.get('index_background', False)
drop_dups = cls._meta.get('index_drop_dups', False)
index_opts = cls._meta.get('index_opts') or {}
index_cls = cls._meta.get('index_cls', True)
collection = cls._get_collection()
# 746: when connection is via mongos, the read preference is not necessarily an indication that
# this code runs on a secondary
if not collection.is_mongos and collection.read_preference > 1:
return
# determine if an index which we are creating includes
# _cls as its first field; if so, we can avoid creating
# an extra index on _cls, as mongodb will use the existing
# index to service queries against _cls
cls_indexed = False
# Ensure document-defined indexes are created
if cls._meta['index_specs']:
index_spec = cls._meta['index_specs']
for spec in index_spec:
spec = spec.copy()
fields = spec.pop('fields')
cls_indexed = cls_indexed or includes_cls(fields)
opts = index_opts.copy()
opts.update(spec)
collection.ensure_index(fields, background=background,
drop_dups=drop_dups, **opts)
# If _cls is being used (for polymorphism), it needs an index,
# only if another index doesn't begin with _cls
if (index_cls and not cls_indexed and
cls._meta.get('allow_inheritance', ALLOW_INHERITANCE) is True):
collection.ensure_index('_cls', background=background,
**index_opts)
@classmethod
def list_indexes(cls, go_up=True, go_down=True):
""" Lists all of the indexes that should be created for given
collection. It includes all the indexes from super- and sub-classes.
"""
if cls._meta.get('abstract'):
return []
# get all the base classes, subclasses and siblings
classes = []
def get_classes(cls):
if (cls not in classes and
isinstance(cls, TopLevelDocumentMetaclass)):
classes.append(cls)
for base_cls in cls.__bases__:
if (isinstance(base_cls, TopLevelDocumentMetaclass) and
base_cls != Document and
not base_cls._meta.get('abstract') and
base_cls._get_collection().full_name == cls._get_collection().full_name and
base_cls not in classes):
classes.append(base_cls)
get_classes(base_cls)
for subclass in cls.__subclasses__():
                if (isinstance(subclass, TopLevelDocumentMetaclass) and
subclass._get_collection().full_name == cls._get_collection().full_name and
subclass not in classes):
classes.append(subclass)
get_classes(subclass)
get_classes(cls)
# get the indexes spec for all of the gathered classes
def get_indexes_spec(cls):
indexes = []
if cls._meta['index_specs']:
index_spec = cls._meta['index_specs']
for spec in index_spec:
spec = spec.copy()
fields = spec.pop('fields')
indexes.append(fields)
return indexes
indexes = []
for cls in classes:
for index in get_indexes_spec(cls):
if index not in indexes:
indexes.append(index)
# finish up by appending { '_id': 1 } and { '_cls': 1 }, if needed
if [('_id', 1)] not in indexes:
indexes.append([('_id', 1)])
if (cls._meta.get('index_cls', True) and
cls._meta.get('allow_inheritance', ALLOW_INHERITANCE) is True):
indexes.append([('_cls', 1)])
return indexes
@classmethod
def compare_indexes(cls):
""" Compares the indexes defined in MongoEngine with the ones existing
in the database. Returns any missing/extra indexes.
"""
required = cls.list_indexes()
existing = [info['key']
for info in list(cls._get_collection().index_information().values())]
missing = [index for index in required if index not in existing]
extra = [index for index in existing if index not in required]
# if { _cls: 1 } is missing, make sure it's *really* necessary
if [('_cls', 1)] in missing:
cls_obsolete = False
for index in existing:
if includes_cls(index) and index not in extra:
cls_obsolete = True
break
if cls_obsolete:
missing.remove([('_cls', 1)])
return {'missing': missing, 'extra': extra}
class DynamicDocument(Document, metaclass=TopLevelDocumentMetaclass):
"""A Dynamic Document class allowing flexible, expandable and uncontrolled
schemas. As a :class:`~mongoengine.Document` subclass, acts in the same
way as an ordinary document but has expando style properties. Any data
passed or set against the :class:`~mongoengine.DynamicDocument` that is
not a field is automatically converted into a
:class:`~mongoengine.fields.DynamicField` and data can be attributed to that
field.
.. note::
There is one caveat on Dynamic Documents: fields cannot start with `_`
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
_dynamic = True
def __delattr__(self, *args, **kwargs):
"""Deletes the attribute by setting to None and allowing _delta to unset
it"""
field_name = args[0]
if field_name in self._dynamic_fields:
setattr(self, field_name, None)
else:
super(DynamicDocument, self).__delattr__(*args, **kwargs)
class DynamicEmbeddedDocument(EmbeddedDocument, metaclass=DocumentMetaclass):
"""A Dynamic Embedded Document class allowing flexible, expandable and
uncontrolled schemas. See :class:`~mongoengine.DynamicDocument` for more
information about dynamic documents.
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
_dynamic = True
def __delattr__(self, *args, **kwargs):
"""Deletes the attribute by setting to None and allowing _delta to unset
it"""
field_name = args[0]
if field_name in self._fields:
default = self._fields[field_name].default
if isinstance(default, collections.Callable):
default = default()
setattr(self, field_name, default)
else:
setattr(self, field_name, None)
class MapReduceDocument(object):
"""A document returned from a map/reduce query.
:param collection: An instance of :class:`~pymongo.Collection`
:param key: Document/result key, often an instance of
:class:`~bson.objectid.ObjectId`. If supplied as
an ``ObjectId`` found in the given ``collection``,
the object can be accessed via the ``object`` property.
:param value: The result(s) for this key.
.. versionadded:: 0.3
"""
def __init__(self, document, collection, key, value):
self._document = document
self._collection = collection
self.key = key
self.value = value
@property
def object(self):
"""Lazy-load the object referenced by ``self.key``. ``self.key``
should be the ``primary_key``.
"""
id_field = self._document()._meta['id_field']
id_field_type = type(id_field)
if not isinstance(self.key, id_field_type):
try:
self.key = id_field_type(self.key)
            except Exception:
raise Exception("Could not cast key as %s" %
id_field_type.__name__)
if not hasattr(self, "_key_object"):
self._key_object = self._document.objects.with_id(self.key)
return self._key_object
return self._key_object
|
|
"""
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
from itertools import izip_longest
import numpy as np
from ._caffe import Net, SGDSolver
import caffe.io
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict(zip(self._blob_names, self._blobs))
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
return OrderedDict([(name, lr.blobs)
for name, lr in zip(self._layer_names, self.layers)
if len(lr.blobs) > 0])
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Take
blobs: list of blobs to return in addition to output blobs.
kwargs: Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start: optional name of layer at which to begin the forward pass
end: optional name of layer at which to finish the forward pass (inclusive)
Give
outs: {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in kwargs.iteritems():
if blob.ndim != 4:
raise Exception('{} blob is not 4-d'.format(in_))
if blob.shape[0] != self.blobs[in_].num:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs}
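# Illustrative helper (not part of the original interface): forward a single
# preprocessed image. Assumes the net has an input blob named 'data' with a
# batch size of 1; adjust both for a real model.
def _example_forward_one(net, image):
    batch = np.asarray([net.preprocess('data', image)])  # 1 x K x H x W, float32
    return net.forward(**{'data': batch})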
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Take
diffs: list of diffs to return in addition to bottom diffs.
kwargs: Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
start: optional name of layer at which to begin the backward pass
end: optional name of layer at which to finish the backward pass (inclusive)
Give
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = len(self.layers) - 1
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + diffs)
else:
end_ind = 0
outputs = set(self.inputs + diffs)
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in kwargs.iteritems():
if diff.ndim != 4:
raise Exception('{} diff is not 4-d'.format(top))
if diff.shape[0] != self.blobs[top].num:
raise Exception('Diff is not batch sized')
self.blobs[top].diff[...] = diff
self._backward(start_ind, end_ind)
# Unpack diffs to extract
return {out: self.blobs[out].diff for out in outputs}
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Take
blobs: list of blobs to extract as in forward()
kwargs: Keys are input blob names and values are blob ndarrays.
Refer to forward().
Give
all_outs: {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in outs.iteritems():
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Take
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Give
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in batch_blobs.iteritems():
all_outs[out].extend(out_blobs)
for diff, out_diffs in batch_diffs.iteritems():
all_diffs[diff].extend(out_diffs)
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_mean(self, input_, mean, mode='elementwise'):
"""
Set the mean to subtract for data centering.
Take
input_: which input to assign this mean.
mean: mean K x H x W ndarray (input dimensional or broadcastable)
mode: elementwise = use the whole mean (and check dimensions)
channel = channel constant (e.g. mean pixel instead of mean image)
"""
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
in_shape = self.blobs[input_].data.shape
if mode == 'elementwise':
if mean.shape[1:] != in_shape[2:]:
# Resize mean (which requires H x W x K input).
mean = caffe.io.resize_image(mean.transpose((1,2,0)),
in_shape[2:]).transpose((2,0,1))
self.mean[input_] = mean
elif mode == 'channel':
self.mean[input_] = mean.mean(1).mean(1).reshape((in_shape[1], 1, 1))
else:
raise Exception('Mode not in {}'.format(['elementwise', 'channel']))
def _Net_set_input_scale(self, input_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Take
input_: which input to assign this scale factor
scale: scale coefficient
"""
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
self.input_scale[input_] = scale
def _Net_set_raw_scale(self, input_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Take
input_: which input to assign this scale factor
scale: scale coefficient
"""
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
self.raw_scale[input_] = scale
def _Net_set_channel_swap(self, input_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Take
input_: which input to assign this channel order
order: the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
self.channel_swap[input_] = order
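# Illustrative configuration sketch (not part of the original interface): a
# typical ImageNet-style preprocessing setup. The blob name 'data' and the
# mean array are assumptions supplied by the caller.
def _example_configure_preprocessing(net, mean_k_h_w):
    net.set_mean('data', mean_k_h_w, mode='elementwise')
    net.set_raw_scale('data', 255)           # images loaded in [0, 1] -> [0, 255]
    net.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR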
def _Net_preprocess(self, input_name, input_):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- transpose dimensions to K x H x W
- subtract mean
- scale feature
Take
input_name: name of input blob to preprocess for
input_: (H' x W' x K) ndarray
Give
caffe_inputs: (K x H x W) ndarray
"""
caffe_in = input_.astype(np.float32, copy=False)
mean = self.mean.get(input_name)
input_scale = self.input_scale.get(input_name)
raw_scale = self.raw_scale.get(input_name)
channel_order = self.channel_swap.get(input_name)
in_size = self.blobs[input_name].data.shape[2:]
if caffe_in.shape[:2] != in_size:
caffe_in = caffe.io.resize_image(caffe_in, in_size)
if channel_order is not None:
caffe_in = caffe_in[:, :, channel_order]
caffe_in = caffe_in.transpose((2, 0, 1))
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def _Net_deprocess(self, input_name, input_):
"""
Invert Caffe formatting; see Net.preprocess().
"""
decaf_in = input_.copy().squeeze()
mean = self.mean.get(input_name)
input_scale = self.input_scale.get(input_name)
raw_scale = self.raw_scale.get(input_name)
channel_order = self.channel_swap.get(input_name)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
decaf_in = decaf_in.transpose((1,2,0))
if channel_order is not None:
channel_order_inverse = [channel_order.index(i)
for i in range(decaf_in.shape[2])]
decaf_in = decaf_in[:, :, channel_order_inverse]
return decaf_in
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Take
blobs: Keys blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Give (yield)
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(blobs.itervalues().next())
batch_size = self.blobs.itervalues().next().num
remainder = num % batch_size
num_batches = num / batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_mean = _Net_set_mean
Net.set_input_scale = _Net_set_input_scale
Net.set_raw_scale = _Net_set_raw_scale
Net.set_channel_swap = _Net_set_channel_swap
Net.preprocess = _Net_preprocess
Net.deprocess = _Net_deprocess
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
|
|
from django.contrib.contenttypes.generic import GenericForeignKey
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse, resolve, NoReverseMatch, Resolver404
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
import settings
def _get_key_choices():
"""
Return all the keys that are not mapped to functions, as a list of choices.
"""
keys = list(
set(settings.URLMAPPER_KEYS) - set(settings.URLMAPPER_FUNCTIONS.keys())
)
if not keys:
return [('', ugettext("There are no defined keys"))]
return zip(keys, keys)
def _get_content_type_choices():
"""
Return all the content types that can be mapped.
"""
filters = models.Q()
for app_label, model in settings.URLMAPPER_CONTENTTYPES:
filters |= models.Q(app_label=app_label, model=model)
# Always exclude this app
filters &= ~models.Q(app_label='urlmapper')
return filters
class URLMapVisibleMananger(models.Manager):
def get_queryset(self):
queryset = super(URLMapVisibleMananger, self).get_queryset()
return queryset.exclude(key__in=settings.URLMAPPER_FUNCTIONS.keys())
class URLMap(models.Model):
"""
Map a key to a URL in the database. This could be a straight-up URL, an
object that has a get_absolute_url method, or a view name and keyword args.
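
    For example (the key and view name are illustrative)::

        URLMap.objects.create(key='terms', view_name='terms-view',
                              view_keywords='slug=terms-and-conditions')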
"""
key = models.CharField(
_("Key"),
max_length=64,
unique=True,
choices=_get_key_choices()
)
# Map to a URL
url = models.CharField(
_("URL"),
max_length=255,
help_text=_("Enter a relative URL"),
blank=True
)
# Map to an object
content_type = models.ForeignKey(
'contenttypes.ContentType',
verbose_name=_("Content Type"),
limit_choices_to=_get_content_type_choices(),
blank=True,
null=True
)
object_id = models.PositiveIntegerField(
_("Object ID"),
null=True,
blank=True
)
content_object = GenericForeignKey()
# Map to a view
view_name = models.CharField(
_("View name"),
max_length=255,
blank=True
)
view_keywords = models.TextField(
_("View keywords"),
help_text=_(
"Use a=b to define keywords and commas to separate e.g "
"slug=terms-and-conditions, language=en"
),
blank=True
)
objects = URLMapVisibleMananger()
_objects = models.Manager()
def __unicode__(self):
return u"{key} --> {url}".format(
key=self.key,
url=self.get_url()
)
def _get_view_kwargs(self, raise_exception=True):
if not self.view_keywords:
return {}
try:
return {
keyword.split('=')[0].strip(): keyword.split('=')[1].strip()
for keyword in self.view_keywords.split(',')
if keyword
}
except Exception as e:
if raise_exception:
raise e
return {}
def _get_view_url(self, raise_exception=True):
try:
return reverse(
self.view_name,
kwargs=self._get_view_kwargs(raise_exception=False)
)
except NoReverseMatch as e:
if raise_exception:
raise e
return ''
def _validate_url(self):
if self.url:
try:
resolve(self.url)
except Resolver404:
raise ValidationError(
ugettext(
"URL {url} does not correspond to a valid application view."
).format(
url=self.url
)
)
def _validate_object(self):
if self.content_type is not None or self.object_id is not None:
if self.content_type is None or self.object_id is None:
raise ValidationError(
ugettext(
"Please supply both a content type and object ID."
)
)
if not self.content_object:
raise ValidationError(
ugettext(
"Object with type {type} and ID {id} does not exist"
).format(
type=self.content_type,
id=self.object_id
)
)
if getattr(self.content_object, 'get_absolute_url', None) is None:
raise ValidationError(
ugettext(
"Object with type {type} and ID {id} does not have a "
"get_absolute_url method."
).format(
type=self.content_type,
id=self.object_id
)
)
def _validate_view(self):
if self.view_keywords and not self.view_name:
raise ValidationError(
ugettext(
"View keywords supplied but no view name provided"
)
)
try:
kwargs = self._get_view_kwargs()
except:
raise ValidationError(
ugettext(
"Keywords are not in the format a=b, c=d"
)
)
if self.view_name:
try:
self._get_view_url()
except NoReverseMatch:
raise ValidationError(
ugettext(
"No match for view {view} and keyword arguments {kwargs}."
).format(
view=self.view_name,
kwargs=kwargs
)
)
def _validate_single_mapping(self):
num_supplied_values = sum(
(
bool(self.url),
self.content_type is not None or self.object_id is not None,
bool(self.view_name or self.view_keywords)
)
)
if num_supplied_values != 1:
raise ValidationError(
ugettext(
"Please supply exactly one form of URL mapping ({n} supplied).".format(
n=num_supplied_values
)
)
)
def clean_fields(self, exclude=None):
super(URLMap, self).clean_fields(exclude=exclude)
self._validate_single_mapping()
self._validate_url()
self._validate_object()
self._validate_view()
def get_url(self):
if self.url:
return self.url
if self.content_object:
return self.content_object.get_absolute_url()
if self.view_name:
return self._get_view_url(raise_exception=False)
return ''
get_url.short_description = _('URL')
def mapping_type(self):
if self.url:
return _("Direct")
if self.object_id:
return _("Object")
if self.view_name:
return _("View")
mapping_type.short_description = _("Mapping type")
class Meta:
verbose_name = _("URL map")
verbose_name_plural = _("URL maps")
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ultimately, the design is to handle all the complicated stuff at set
# rather than get time. When something is set on a group, then all
# members of that group are examined and 'inheritedfrom' attributes
# are pushed. As expression-defined values are inserted, their
# dependent attributes are added to a private dict to aid in automatic
# recalculation. When a name is changed, all attributes are re-evaluated;
# on get, it should be a simple read of the value *except* for encrypted
# values, which are only decrypted when explicitly requested
# encrypted fields do not support expressions, either as a source or
# destination
#TODO: clustered mode
# In clustered case, only one instance is the 'master'. If some 'def set'
# is requested on a slave, it creates a transaction id and an event, firing it
# to master. It then waits on the event. When the master reflects the data
# back and that reflection data goes into memory, the wait will be satisfied
# this means that set on a slave will be much longer.
# the assumption is that only the calls to 'def set' need be pushed to/from
# master and all the implicit activity that ensues will pan out since
# the master is ensuring a strict ordering of transactions
# for missed transactions, transaction log will be used to track transactions
# transaction log can have a constrained size if we want, in which case full
# replication will trigger.
# uuid.uuid4() will be used for transaction ids
# Note on the cryptography. Default behavior is mostly just to pave the
# way to meaningful security. Root all potentially sensitive data in
# one key. That key is in plain sight, so not meaningfully protected
# However, the key can be protected in the following ways:
# - Passphrase protected (requiring human interaction every restart)
# - TPM sealing (which would forgo the interactive assuming risk of
# physical attack on TPM is not a concern)
# This module provides cryptographic convenience functions, largely to be
# used by config.py to protect/unlock configuration as appropriate.
# The default behavior provides no meaningful protection, all encrypted
# values are linked to a master key that is stored in the clear.
# meaningful protection comes when the user elects to protect the key
# by passphrase and optionally TPM
import Crypto.Protocol.KDF as KDF
from Crypto.Cipher import AES
from Crypto.Hash import HMAC
from Crypto.Hash import SHA256
import anydbm as dbm
import ast
import base64
import confluent.config.attributes as allattributes
import confluent.log
import confluent.util
import copy
import cPickle
import errno
import json
import operator
import os
import random
import re
import string
import sys
import threading
_masterkey = None
_masterintegritykey = None
_dirtylock = threading.RLock()
_config_areas = ('nodegroups', 'nodes', 'usergroups', 'users')
tracelog = None
def _mkpath(pathname):
try:
os.makedirs(pathname)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(pathname):
pass
else:
raise
def _derive_keys(password, salt):
#implement our specific combination of pbkdf2 transforms to get at
#key. We bump the iterations up because we can afford to
#TODO: WORKERPOOL PBKDF2 is expensive
tmpkey = KDF.PBKDF2(password, salt, 32, 50000,
lambda p, s: HMAC.new(p, s, SHA256).digest())
finalkey = KDF.PBKDF2(tmpkey, salt, 32, 50000,
lambda p, s: HMAC.new(p, s, SHA256).digest())
return finalkey[:32], finalkey[32:]
def _get_protected_key(keydict, password):
if keydict['unencryptedvalue']:
return keydict['unencryptedvalue']
# TODO(jbjohnso): check for TPM sealing
if 'passphraseprotected' in keydict:
if password is None:
raise Exception("Passphrase protected secret requires password")
for pp in keydict['passphraseprotected']:
salt = pp[0]
privkey, integkey = _derive_keys(password, salt)
return decrypt_value(pp[1:], key=privkey, integritykey=integkey)
else:
raise Exception("No available decryption key")
def _format_key(key, password=None):
if password is not None:
salt = os.urandom(32)
privkey, integkey = _derive_keys(password, salt)
cval = crypt_value(key, key=privkey, integritykey=integkey)
return {"passphraseprotected": cval}
else:
return {"unencryptedvalue": key}
def init_masterkey(password=None):
global _masterkey
global _masterintegritykey
cfgn = get_global('master_privacy_key')
if cfgn:
_masterkey = _get_protected_key(cfgn, password=password)
else:
_masterkey = os.urandom(32)
set_global('master_privacy_key', _format_key(
_masterkey,
password=password))
cfgn = get_global('master_integrity_key')
if cfgn:
_masterintegritykey = _get_protected_key(cfgn, password=password)
else:
_masterintegritykey = os.urandom(64)
set_global('master_integrity_key', _format_key(
_masterintegritykey,
password=password))
def decrypt_value(cryptvalue,
                  key=None,
                  integritykey=None):
    # decrypt an (iv, ciphertext, hmac) tuple; fall back to the master keys
    # when no explicit key material is supplied
    iv, cipherdata, hmac = cryptvalue
    if key is None or integritykey is None:
        if _masterkey is None or _masterintegritykey is None:
            init_masterkey()
        key = _masterkey
        integritykey = _masterintegritykey
    check_hmac = HMAC.new(integritykey, cipherdata, SHA256).digest()
    if hmac != check_hmac:
        raise Exception("bad HMAC value on crypted value")
    decrypter = AES.new(key, AES.MODE_CBC, iv)
value = decrypter.decrypt(cipherdata)
padsize = ord(value[-1])
pad = value[-padsize:]
# Note that I cannot grasp what could be done with a subliminal
# channel in padding in this case, but check the padding anyway
for padbyte in pad:
if ord(padbyte) != padsize:
raise Exception("bad padding in encrypted value")
return value[0:-padsize]
def crypt_value(value,
                key=None,
                integritykey=None):
    # encrypt the given value
    # PKCS7-style padding is employed; if no padding is needed, a full block
    # of 16 is added so the padding can always be stripped on decrypt
    # the HMAC is computed over the ciphertext so it can be verified before
    # decryption is attempted
if key is None or integritykey is None:
init_masterkey()
key = _masterkey
integritykey = _masterintegritykey
iv = os.urandom(16)
crypter = AES.new(key, AES.MODE_CBC, iv)
neededpad = 16 - (len(value) % 16)
pad = chr(neededpad) * neededpad
value += pad
cryptval = crypter.encrypt(value)
hmac = HMAC.new(integritykey, cryptval, SHA256).digest()
return iv, cryptval, hmac
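# Illustrative round-trip sketch (not part of the original module): encrypt and
# decrypt a Python 2 byte string with explicit key material instead of the
# module-level master keys.
def _example_crypt_roundtrip(secret):
    privkey = os.urandom(32)
    integkey = os.urandom(64)
    cryptvalue = crypt_value(secret, key=privkey, integritykey=integkey)
    return decrypt_value(cryptvalue, key=privkey, integritykey=integkey) == secret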
def _load_dict_from_dbm(dpath, tdb):
try:
dbe = dbm.open(tdb, 'r')
currdict = _cfgstore
for elem in dpath:
if elem not in currdict:
currdict[elem] = {}
currdict = currdict[elem]
for tk in dbe.iterkeys():
currdict[tk] = cPickle.loads(dbe[tk])
except dbm.error:
return
def is_tenant(tenant):
try:
return tenant in _cfgstore['tenant']
except KeyError:
return False
def get_global(globalname):
"""Get a global variable
:param globalname: The global parameter name to read
"""
try:
return _cfgstore['globals'][globalname]
except KeyError:
return None
def set_global(globalname, value):
"""Set a global variable.
Globals should be rarely ever used. Almost everything should be under a
tenant scope. Some things like master key and socket numbers/paths can be
reasonably considered global in nature.
:param globalname: The global parameter name to store
:param value: The value to set the global parameter to.
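
    For example (the global name is illustrative)::

        set_global('dns_domain', 'example.com')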
"""
with _dirtylock:
if 'dirtyglobals' not in _cfgstore:
_cfgstore['dirtyglobals'] = set()
_cfgstore['dirtyglobals'].add(globalname)
if 'globals' not in _cfgstore:
_cfgstore['globals'] = {globalname: value}
else:
_cfgstore['globals'][globalname] = value
ConfigManager._bg_sync_to_file()
def _mark_dirtykey(category, key, tenant=None):
if type(key) in (str, unicode):
key = key.encode('utf-8')
with _dirtylock:
if 'dirtykeys' not in _cfgstore:
_cfgstore['dirtykeys'] = {}
if tenant not in _cfgstore['dirtykeys']:
_cfgstore['dirtykeys'][tenant] = {}
if category not in _cfgstore['dirtykeys'][tenant]:
_cfgstore['dirtykeys'][tenant][category] = set()
_cfgstore['dirtykeys'][tenant][category].add(key)
def _generate_new_id():
# generate a random id outside the usual ranges used for normal users in
# /etc/passwd. Leave an equivalent amount of space near the end disused,
# just in case
uid = str(confluent.util.securerandomnumber(65537, 4294901759))
if 'idmap' not in _cfgstore['main']:
return uid
while uid in _cfgstore['main']['idmap']:
uid = str(confluent.util.securerandomnumber(65537, 4294901759))
return uid
class _ExpressionFormat(string.Formatter):
# This class is used to extract the literal value from an expression
# in the db
# This is made easier by subclassing one of the 'fprintf' mechanisms
# baked into python
posmatch = re.compile('^n([0-9]*)$')
nummatch = re.compile('[0-9]+')
_supported_ops = {
ast.Mult: operator.mul,
ast.Div: operator.floordiv,
ast.Mod: operator.mod,
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.LShift: operator.lshift,
ast.RShift: operator.rshift,
ast.BitAnd: operator.and_,
ast.BitXor: operator.xor,
ast.BitOr: operator.or_,
}
def __init__(self, nodeobj, nodename):
self._nodeobj = nodeobj
self._nodename = nodename
self._numbers = None
def get_field(self, field_name, args, kwargs):
parsed = ast.parse(field_name)
return self._handle_ast_node(parsed.body[0].value), field_name
def _handle_ast_node(self, node):
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Attribute):
#ok, we have something with a dot
left = node.value.id
right = node.attr
key = left + '.' + right
if '_expressionkeys' not in self._nodeobj:
self._nodeobj['_expressionkeys'] = set([key])
else:
self._nodeobj['_expressionkeys'].add(key)
val = _decode_attribute(key, self._nodeobj,
formatter=self)
return val['value'] if 'value' in val else ""
elif isinstance(node, ast.Name):
var = node.id
if var == 'nodename':
return self._nodename
mg = re.match(self.posmatch, var)
if mg:
idx = int(mg.group(1))
if self._numbers is None:
self._numbers = re.findall(self.nummatch, self._nodename)
return int(self._numbers[idx - 1])
else:
if var in self._nodeobj:
if '_expressionkeys' not in self._nodeobj:
self._nodeobj['_expressionkeys'] = set([var])
else:
self._nodeobj['_expressionkeys'].add(var)
val = _decode_attribute(var, self._nodeobj,
formatter=self)
return val['value'] if 'value' in val else ""
elif isinstance(node, ast.BinOp):
optype = type(node.op)
if optype not in self._supported_ops:
raise Exception("Unsupported operation")
op = self._supported_ops[optype]
return op(self._handle_ast_node(node.left),
self._handle_ast_node(node.right))
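# Illustrative sketch of _ExpressionFormat in use (the attribute name below is
# an example only): expressions are evaluated with string.Formatter semantics
# against the owning node object, e.g.
#   exprmgr = _ExpressionFormat(nodeobj, 'node1')
#   exprmgr.format('10.0.0.{n1}')    # -> '10.0.0.1' (first number in the name)
#   exprmgr.format('{hardwaremanagement.manager}')  # -> that attribute's value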
def _decode_attribute(attribute, nodeobj, formatter=None, decrypt=False):
if attribute not in nodeobj:
return None
# if we have an expression and a formatter, that overrides 'value'
# which may be out of date
# get methods will skip the formatter allowing value to come on through
# set methods induce recalculation as appropriate to get a cached value
if 'expression' in nodeobj[attribute] and formatter is not None:
retdict = copy.deepcopy(nodeobj[attribute])
if 'value' in retdict:
del retdict['value']
try:
retdict['value'] = formatter.format(retdict['expression'])
except Exception as e:
retdict['broken'] = str(e)
return retdict
elif 'value' in nodeobj[attribute]:
return nodeobj[attribute]
elif 'cryptvalue' in nodeobj[attribute] and decrypt:
retdict = copy.deepcopy(nodeobj[attribute])
retdict['value'] = decrypt_value(nodeobj[attribute]['cryptvalue'])
return retdict
return nodeobj[attribute]
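# Illustrative summary (not from the original source) of the per-attribute
# dictionaries that _decode_attribute understands:
#   {'value': 'plaindata'}
#   {'expression': '10.0.0.{n1}', 'value': '<cached result>'}
#   {'cryptvalue': (iv, ciphertext, hmac)}   # decrypted only when decrypt=True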
# my thinking at this point is that noderange and configdata objects
# will be constructed and passed as part of a context object to plugins
# reasoning being that the main program will handle establishing the
# tenant context and then modules need not consider the current tenant
# most of the time as things are automatic
def _addchange(changeset, node, attrname):
if node not in changeset:
changeset[node] = {attrname: 1}
else:
changeset[node][attrname] = 1
def hook_new_configmanagers(callback):
"""Register callback for new tenants
From the point when this function is called onward, the callback may be
invoked whenever a new tenant appears so that it can perform whatever
tasks are appropriate for a new tenant.
:param callback: Function to call for each possible config manager
:returns: identifier that can be used to cancel this registration
"""
#TODO(jbjohnso): actually live up to the promise of ongoing callbacks
callback(ConfigManager(None))
try:
for tenant in _cfgstore['tenant'].iterkeys():
callback(ConfigManager(tenant))
except KeyError:
pass
class ConfigManager(object):
_cfgdir = "/etc/confluent/cfg/"
_cfgwriter = None
_writepending = False
_syncrunning = False
_syncstate = threading.RLock()
_attribwatchers = {}
_nodecollwatchers = {}
_notifierids = {}
def __init__(self, tenant, decrypt=False):
global _cfgstore
self.decrypt = decrypt
if tenant is None:
self.tenant = None
if 'main' not in _cfgstore:
_cfgstore['main'] = {}
self._bg_sync_to_file()
self._cfgstore = _cfgstore['main']
if 'nodegroups' not in self._cfgstore:
self._cfgstore['nodegroups'] = {'everything': {'nodes': set()}}
self._bg_sync_to_file()
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()
return
elif 'tenant' not in _cfgstore:
_cfgstore['tenant'] = {tenant: {}}
self._bg_sync_to_file()
elif tenant not in _cfgstore['tenant']:
_cfgstore['tenant'][tenant] = {}
self._bg_sync_to_file()
self.tenant = tenant
self._cfgstore = _cfgstore['tenant'][tenant]
if 'nodegroups' not in self._cfgstore:
self._cfgstore['nodegroups'] = {'everything': {}}
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()
def filter_node_attributes(self, expression, nodes=None):
"""Filtered nodelist according to expression
expression may be:
attribute.name=value
attribute.name==value
attribute.name=~value
attribute.name!=value
attribute.name!~value
== and != do strict equality. The ~ operators do a regular expression.
! negates the sense of the match
:param expression: The expression containing the criteria to match
:param nodes: Optional iterable set of nodes to limit the check
"""
exmatch = None
yieldmatches = True
if nodes is None:
nodes = self._cfgstore['nodes']
if '==' in expression:
attribute, match = expression.split('==')
elif '!=' in expression:
attribute, match = expression.split('!=')
yieldmatches = False
elif '=~' in expression:
attribute, match = expression.split('=~')
exmatch = re.compile(match)
elif '!~' in expression:
attribute, match = expression.split('!~')
exmatch = re.compile(match)
yieldmatches = False
elif '=' in expression:
attribute, match = expression.split('=')
else:
raise Exception('Invalid Expression')
for node in nodes:
try:
currval = self._cfgstore['nodes'][node][attribute]['value']
except KeyError:
# Let's treat 'not set' as being an empty string for this path
currval = ''
if exmatch:
if yieldmatches:
if exmatch.search(currval):
yield node
else:
if not exmatch.search(currval):
yield node
else:
if yieldmatches:
if match == currval:
yield node
else:
if match != currval:
yield node
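# Usage sketch (attribute names and values are hypothetical), given
# cfm = ConfigManager(tenant=None):
#   list(cfm.filter_node_attributes('hardwaremanagement.method=ipmi'))
#   list(cfm.filter_node_attributes('console.method!=serial'))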
def filter_nodenames(self, expression, nodes=None):
"""Filter nodenames by regular expression
:param expression: Regular expression for matching nodenames
:param nodes: Optional iterable of candidates
"""
if nodes is None:
nodes = self._cfgstore['nodes']
expression = re.compile(expression)
for node in nodes:
if expression.search(node):
yield node
def watch_attributes(self, nodes, attributes, callback):
"""
Watch a list of attributes for changes on a list of nodes
:param nodes: An iterable of node names to be watching
:param attributes: An iterable of attribute names to be notified about
:param callback: A callback to process a notification
Returns an identifier that can be used to unsubscribe from these
notifications using remove_watcher
"""
notifierid = random.randint(0, sys.maxint)
while notifierid in self._notifierids:
notifierid = random.randint(0, sys.maxint)
self._notifierids[notifierid] = {'attriblist': []}
if self.tenant not in self._attribwatchers:
self._attribwatchers[self.tenant] = {}
attribwatchers = self._attribwatchers[self.tenant]
for node in nodes:
if node not in attribwatchers:
attribwatchers[node] = {}
for attribute in attributes:
self._notifierids[notifierid]['attriblist'].append(
(node, attribute))
if attribute not in attribwatchers[node]:
attribwatchers[node][attribute] = {
notifierid: callback
}
else:
attribwatchers[node][attribute][notifierid] = callback
return notifierid
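# Usage sketch (names are examples; cfm is a ConfigManager instance): the
# callback is invoked by _notif_attribwatchers below as
# callback(nodeattribs={node: [attributes...]}, configmanager=cfm):
#   def _on_change(nodeattribs, configmanager):
#       pass   # e.g. nodeattribs == {'node1': ['console.method']}
#   watcherid = cfm.watch_attributes(['node1'], ['console.method'], _on_change)
#   cfm.remove_watcher(watcherid)   # when no longer interested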
def watch_nodecollection(self, callback):
"""
Watch the nodecollection for addition or removal of nodes.
A watcher is notified after a node has been added and before a node
is actually removed.
:param callback: Function to call when a node is added or removed
Returns an identifier that can be used to unsubscribe from these
notifications using remove_watcher
"""
# first provide an identifier for the calling code to
# use in case of cancellation.
# I anticipate no more than a handful of watchers of this sort, so
# this loop should not have to iterate too many times
notifierid = random.randint(0, sys.maxint)
while notifierid in self._notifierids:
notifierid = random.randint(0, sys.maxint)
# going to track that this is a nodecollection type watcher,
# but there is no additional data associated.
self._notifierids[notifierid] = set(['nodecollection'])
if self.tenant not in self._nodecollwatchers:
self._nodecollwatchers[self.tenant] = {}
self._nodecollwatchers[self.tenant][notifierid] = callback
return notifierid
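# Usage sketch (names are examples; cfm is a ConfigManager instance):
# nodecollection watchers receive the added and soon-to-be-deleted node names
# (see set_node_attributes and del_nodes):
#   def _on_nodes(added, deleting, configmanager):
#       pass
#   watcherid = cfm.watch_nodecollection(_on_nodes)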
def remove_watcher(self, watcher):
# identifier of int would be a collection watcher
if watcher not in self._notifierids:
raise Exception("Invalid")
# return
if 'attriblist' in self._notifierids[watcher]:
attribwatchers = self._attribwatchers[self.tenant]
for nodeattrib in self._notifierids[watcher]['attriblist']:
node, attrib = nodeattrib
del attribwatchers[node][attrib][watcher]
elif 'nodecollection' in self._notifierids[watcher]:
del self._nodecollwatchers[self.tenant][watcher]
else:
raise Exception("Completely not a valid place to be")
del self._notifierids[watcher]
def list_users(self):
try:
return self._cfgstore['users'].iterkeys()
except KeyError:
return []
def get_user(self, name):
"""Get user information from DB
:param name: Name of the user
Returns a dictionary describing parameters of a user. These parameters
may include numeric id (id), certificate thumbprint (certthumb),
password hash (cryptpass, which currently is PBKDF2 derived),
full name (displayname), ...
"""
try:
return copy.deepcopy(self._cfgstore['users'][name])
except KeyError:
return None
def get_usergroup(self, groupname):
"""Get user group information from DB
:param groupname: Name of the group
Returns a dictionary describing parameters of a user group.
This may include the role for users in the group to receive
if no more specific information is found.
"""
try:
return copy.deepcopy(self._cfgstore['usergroups'][groupname])
except KeyError:
return None
def set_usergroup(self, groupname, attributemap):
"""Set usergroup attribute(s)
:param groupname: the name of the group to modify
:param attributemap: The mapping of keys to values to set
"""
for attribute in attributemap.iterkeys():
self._cfgstore['usergroups'][groupname][attribute] = attributemap[attribute]
_mark_dirtykey('usergroups', groupname, self.tenant)
def create_usergroup(self, groupname, role="Administrator"):
if 'usergroups' not in self._cfgstore:
self._cfgstore['usergroups'] = {}
groupname = groupname.encode('utf-8')
if groupname in self._cfgstore['usergroups']:
raise Exception("Duplicate groupname requested")
self._cfgstore['usergroups'][groupname] = {'role': role}
_mark_dirtykey('usergroups', groupname, self.tenant)
def set_user(self, name, attributemap):
"""Set user attribute(s)
:param name: The login name of the user
:param attributemap: A dict of key values to set
"""
user = self._cfgstore['users'][name]
for attribute in attributemap:
if attribute == 'password':
salt = os.urandom(8)
#TODO: WORKERPOOL, offload password set to a worker
crypted = KDF.PBKDF2(
attributemap[attribute], salt, 32, 10000,
lambda p, s: HMAC.new(p, s, SHA256).digest()
)
user['cryptpass'] = (salt, crypted)
else:
user[attribute] = attributemap[attribute]
_mark_dirtykey('users', name, self.tenant)
self._bg_sync_to_file()
def del_user(self, name):
if name in self._cfgstore['users']:
del self._cfgstore['users'][name]
_mark_dirtykey('users', name, self.tenant)
self._bg_sync_to_file()
def create_user(self, name,
role="Administrator", uid=None, displayname=None,
attributemap=None):
"""Create a new user
:param name: The login name of the user
:param role: The role the user should be considered. Can be
"Administrator" or "Technician", defaults to
"Administrator"
:param uid: Custom identifier number if desired. Defaults to random.
:param displayname: Optional long format name for UI consumption
"""
if uid is None:
uid = _generate_new_id()
else:
if uid in _cfgstore['main']['idmap']:
raise Exception("Duplicate id requested")
if 'users' not in self._cfgstore:
self._cfgstore['users'] = {}
name = name.encode('utf-8')
if name in self._cfgstore['users']:
raise Exception("Duplicate username requested")
self._cfgstore['users'][name] = {'id': uid}
if displayname is not None:
self._cfgstore['users'][name]['displayname'] = displayname
if 'idmap' not in _cfgstore['main']:
_cfgstore['main']['idmap'] = {}
_cfgstore['main']['idmap'][uid] = {
'tenant': self.tenant,
'username': name
}
if attributemap is not None:
self.set_user(name, attributemap)
_mark_dirtykey('users', name, self.tenant)
_mark_dirtykey('idmap', uid)
self._bg_sync_to_file()
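# Usage sketch (the user name and password are examples only; cfm is a
# ConfigManager instance): supplying a 'password' attribute causes set_user
# to store a salted PBKDF2 hash under 'cryptpass' rather than the clear text:
#   cfm.create_user('operator', attributemap={'password': 'examplepass'})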
def is_node(self, node):
return node in self._cfgstore['nodes']
def is_nodegroup(self, nodegroup):
return nodegroup in self._cfgstore['nodegroups']
def get_groups(self):
return self._cfgstore['nodegroups'].iterkeys()
def list_nodes(self):
try:
return self._cfgstore['nodes'].iterkeys()
except KeyError:
return []
def get_nodegroup_attributes(self, nodegroup, attributes=()):
cfgnodeobj = self._cfgstore['nodegroups'][nodegroup]
if not attributes:
attributes = cfgnodeobj.iterkeys()
nodeobj = {}
for attribute in attributes:
if attribute.startswith('_'):
continue
if attribute not in cfgnodeobj:
continue
nodeobj[attribute] = _decode_attribute(attribute, cfgnodeobj,
decrypt=self.decrypt)
return nodeobj
def get_node_attributes(self, nodelist, attributes=()):
retdict = {}
relattribs = attributes
if isinstance(nodelist, str) or isinstance(nodelist, unicode):
nodelist = [nodelist]
for node in nodelist:
if node not in self._cfgstore['nodes']:
continue
cfgnodeobj = self._cfgstore['nodes'][node]
nodeobj = {}
if len(attributes) == 0:
relattribs = cfgnodeobj
for attribute in relattribs:
if attribute.startswith('_'):
# skip private things
continue
if attribute not in cfgnodeobj:
continue
# since the formatter is not passed in, the calculator is
# skipped. The decryption, however, we want to do only on
# demand
nodeobj[attribute] = _decode_attribute(attribute, cfgnodeobj,
decrypt=self.decrypt)
retdict[node] = nodeobj
return retdict
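# Illustrative return shape (node and attribute names are hypothetical):
#   cfm.get_node_attributes(['node1'], ('console.method',))
#   # -> {'node1': {'console.method': {'value': 'ipmi'}}}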
def _node_added_to_group(self, node, group, changeset):
try:
nodecfg = self._cfgstore['nodes'][node]
groupcfg = self._cfgstore['nodegroups'][group]
except KeyError: # something did not exist, nothing to do
return
for attrib in groupcfg.iterkeys():
self._do_inheritance(nodecfg, attrib, node, changeset)
_addchange(changeset, node, attrib)
def _node_removed_from_group(self, node, group, changeset):
try:
nodecfg = self._cfgstore['nodes'][node]
except KeyError: # node did not exist, nothing to do
return
for attrib in nodecfg.keys():
if attrib.startswith("_"):
continue
if attrib == 'groups':
continue
try:
if nodecfg[attrib]['inheritedfrom'] == group:
del nodecfg[attrib] # remove invalid inherited data
self._do_inheritance(nodecfg, attrib, node, changeset)
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
except KeyError: # inheritedfrom not set, move on
pass
def _do_inheritance(self, nodecfg, attrib, nodename, changeset,
srcgroup=None):
# for now, just do single inheritance
# TODO: concatenating inheritance if requested
if attrib in ('nodes', 'groups'):
#not attributes that should be considered here
return
if attrib in nodecfg and 'inheritedfrom' not in nodecfg[attrib]:
return # already has a non-inherited value set, nothing to do
# if the attribute is not set, this will search for a candidate
# if it is set, but inheritedfrom, search for a replacement, just
# in case
if 'groups' not in nodecfg:
return
for group in nodecfg['groups']:
if attrib in self._cfgstore['nodegroups'][group]:
if srcgroup is not None and group != srcgroup:
# skip needless deepcopy
return
nodecfg[attrib] = \
copy.deepcopy(self._cfgstore['nodegroups'][group][attrib])
nodecfg[attrib]['inheritedfrom'] = group
self._refresh_nodecfg(nodecfg, attrib, nodename,
changeset=changeset)
_mark_dirtykey('nodes', nodename, self.tenant)
return
if srcgroup is not None and group == srcgroup:
# break out
return
def _sync_groups_to_node(self, groups, node, changeset):
for group in self._cfgstore['nodegroups'].iterkeys():
if group not in groups:
if node in self._cfgstore['nodegroups'][group]['nodes']:
self._cfgstore['nodegroups'][group]['nodes'].discard(node)
self._node_removed_from_group(node, group, changeset)
_mark_dirtykey('nodegroups', group, self.tenant)
for group in groups:
if group not in self._cfgstore['nodegroups']:
self._cfgstore['nodegroups'][group] = {'nodes': set([node])}
_mark_dirtykey('nodegroups', group, self.tenant)
elif node not in self._cfgstore['nodegroups'][group]['nodes']:
self._cfgstore['nodegroups'][group]['nodes'].add(node)
_mark_dirtykey('nodegroups', group, self.tenant)
# node was not already in given group, perform inheritance fixup
self._node_added_to_group(node, group, changeset)
def _sync_nodes_to_group(self, nodes, group, changeset):
for node in self._cfgstore['nodes'].iterkeys():
if node not in nodes and 'groups' in self._cfgstore['nodes'][node]:
if group in self._cfgstore['nodes'][node]['groups']:
self._cfgstore['nodes'][node]['groups'].remove(group)
self._node_removed_from_group(node, group, changeset)
for node in nodes:
if node not in self._cfgstore['nodes']:
self._cfgstore['nodes'][node] = {'groups': [group]}
_mark_dirtykey('nodes', node, self.tenant)
elif group not in self._cfgstore['nodes'][node]['groups']:
self._cfgstore['nodes'][node]['groups'].insert(0, group)
_mark_dirtykey('nodes', node, self.tenant)
else:
continue # next node, this node already in
self._node_added_to_group(node, group, changeset)
def add_group_attributes(self, attribmap):
self.set_group_attributes(attribmap, autocreate=True)
def set_group_attributes(self, attribmap, autocreate=False):
changeset = {}
for group in attribmap.iterkeys():
if not autocreate and group not in self._cfgstore['nodegroups']:
raise ValueError("{0} group does not exist".format(group))
for attr in attribmap[group].iterkeys():
if (attr not in ('nodes', 'noderange') and
(attr not in allattributes.node or
('type' in allattributes.node[attr] and
not isinstance(attribmap[group][attr],
allattributes.node[attr]['type'])))):
raise ValueError
if attr == 'nodes':
if not isinstance(attribmap[group][attr], list):
raise ValueError(
"nodes attribute on group must be list")
for node in attribmap[group]['nodes']:
if node not in self._cfgstore['nodes']:
raise ValueError(
"{0} node does not exist to add to {1}".format(
node, group))
for group in attribmap.iterkeys():
group = group.encode('utf-8')
if group not in self._cfgstore['nodegroups']:
self._cfgstore['nodegroups'][group] = {'nodes': set()}
cfgobj = self._cfgstore['nodegroups'][group]
for attr in attribmap[group].iterkeys():
if attr == 'nodes':
newdict = set(attribmap[group][attr])
elif (isinstance(attribmap[group][attr], str) or
isinstance(attribmap[group][attr], unicode)):
newdict = {'value': attribmap[group][attr]}
else:
newdict = attribmap[group][attr]
if 'value' in newdict and attr.startswith("secret."):
newdict['cryptvalue'] = crypt_value(newdict['value'])
del newdict['value']
cfgobj[attr] = newdict
if attr == 'nodes':
self._sync_nodes_to_group(group=group,
nodes=attribmap[group]['nodes'],
changeset=changeset)
elif attr != 'noderange': # update inheritance
for node in cfgobj['nodes']:
nodecfg = self._cfgstore['nodes'][node]
self._do_inheritance(nodecfg, attr, node, changeset,
srcgroup=group)
_addchange(changeset, node, attr)
_mark_dirtykey('nodegroups', group, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
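# Illustrative attribmap for set_group_attributes/add_group_attributes; the
# group, node and attribute names are examples, non-'nodes' attributes must
# be defined in allattributes.node, and listed nodes must already exist:
#   cfm.add_group_attributes({
#       'rack1': {
#           'nodes': ['node1', 'node2'],
#           'console.method': 'ipmi',
#       }
#   })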
def clear_group_attributes(self, groups, attributes):
changeset = {}
if type(groups) in (str, unicode):
groups = (groups,)
for group in groups:
group = group.encode('utf-8')
try:
groupentry = self._cfgstore['nodegroups'][group]
except KeyError:
continue
for attrib in attributes:
if attrib == 'nodes':
groupentry['nodes'] = set()
self._sync_nodes_to_group(
group=group, nodes=(), changeset=changeset)
else:
try:
del groupentry[attrib]
except KeyError:
pass
for node in groupentry['nodes']:
nodecfg = self._cfgstore['nodes'][node]
try:
delnodeattrib = (
nodecfg[attrib]['inheritedfrom'] == group)
except KeyError:
delnodeattrib = False
if delnodeattrib:
del nodecfg[attrib]
self._do_inheritance(nodecfg, attrib, node,
changeset)
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
_mark_dirtykey('nodegroups', group, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def _refresh_nodecfg(self, cfgobj, attrname, node, changeset):
exprmgr = None
if 'expression' in cfgobj[attrname]: # evaluate now
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
cfgobj[attrname] = _decode_attribute(attrname, cfgobj,
formatter=exprmgr)
if ('_expressionkeys' in cfgobj and
attrname in cfgobj['_expressionkeys']):
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
self._recalculate_expressions(cfgobj, formatter=exprmgr, node=node,
changeset=changeset)
def _notif_attribwatchers(self, nodeattrs):
if self.tenant not in self._attribwatchers:
return
notifdata = {}
attribwatchers = self._attribwatchers[self.tenant]
for node in nodeattrs.iterkeys():
if node not in attribwatchers:
continue
attribwatcher = attribwatchers[node]
for attrname in nodeattrs[node].iterkeys():
if attrname not in attribwatcher:
continue
for notifierid in attribwatcher[attrname].iterkeys():
if notifierid in notifdata:
if node in notifdata[notifierid]['nodeattrs']:
notifdata[notifierid]['nodeattrs'][node].append(
attrname)
else:
notifdata[notifierid]['nodeattrs'][node] = [
attrname]
else:
notifdata[notifierid] = {
'nodeattrs': {node: [attrname]},
'callback': attribwatcher[attrname][notifierid]
}
for watcher in notifdata.itervalues():
callback = watcher['callback']
try:
callback(nodeattribs=watcher['nodeattrs'], configmanager=self)
except Exception:
global tracelog
if tracelog is None:
tracelog = confluent.log.Logger('trace')
tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
def del_nodes(self, nodes):
if self.tenant in self._nodecollwatchers:
for watcher in self._nodecollwatchers[self.tenant].itervalues():
watcher(added=[], deleting=nodes, configmanager=self)
changeset = {}
for node in nodes:
node = node.encode('utf-8')
if node in self._cfgstore['nodes']:
self._sync_groups_to_node(node=node, groups=[],
changeset=changeset)
del self._cfgstore['nodes'][node]
_mark_dirtykey('nodes', node, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def del_groups(self, groups):
changeset = {}
for group in groups:
if group in self._cfgstore['nodegroups']:
self._sync_nodes_to_group(group=group, nodes=[],
changeset=changeset)
del self._cfgstore['nodegroups'][group]
_mark_dirtykey('nodegroups', group, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def clear_node_attributes(self, nodes, attributes):
# accumulate all changes into a changeset and push in one go
changeset = {}
for node in nodes:
node = node.encode('utf-8')
try:
nodek = self._cfgstore['nodes'][node]
except KeyError:
continue
recalcexpressions = False
for attrib in attributes:
if attrib in nodek and 'inheritedfrom' not in nodek[attrib]:
# if the attribute is set and not inherited,
# delete it and check for inheritance to backfill data
del nodek[attrib]
self._do_inheritance(nodek, attrib, node, changeset)
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
if ('_expressionkeys' in nodek and
attrib in nodek['_expressionkeys']):
recalcexpressions = True
if recalcexpressions:
exprmgr = _ExpressionFormat(nodek, node)
self._recalculate_expressions(nodek, formatter=exprmgr,
node=node, changeset=changeset)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def add_node_attributes(self, attribmap):
for node in attribmap.iterkeys():
if 'groups' not in attribmap[node]:
attribmap[node]['groups'] = []
self.set_node_attributes(attribmap, autocreate=True)
def set_node_attributes(self, attribmap, autocreate=False):
# TODO(jbjohnso): multi mgr support, here if we have peers,
# pickle the arguments and fire them off in eventlet
# flows to peers, all should have the same result
newnodes = []
changeset = {}
# first do a sanity check of the input upfront
# this mitigates risk of arguments being partially applied
for node in attribmap.iterkeys():
node = node.encode('utf-8')
if autocreate is False and node not in self._cfgstore['nodes']:
raise ValueError("node {0} does not exist".format(node))
for attrname in attribmap[node].iterkeys():
attrval = attribmap[node][attrname]
if (attrname not in allattributes.node or
('type' in allattributes.node[attrname] and
not isinstance(
attrval,
allattributes.node[attrname]['type']))):
errstr = "{0} attribute on node {1} is invalid".format(
attrname, node)
raise ValueError(errstr)
if attrname == 'groups':
for group in attribmap[node]['groups']:
if group not in self._cfgstore['nodegroups']:
raise ValueError(
"group {0} does not exist".format(group))
if ('everything' in self._cfgstore['nodegroups'] and
'everything' not in attribmap[node]['groups']):
attribmap[node]['groups'].append('everything')
for node in attribmap.iterkeys():
node = node.encode('utf-8')
exprmgr = None
if node not in self._cfgstore['nodes']:
newnodes.append(node)
self._cfgstore['nodes'][node] = {}
cfgobj = self._cfgstore['nodes'][node]
recalcexpressions = False
for attrname in attribmap[node].iterkeys():
if (isinstance(attribmap[node][attrname], str) or
isinstance(attribmap[node][attrname], unicode)):
newdict = {'value': attribmap[node][attrname]}
else:
newdict = attribmap[node][attrname]
if 'value' in newdict and attrname.startswith("secret."):
newdict['cryptvalue'] = crypt_value(newdict['value'])
del newdict['value']
cfgobj[attrname] = newdict
if attrname == 'groups':
self._sync_groups_to_node(node=node,
groups=attribmap[node]['groups'],
changeset=changeset)
if ('_expressionkeys' in cfgobj and
attrname in cfgobj['_expressionkeys']):
recalcexpressions = True
if 'expression' in cfgobj[attrname]: # evaluate now
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
cfgobj[attrname] = _decode_attribute(attrname, cfgobj,
formatter=exprmgr)
# if any code is watching these attributes, notify
# them of the change
_addchange(changeset, node, attrname)
_mark_dirtykey('nodes', node, self.tenant)
if recalcexpressions:
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
self._recalculate_expressions(cfgobj, formatter=exprmgr,
node=node, changeset=changeset)
self._notif_attribwatchers(changeset)
if newnodes:
if self.tenant in self._nodecollwatchers:
nodecollwatchers = self._nodecollwatchers[self.tenant]
for watcher in nodecollwatchers.itervalues():
watcher(added=newnodes, deleting=[], configmanager=self)
self._bg_sync_to_file()
# TODO: wait for synchronization to succeed or fail?
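# Illustrative attribmap for set_node_attributes/add_node_attributes; the
# names are examples, attributes must be defined in allattributes.node and
# any referenced groups must already exist. String values are wrapped as
# {'value': ...} and secret.* values are encrypted at rest:
#   cfm.add_node_attributes({
#       'node1': {'groups': ['rack1'], 'console.method': 'ipmi'}
#   })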
def _dump_to_json(self, redact=None):
"""Dump the configuration in json form to output
Encrypted 'secret' attributes are emitted in their encrypted form
(base64 encoded) rather than in the clear; the master key itself has no
clear form in the dump (see _dump_keys for the password-protected key dump).
:param redact: If True, then sensitive password data will be redacted.
Other values may be used one day to redact in more
complex and interesting ways for non-secret
data.
"""
dumpdata = {}
for confarea in _config_areas:
if confarea not in self._cfgstore:
continue
dumpdata[confarea] = {}
for element in self._cfgstore[confarea].iterkeys():
dumpdata[confarea][element] = \
copy.deepcopy(self._cfgstore[confarea][element])
for attribute in self._cfgstore[confarea][element].iterkeys():
if 'inheritedfrom' in dumpdata[confarea][element][attribute]:
del dumpdata[confarea][element][attribute]
elif (attribute == 'cryptpass' or
'cryptvalue' in
dumpdata[confarea][element][attribute]):
if redact is not None:
dumpdata[confarea][element][attribute] = '*REDACTED*'
else:
if attribute == 'cryptpass':
target = dumpdata[confarea][element][attribute]
else:
target = dumpdata[confarea][element][attribute]['cryptvalue']
cryptval = []
for value in target:
cryptval.append(base64.b64encode(value))
if attribute == 'cryptpass':
dumpdata[confarea][element][attribute] = '!'.join(cryptval)
else:
dumpdata[confarea][element][attribute]['cryptvalue'] = '!'.join(cryptval)
elif isinstance(dumpdata[confarea][element][attribute], set):
dumpdata[confarea][element][attribute] = \
list(dumpdata[confarea][element][attribute])
return json.dumps(
dumpdata, sort_keys=True, indent=4, separators=(',', ': '))
@classmethod
def _read_from_path(cls):
global _cfgstore
_cfgstore = {}
rootpath = cls._cfgdir
_load_dict_from_dbm(['globals'], rootpath + "/globals")
for confarea in _config_areas:
_load_dict_from_dbm(['main', confarea], rootpath + "/" + confarea)
try:
for tenant in os.listdir(rootpath + '/tenants/'):
for confarea in _config_areas:
_load_dict_from_dbm(
['main', tenant, confarea],
"%s/%s/%s" % (rootpath, tenant, confarea))
except OSError:
pass
@classmethod
def shutdown(cls):
cls._bg_sync_to_file()
if cls._cfgwriter is not None:
cls._cfgwriter.join()
sys.exit(0)
@classmethod
def _bg_sync_to_file(cls):
with cls._syncstate:
if cls._syncrunning:
cls._writepending = True
return
cls._syncrunning = True
# if the thread is exiting, join it to let it close, just in case
if cls._cfgwriter is not None:
cls._cfgwriter.join()
cls._cfgwriter = threading.Thread(target=cls._sync_to_file)
cls._cfgwriter.start()
@classmethod
def _sync_to_file(cls):
if 'dirtyglobals' in _cfgstore:
with _dirtylock:
dirtyglobals = copy.deepcopy(_cfgstore['dirtyglobals'])
del _cfgstore['dirtyglobals']
_mkpath(cls._cfgdir)
globalf = dbm.open(cls._cfgdir + "/globals", 'c', 384) # 0600
try:
for globalkey in dirtyglobals:
if globalkey in _cfgstore['globals']:
globalf[globalkey] = \
cPickle.dumps(_cfgstore['globals'][globalkey])
else:
if globalkey in globalf:
del globalf[globalkey]
finally:
globalf.close()
if 'dirtykeys' in _cfgstore:
with _dirtylock:
currdirt = copy.deepcopy(_cfgstore['dirtykeys'])
del _cfgstore['dirtykeys']
for tenant in currdirt.iterkeys():
dkdict = currdirt[tenant]
if tenant is None:
pathname = cls._cfgdir
currdict = _cfgstore['main']
else:
pathname = cls._cfgdir + '/tenants/' + tenant + '/'
currdict = _cfgstore['tenant'][tenant]
for category in dkdict.iterkeys():
_mkpath(pathname)
dbf = dbm.open(pathname + category, 'c', 384) # 0600
try:
for ck in dkdict[category]:
if ck not in currdict[category]:
if ck in dbf:
del dbf[ck]
else:
dbf[ck] = cPickle.dumps(currdict[category][ck])
finally:
dbf.close()
willrun = False
with cls._syncstate:
if cls._writepending:
cls._writepending = False
willrun = True
else:
cls._syncrunning = False
if willrun:
return cls._sync_to_file()
def _recalculate_expressions(self, cfgobj, formatter, node, changeset):
for key in cfgobj.iterkeys():
if not isinstance(cfgobj[key], dict):
continue
if 'expression' in cfgobj[key]:
cfgobj[key] = _decode_attribute(key, cfgobj,
formatter=formatter)
_addchange(changeset, node, key)
elif ('cryptvalue' not in cfgobj[key] and
'value' not in cfgobj[key]):
# recurse for nested structures, with some hint that
# it might indeed be a nested structure
self._recalculate_expressions(cfgobj[key], formatter, node,
changeset)
def _dump_keys(password):
if _masterkey is None or _masterintegritykey is None:
init_masterkey()
cryptkey = _format_key(_masterkey, password=password)
cryptkey = '!'.join(map(base64.b64encode, cryptkey['passphraseprotected'][0]))
integritykey = _format_key(_masterintegritykey, password=password)
integritykey = '!'.join(map(base64.b64encode, integritykey['passphraseprotected'][0]))
return json.dumps({'cryptkey': cryptkey, 'integritykey': integritykey},
sort_keys=True, indent=4, separators=(',', ': '))
def dump_db_to_directory(location, password, redact=None):
with open(os.path.join(location, 'keys.json'), 'w') as cfgfile:
cfgfile.write(_dump_keys(password))
cfgfile.write('\n')
with open(os.path.join(location, 'main.json'), 'w') as cfgfile:
cfgfile.write(ConfigManager(tenant=None)._dump_to_json(redact=redact))
cfgfile.write('\n')
try:
for tenant in os.listdir(ConfigManager._cfgdir + '/tenants/'):
with open(os.path.join(location, tenant + '.json'), 'w') as cfgfile:
cfgfile.write(ConfigManager(tenant=tenant)._dump_to_json(
redact=redact))
cfgfile.write('\n')
except OSError:
pass
try:
ConfigManager._read_from_path()
except IOError:
_cfgstore = {}
# some unit tests worth implementing:
# set group attribute on lower priority group, result is that node should not
# change
# after that point, then unset on the higher priority group, lower priority
# group should get it then
# rinse and repeat for set on node versus set on group
# clear group attribute and assure that it becomes unset on all nodes
# set various expressions
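# A minimal sketch of the first test above (group and attribute names are
# hypothetical; assumes a scratch ConfigManager and that 'console.method' is
# a defined attribute):
#   cfm = ConfigManager(tenant=None)
#   cfm.add_group_attributes({'high': {'nodes': []}, 'low': {'nodes': []}})
#   cfm.add_node_attributes({'node1': {'groups': ['high', 'low']}})
#   cfm.set_group_attributes({'high': {'console.method': 'ipmi'}})
#   cfm.set_group_attributes({'low': {'console.method': 'serial'}})
#   # node1 keeps the value inherited from 'high', its higher priority group
#   attrs = cfm.get_node_attributes('node1', ('console.method',))
#   assert attrs['node1']['console.method']['value'] == 'ipmi'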
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import typing as T
import subprocess, os
from .. import coredata
from .compilers import (
clike_debug_args,
Compiler,
)
from .mixins.clike import CLikeCompiler
from .mixins.gnu import (
GnuCompiler, gnulike_buildtype_args, gnu_optimization_args,
)
from .mixins.intel import IntelGnuLikeCompiler, IntelVisualStudioLikeCompiler
from .mixins.clang import ClangCompiler
from .mixins.elbrus import ElbrusCompiler
from .mixins.pgi import PGICompiler
from mesonbuild.mesonlib import (
version_compare, EnvironmentException, MesonException, MachineChoice,
LibType, OptionKey,
)
if T.TYPE_CHECKING:
from ..coredata import KeyedOptionDictType
from ..dependencies import Dependency
from ..envconfig import MachineInfo
from ..environment import Environment
from ..linkers import DynamicLinker
from ..programs import ExternalProgram
from .compilers import CompileCheckMode
class FortranCompiler(CLikeCompiler, Compiler):
language = 'fortran'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
Compiler.__init__(self, exelist, version, for_machine, info,
is_cross=is_cross, full_version=full_version, linker=linker)
CLikeCompiler.__init__(self, exe_wrapper)
def has_function(self, funcname: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
raise MesonException('Fortran does not have "has_function" capability.\n'
'It is better to test if a Fortran capability is working like:\n\n'
"meson.get_compiler('fortran').links('block; end block; end program')\n\n"
'that example is to see if the compiler has Fortran 2008 Block element.')
def sanity_check(self, work_dir_: str, environment: 'Environment') -> None:
work_dir = Path(work_dir_)
source_name = work_dir / 'sanitycheckf.f90'
binary_name = work_dir / 'sanitycheckf'
if binary_name.is_file():
binary_name.unlink()
source_name.write_text('program main; print *, "Fortran compilation is working."; end program', encoding='utf-8')
extra_flags: T.List[str] = []
extra_flags += environment.coredata.get_external_args(self.for_machine, self.language)
extra_flags += environment.coredata.get_external_link_args(self.for_machine, self.language)
extra_flags += self.get_always_args()
# %% build the test executable "sanitycheckf"
# cwd=work_dir is necessary on Windows especially for Intel compilers to avoid error: cannot write on sanitycheckf.obj
# this is a defect in how Windows handles files, combined with ifort's object file-writing behavior under a concurrent ProcessPoolExecutor.
# This simple workaround solves the issue.
returncode = subprocess.run(self.exelist + extra_flags + [str(source_name), '-o', str(binary_name)],
cwd=work_dir).returncode
if returncode != 0:
raise EnvironmentException('Compiler %s can not compile programs.' % self.name_string())
if self.is_cross:
if self.exe_wrapper is None:
# Can't check if the binaries run so we have to assume they do
return
cmdlist = self.exe_wrapper.get_command() + [str(binary_name)]
else:
cmdlist = [str(binary_name)]
# %% Run the test executable
try:
returncode = subprocess.run(cmdlist, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode
if returncode != 0:
raise EnvironmentException('Executables created by Fortran compiler %s are not runnable.' % self.name_string())
except OSError:
raise EnvironmentException('Executables created by Fortran compiler %s are not runnable.' % self.name_string())
def get_buildtype_args(self, buildtype: str) -> T.List[str]:
return gnulike_buildtype_args[buildtype]
def get_optimization_args(self, optimization_level: str) -> T.List[str]:
return gnu_optimization_args[optimization_level]
def get_debug_args(self, is_debug: bool) -> T.List[str]:
return clike_debug_args[is_debug]
def get_preprocess_only_args(self) -> T.List[str]:
return ['-cpp'] + super().get_preprocess_only_args()
def get_module_incdir_args(self) -> T.Tuple[str, ...]:
return ('-I', )
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['-module', path]
def compute_parameters_with_absolute_paths(self, parameter_list: T.List[str],
build_dir: str) -> T.List[str]:
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
return parameter_list
def module_name_to_filename(self, module_name: str) -> str:
if '_' in module_name: # submodule
s = module_name.lower()
if self.id in ('gcc', 'intel', 'intel-cl'):
filename = s.replace('_', '@') + '.smod'
elif self.id in ('pgi', 'flang'):
filename = s.replace('_', '-') + '.mod'
else:
filename = s + '.mod'
else: # module
filename = module_name.lower() + '.mod'
return filename
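# Illustrative mapping (module names are examples):
#   self.module_name_to_filename('parent_child')  # gcc/intel -> 'parent@child.smod'
#   self.module_name_to_filename('parent_child')  # pgi/flang -> 'parent-child.mod'
#   self.module_name_to_filename('foo')           # any id    -> 'foo.mod'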
def find_library(self, libname: str, env: 'Environment', extra_dirs: T.List[str],
libtype: LibType = LibType.PREFER_SHARED) -> T.Optional[T.List[str]]:
code = 'stop; end program'
return self._find_library_impl(libname, env, extra_dirs, code, libtype)
def has_multi_arguments(self, args: T.List[str], env: 'Environment') -> T.Tuple[bool, bool]:
return self._has_multi_arguments(args, env, 'stop; end program')
def has_multi_link_arguments(self, args: T.List[str], env: 'Environment') -> T.Tuple[bool, bool]:
return self._has_multi_link_arguments(args, env, 'stop; end program')
def get_options(self) -> 'KeyedOptionDictType':
opts = super().get_options()
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts.update({
key: coredata.UserComboOption(
'Fortran language standard to use',
['none'],
'none',
),
})
return opts
class GnuFortranCompiler(GnuCompiler, FortranCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
GnuCompiler.__init__(self, defines)
default_warn_args = ['-Wall']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic', '-fimplicit-none']}
def get_options(self) -> 'KeyedOptionDictType':
opts = FortranCompiler.get_options(self)
fortran_stds = ['legacy', 'f95', 'f2003']
if version_compare(self.version, '>=4.4.0'):
fortran_stds += ['f2008']
if version_compare(self.version, '>=8.0.0'):
fortran_stds += ['f2018']
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts[key].choices = ['none'] + fortran_stds
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
args.append('-std=' + std.value)
return args
def get_dependency_gen_args(self, outtarget: str, outfile: str) -> T.List[str]:
# Disabled until this is fixed:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62162
# return ['-cpp', '-MD', '-MQ', outtarget]
return []
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['-J' + path]
def language_stdlib_only_link_flags(self, env: 'Environment') -> T.List[str]:
# We need to apply the search prefix here, as these link arguments may
# be passed to a different compiler with a different set of default
# search paths, such as when using Clang for C/C++ and gfortran for
# Fortran.
search_dir = self._get_search_dirs(env)
search_dirs: T.List[str] = []
if search_dir is not None:
for d in search_dir.split()[-1][len('libraries: ='):].split(':'):
search_dirs.append(f'-L{d}')
return search_dirs + ['-lgfortran', '-lm']
def has_header(self, hname: str, prefix: str, env: 'Environment', *,
extra_args: T.Union[None, T.List[str], T.Callable[['CompileCheckMode'], T.List[str]]] = None,
dependencies: T.Optional[T.List['Dependency']] = None,
disable_cache: bool = False) -> T.Tuple[bool, bool]:
'''
Derived from mixins/clike.py:has_header, but without C-style usage of
__has_include which breaks with GCC-Fortran 10:
https://github.com/mesonbuild/meson/issues/7017
'''
code = f'{prefix}\n#include <{hname}>'
return self.compiles(code, env, extra_args=extra_args,
dependencies=dependencies, mode='preprocess', disable_cache=disable_cache)
class ElbrusFortranCompiler(ElbrusCompiler, FortranCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
ElbrusCompiler.__init__(self)
def get_options(self) -> 'KeyedOptionDictType':
opts = FortranCompiler.get_options(self)
fortran_stds = ['f95', 'f2003', 'f2008', 'gnu', 'legacy', 'f2008ts']
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts[key].choices = ['none'] + fortran_stds
return opts
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['-J' + path]
class G95FortranCompiler(FortranCompiler):
LINKER_PREFIX = '-Wl,'
id = 'g95'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
default_warn_args = ['-Wall']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-pedantic']}
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['-fmod=' + path]
def get_no_warn_args(self) -> T.List[str]:
# FIXME: Confirm that there's no compiler option to disable all warnings
return []
class SunFortranCompiler(FortranCompiler):
LINKER_PREFIX = '-Wl,'
id = 'sun'
def get_dependency_gen_args(self, outtarget: str, outfile: str) -> T.List[str]:
return ['-fpp']
def get_always_args(self) -> T.List[str]:
return []
def get_warn_args(self, level: str) -> T.List[str]:
return []
def get_module_incdir_args(self) -> T.Tuple[str, ...]:
return ('-M', )
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['-moddir=' + path]
def openmp_flags(self) -> T.List[str]:
return ['-xopenmp']
class IntelFortranCompiler(IntelGnuLikeCompiler, FortranCompiler):
file_suffixes = ('f90', 'f', 'for', 'ftn', 'fpp', )
id = 'intel'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
# FIXME: Add support for OS X and Windows in detect_fortran_compiler so
# we are sent the type of compiler
IntelGnuLikeCompiler.__init__(self)
default_warn_args = ['-warn', 'general', '-warn', 'truncated_source']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-warn', 'unused'],
'3': ['-warn', 'all']}
def get_options(self) -> 'KeyedOptionDictType':
opts = FortranCompiler.get_options(self)
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts[key].choices = ['none', 'legacy', 'f95', 'f2003', 'f2008', 'f2018']
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
stds = {'legacy': 'none', 'f95': 'f95', 'f2003': 'f03', 'f2008': 'f08', 'f2018': 'f18'}
if std.value != 'none':
args.append('-stand=' + stds[std.value])
return args
def get_preprocess_only_args(self) -> T.List[str]:
return ['-cpp', '-EP']
def language_stdlib_only_link_flags(self, env: 'Environment') -> T.List[str]:
# TODO: needs default search path added
return ['-lifcore', '-limf']
def get_dependency_gen_args(self, outtarget: str, outfile: str) -> T.List[str]:
return ['-gen-dep=' + outtarget, '-gen-depformat=make']
class IntelClFortranCompiler(IntelVisualStudioLikeCompiler, FortranCompiler):
file_suffixes = ('f90', 'f', 'for', 'ftn', 'fpp', )
always_args = ['/nologo']
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo', target: str,
exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
IntelVisualStudioLikeCompiler.__init__(self, target)
default_warn_args = ['/warn:general', '/warn:truncated_source']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['/warn:unused'],
'3': ['/warn:all']}
def get_options(self) -> 'KeyedOptionDictType':
opts = FortranCompiler.get_options(self)
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts[key].choices = ['none', 'legacy', 'f95', 'f2003', 'f2008', 'f2018']
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
stds = {'legacy': 'none', 'f95': 'f95', 'f2003': 'f03', 'f2008': 'f08', 'f2018': 'f18'}
if std.value != 'none':
args.append('/stand:' + stds[std.value])
return args
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['/module:' + path]
class PathScaleFortranCompiler(FortranCompiler):
id = 'pathscale'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
default_warn_args = ['-fullwarn']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args,
'3': default_warn_args}
def openmp_flags(self) -> T.List[str]:
return ['-mp']
class PGIFortranCompiler(PGICompiler, FortranCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
PGICompiler.__init__(self)
default_warn_args = ['-Minform=inform']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args,
'3': default_warn_args + ['-Mdclchk']}
def language_stdlib_only_link_flags(self, env: 'Environment') -> T.List[str]:
# TODO: needs default search path added
return ['-lpgf90rtl', '-lpgf90', '-lpgf90_rpm1', '-lpgf902',
'-lpgf90rtl', '-lpgftnrtl', '-lrt']
class NvidiaHPC_FortranCompiler(PGICompiler, FortranCompiler):
id = 'nvidia_hpc'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
PGICompiler.__init__(self)
default_warn_args = ['-Minform=inform']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args,
'3': default_warn_args + ['-Mdclchk']}
class FlangFortranCompiler(ClangCompiler, FortranCompiler):
id = 'flang'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
ClangCompiler.__init__(self, {})
default_warn_args = ['-Minform=inform']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args,
'3': default_warn_args}
def language_stdlib_only_link_flags(self, env: 'Environment') -> T.List[str]:
# We need to apply the search prefix here, as these link arguments may
# be passed to a different compiler with a different set of default
# search paths, such as when using Clang for C/C++ and gfortran for
# Fortran.
# XXX: Untested....
search_dir = self._get_search_dirs(env)
search_dirs: T.List[str] = []
if search_dir is not None:
for d in search_dir.split()[-1][len('libraries: ='):].split(':'):
search_dirs.append(f'-L{d}')
return search_dirs + ['-lflang', '-lpgmath']
class ArmLtdFlangFortranCompiler(FlangFortranCompiler):
id = 'armltdflang'
class Open64FortranCompiler(FortranCompiler):
id = 'open64'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
default_warn_args = ['-fullwarn']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args,
'3': default_warn_args}
def openmp_flags(self) -> T.List[str]:
return ['-mp']
class NAGFortranCompiler(FortranCompiler):
id = 'nagfor'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
FortranCompiler.__init__(self, exelist, version, for_machine,
is_cross, info, exe_wrapper, linker=linker,
full_version=full_version)
# Warnings are on by default; -w disables (by category):
self.warn_args = {
'0': ['-w=all'],
'1': [],
'2': [],
'3': [],
}
def get_always_args(self) -> T.List[str]:
return self.get_nagfor_quiet(self.version)
def get_module_outdir_args(self, path: str) -> T.List[str]:
return ['-mdir', path]
@staticmethod
def get_nagfor_quiet(version: str) -> T.List[str]:
return ['-quiet'] if version_compare(version, '>=7100') else []
def get_pic_args(self) -> T.List[str]:
return ['-PIC']
def get_preprocess_only_args(self) -> T.List[str]:
return ['-fpp']
def get_std_exe_link_args(self) -> T.List[str]:
return self.get_always_args()
def openmp_flags(self) -> T.List[str]:
return ['-openmp']
import settings
import helpers
import glob
import pandas
import ntpath
import numpy
import cv2
import os
CUBE_IMGTYPE_SRC = "_i"
def save_cube_img(target_path, cube_img, rows, cols):
assert rows * cols == cube_img.shape[0]
img_height = cube_img.shape[1]
img_width = cube_img.shape[2]
res_img = numpy.zeros((rows * img_height, cols * img_width), dtype=numpy.uint8)
for row in range(rows):
for col in range(cols):
target_y = row * img_height
target_x = col * img_width
res_img[target_y:target_y + img_height, target_x:target_x + img_width] = cube_img[row * cols + col]
cv2.imwrite(target_path, res_img)
def get_cube_from_img(img3d, center_x, center_y, center_z, block_size):
# clamp the cube inside the volume on every axis (z, y, x)
start_x = max(center_x - block_size // 2, 0)
if start_x + block_size > img3d.shape[2]:
start_x = img3d.shape[2] - block_size
start_y = max(center_y - block_size // 2, 0)
if start_y + block_size > img3d.shape[1]:
start_y = img3d.shape[1] - block_size
start_z = max(center_z - block_size // 2, 0)
if start_z + block_size > img3d.shape[0]:
start_z = img3d.shape[0] - block_size
start_z = int(start_z)
start_y = int(start_y)
start_x = int(start_x)
res = img3d[start_z:start_z + block_size, start_y:start_y + block_size, start_x:start_x + block_size]
return res
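# Usage sketch (the patient volume and coordinates are examples): extract a
# 64x64x64 cube around a voxel and store it as an 8x8 mosaic of slices:
#   cube = get_cube_from_img(patient_img3d, 120, 200, 80, 64)
#   save_cube_img("example_cube.png", cube, rows=8, cols=8)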
def make_pos_annotation_images():
src_dir = settings.LUNA_16_TRAIN_DIR2D2 + "metadata/"
dst_dir = settings.BASE_DIR_SSD + "luna16_train_cubes_pos/"
for file_path in glob.glob(dst_dir + "*.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*_annos_pos.csv")):
patient_id = ntpath.basename(csv_file).replace("_annos_pos.csv", "")
# print(patient_id)
# if not "148229375703208214308676934766" in patient_id:
# continue
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA_16_TRAIN_DIR2D2, "*" + CUBE_IMGTYPE_SRC + ".png")
for index, row in df_annos.iterrows():
coord_x = int(row["coord_x"] * images.shape[2])
coord_y = int(row["coord_y"] * images.shape[1])
coord_z = int(row["coord_z"] * images.shape[0])
diam_mm = int(row["diameter"] * images.shape[2])
anno_index = int(row["anno_index"])
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_" + str(diam_mm) + "_1_" + "pos.png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
def make_annotation_images_lidc():
src_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/luna16_train_cubes_lidc/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_path in glob.glob(dst_dir + "*.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*_annos_pos_lidc.csv")):
patient_id = ntpath.basename(csv_file).replace("_annos_pos_lidc.csv", "")
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png")
for index, row in df_annos.iterrows():
coord_x = int(row["coord_x"] * images.shape[2])
coord_y = int(row["coord_y"] * images.shape[1])
coord_z = int(row["coord_z"] * images.shape[0])
malscore = int(row["malscore"])
anno_index = row["anno_index"]
anno_index = str(anno_index).replace(" ", "xspacex").replace(".", "xpointx").replace("_", "xunderscorex")
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
if cube_img.shape != (64, 64, 64):
print(" ***** incorrect shape !!! ", str(anno_index), " - ",(coord_x, coord_y, coord_z))
continue
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_" + str(malscore * malscore) + "_1_pos.png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
def make_pos_annotation_images_manual():
src_dir = "resources/luna16_manual_labels/"
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/luna16_train_cubes_manual/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_path in glob.glob(dst_dir + "*_manual.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*.csv")):
patient_id = ntpath.basename(csv_file).replace(".csv", "")
if "1.3.6.1.4" not in patient_id:
continue
print(patient_id)
# if not "172845185165807139298420209778" in patient_id:
# continue
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png")
for index, row in df_annos.iterrows():
coord_x = int(row["x"] * images.shape[2])
coord_y = int(row["y"] * images.shape[1])
coord_z = int(row["z"] * images.shape[0])
diameter = int(row["d"] * images.shape[2])
node_type = int(row["id"])
            malscore = int(diameter)
            # clamp the diameter-derived malignancy proxy to the range [16, 25]
            malscore = min(25, malscore)
            malscore = max(16, malscore)
anno_index = index
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
if cube_img.shape != (64, 64, 64):
print(" ***** incorrect shape !!! ", str(anno_index), " - ",(coord_x, coord_y, coord_z))
continue
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_" + str(malscore) + "_1_" + ("pos" if node_type == 0 else "neg") + ".png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
def make_candidate_auto_images(candidate_types=[]):
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/luna16_train_cubes_auto/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for candidate_type in candidate_types:
for file_path in glob.glob(dst_dir + "*_" + candidate_type + ".png"):
os.remove(file_path)
for candidate_type in candidate_types:
if candidate_type == "falsepos":
src_dir = "resources/luna16_falsepos_labels/"
else:
src_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
for index, csv_file in enumerate(glob.glob(src_dir + "*_candidates_" + candidate_type + ".csv")):
patient_id = ntpath.basename(csv_file).replace("_candidates_" + candidate_type + ".csv", "")
print(index, ",patient: ", patient_id, " type:", candidate_type)
# if not "148229375703208214308676934766" in patient_id:
# continue
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png", exclude_wildcards=[])
row_no = 0
for index, row in df_annos.iterrows():
coord_x = int(row["coord_x"] * images.shape[2])
coord_y = int(row["coord_y"] * images.shape[1])
coord_z = int(row["coord_z"] * images.shape[0])
anno_index = int(row["anno_index"])
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 48)
if cube_img.sum() < 10:
print("Skipping ", coord_x, coord_y, coord_z)
continue
# print(cube_img.sum())
try:
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_0_" + candidate_type + ".png", cube_img, 6, 8)
except Exception as ex:
print(ex)
row_no += 1
max_item = 240 if candidate_type == "white" else 200
if candidate_type == "luna":
max_item = 500
if row_no > max_item:
break
def make_pos_annotation_images_manual_ndsb3():
src_dir = "resources/ndsb3_manual_labels/"
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/ndsb3_train_cubes_manual/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
train_label_df = pandas.read_csv("resources/stage1_labels.csv")
train_label_df.set_index(["id"], inplace=True)
for file_path in glob.glob(dst_dir + "*.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*.csv")):
patient_id = ntpath.basename(csv_file).replace(".csv", "")
if "1.3.6.1.4.1" in patient_id:
continue
cancer_label = train_label_df.loc[patient_id]["cancer"]
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.NDSB3_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png")
anno_index = 0
for index, row in df_annos.iterrows():
pos_neg = "pos" if row["id"] == 0 else "neg"
coord_x = int(row["x"] * images.shape[2])
coord_y = int(row["y"] * images.shape[1])
coord_z = int(row["z"] * images.shape[0])
malscore = int(round(row["dmm"]))
anno_index += 1
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
if cube_img.shape != (64, 64, 64):
print(" ***** incorrect shape !!! ", str(anno_index), " - ",(coord_x, coord_y, coord_z))
continue
print(patient_id)
assert malscore > 0 or pos_neg == "neg"
save_cube_img(dst_dir + "ndsb3manual_" + patient_id + "_" + str(anno_index) + "_" + pos_neg + "_" + str(cancer_label) + "_" + str(malscore) + "_1_pn.png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
if __name__ == "__main__":
if not os.path.exists(settings.BASE_DIR_SSD + "generated_traindata/"):
os.mkdir(settings.BASE_DIR_SSD + "generated_traindata/")
if True:
make_annotation_images_lidc()
if True:
make_pos_annotation_images_manual()
# if False:
# make_pos_annotation_images() # not used anymore
if True:
make_candidate_auto_images(["falsepos", "luna", "edge"])
if True:
make_pos_annotation_images_manual_ndsb3() # for second model
|
|
import logging
logging.basicConfig(level=logging.DEBUG)
#logging.basicConfig(filename='dev-match-new.log',level=logging.DEBUG)
import numpy as np
from scipy import stats
from scipy.spatial.distance import cosine, euclidean
import random
class Match:
def __init__(self, crowd, alpha = 1.8, group_size_limit = 4, match_criteria = 'e-i', exclusion = None, invalid_indices = set()):
self.logger = logging#.getLogger("Match")
#self.logger.setLevel(logging.DEBUG)
self.logger.debug("Initializing for %s" % match_criteria)
self.crowd = crowd
self.alpha = alpha
self.group_size_limit = group_size_limit # real range: [2, group_size_limit]
# match criteria: e-i, similar-topics, dissimilar-topics, serve-my-interest
self.match_criteria = match_criteria
self.exclusion = exclusion
self.invalid_indices = invalid_indices
self.lm = self.compute_learned_matrix()
self.pm = self.compute_pair_matching_matrix()
#print self.lm
self.size = self.pm.shape[0]
self.left_people = set(range(self.pm.shape[0]))
self.matched = []
    # Get the matching score of a pair/group efficiently, making use of the learned matrix.
def score(self, indices = None):
lm = self.lm
if indices is None:
return None
sublm = lm[np.ix_(indices, indices)]
if self.match_criteria == "e-i":
learned = [sum(sublm[i]) for i in range(sublm.shape[0])]
score_ret = stats.gmean(learned)
elif self.match_criteria == "serve-my-interest":
score_ret = sum(sublm[0])
elif self.match_criteria == "similar-topics":
score_ret = sum(sum(sublm))/2
elif self.match_criteria == "dissimilar-topics":
score_ret = sum(sum(sublm))/2
return score_ret
# Get learned score by inner product.
def learned_score(self, learner, teacher):
if self.match_criteria in ["e-i", "serve-my-interest"]: # The more to learn, the higher score.
return sum(teacher[0]*learner[1])
elif self.match_criteria == "similar-topics": # The more similar, the higher score.
            # cosine distance: identical vectors give 0, range is [0, 2], so it would need reversing
            # euclidean distance: identical vectors give 0 and it grows as the vectors differ
#return 2 - cosine(teacher, learner)
return 1/(euclidean(teacher[0], learner[0])+0.1)
elif self.match_criteria == "dissimilar-topics": # The more different, the higher score.
return euclidean(teacher[0], learner[0])+0.1
else:
self.logger.error("Unknown criteria %s" % self.match_criteria)
return -1
    # Compute an asymmetric learned matrix. M_ij means how much i can learn from j.
def compute_learned_matrix(self):
crowd = self.crowd
m = np.zeros((len(crowd), len(crowd)))
for i in range(len(crowd)):
for j in range(len(crowd)):
if i != j:
#print "crowd[i]", crowd[i]
#print "crowd[j]", crowd[j]
if self.exclusion:
if self.exclusion.check([i, j]) == False:
# reset the learned score as zero if they're exclusive.
m[i][j] = 0
continue
m[i][j] = self.learned_score(crowd[i], crowd[j])
self.logger.debug("Computed learned matrix:" )
self.logger.debug(m.shape)
#self.logger.debug("%s" % m)
return m
# Compute the pair-wise matching score based on a learned matrix, and form a symmetric pair-wise matrix.
def compute_pair_matching_matrix(self):
lm = self.lm
if self.match_criteria == "e-i":
pm = np.zeros(lm.shape)
for i in range(lm.shape[0]):
for j in range(i+1, lm.shape[1]):
pm[i][j] = pm[j][i] = self.score([i,j])
elif self.match_criteria in ["serve-my-interest", "similar-topics", "dissimilar-topics"]:
            # TODO: for serve-my-interest we could use a better strategy that prefers candidates
            # whose expertise is not largely useless to the learner, in order to serve more people better.
pm = lm
else:
self.logger.error("Unknown match criteria: %s" % self.match_criteria)
self.logger.debug("Computed pair matching matrix:" )
self.logger.debug(pm.shape)
#self.logger.debug("%s" % pm)
return pm
def get_one_group_from(self, start_person, candidates):
logger = self.logger
logger.debug("Begin to get one group from %d under %s" % (start_person, self.match_criteria))
pm = self.pm
lm = self.lm
matched = self.matched
#left_people = self.left_people
size = self.size
if start_person in self.invalid_indices:
return ([], -1)
def get_best_candidate(start_person, candidates):
scores = list(pm[start_person])
for i in range(len(scores)):
if i not in candidates:
scores[i] = 0
scores = np.array(scores)
idx = scores.argmax()
if idx not in candidates:
idx = list(candidates)[0]
score = scores[idx]
return idx, score
def select_one_more(members, candidates):
scores_with_i = {}
for i in candidates:
                # TODO: this should differ per criterion; define it in self.score()
                # TODO: for "similar-topics", the score will definitely decrease as more members are added.
scores_with_i[i] = self.score(members+[i])
#print scores_with_i
scores_rank = sorted(scores_with_i, key=scores_with_i.get, reverse=True)
i = scores_rank[0]
return i, scores_with_i[i]
if len(candidates) < 3:
members = [start_person] + list(candidates)
score = self.score(members)
logger.debug('not many left, just group them all: %s' % members)
else:
second_person, score = get_best_candidate(start_person, candidates)
members = [start_person, second_person]
logger.debug('get a best pair: %s =%.2f' % (members, score))
#candidates.remove(second_person)
candidates = candidates.difference(self.exclusion.query(members))
# logger.debug('the left candidates: %s' % candidates)
while len(members) < self.group_size_limit and len(candidates) > 2:
i, score_with_i = select_one_more(members, candidates)
if score_with_i/score >= self.alpha:
logger.debug("Add %s into group %s with score %s" % (i, members, score_with_i))
members.append(i)
score = score_with_i
#candidates.remove(i)
candidates = candidates.difference(self.exclusion.query(members))
else:
break
return members, score
def gen_random_crowd(popu_size, area_size):
np.random.seed(2)
group = []
for i in range(popu_size):
group.append(np.floor(np.random.random(area_size*2)+0.5).reshape(2,area_size))
return group
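# Illustrative sketch (not part of the original module): a minimal example of the 'e-i'
# (expertise/interest) scoring path on a tiny hand-made crowd. The vectors are hypothetical;
# for each person, row 0 encodes expertise over three topics and row 1 encodes interests.
def _demo_ei_score():
    tiny_crowd = [
        np.array([[1, 0, 1], [0, 1, 1]]),
        np.array([[0, 1, 1], [1, 0, 1]]),
        np.array([[1, 1, 0], [1, 0, 1]]),
    ]
    m = Match(tiny_crowd, match_criteria='e-i')
    # lm[0][1] is how much person 0 can learn from person 1 (inner product of 1's
    # expertise with 0's interests); the pair score is the geometric mean of what
    # each member of the group learns from the others.
    return m.lm[0][1], m.score([0, 1])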
if __name__ == "__main__":
from rematchr import Exclusion
e = Exclusion()
#e.add([0,1,2,3,4,5,6,7])
crowd = gen_random_crowd(10, 5)
#m = Match(crowd, alpha = 2, group_size_limit = 4, match_criteria = 'serve-my-interest', exclusion = e)
print "==================Preview the crowd========================="
print "------a simulated crowd of expertise/interest---------------"
for i in range(len(crowd)):
print "Person", i, "expertises", crowd[i][0], "interests", crowd[i][1]
print "========================Over================================"
matches = {}
for criterion in ['e-i', 'serve-my-interest', 'similar-topics', 'dissimilar-topics']:
if criterion == "similar-topics" or criterion == "dissimilar-topics":
invalid_indices = set([1,2])
else:
invalid_indices = set()
matches[criterion] = Match(crowd, alpha = 2, group_size_limit = 4, match_criteria = criterion, exclusion = e, invalid_indices = invalid_indices)
results = {}
for round_num in range(1, 5):
people = set(range(10))
matched_people = set()
left_people = people
start_person = random.choice(list(left_people))
while len(matched_people) < len(people):
logging.debug("starting point: %d", start_person)
# TODO: when some people cannot get matched from a specific criteria (lacking data), we should choose a proper criteria.
m = random.choice(matches.values())
while start_person in m.invalid_indices:
m = random.choice(matches.values())
            # pick the rest of the group from the people who have not been matched yet
            members, score = m.get_one_group_from(start_person, left_people.difference([start_person]))
matched_people = matched_people.union(members)
left_people = left_people.difference(set(members))
for member in members:
m_info = {'partners': list(set(members) - set([member])), 'score': score, 'criterion': m.match_criteria, 'location': 'unknown'}
                if member not in results:
results[member] = {}
results[member][round_num] = m_info
if len(left_people) <= 0:
break
start_person = random.choice(list(left_people))
print results
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot.writer import DataFileWriter
from robot.utils import abspath, is_string, normalize, NormalizedDict
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return,
Template, MetadataList, ImportList)
def TestData(parent=None, source=None, include_suites=None,
warn_on_skipped=False):
"""Parses a file or directory to a corresponding model object.
:param parent: (optional) parent to be used in creation of the model object.
:param source: path where test data is read from.
:returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
:class:`~.model.TestCaseFile` otherwise.
"""
if os.path.isdir(source):
return TestDataDirectory(parent, source).populate(include_suites,
warn_on_skipped)
return TestCaseFile(parent, source).populate()
class _TestData(object):
_setting_table_names = 'Setting', 'Settings', 'Metadata'
_variable_table_names = 'Variable', 'Variables'
_testcase_table_names = 'Test Case', 'Test Cases'
_keyword_table_names = 'Keyword', 'Keywords', 'User Keyword', 'User Keywords'
def __init__(self, parent=None, source=None):
self.parent = parent
self.source = abspath(source) if source else None
self.children = []
self._tables = NormalizedDict(self._get_tables())
def _get_tables(self):
for names, table in [(self._setting_table_names, self.setting_table),
(self._variable_table_names, self.variable_table),
(self._testcase_table_names, self.testcase_table),
(self._keyword_table_names, self.keyword_table)]:
for name in names:
yield name, table
def start_table(self, header_row):
try:
table = self._tables[header_row[0]]
except (KeyError, IndexError):
return None
if not self._table_is_allowed(table):
return None
table.set_header(header_row)
return table
@property
def name(self):
return self._format_name(self._get_basename()) if self.source else None
def _get_basename(self):
return os.path.splitext(os.path.basename(self.source))[0]
def _format_name(self, name):
name = self._strip_possible_prefix_from_name(name)
name = name.replace('_', ' ').strip()
return name.title() if name.islower() else name
def _strip_possible_prefix_from_name(self, name):
return name.split('__', 1)[-1]
@property
def keywords(self):
return self.keyword_table.keywords
@property
def imports(self):
return self.setting_table.imports
def report_invalid_syntax(self, message, level='ERROR'):
initfile = getattr(self, 'initfile', None)
path = os.path.join(self.source, initfile) if initfile else self.source
LOGGER.write("Error in file '%s': %s" % (path, message), level)
def save(self, **options):
"""Writes this datafile to disk.
:param options: Configuration for writing. These are passed to
:py:class:`~robot.writer.datafilewriter.WritingContext` as
keyword arguments.
See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
"""
return DataFileWriter(**options).write(self)
class TestCaseFile(_TestData):
"""The parsed test case file object.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
def __init__(self, parent=None, source=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = TestCaseFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, parent, source)
def populate(self):
FromFilePopulator(self).populate(self.source)
self._validate()
return self
def _validate(self):
if not self.testcase_table.is_started():
raise DataError('File has no test case table.')
def _table_is_allowed(self, table):
return True
def has_tests(self):
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.testcase_table, self.keyword_table]:
yield table
class ResourceFile(_TestData):
"""The parsed resource file object.
:param source: path where resource file is read from.
"""
def __init__(self, source=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = ResourceFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, source=source)
def populate(self):
FromFilePopulator(self).populate(self.source)
self._report_status()
return self
def _report_status(self):
if self.setting_table or self.variable_table or self.keyword_table:
LOGGER.info("Imported resource file '%s' (%d keywords)."
% (self.source, len(self.keyword_table.keywords)))
else:
LOGGER.warn("Imported resource file '%s' is empty." % self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
raise DataError("Resource file '%s' contains a test case table "
"which is not allowed." % self.source)
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
class TestDataDirectory(_TestData):
"""The parsed test data directory object. Contains hiearchical structure
of other :py:class:`.TestDataDirectory` and :py:class:`.TestCaseFile`
objects.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
def __init__(self, parent=None, source=None):
self.directory = source
self.initfile = None
self.setting_table = InitFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, parent, source)
def populate(self, include_suites=None, warn_on_skipped=False, recurse=True):
FromDirectoryPopulator().populate(self.source, self, include_suites,
warn_on_skipped, recurse)
self.children = [ch for ch in self.children if ch.has_tests()]
return self
def _get_basename(self):
return os.path.basename(self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
LOGGER.error("Test suite init file in '%s' contains a test case "
"table which is not allowed." % self.source)
return False
return True
def add_child(self, path, include_suites):
self.children.append(TestData(parent=self,source=path,
include_suites=include_suites))
def has_tests(self):
return any(ch.has_tests() for ch in self.children)
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
class _Table(object):
def __init__(self, parent):
self.parent = parent
self._header = None
def set_header(self, header):
self._header = self._prune_old_style_headers(header)
def _prune_old_style_headers(self, header):
if len(header) < 3:
return header
if self._old_header_matcher.match(header):
return [header[0]]
return header
@property
def header(self):
return self._header or [self.type.title() + 's']
@property
def name(self):
return self.header[0]
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def __bool__(self):
return bool(self._header or len(self))
#PY2
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return sum(1 for item in self)
class _WithSettings(object):
def get_setter(self, setting_name):
normalized = self.normalize(setting_name)
if normalized in self._setters:
return self._setters[normalized](self)
self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
def is_setting(self, setting_name):
return self.normalize(setting_name) in self._setters
def normalize(self, setting):
result = normalize(setting)
return result[0:-1] if result and result[-1]==':' else result
class _SettingTable(_Table, _WithSettings):
type = 'setting'
def __init__(self, parent):
_Table.__init__(self, parent)
self.doc = Documentation('Documentation', self)
self.suite_setup = Fixture('Suite Setup', self)
self.suite_teardown = Fixture('Suite Teardown', self)
self.test_setup = Fixture('Test Setup', self)
self.test_teardown = Fixture('Test Teardown', self)
self.force_tags = Tags('Force Tags', self)
self.default_tags = Tags('Default Tags', self)
self.test_template = Template('Test Template', self)
self.test_timeout = Timeout('Test Timeout', self)
self.metadata = MetadataList(self)
self.imports = ImportList(self)
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add_metadata(self, name, value='', comment=None):
self.metadata.add(Metadata(self, name, value, comment))
return self.metadata[-1]
def add_library(self, name, args=None, comment=None):
self.imports.add(Library(self, name, args, comment=comment))
return self.imports[-1]
def add_resource(self, name, invalid_args=None, comment=None):
self.imports.add(Resource(self, name, invalid_args, comment=comment))
return self.imports[-1]
def add_variables(self, name, args=None, comment=None):
self.imports.add(Variables(self, name, args, comment=comment))
return self.imports[-1]
def __len__(self):
return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'suitesetup': lambda s: s.suite_setup.populate,
'suiteprecondition': lambda s: s.suite_setup.populate,
'suiteteardown': lambda s: s.suite_teardown.populate,
'suitepostcondition': lambda s: s.suite_teardown.populate,
'testsetup': lambda s: s.test_setup.populate,
'testprecondition': lambda s: s.test_setup.populate,
'testteardown': lambda s: s.test_teardown.populate,
'testpostcondition': lambda s: s.test_teardown.populate,
'forcetags': lambda s: s.force_tags.populate,
'defaulttags': lambda s: s.default_tags.populate,
'testtemplate': lambda s: s.test_template.populate,
'testtimeout': lambda s: s.test_timeout.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables,
'metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.default_tags, self.test_template, self.test_timeout] \
+ self.metadata.data + self.imports.data:
yield setting
class ResourceFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables}
def __iter__(self):
for setting in [self.doc] + self.imports.data:
yield setting
class InitFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'suitesetup': lambda s: s.suite_setup.populate,
'suiteprecondition': lambda s: s.suite_setup.populate,
'suiteteardown': lambda s: s.suite_teardown.populate,
'suitepostcondition': lambda s: s.suite_teardown.populate,
'testsetup': lambda s: s.test_setup.populate,
'testprecondition': lambda s: s.test_setup.populate,
'testteardown': lambda s: s.test_teardown.populate,
'testpostcondition': lambda s: s.test_teardown.populate,
'testtimeout': lambda s: s.test_timeout.populate,
'forcetags': lambda s: s.force_tags.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables,
'metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.test_timeout] + self.metadata.data + self.imports.data:
yield setting
class VariableTable(_Table):
type = 'variable'
def __init__(self, parent):
_Table.__init__(self, parent)
self.variables = []
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add(self, name, value, comment=None):
self.variables.append(Variable(self, name, value, comment))
def __iter__(self):
return iter(self.variables)
class TestCaseTable(_Table):
type = 'test case'
def __init__(self, parent):
_Table.__init__(self, parent)
self.tests = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.tests.append(TestCase(self, name))
return self.tests[-1]
def __iter__(self):
return iter(self.tests)
def is_started(self):
return bool(self._header)
def __bool__(self):
return True
#PY2
def __nonzero__(self):
return self.__bool__()
class KeywordTable(_Table):
type = 'keyword'
def __init__(self, parent):
_Table.__init__(self, parent)
self.keywords = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.keywords.append(UserKeyword(self, name))
return self.keywords[-1]
def __iter__(self):
return iter(self.keywords)
class Variable(object):
def __init__(self, parent, name, value, comment=None):
self.parent = parent
self.name = name.rstrip('= ')
if name.startswith('$') and value == []:
value = ''
if is_string(value):
value = [value]
self.value = value
self.comment = Comment(comment)
def as_list(self):
if self.has_data():
return [self.name] + self.value + self.comment.as_list()
return self.comment.as_list()
def is_set(self):
return True
def is_for_loop(self):
return False
def has_data(self):
return bool(self.name or ''.join(self.value))
def __bool__(self):
return self.has_data()
#PY2
def __nonzero__(self):
return self.__bool__()
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax("Setting variable '%s' failed: %s"
% (self.name, message), level)
class _WithSteps(object):
def add_step(self, content, comment=None):
self.steps.append(Step(content, comment))
return self.steps[-1]
def copy(self, name):
new = copy.deepcopy(self)
new.name = name
self._add_to_parent(new)
return new
class TestCase(_WithSteps, _WithSettings):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.template = Template('[Template]', self)
self.tags = Tags('[Tags]', self)
self.setup = Fixture('[Setup]', self)
self.teardown = Fixture('[Teardown]', self)
self.timeout = Timeout('[Timeout]', self)
self.steps = []
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'template': lambda s: s.template.populate,
'setup': lambda s: s.setup.populate,
'precondition': lambda s: s.setup.populate,
'teardown': lambda s: s.teardown.populate,
'postcondition': lambda s: s.teardown.populate,
'tags': lambda s: s.tags.populate,
'timeout': lambda s: s.timeout.populate}
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def add_for_loop(self, declaration, comment=None):
self.steps.append(ForLoop(declaration, comment))
return self.steps[-1]
def report_invalid_syntax(self, message, level='ERROR'):
type_ = 'test case' if type(self) is TestCase else 'keyword'
message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
self.parent.report_invalid_syntax(message, level)
def _add_to_parent(self, test):
self.parent.tests.append(test)
@property
def settings(self):
return [self.doc, self.tags, self.setup, self.template, self.timeout,
self.teardown]
def __iter__(self):
for element in [self.doc, self.tags, self.setup,
self.template, self.timeout] \
+ self.steps + [self.teardown]:
yield element
class UserKeyword(TestCase):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.args = Arguments('[Arguments]', self)
self.return_ = Return('[Return]', self)
self.timeout = Timeout('[Timeout]', self)
self.teardown = Fixture('[Teardown]', self)
self.tags = Tags('[Tags]', self)
self.steps = []
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'arguments': lambda s: s.args.populate,
'return': lambda s: s.return_.populate,
'timeout': lambda s: s.timeout.populate,
'teardown': lambda s: s.teardown.populate,
'tags': lambda s: s.tags.populate}
def _add_to_parent(self, test):
self.parent.keywords.append(test)
@property
def settings(self):
return [self.args, self.doc, self.tags, self.timeout, self.teardown, self.return_]
def __iter__(self):
for element in [self.args, self.doc, self.tags, self.timeout] \
+ self.steps + [self.teardown, self.return_]:
yield element
class ForLoop(_WithSteps):
"""The parsed representation of a for-loop.
:param list declaration: The literal cell values that declare the loop
(excluding ":FOR").
:param str comment: A comment, default None.
:ivar str flavor: The value of the 'IN' item, uppercased.
Typically 'IN', 'IN RANGE', 'IN ZIP', or 'IN ENUMERATE'.
:ivar list vars: Variables set per-iteration by this loop.
:ivar list items: Loop values that come after the 'IN' item.
:ivar str comment: A comment, or None.
:ivar list steps: A list of steps in the loop.
"""
def __init__(self, declaration, comment=None):
self.flavor, index = self._get_flavors_and_index(declaration)
self.vars = declaration[:index]
self.items = declaration[index+1:]
self.comment = Comment(comment)
self.steps = []
def _get_flavors_and_index(self, declaration):
for index, item in enumerate(declaration):
item = item.upper()
if item.replace(' ', '').startswith('IN'):
return item, index
return 'IN', len(declaration)
def is_comment(self):
return False
def is_for_loop(self):
return True
def as_list(self, indent=False, include_comment=True):
comments = self.comment.as_list() if include_comment else []
return [': FOR'] + self.vars + [self.flavor] + self.items + comments
def __iter__(self):
return iter(self.steps)
def is_set(self):
return True
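# Illustrative sketch (not part of the original module): how a for-loop row is parsed
# by the ForLoop class above. The cell values are hypothetical literals.
def _demo_for_loop_parsing():
    loop = ForLoop(['${i}', 'IN RANGE', '10'])
    # the flavor is taken from the first cell that starts with 'IN' (here 'IN RANGE');
    # cells before it become the loop variables and cells after it become the items
    return loop.vars, loop.flavor, loop.items, loop.as_list()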
class Step(object):
def __init__(self, content, comment=None):
self.assign = list(self._get_assigned_vars(content))
try:
self.name = content[len(self.assign)]
except IndexError:
self.name = None
self.args = content[len(self.assign)+1:]
self.comment = Comment(comment)
def _get_assigned_vars(self, content):
for item in content:
if not is_var(item.rstrip('= ')):
return
yield item
def is_comment(self):
return not (self.assign or self.name or self.args)
def is_for_loop(self):
return False
def is_set(self):
return True
def as_list(self, indent=False, include_comment=True):
kw = [self.name] if self.name is not None else []
comments = self.comment.as_list() if include_comment else []
data = self.assign + kw + self.args + comments
if indent:
data.insert(0, '')
return data
class OldStyleSettingAndVariableTableHeaderMatcher(object):
def match(self, header):
        return all(e.lower() == 'value' for e in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
def match(self, header):
if header[1].lower() != 'action':
return False
for h in header[2:]:
if not h.lower().startswith('arg'):
return False
return True
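# Illustrative sketch (not part of the original module): minimal use of the TestData
# factory defined above. The path is a hypothetical example and the block only runs
# when such a file actually exists, so importing this module is unaffected.
if __name__ == '__main__':
    _example_source = 'example_tests.robot'
    if os.path.isfile(_example_source):
        _suite = TestData(source=_example_source)
        LOGGER.info("Parsed suite '%s' with %d test(s)."
                    % (_suite.name, len(list(_suite.testcase_table))))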
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class IsolateTest( GafferSceneTest.SceneTestCase ) :
def testPassThrough( self ) :
sphere = IECore.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"groupA" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphereAA" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
"sphereAB" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
"groupB" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphereBA" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
"sphereBB" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
},
} ),
)
isolate = GafferScene.Isolate()
isolate["in"].setInput( input["out"] )
self.assertSceneValid( input["out"] )
self.assertSceneValid( isolate["out"] )
# with no filter applied, nothing should be isolated so we should have a perfect pass through
self.assertScenesEqual( input["out"], isolate["out"] )
self.assertSceneHashesEqual( input["out"], isolate["out"] )
self.assertTrue( input["out"].object( "/groupA/sphereAA", _copy = False ).isSame( isolate["out"].object( "/groupA/sphereAA", _copy = False ) ) )
# and even with a filter applied, we should have a perfect pass through if the node is disabled.
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/*" ] ) )
isolate["filter"].setInput( filter["match"] )
isolate["enabled"].setValue( False )
self.assertScenesEqual( input["out"], isolate["out"] )
self.assertSceneHashesEqual( input["out"], isolate["out"] )
self.assertTrue( input["out"].object( "/groupA/sphereAA", _copy = False ).isSame( isolate["out"].object( "/groupA/sphereAA", _copy = False ) ) )
def testIsolation( self ) :
sphere = IECore.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"groupA" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphereAA" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
"sphereAB" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
"groupB" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphereBA" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
"sphereBB" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
},
} ),
)
isolate = GafferScene.Isolate()
isolate["in"].setInput( input["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/groupA/sphereAB" ] ) )
isolate["filter"].setInput( filter["match"] )
self.assertNotEqual( isolate["out"].childNamesHash( "/groupA" ), input["out"].childNamesHash( "/groupA" ) )
self.assertEqual( isolate["out"].childNames( "/groupA" ), IECore.InternedStringVectorData( [ "sphereAB" ] ) )
self.assertEqual( isolate["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "groupA" ] ) )
filter["paths"].setValue( IECore.StringVectorData( [ "/groupA/sphereAA" ] ) )
self.assertEqual( isolate["out"].childNames( "/groupA" ), IECore.InternedStringVectorData( [ "sphereAA" ] ) )
self.assertEqual( isolate["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "groupA" ] ) )
def testAdjustBounds( self ) :
sphere1 = IECore.SpherePrimitive()
sphere2 = IECore.SpherePrimitive( 2 )
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere2.bound() ),
"children" : {
"group" : {
"bound" : IECore.Box3fData( sphere2.bound() ),
"children" : {
"sphere1" : {
"bound" : IECore.Box3fData( sphere1.bound() ),
"object" : sphere1,
},
"sphere2" : {
"bound" : IECore.Box3fData( sphere2.bound() ),
"object" : sphere2,
},
},
},
},
} ),
)
isolate = GafferScene.Isolate()
isolate["in"].setInput( input["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
isolate["filter"].setInput( filter["match"] )
self.assertEqual( isolate["out"].bound( "/" ), sphere2.bound() )
self.assertEqual( isolate["out"].bound( "/group" ), sphere2.bound() )
self.assertEqual( isolate["out"].bound( "/group/sphere1" ), sphere1.bound() )
isolate["adjustBounds"].setValue( True )
self.assertEqual( isolate["out"].bound( "/" ), sphere1.bound() )
self.assertEqual( isolate["out"].bound( "/group" ), sphere1.bound() )
self.assertEqual( isolate["out"].bound( "/group/sphere1" ), sphere1.bound() )
def testForwardDeclarations( self ) :
light1 = GafferSceneTest.TestLight()
light2 = GafferSceneTest.TestLight()
group = GafferScene.Group()
group["in"].setInput( light1["out"] )
group["in1"].setInput( light2["out"] )
fd = group["out"]["globals"].getValue()["gaffer:forwardDeclarations"]
self.assertEqual( set( fd.keys() ), set( [ "/group/light", "/group/light1" ] ) )
isolate = GafferScene.Isolate()
isolate["in"].setInput( group["out"] )
fd = isolate["out"]["globals"].getValue()["gaffer:forwardDeclarations"]
self.assertEqual( set( fd.keys() ), set( [ "/group/light", "/group/light1" ] ) )
filter = GafferScene.PathFilter()
isolate["filter"].setInput( filter["match"] )
fd = isolate["out"]["globals"].getValue()["gaffer:forwardDeclarations"]
self.assertEqual( set( fd.keys() ), set( [] ) )
filter["paths"].setValue( IECore.StringVectorData( [ "/group/light" ] ) )
fd = isolate["out"]["globals"].getValue()["gaffer:forwardDeclarations"]
self.assertEqual( set( fd.keys() ), set( [ "/group/light" ] ) )
filter["paths"].setValue( IECore.StringVectorData( [ "/group/light*" ] ) )
fd = isolate["out"]["globals"].getValue()["gaffer:forwardDeclarations"]
self.assertEqual( set( fd.keys() ), set( [ "/group/light", "/group/light1" ] ) )
if __name__ == "__main__":
unittest.main()
|
|
# Waitrose web scraper
__author__ = 'robdobsn'
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.support.ui as webdriverui
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, WebDriverException, TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import logging
import json
import re
class WaitroseScraper():
def __init__(self):
logging.info("Waitrose scraper starting")
        self.isInitialized = False
self.isLoggedIn = False
self.webDriverType = "PhantomJS"
self.execUsingJS = False
def clickButtonByClassName(self, className):
if self.execUsingJS:
self.webDriver.execute_script("document.getElementsByClassName('" + className + "')[0].click()")
else:
btn = self.webDriver.find_element_by_class_name(className)
btn.click()
def clickButtonByXPath(self, xpath):
if self.execUsingJS:
self.webDriver.execute_script("return document.evaluate('" + xpath + "', document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.click()")
else:
btn = self.webDriver.find_element_by_xpath(xpath)
btn.click()
def clickButtonByCSSSelector(self, cssSelector):
btn = self.webDriver.find_element_by_css_selector(cssSelector)
btn.click()
def checkButtonEnabledByCSSSelector(self, cssSelector):
btn = self.webDriver.find_element_by_css_selector(cssSelector)
return btn.is_enabled() and btn.is_displayed()
def sendKeysToFieldById(self, elemId, strToSend, pressEnterAfter, clearFirst):
# if self.execUsingJS:
# self.webDriver.execute_script("document.getElementsByClassName('" + elemId + "').value = '" + strToSend)
# else:
print("Sending keys to elemId " + elemId + " keys = " + strToSend)
field = self.webDriver.find_element_by_id(elemId)
print(field)
if (clearFirst):
field.send_keys(Keys.CONTROL + "a")
field.send_keys(Keys.DELETE)
field.send_keys(strToSend + (Keys.RETURN if pressEnterAfter else ""))
def debugDumpPageSource(self, filenameExtra=""):
with open("debugPageSource" + filenameExtra + ".html", "w") as debugDumpFile:
debugDumpFile.write(self.webDriver.page_source)
self.webDriver.save_screenshot('debugPageImage.png')
# Start the web driver (runs the browser)
def startWebDriver(self):
# Clear current session file info
with open('browserSession.json', 'w') as outfile:
json.dump({}, outfile)
# Create WebDriver
if self.webDriverType == "Chrome":
try:
self.webDriver = webdriver.Chrome()
except WebDriverException:
logging.error("startWebDriver() Chrome Failed to start")
return False
elif self.webDriverType == "Firefox":
try:
self.webDriver = webdriver.Firefox()
except WebDriverException:
logging.error("startWebDriver() Firefox Failed to start")
return False
elif self.webDriverType == "PhantomJS":
try:
self.webDriver = webdriver.PhantomJS() # or add to your PATH
except:
try:
self.webDriver = webdriver.PhantomJS(
                        executable_path=r'C:\ProgramData\PhantomJS\bin')
except:
try:
self.webDriver = webdriver.PhantomJS(
executable_path='/usr/local/lib/node_modules/phantomjs/lib/phantom/bin/phantomjs')
except:
try:
self.webDriver = webdriver.PhantomJS(
executable_path=r'C:\Users\rob_2\AppData\Roaming\npm\node_modules\phantomjs\lib\phantom\bin\phantomjs.exe')
except:
logging.error("Failed to load the PhantomJS webdriver")
return False
        # Set the window size (seems to be needed with PhantomJS in particular,
        # probably because the website otherwise responds in mobile mode)
self.webDriver.set_window_size(1280,1024)
# Save session info
url = self.webDriver.command_executor._url
session_id = self.webDriver.session_id
with open('browserSession.json', 'w') as outfile:
json.dump({"url": url, "session_id": session_id}, outfile)
return True
def websiteLogin(self, username, password, attemptIdx):
try:
self.webDriver.save_screenshot('debug1_'+str(attemptIdx)+'.png')
logging.info("Waiting for signInRegister button")
wait = WebDriverWait(self.webDriver, 30)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "js-sign-in-register")))
logging.info("waitroseLogin() pressing signInRegister button")
self.clickButtonByClassName('js-sign-in-register')
self.webDriver.save_screenshot('debug2_'+str(attemptIdx)+'.png')
try:
print("Starting to wait for logon-email")
wait = WebDriverWait(self.webDriver, 30)
wait.until(EC.visibility_of_element_located((By.ID, "logon-email")))
print("Finished waiting for logon-email")
self.webDriver.save_screenshot('debug3_' + str(attemptIdx) + '.png')
try:
logging.info("waitroseLogin() entering username")
self.debugDumpPageSource("contbutton")
self.sendKeysToFieldById('logon-email', username, False, True)
self.webDriver.save_screenshot('debug4_' + str(attemptIdx) + '.png')
# self.clickButtonByXPath("//input[@type='button' and @value='Continue']")
if (self.checkButtonEnabledByCSSSelector("input[value='Continue'][type='button']")):
self.clickButtonByCSSSelector("input[value='Continue'][type='button']")
try:
logging.info("waitroseLogin() waiting for logon-password visible")
wait = WebDriverWait(self.webDriver, 60)
wait.until(EC.visibility_of_element_located((By.ID, "logon-password")))
self.webDriver.save_screenshot('debug5_' + str(attemptIdx) + '.png')
try:
logging.info("waitroseLogin() entering password")
self.sendKeysToFieldById('logon-password', password, False, True)
#self.clickButtonById('logon-button-sign-in')
self.clickButtonByCSSSelector("input[value='Sign in'][type='button']")
self.webDriver.save_screenshot('debug6_' + str(attemptIdx) + '.png')
logging.info("waitroseLogin() waiting for trolley-total to be visible")
wait = WebDriverWait(self.webDriver, 60)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
self.webDriver.save_screenshot('debug7_' + str(attemptIdx) + '.png')
elem2 = self.webDriver.find_element_by_class_name('trolley-total')
if elem2:
logging.info("waitroseLogin() basket found")
else:
logging.info("waitroseLogin() basket not found")
return True
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find logon-password after wait " + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find logon-password field" + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Error entering logon-email" + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find logon-email field" + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find sign-in-register button" + err.msg)
self.debugDumpPageSource()
return False
def getBasketSummary(self):
basketSummary = {}
# Ensure we wait until the trolley-total is visible
try:
wait = WebDriverWait(self.webDriver, 20)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
except TimeoutException:
logging.error("Get basket summary timeout exception")
self.debugDumpPageSource()
return None
except WebDriverException:
logging.error("Get basket summary webdriver element exception")
self.debugDumpPageSource()
return None
# Get basket total price
try:
totalElem = self.webDriver.find_element_by_class_name('trolley-total')
if totalElem:
reTotalElem = re.search("([0-9]{1,4}\.[0-9]{2})", totalElem.text)
if reTotalElem:
basketSummary["totalPrice"] = reTotalElem.group(1)
logging.info("waitrose: Basket: total=" + str(basketSummary["totalPrice"]))
# Get number of basket items
summaryElem = self.webDriver.find_element_by_class_name('trolley-summary')
if summaryElem:
reSummaryElem = re.search("([0-9]{1,4}) items", summaryElem.text)
if reSummaryElem:
basketSummary["numItems"] = reSummaryElem.group(1)
logging.info("waitrose: Basket: num items=" + str(basketSummary["numItems"]))
except WebDriverException:
logging.error("waitrose: Get basket summary webdriver element exception")
self.debugDumpPageSource()
return None
# Return info found
return basketSummary
def getElemAttrIfPresent(self, soup, elemName, className, subElem, attrName, regexReplace, destDict=None, dictName=None):
rslt = ""
try:
el = soup.find(elemName, class_=className)
            if subElem != "":
el = el.find(subElem)
if attrName == "text":
rslt = el.get_text()
else:
rslt = el[attrName]
            if regexReplace != "":
rslt = re.sub(regexReplace, "", rslt)
if destDict is not None:
destDict[dictName] = rslt
except WebDriverException:
logging.error("waitrose: Error extracting element " + elemName + " " + className)
self.debugDumpPageSource()
except:
logging.error("waitrose: Error (not webdriver) extracting element " + elemName + " " + className)
self.debugDumpPageSource()
return rslt
def getShoppingItems(self, isTrolleyPage):
# Make sure all items on the page are loaded - lazy loader
try:
self.debugDumpPageSource("m-product")
webdriverui.WebDriverWait(self.webDriver, 10)\
.until(EC.visibility_of_element_located((By.CLASS_NAME, "m-product")))
except WebDriverException:
logging.error("Wait for m-product webdriver element exception")
return []
productsFound = self.webDriver.find_elements_by_class_name("m-product")
print("waitrose: Lazy loading products - currently " + str(len(productsFound)) + " found")
numRepeats = 0
if len(productsFound) > 10:
while True:
prevFound = len(productsFound)
self.webDriver.execute_script("window.scrollBy(0,window.innerHeight)")
productsFound = self.webDriver.find_elements_by_class_name("m-product")
print("Loading products - currently " + str(len(productsFound)) + " found")
if len(productsFound) <= prevFound:
numRepeats += 1
if numRepeats > 20:
break
else:
numRepeats = 0
print("Done lazy loading products " + str(len(productsFound)) + " found")
# Go through items in the list on the current page
shoppingItems = []
for product in productsFound:
# Get HTML for this product
basketIt = {}
el = product.get_attribute("innerHTML")
productSoup = BeautifulSoup(el, "html.parser")
# Extract some common details
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "", "href", "", basketIt, "detailsHref")
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "src", "", basketIt, "imageSrc")
self.getElemAttrIfPresent(productSoup, "div", "m-product-volume", "", "text", r"\W", basketIt, "productVolume")
# Check if we are doing the trolley page - which has extra info like number of items ordered
if isTrolleyPage:
self.getElemAttrIfPresent(productSoup, "div", "m-product-title", "a", "text", "", basketIt, "productTitle")
if not "productTitle" in basketIt or basketIt["productTitle"] == "":
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "title", "", basketIt,
"productTitle")
self.getElemAttrIfPresent(productSoup, "div", "quantity-append", "input", "value", "", basketIt,
"trolleyQuantity")
self.getElemAttrIfPresent(productSoup, "p", "m-product-details", "span", "text", "", basketIt,
"trolleyPrice")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-price", "",
basketIt,
"price")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-priceperkg",
"", basketIt, "pricePerKg")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-orderitemid",
"", basketIt, "orderItemId")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-producttype",
"", basketIt, "productType")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-productid",
"", basketIt, "productId")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-uom", "", basketIt,
"UOM")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-weighttype",
"", basketIt, "weightType")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-substitute",
"", basketIt, "substitute")
else:
self.getElemAttrIfPresent(productSoup, "div", "m-product-price-container", "span", "text", "\W", basketIt,
"price")
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "", "text", "", basketIt,
"productTitle")
if not "productTitle" in basketIt or basketIt["productTitle"] == "":
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "title", "", basketIt,
"productTitle")
# Check if the product at least has a title and only add to list if it does
if not "productTitle" in basketIt or basketIt["productTitle"] == "":
logging.error("Extract Shopping List: Failed to extract product name")
else:
shoppingItems.append(basketIt)
return shoppingItems
def getTrolleyContents(self):
# Ensure we wait until the trolley-total is visible
try:
wait = WebDriverWait(self.webDriver, 20)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
except WebDriverException:
logging.error("Wait for Trolley-Total webdriver element exception")
self.debugDumpPageSource()
return None
# Navigate to the basket contents
try:
self.clickButtonByXPath('//div[@class="mini-trolley"]//a')
wait = WebDriverWait(self.webDriver, 30)
wait.until(EC.visibility_of_element_located((By.ID, "my-trolley")))
except NoSuchElementException:
logging.error("Press view trolley button no such element")
self.debugDumpPageSource()
return None
except WebDriverException:
logging.error("Press view trolley button webdriver element exception")
self.debugDumpPageSource()
return None
# Get the shopping items on the current page
return self.getShoppingItems(True)
def getFavourites(self):
# Ensure we wait until the favourites is visible
try:
wait = WebDriverWait(self.webDriver, 20)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "js-navbar-favourites")))
except WebDriverException:
logging.error("Wait for favourites button webdriver element exception")
self.debugDumpPageSource()
return None
# Navigate to the favourites
try:
FAVOURITES_BUTTON_XPATH = '//a[@class="js-navbar-favourites"]'
elemBasketBtn = self.webDriver.find_element_by_xpath(FAVOURITES_BUTTON_XPATH)
print(elemBasketBtn)
elemBasketBtn.click()
wait = WebDriverWait(self.webDriver, 60)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "products-grid")))
except NoSuchElementException:
logging.error("Press view favourites button no such element")
self.debugDumpPageSource()
return None
except WebDriverException:
logging.error("Press view favourites button webdriver element exception")
self.debugDumpPageSource()
return None
# Get the shopping items on the current page
return self.getShoppingItems(False)
# Handle site login
def siteLogin(self, siteUrl, username, password, titleMustContainStr):
# Start webDriver
if not self.startWebDriver():
logging.error("Unable to start webdriver")
return False
        self.isInitialized = True
# Go to URL
logging.info("Webdriver going to " + siteUrl)
self.webDriver.get(siteUrl)
logging.info("Webdriver site title = " + self.webDriver.title)
if not titleMustContainStr in self.webDriver.title:
logging.error("Site " + siteUrl + " title doesn't contain " + titleMustContainStr)
self.debugDumpPageSource()
return False
# Handle login
self.isLoggedIn = self.websiteLogin(username, password, 1)
# Succeeded so far
return self.isLoggedIn
# Ensure that we are logged in
def ensureLoggedIn(self, username, password):
# Ensure we are initialised
        if not self.isInitialized:
self.siteLogin("http://www.waitrose.com", username, password, "Waitrose")
# Try to login again if not currently logged in
if self.isInitalized:
if not self.isLoggedIn:
self.isLoggedIn = self.websiteLogin(username, password, 2)
return self.isLoggedIn
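# Hedged usage sketch (added for illustration, not part of the original scraper):
# assuming the methods above belong to a scraper class -- referred to here by the
# hypothetical name WaitroseScraper -- a typical session logs in once and then
# pulls the trolley and favourites, each of which returns a list of item dicts
# or None on failure.
#
#     scraper = WaitroseScraper()                     # hypothetical class name
#     if scraper.ensureLoggedIn("user@example.com", "secret"):
#         trolley = scraper.getTrolleyContents() or []
#         favourites = scraper.getFavourites() or []
#         logging.info("Trolley: %d items, favourites: %d items",
#                      len(trolley), len(favourites))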
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cudnn recurrent layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class CuDNNTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM],
return_sequences=[True, False]))
@test_util.run_gpu_only
def test_cudnn_rnn_return_sequence(self, layer_class, return_sequences):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
testing_utils.layer_test(
layer_class,
kwargs={'units': units,
'return_sequences': return_sequences},
input_shape=(num_samples, timesteps, input_size))
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM],
go_backwards=[True, False]))
@test_util.run_gpu_only
def test_cudnn_rnn_go_backward(self, layer_class, go_backwards):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
testing_utils.layer_test(
layer_class,
kwargs={'units': units,
'go_backwards': go_backwards},
input_shape=(num_samples, timesteps, input_size))
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
def test_return_state(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
_, state = outputs[0], outputs[1:]
self.assertEqual(len(state), num_states)
model = keras.models.Model(inputs, state[0])
model.run_eagerly = testing_utils.should_run_eagerly()
inputs = np.random.random((num_samples, timesteps, input_size))
state = model.predict(inputs)
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
def test_time_major_input(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
model = keras.models.Sequential()
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
layer = layer_class(units, time_major=True, return_sequences=True)
model.add(layer)
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(learning_rate=0.001))
model.fit(
np.ones((num_samples, timesteps, input_size)),
np.ones((num_samples, timesteps, units)))
out = model.predict(np.ones((num_samples, timesteps, input_size)))
self.assertEqual(out.shape, (num_samples, timesteps, units))
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
def test_specify_initial_state_keras_tensor(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = keras.Input((timesteps, input_size))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertIn(initial_state[0], layer._inbound_nodes[0].input_tensors)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, input_size))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
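# Note added for clarity (not in the original tests): CuDNNLSTM carries two state
# tensors (the hidden state h and the cell state c) while CuDNNGRU carries only h,
# which is why the tests above compute num_states as 2 for CuDNNLSTM and 1 for
# CuDNNGRU. A sketch of what return_state=True yields, assuming a GPU is available:
#
#     out, h, c = keras.layers.CuDNNLSTM(units, return_state=True)(inputs)
#     out, h = keras.layers.CuDNNGRU(units, return_state=True)(inputs)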
class CuDNNGraphOnlyTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_deprecated_v1
@test_util.run_gpu_only
def test_regularizer(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
layer = layer_class(
units,
return_sequences=False,
input_shape=(timesteps, input_size),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2')
layer.build((None, None, input_size))
self.assertEqual(len(layer.losses), 3)
layer = layer_class(
units,
return_sequences=False,
input_shape=(timesteps, input_size),
activity_regularizer='l2')
self.assertTrue(layer.activity_regularizer)
x = keras.backend.variable(
np.ones((num_samples, timesteps, input_size)))
layer(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
@test_util.run_v1_only('b/120941292')
def test_statefulness(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
with self.cached_session(use_gpu=True):
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
10,
input_size,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
@test_util.run_all_in_graph_and_eager_modes
class CuDNNV1OnlyTest(keras_parameterized.TestCase):
@test_util.run_gpu_only
def test_trainability(self):
input_size = 10
units = 2
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
layer = layer_class(units)
layer.build((None, None, input_size))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False],
bidirectional=[True, False], implementation=[1, 2],
model_nest_level=[1, 2], model_type=['seq', 'func']))
@test_util.run_v1_only('b/120911602, b/112083752')
@test_util.run_gpu_only
def test_load_weights_between_noncudnn_rnn(self, rnn_type, to_cudnn,
bidirectional, implementation,
model_nest_level, model_type):
input_size = 10
timesteps = 6
input_shape = (timesteps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, input_size))
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
'implementation': implementation
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
layer = rnn_layer_class(units, **rnn_layer_kwargs)
if bidirectional:
layer = keras.layers.Bidirectional(layer)
cudnn_layer = cudnn_rnn_layer_class(units)
if bidirectional:
cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
model = self._make_nested_model(input_shape, layer, model_nest_level,
model_type)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer,
model_nest_level, model_type)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
atol=1e-4)
def _make_nested_model(self, input_shape, layer, level=1, model_type='func'):
# example: make_nested_seq_model((1,), Dense(10), level=2).summary()
def make_nested_seq_model(input_shape, layer, level=1):
model = layer
for i in range(1, level + 1):
layers = [keras.layers.InputLayer(input_shape),
model] if (i == 1) else [model]
model = keras.models.Sequential(layers)
return model
# example: make_nested_func_model((1,), Dense(10), level=2).summary()
def make_nested_func_model(input_shape, layer, level=1):
model_input = keras.layers.Input(input_shape)
model = layer
for _ in range(level):
model = keras.models.Model(model_input, model(model_input))
return model
if model_type == 'func':
return make_nested_func_model(input_shape, layer, level)
elif model_type == 'seq':
return make_nested_seq_model(input_shape, layer, level)
def _convert_model_weights(self, source_model, target_model):
_, fname = tempfile.mkstemp('.h5')
source_model.save_weights(fname)
target_model.load_weights(fname)
os.remove(fname)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False]))
@test_util.run_v1_only('b/120911602')
@test_util.run_gpu_only
def test_load_weights_between_noncudnn_rnn_time_distributed(self, rnn_type,
to_cudnn):
# Similar test to test_load_weights_between_noncudnn_rnn(), but with a different
# rank of input due to the use of TimeDistributed. Issue: #10356.
input_size = 10
steps = 6
timesteps = 6
input_shape = (timesteps, steps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, steps, input_size))
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
layer = rnn_layer_class(units, **rnn_layer_kwargs)
layer = keras.layers.TimeDistributed(layer)
cudnn_layer = cudnn_rnn_layer_class(units)
cudnn_layer = keras.layers.TimeDistributed(cudnn_layer)
model = self._make_nested_model(input_shape, layer)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
atol=1e-4)
@test_util.run_gpu_only
def test_cudnnrnn_bidirectional(self):
rnn = keras.layers.CuDNNGRU
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'concat'
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(None, dim)))
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
# test stacked bidirectional layers
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(None, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.Input((timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = keras.Input(batch_shape=(1, timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
@test_util.run_gpu_only
def test_preprocess_weights_for_loading_gru_incompatible(self):
"""Test loading weights between incompatible layers.
Should fail fast with an exception.
"""
input_shape = (3, 5)
def gru(cudnn=False, **kwargs):
layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
return layer_class(2, input_shape=input_shape, **kwargs)
def get_layer_weights(layer):
layer.build(input_shape=input_shape)
return layer.get_weights()
def assert_not_compatible(src, dest, message):
with self.assertRaises(ValueError) as ex:
keras.saving.preprocess_weights_for_loading(
dest,
get_layer_weights(src))
self.assertIn(message, str(ex.exception))
assert_not_compatible(
gru(),
gru(cudnn=True),
'GRU(reset_after=False) is not compatible with CuDNNGRU')
assert_not_compatible(
gru(cudnn=True),
gru(),
'CuDNNGRU is not compatible with GRU(reset_after=False)')
assert_not_compatible(
gru(),
gru(reset_after=True),
'GRU(reset_after=False) is not compatible with '
'GRU(reset_after=True)')
assert_not_compatible(
gru(reset_after=True),
gru(),
'GRU(reset_after=True) is not compatible with '
'GRU(reset_after=False)')
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tempfile
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six import StringIO
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
Recommendation, Recommender, ReferencedByGenRel, ReferencedByInline,
ReferencedByParent, RelatedPrepopulated, RelatedWithUUIDPKModel, Report,
Reservation, Restaurant, RowLevelChangePermissionModel, Section,
ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section', lambda obj: obj.title,
)
list_editable = ('section',)
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def changelist_view(self, request):
"Test that extra_context works"
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'[email protected]',
['[email protected]']
).send()
return super(ArticleAdmin, self).delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'[email protected]',
['[email protected]']
).send()
return super(ArticleAdmin, self).save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
def changelist_view(self, request):
"Test that extra_context works"
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super(PersonAdmin, self).get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super(PersonAdmin, self).get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'[email protected]',
['[email protected]']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'[email protected]',
['[email protected]']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action',
status=403)
no_perm.short_description = 'No permission to run'
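# Note added for clarity (not in the original file): the standalone action
# functions above follow Django's admin-action signature
# (modeladmin, request, queryset); in this test module the queryset parameter
# is simply named ``selected``.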
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
save_as = True
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super(ParentAdmin, self).save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo",
'multiline_html_allow_tags', 'readonly_content',
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
def multiline_html_allow_tags(self, instance):
return "Multiline<br>html<br>content<br>with allow tags"
multiline_html_allow_tags.allow_tags = True
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.filter(pk=9999) # Does not exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(PaperAdmin, self).get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(TelegramAdmin, self).get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ["-pk"]
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(
request, queryset, search_term
)
try:
search_term_as_int = int(search_term)
except ValueError:
pass
else:
queryset |= self.model.objects.filter(age=search_term_as_int)
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['name']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super(UnchangeableObjectAdmin, self).get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super(DependentChildAdminForm, self).clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
fields = ['name']
def add_view(self, request, *args, **kwargs):
request.is_add_view = True
return super(GetFormsetsArgumentCheckingAdmin, self).add_view(request, *args, **kwargs)
def change_view(self, request, *args, **kwargs):
request.is_add_view = False
return super(GetFormsetsArgumentCheckingAdmin, self).change_view(request, *args, **kwargs)
def get_formsets_with_inlines(self, request, obj=None):
if request.is_add_view and obj is not None:
raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
if not request.is_add_view and obj is None:
raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
return super(GetFormsetsArgumentCheckingAdmin, self).get_formsets_with_inlines(request, obj)
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as to exercise all four troublesome (w.r.t. escaping
# and calling force_text to avoid problems on Python 2.3) paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
RelatedWithUUIDPKModel,
list_display=['pk', 'parent'],
list_editable=['parent'],
raw_id_fields=['parent'],
)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
|
|
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from word_model import WordModel
from utils.eval_utils import compute_eval_metric
from models.layers import multi_nll_loss
from utils import constants as Constants
from collections import Counter
from models.drqa import DrQA
class Model(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, config, train_set=None):
# Book-keeping.
self.config = config
if self.config['pretrained']:
self.init_saved_network(self.config['pretrained'])
else:
assert train_set is not None
print('Train vocab: {}'.format(len(train_set.vocab)))
vocab = Counter()
for w in train_set.vocab:
if train_set.vocab[w] >= config['min_freq']:
vocab[w] = train_set.vocab[w]
print('Pruned train vocab: {}'.format(len(vocab)))
# Building network.
word_model = WordModel(embed_size=self.config['embed_size'],
filename=self.config['embed_file'],
embed_type=self.config['embed_type'],
top_n=self.config['top_vocab'],
additional_vocab=vocab)
self.config['embed_size'] = word_model.embed_size
self._init_new_network(train_set, word_model)
num_params = 0
for name, p in self.network.named_parameters():
print('{}: {}'.format(name, str(p.size())))
num_params += p.numel()
print('#Parameters = {}\n'.format(num_params))
self._init_optimizer()
def init_saved_network(self, saved_dir):
_ARGUMENTS = ['rnn_padding', 'embed_size', 'hidden_size', 'num_layers', 'rnn_type',
'concat_rnn_layers', 'question_merge', 'use_qemb', 'f_qem', 'f_pos', 'f_ner',
'sum_loss', 'doc_self_attn', 'resize_rnn_input', 'span_dependency',
'fix_embeddings', 'dropout_rnn', 'dropout_emb', 'dropout_ff',
'dropout_rnn_output', 'variational_dropout', 'word_dropout']
# Load all saved fields.
fname = os.path.join(saved_dir, Constants._SAVED_WEIGHTS_FILE)
print('[ Loading saved model %s ]' % fname)
saved_params = torch.load(fname, map_location=lambda storage, loc: storage)
self.word_dict = saved_params['word_dict']
self.feature_dict = saved_params['feature_dict']
self.config['num_features'] = len(self.feature_dict)
self.state_dict = saved_params['state_dict']
for k in _ARGUMENTS:
if saved_params['config'][k] != self.config[k]:
print('Overwrite {}: {} -> {}'.format(k, self.config[k], saved_params['config'][k]))
self.config[k] = saved_params['config'][k]
w_embedding = self._init_embedding(len(self.word_dict) + 1, self.config['embed_size'])
self.network = DrQA(self.config, w_embedding)
# Merge the saved network weights into the freshly constructed network
if self.state_dict:
merged_state_dict = self.network.state_dict()
for k, v in self.state_dict['network'].items():
if k in merged_state_dict:
merged_state_dict[k] = v
self.network.load_state_dict(merged_state_dict)
def _init_new_network(self, train_set, word_model):
self.feature_dict = self._build_feature_dict(train_set)
self.config['num_features'] = len(self.feature_dict)
self.word_dict = word_model.get_vocab()
w_embedding = self._init_embedding(word_model.vocab_size, self.config['embed_size'],
pretrained_vecs=word_model.get_word_vecs())
self.network = DrQA(self.config, w_embedding)
def _init_embedding(self, vocab_size, embed_size, pretrained_vecs=None):
"""Initializes the embeddings
"""
return nn.Embedding(vocab_size, embed_size, padding_idx=0,
_weight=torch.from_numpy(pretrained_vecs).float()
if pretrained_vecs is not None else None)
def _build_feature_dict(self, train_set):
feature_dict = {}
if self.config['f_qem']:
feature_dict['f_qem_cased'] = len(feature_dict)
feature_dict['f_qem_uncased'] = len(feature_dict)
if self.config['f_pos']:
pos_tags = set()
for ex in train_set:
for context in ex['evidence']:
assert 'pos' in context
pos_tags |= set(context['pos'])
print('{} pos tags: {}'.format(len(pos_tags), str(pos_tags)))
for pos in pos_tags:
feature_dict['f_pos={}'.format(pos)] = len(feature_dict)
if self.config['f_ner']:
ner_tags = set()
for ex in train_set:
for context in ex['evidence']:
assert 'ner' in context
ner_tags |= set(context['ner'])
print('{} ner tags: {}'.format(len(ner_tags), str(ner_tags)))
for ner in ner_tags:
feature_dict['f_ner={}'.format(ner)] = len(feature_dict)
print('# features: {}'.format(len(feature_dict)))
return feature_dict
def _init_optimizer(self):
parameters = [p for p in self.network.parameters() if p.requires_grad]
if self.config['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
momentum=self.config['momentum'],
weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters,
weight_decay=self.config['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
def predict(self, ex, update=True, out_predictions=False):
# Train/Eval mode
self.network.train(update)
# Run forward
res = self.network(ex)
score_s, score_e = res['score_s'], res['score_e']
output = {
'f1': 0.0,
'em': 0.0,
'loss': 0.0
}
# Loss cannot be computed at test time, as we may not have targets
if update:
# Compute loss and accuracies
loss = self.compute_span_loss(score_s, score_e, res['targets'])
output['loss'] = loss.item()
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.config['grad_clipping'])
# Update parameters
self.optimizer.step()
if (not update) or self.config['predict_train']:
predictions, spans = self.extract_predictions(ex, score_s, score_e)
output['f1'], output['em'] = self.evaluate_predictions(predictions, ex['answers'])
if out_predictions:
output['predictions'] = predictions
output['spans'] = spans
return output
def compute_span_loss(self, score_s, score_e, targets):
assert targets.size(0) == score_s.size(0) == score_e.size(0)
if self.config['sum_loss']:
loss = multi_nll_loss(score_s, targets[:, :, 0]) + multi_nll_loss(score_e, targets[:, :, 1])
else:
loss = F.nll_loss(score_s, targets[:, 0]) + F.nll_loss(score_e, targets[:, 1])
return loss
def extract_predictions(self, ex, score_s, score_e):
# Transfer to CPU/normal tensors for numpy ops (and convert log probabilities to probabilities)
score_s = score_s.exp().squeeze()
score_e = score_e.exp().squeeze()
predictions = []
spans = []
for i, (_s, _e) in enumerate(zip(score_s, score_e)):
if self.config['predict_raw_text']:
prediction, span = self._scores_to_raw_text(ex['raw_evidence_text'][i],
ex['offsets'][i], _s, _e)
else:
prediction, span = self._scores_to_text(ex['evidence_text'][i], _s, _e)
predictions.append(prediction)
spans.append(span)
return predictions, spans
def _scores_to_text(self, text, score_s, score_e):
max_len = self.config['max_answer_len'] or score_s.size(1)
scores = torch.ger(score_s.squeeze(), score_e.squeeze())
scores.triu_().tril_(max_len - 1)
scores = scores.cpu().detach().numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
return ' '.join(text[s_idx: e_idx + 1]), (int(s_idx), int(e_idx))
def _scores_to_raw_text(self, raw_text, offsets, score_s, score_e):
max_len = self.config['max_answer_len'] or score_s.size(1)
scores = torch.ger(score_s.squeeze(), score_e.squeeze())
scores.triu_().tril_(max_len - 1)
scores = scores.cpu().detach().numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
return raw_text[offsets[s_idx][0]: offsets[e_idx][1]], (offsets[s_idx][0], offsets[e_idx][1])
def evaluate_predictions(self, predictions, answers):
f1_score = compute_eval_metric('f1', predictions, answers)
em_score = compute_eval_metric('em', predictions, answers)
return f1_score, em_score
def save(self, dirname):
params = {
'state_dict': {
'network': self.network.state_dict(),
},
'word_dict': self.word_dict,
'feature_dict': self.feature_dict,
'config': self.config,
'dir': dirname,
}
try:
torch.save(params, os.path.join(dirname, Constants._SAVED_WEIGHTS_FILE))
except BaseException:
print('[ WARN: Saving failed... continuing anyway. ]')
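# Hedged illustration (added, not part of the original model code): a minimal,
# self-contained demo of the span-selection trick used by _scores_to_text() and
# _scores_to_raw_text() above. The outer product of start/end probabilities scores
# every (start, end) pair, triu_() drops spans that end before they start,
# tril_(max_len - 1) caps the span length, and unravel_index recovers the best
# (start, end) indices. The toy probabilities below are made up for the demo.
if __name__ == '__main__':
    demo_start = torch.tensor([0.1, 0.7, 0.2])  # toy start-position probabilities
    demo_end = torch.tensor([0.2, 0.2, 0.6])    # toy end-position probabilities
    demo_scores = torch.ger(demo_start, demo_end)
    demo_scores.triu_().tril_(2 - 1)            # allow spans of at most 2 tokens
    s_idx, e_idx = np.unravel_index(np.argmax(demo_scores.numpy()), demo_scores.shape)
    print('Best span: tokens {} to {} (score {:.2f})'.format(
        s_idx, e_idx, demo_scores[s_idx, e_idx].item()))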
|
|
import pprint
from unittest import TestCase
from dogpile.cache.api import CacheBackend, CachedValue, NO_VALUE
from dogpile.cache import make_region, register_backend, CacheRegion, util
from dogpile.cache.proxy import ProxyBackend
from . import eq_, is_, assert_raises_message, io, configparser
import time, datetime
import itertools
from collections import defaultdict
import operator
from ._fixtures import MockBackend
def key_mangler(key):
return "HI!" + key
class RegionTest(TestCase):
def _region(self, init_args={}, config_args={}, backend="mock"):
reg = CacheRegion(**init_args)
reg.configure(backend, **config_args)
return reg
def test_set_name(self):
my_region = make_region(name='my-name')
eq_(my_region.name, 'my-name')
def test_instance_from_dict(self):
my_conf = {
'cache.example.backend': 'mock',
'cache.example.expiration_time': 600,
'cache.example.arguments.url': '127.0.0.1'
}
my_region = make_region()
my_region.configure_from_config(my_conf, 'cache.example.')
eq_(my_region.expiration_time, 600)
assert isinstance(my_region.backend, MockBackend) is True
eq_(my_region.backend.arguments, {'url': '127.0.0.1'})
def test_instance_from_config_string(self):
my_conf = \
'[xyz]\n'\
'cache.example.backend=mock\n'\
'cache.example.expiration_time=600\n'\
'cache.example.arguments.url=127.0.0.1\n'\
'cache.example.arguments.dogpile_lockfile=false\n'\
'cache.example.arguments.xyz=None\n'
my_region = make_region()
config = configparser.ConfigParser()
config.readfp(io.StringIO(my_conf))
my_region.configure_from_config(dict(config.items('xyz')), 'cache.example.')
eq_(my_region.expiration_time, 600)
assert isinstance(my_region.backend, MockBackend) is True
eq_(my_region.backend.arguments, {'url': '127.0.0.1',
'dogpile_lockfile':False, 'xyz':None})
def test_datetime_expiration_time(self):
my_region = make_region()
my_region.configure(
backend='mock',
expiration_time=datetime.timedelta(days=1, hours=8)
)
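# timedelta(days=1, hours=8) is 32 hours, i.e. 32 * 60 * 60 == 115200 seconds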
eq_(my_region.expiration_time, 32*60*60)
def test_reject_invalid_expiration_time(self):
my_region = make_region()
assert_raises_message(
Exception,
"expiration_time is not a number or timedelta.",
my_region.configure, 'mock', 'one hour'
)
def test_key_mangler_argument(self):
reg = self._region(init_args={"key_mangler":key_mangler})
assert reg.key_mangler is key_mangler
reg = self._region()
assert reg.key_mangler is None
MockBackend.key_mangler = km = lambda self, k: "foo"
reg = self._region()
eq_(reg.key_mangler("bar"), "foo")
MockBackend.key_mangler = None
def test_key_mangler_impl(self):
reg = self._region(init_args={"key_mangler":key_mangler})
reg.set("some key", "some value")
eq_(list(reg.backend._cache), ["HI!some key"])
eq_(reg.get("some key"), "some value")
eq_(reg.get_or_create("some key", lambda: "some new value"), "some value")
reg.delete("some key")
eq_(reg.get("some key"), NO_VALUE)
def test_dupe_config(self):
reg = CacheRegion()
reg.configure("mock")
assert_raises_message(
Exception,
"This region is already configured",
reg.configure, "mock"
)
def test_no_config(self):
reg = CacheRegion()
assert_raises_message(
Exception,
"No backend is configured on this region.",
getattr, reg, "backend"
)
def test_set_get_value(self):
reg = self._region()
reg.set("some key", "some value")
eq_(reg.get("some key"), "some value")
def test_set_get_nothing(self):
reg = self._region()
eq_(reg.get("some key"), NO_VALUE)
eq_(reg.get("some key", expiration_time=10), NO_VALUE)
reg.invalidate()
eq_(reg.get("some key"), NO_VALUE)
def test_creator(self):
reg = self._region()
def creator():
return "some value"
eq_(reg.get_or_create("some key", creator), "some value")
def test_multi_creator(self):
reg = self._region()
def creator(*keys):
return ["some value %s" % key for key in keys]
eq_(reg.get_or_create_multi(["k3", "k2", "k5"], creator),
['some value k3', 'some value k2', 'some value k5'])
def test_remove(self):
reg = self._region()
reg.set("some key", "some value")
reg.delete("some key")
reg.delete("some key")
eq_(reg.get("some key"), NO_VALUE)
def test_expire(self):
reg = self._region(config_args={"expiration_time":1})
counter = itertools.count(1)
def creator():
return "some value %d" % next(counter)
eq_(reg.get_or_create("some key", creator), "some value 1")
time.sleep(2)
is_(reg.get("some key"), NO_VALUE)
eq_(reg.get("some key", ignore_expiration=True), "some value 1")
eq_(reg.get_or_create("some key", creator), "some value 2")
eq_(reg.get("some key"), "some value 2")
def test_expire_multi(self):
reg = self._region(config_args={"expiration_time":1})
counter = itertools.count(1)
def creator(*keys):
return ["some value %s %d" % (key, next(counter)) for key in keys]
eq_(reg.get_or_create_multi(["k3", "k2", "k5"], creator),
['some value k3 2', 'some value k2 1', 'some value k5 3'])
time.sleep(2)
is_(reg.get("k2"), NO_VALUE)
eq_(reg.get("k2", ignore_expiration=True), "some value k2 1")
eq_(reg.get_or_create_multi(["k3", "k2"], creator),
['some value k3 5', 'some value k2 4'])
eq_(reg.get("k2"), "some value k2 4")
def test_expire_on_get(self):
reg = self._region(config_args={"expiration_time":.5})
reg.set("some key", "some value")
eq_(reg.get("some key"), "some value")
time.sleep(1)
is_(reg.get("some key"), NO_VALUE)
def test_ignore_expire_on_get(self):
reg = self._region(config_args={"expiration_time":.5})
reg.set("some key", "some value")
eq_(reg.get("some key"), "some value")
time.sleep(1)
eq_(reg.get("some key", ignore_expiration=True), "some value")
def test_override_expire_on_get(self):
reg = self._region(config_args={"expiration_time":.5})
reg.set("some key", "some value")
eq_(reg.get("some key"), "some value")
time.sleep(1)
eq_(reg.get("some key", expiration_time=5), "some value")
is_(reg.get("some key"), NO_VALUE)
def test_expire_override(self):
reg = self._region(config_args={"expiration_time":5})
counter = itertools.count(1)
def creator():
return "some value %d" % next(counter)
eq_(reg.get_or_create("some key", creator, expiration_time=1),
"some value 1")
time.sleep(2)
eq_(reg.get("some key"), "some value 1")
eq_(reg.get_or_create("some key", creator, expiration_time=1),
"some value 2")
eq_(reg.get("some key"), "some value 2")
def test_invalidate_get(self):
reg = self._region()
reg.set("some key", "some value")
reg.invalidate()
is_(reg.get("some key"), NO_VALUE)
def test_invalidate_get_or_create(self):
reg = self._region()
counter = itertools.count(1)
def creator():
return "some value %d" % next(counter)
eq_(reg.get_or_create("some key", creator),
"some value 1")
reg.invalidate()
eq_(reg.get_or_create("some key", creator),
"some value 2")
def test_should_cache_fn(self):
reg = self._region()
values = [1, 2, 3]
def creator():
return values.pop(0)
should_cache_fn = lambda val: val in (1, 3)
ret = reg.get_or_create(
"some key", creator,
should_cache_fn=should_cache_fn)
eq_(ret, 1)
eq_(reg.backend._cache['some key'][0], 1)
reg.invalidate()
ret = reg.get_or_create(
"some key", creator,
should_cache_fn=should_cache_fn)
eq_(ret, 2)
eq_(reg.backend._cache['some key'][0], 1)
reg.invalidate()
ret = reg.get_or_create(
"some key", creator,
should_cache_fn=should_cache_fn)
eq_(ret, 3)
eq_(reg.backend._cache['some key'][0], 3)
def test_should_cache_fn_multi(self):
reg = self._region()
values = [1, 2, 3]
def creator(*keys):
v = values.pop(0)
return [v for k in keys]
should_cache_fn = lambda val: val in (1, 3)
ret = reg.get_or_create_multi(
[1, 2], creator,
should_cache_fn=should_cache_fn)
eq_(ret, [1, 1])
eq_(reg.backend._cache[1][0], 1)
reg.invalidate()
ret = reg.get_or_create_multi(
[1, 2], creator,
should_cache_fn=should_cache_fn)
eq_(ret, [2, 2])
eq_(reg.backend._cache[1][0], 1)
reg.invalidate()
ret = reg.get_or_create_multi(
[1, 2], creator,
should_cache_fn=should_cache_fn)
eq_(ret, [3, 3])
eq_(reg.backend._cache[1][0], 3)
def test_should_set_multiple_values(self):
reg = self._region()
values = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
reg.set_multi(values)
eq_(values['key1'], reg.get('key1'))
eq_(values['key2'], reg.get('key2'))
eq_(values['key3'], reg.get('key3'))
def test_should_get_multiple_values(self):
reg = self._region()
values = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
reg.set_multi(values)
reg_values = reg.get_multi(['key1', 'key2', 'key3'])
eq_(
reg_values,
["value1", "value2", "value3"]
)
def test_should_delete_multiple_values(self):
reg = self._region()
values = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
reg.set_multi(values)
reg.delete_multi(['key2', 'key1000'])
eq_(values['key1'], reg.get('key1'))
eq_(NO_VALUE, reg.get('key2'))
eq_(values['key3'], reg.get('key3'))
class ProxyRegionTest(RegionTest):
''' This is exactly the same as the region test above, but it goes through
a dummy proxy. The purpose of this is to make sure the tests still run
successfully even when there is a proxy '''
class MockProxy(ProxyBackend):
@property
def _cache(self):
return self.proxied._cache
def _region(self, init_args={}, config_args={}, backend="mock"):
reg = CacheRegion(**init_args)
config_args['wrap'] = [ProxyRegionTest.MockProxy]
reg.configure(backend, **config_args)
return reg
class ProxyBackendTest(TestCase):
class GetCounterProxy(ProxyBackend):
counter = 0
def get(self, key):
ProxyBackendTest.GetCounterProxy.counter += 1
return self.proxied.get(key)
class SetCounterProxy(ProxyBackend):
counter = 0
def set(self, key, value):
ProxyBackendTest.SetCounterProxy.counter += 1
return self.proxied.set(key, value)
class UsedKeysProxy(ProxyBackend):
        ''' Keep a counter of how often we set a particular key'''
def __init__(self, *args, **kwargs):
super(ProxyBackendTest.UsedKeysProxy, self).__init__(
*args, **kwargs)
self._key_count = defaultdict(lambda: 0)
def setcount(self, key):
return self._key_count[key]
def set(self, key, value):
self._key_count[key] += 1
self.proxied.set(key, value)
class NeverSetProxy(ProxyBackend):
''' A totally contrived example of a Proxy that we pass arguments to.
Never set a key that matches never_set '''
def __init__(self, never_set, *args, **kwargs):
super(ProxyBackendTest.NeverSetProxy, self).__init__(*args, **kwargs)
self.never_set = never_set
self._key_count = defaultdict(lambda: 0)
def set(self, key, value):
if key != self.never_set:
self.proxied.set(key, value)
def _region(self, init_args={}, config_args={}, backend="mock"):
reg = CacheRegion(**init_args)
reg.configure(backend, **config_args)
return reg
def test_counter_proxies(self):
# count up the gets and sets and make sure they are passed through
# to the backend properly. Test that methods not overridden
# continue to work
reg = self._region(config_args={"wrap": [
ProxyBackendTest.GetCounterProxy,
ProxyBackendTest.SetCounterProxy]})
ProxyBackendTest.GetCounterProxy.counter = 0
ProxyBackendTest.SetCounterProxy.counter = 0
# set a range of values in the cache
for i in range(10):
reg.set(i, i)
eq_(ProxyBackendTest.GetCounterProxy.counter, 0)
eq_(ProxyBackendTest.SetCounterProxy.counter, 10)
# check that the range of values is still there
for i in range(10):
v = reg.get(i)
eq_(v, i)
eq_(ProxyBackendTest.GetCounterProxy.counter, 10)
eq_(ProxyBackendTest.SetCounterProxy.counter, 10)
# make sure the delete function(not overridden) still
# executes properly
for i in range(10):
reg.delete(i)
v = reg.get(i)
is_(v, NO_VALUE)
def test_instance_proxies(self):
# Test that we can create an instance of a new proxy and
# pass that to make_region instead of the class. The two instances
# should not interfere with each other
proxy_num = ProxyBackendTest.UsedKeysProxy(5)
proxy_abc = ProxyBackendTest.UsedKeysProxy(5)
reg_num = self._region(config_args={"wrap": [proxy_num]})
reg_abc = self._region(config_args={"wrap": [proxy_abc]})
for i in range(10):
reg_num.set(i, True)
reg_abc.set(chr(ord('a') + i), True)
for i in range(5):
reg_num.set(i, True)
reg_abc.set(chr(ord('a') + i), True)
# make sure proxy_num has the right counts per key
eq_(proxy_num.setcount(1), 2)
eq_(proxy_num.setcount(9), 1)
eq_(proxy_num.setcount('a'), 0)
# make sure proxy_abc has the right counts per key
eq_(proxy_abc.setcount('a'), 2)
eq_(proxy_abc.setcount('g'), 1)
eq_(proxy_abc.setcount('9'), 0)
def test_argument_proxies(self):
# Test that we can pass an argument to Proxy on creation
proxy = ProxyBackendTest.NeverSetProxy(5)
reg = self._region(config_args={"wrap": [proxy]})
for i in range(10):
reg.set(i, True)
# make sure 1 was set, but 5 was not
eq_(reg.get(5), NO_VALUE)
eq_(reg.get(1), True)
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging.config
import os
import shutil
import tempfile
import unittest
import sys
from airflow import models, configuration, settings
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.settings import Session
from airflow.utils.timezone import datetime
from airflow.www import app as application
from airflow import configuration as conf
class TestChartModelView(unittest.TestCase):
CREATE_ENDPOINT = '/admin/chart/new/?url=/admin/chart/'
@classmethod
def setUpClass(cls):
super(TestChartModelView, cls).setUpClass()
session = Session()
session.query(models.Chart).delete()
session.query(models.User).delete()
session.commit()
user = models.User(username='airflow')
session.add(user)
session.commit()
session.close()
def setUp(self):
super(TestChartModelView, self).setUp()
configuration.load_test_config()
app = application.create_app(testing=True)
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.session = Session()
self.chart = {
'label': 'chart',
'owner': 'airflow',
'conn_id': 'airflow_ci',
}
def tearDown(self):
self.session.query(models.Chart).delete()
self.session.commit()
self.session.close()
super(TestChartModelView, self).tearDown()
@classmethod
def tearDownClass(cls):
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
super(TestChartModelView, cls).tearDownClass()
def test_create_chart(self):
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.chart,
follow_redirects=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.session.query(models.Chart).count(), 1)
def test_get_chart(self):
response = self.app.get(
'/admin/chart?sort=3',
follow_redirects=True,
)
print(response.data)
self.assertEqual(response.status_code, 200)
self.assertIn('Sort by Owner', response.data.decode('utf-8'))
class TestVariableView(unittest.TestCase):
CREATE_ENDPOINT = '/admin/variable/new/?url=/admin/variable/'
@classmethod
def setUpClass(cls):
super(TestVariableView, cls).setUpClass()
session = Session()
session.query(models.Variable).delete()
session.commit()
session.close()
def setUp(self):
super(TestVariableView, self).setUp()
configuration.load_test_config()
app = application.create_app(testing=True)
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.session = Session()
self.variable = {
'key': 'test_key',
'val': 'text_val',
'is_encrypted': True
}
def tearDown(self):
self.session.query(models.Variable).delete()
self.session.commit()
self.session.close()
super(TestVariableView, self).tearDown()
def test_can_handle_error_on_decrypt(self):
# create valid variable
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.variable,
follow_redirects=True,
)
self.assertEqual(response.status_code, 200)
        # overwrite the variable with a value that is not actually encrypted,
        # while it is still flagged as encrypted
Var = models.Variable
(self.session.query(Var)
.filter(Var.key == self.variable['key'])
.update({
'val': 'failed_value_not_encrypted'
}, synchronize_session=False))
self.session.commit()
# retrieve Variables page, should not fail and contain the Invalid
# label for the variable
response = self.app.get('/admin/variable', follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.session.query(models.Variable).count(), 1)
self.assertIn('<span class="label label-danger">Invalid</span>',
response.data.decode('utf-8'))
def test_xss_prevention(self):
xss = "/admin/airflow/variables/asdf<img%20src=''%20onerror='alert(1);'>"
response = self.app.get(
xss,
follow_redirects=True,
)
self.assertEqual(response.status_code, 404)
self.assertNotIn("<img src='' onerror='alert(1);'>",
response.data.decode("utf-8"))
class TestKnownEventView(unittest.TestCase):
CREATE_ENDPOINT = '/admin/knownevent/new/?url=/admin/knownevent/'
@classmethod
def setUpClass(cls):
super(TestKnownEventView, cls).setUpClass()
session = Session()
session.query(models.KnownEvent).delete()
session.query(models.User).delete()
session.commit()
user = models.User(username='airflow')
session.add(user)
session.commit()
cls.user_id = user.id
session.close()
def setUp(self):
super(TestKnownEventView, self).setUp()
configuration.load_test_config()
app = application.create_app(testing=True)
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.session = Session()
self.known_event = {
'label': 'event-label',
'event_type': '1',
'start_date': '2017-06-05 12:00:00',
'end_date': '2017-06-05 13:00:00',
'reported_by': self.user_id,
'description': '',
}
def tearDown(self):
self.session.query(models.KnownEvent).delete()
self.session.commit()
self.session.close()
super(TestKnownEventView, self).tearDown()
@classmethod
def tearDownClass(cls):
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
super(TestKnownEventView, cls).tearDownClass()
def test_create_known_event(self):
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.known_event,
follow_redirects=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.session.query(models.KnownEvent).count(), 1)
def test_create_known_event_with_end_data_earlier_than_start_date(self):
self.known_event['end_date'] = '2017-06-05 11:00:00'
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.known_event,
follow_redirects=True,
)
self.assertIn(
'Field must be greater than or equal to Start Date.',
response.data.decode('utf-8'),
)
self.assertEqual(self.session.query(models.KnownEvent).count(), 0)
class TestPoolModelView(unittest.TestCase):
CREATE_ENDPOINT = '/admin/pool/new/?url=/admin/pool/'
@classmethod
def setUpClass(cls):
super(TestPoolModelView, cls).setUpClass()
session = Session()
session.query(models.Pool).delete()
session.commit()
session.close()
def setUp(self):
super(TestPoolModelView, self).setUp()
configuration.load_test_config()
app = application.create_app(testing=True)
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.session = Session()
self.pool = {
'pool': 'test-pool',
'slots': 777,
'description': 'test-pool-description',
}
def tearDown(self):
self.session.query(models.Pool).delete()
self.session.commit()
self.session.close()
super(TestPoolModelView, self).tearDown()
def test_create_pool(self):
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.pool,
follow_redirects=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_create_pool_with_same_name(self):
# create test pool
self.app.post(
self.CREATE_ENDPOINT,
data=self.pool,
follow_redirects=True,
)
# create pool with the same name
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.pool,
follow_redirects=True,
)
self.assertIn('Already exists.', response.data.decode('utf-8'))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_create_pool_with_empty_name(self):
self.pool['pool'] = ''
response = self.app.post(
self.CREATE_ENDPOINT,
data=self.pool,
follow_redirects=True,
)
self.assertIn('This field is required.', response.data.decode('utf-8'))
self.assertEqual(self.session.query(models.Pool).count(), 0)
class TestLogView(unittest.TestCase):
DAG_ID = 'dag_for_testing_log_view'
TASK_ID = 'task_for_testing_log_view'
DEFAULT_DATE = datetime(2017, 9, 1)
ENDPOINT = '/admin/airflow/log?dag_id={dag_id}&task_id={task_id}&execution_date={execution_date}'.format(
dag_id=DAG_ID,
task_id=TASK_ID,
execution_date=DEFAULT_DATE,
)
@classmethod
def setUpClass(cls):
super(TestLogView, cls).setUpClass()
session = Session()
        # pass the conditions as separate filter() arguments; chaining them
        # with the Python ``and`` operator would not build a SQL AND
        session.query(TaskInstance).filter(
            TaskInstance.dag_id == cls.DAG_ID,
            TaskInstance.task_id == cls.TASK_ID,
            TaskInstance.execution_date == cls.DEFAULT_DATE).delete()
session.commit()
session.close()
def setUp(self):
super(TestLogView, self).setUp()
# Create a custom logging configuration
configuration.load_test_config()
logging_config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
current_dir = os.path.dirname(os.path.abspath(__file__))
logging_config['handlers']['file.task']['base_log_folder'] = os.path.normpath(
os.path.join(current_dir, 'test_logs'))
logging_config['handlers']['file.task']['filename_template'] = \
'{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts | replace(":", ".") }}/{{ try_number }}.log'
# Write the custom logging configuration to a file
self.settings_folder = tempfile.mkdtemp()
settings_file = os.path.join(self.settings_folder, "airflow_local_settings.py")
new_logging_file = "LOGGING_CONFIG = {}".format(logging_config)
with open(settings_file, 'w') as handle:
handle.writelines(new_logging_file)
sys.path.append(self.settings_folder)
conf.set('core', 'logging_config_class', 'airflow_local_settings.LOGGING_CONFIG')
app = application.create_app(testing=True)
self.app = app.test_client()
self.session = Session()
from airflow.www.views import dagbag
dag = DAG(self.DAG_ID, start_date=self.DEFAULT_DATE)
task = DummyOperator(task_id=self.TASK_ID, dag=dag)
dagbag.bag_dag(dag, parent_dag=dag, root_dag=dag)
ti = TaskInstance(task=task, execution_date=self.DEFAULT_DATE)
ti.try_number = 1
self.session.merge(ti)
self.session.commit()
def tearDown(self):
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
dagbag = models.DagBag(settings.DAGS_FOLDER)
        self.session.query(TaskInstance).filter(
            TaskInstance.dag_id == self.DAG_ID,
            TaskInstance.task_id == self.TASK_ID,
            TaskInstance.execution_date == self.DEFAULT_DATE).delete()
self.session.commit()
self.session.close()
sys.path.remove(self.settings_folder)
shutil.rmtree(self.settings_folder)
conf.set('core', 'logging_config_class', '')
super(TestLogView, self).tearDown()
def test_get_file_task_log(self):
response = self.app.get(
TestLogView.ENDPOINT,
follow_redirects=True,
)
self.assertEqual(response.status_code, 200)
self.assertIn('Log file does not exist',
response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
|
|
from ScrollText import ScrollText
from Tkinter import *
from PrologFrame import PrologFrame
from Prolog import PrologException
from FastIndex import FastIndex
import ErrDialog
import re
import sys
import AnnotationColours
#import Colour
import parser
def startCompare(x, y):
(xLine, xCol) = x[1].split('.')
(yLine, yCol) = y[1].split('.')
if xLine == yLine:
return int(xCol) - int(yCol)
else:
return int(xLine) - int(yLine)
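# Worked example (illustrative values): startCompare is a cmp-style comparator
# over pairs whose second element is a Tk text index of the form "line.column".
#   startCompare(('t', '12.4'), ('t', '12.10')) == 4 - 10 == -6, so "12.4" sorts first
#   startCompare(('t', '3.0'),  ('t', '12.0'))  == 3 - 12 == -9, so earlier lines sort first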
class SourceFrame(PrologFrame):
def __init__(self, master=None, text="", readonly=True, app=None):
PrologFrame.__init__(self, master=master, text=text,
readonly=readonly, app=app)
self.app = app
app.pref.register_listener('annotation highlights',
self.annotation_colours_changed)
self.colours_changed(app.colours.ann_colour)
app.pref.register_listener('filter highlights', self.colours_changed)
self.colours_changed(app.colours.fil_colour)
for fTag in ['dynamic', 'static']:
self.text.tag_bind(fTag, "<Motion>",
lambda _, fTag=fTag: self.mouse_over_filter_tag(fTag))
self.text.tag_bind(fTag, "<Leave>",
lambda _, fTag=fTag: self.mouse_leave_filter_tag(fTag))
self.text.tag_configure("unsafe", background="red")
self.text.tag_configure("hide_nf", background="yellow")
self.text.tag_configure("errorTag", background="red")
self.last_annotation = ""
self.annotation_tags = AnnotationColours.annotations
self.annotation_colour = self.app.colours.ann_colour
self.menu = {}
self.annotation_menu = {"unfold":"call", "call":"call", "memo":"call",
"rescall":"call", "ucall":"call",
"mcall":"call", "unknown":"call",
"semicall":"call", "online":"call",
"if":"if", "resif":"if", "semif":"if",
"logif":"logif", "reslogif":"logif",
"findall":"findall", "resfindall":"findall",
"resnot":"not", "not":"not",
";":"or", "resdisj":"or",
"pp_cll":"pp_cll",
"pp_mnf":"pp_mnf",
"time_out":"time_out",
"mnf":"mnf",
"when":"when", "reswhen":"when",
"semiwhen":"when",
"gxspec":"module", "gx":"module",
#"spec":"module",
}
self.text.bind("<Alt-n>", self.keyb_next_ann)
self.text.bind("<Alt-p>", self.keyb_prev_ann)
self.commands = []
if (sys.platform == "win32"):
Menu_Key = "<Button-3>"
else:
Menu_Key = "<Button-1>"
self.annotation_colours_changed(self.annotation_colour)
for tag in self.annotation_tags:
self.text.tag_bind(tag, "<Motion>", self.mouse_over_tag)
self.text.tag_bind(tag, "<Leave>", self.mouse_leave_tag)
self.text.tag_bind(tag, Menu_Key, self.mouse_click_tag)
#self.text.tag_bind("hide_nf", Menu_Key, self.hidenf_click)
self.text.tag_configure("unknown", background="black",
foreground="white")
#make call and rescall stand out as requested by mv
self.text.tag_configure("call", underline=True)
self.text.tag_configure("rescall", underline=True)
self.text.tag_configure("unsafe", background="red", foreground="white")
def annotation_colours_changed(self, map):
# reset menus
for m in ['call', 'if', 'logif', 'findall', 'not', 'or', 'pp_cll',
'pp_mnf', 'time_out', 'mnf', 'when', 'module']:
self.menu[m] = Menu(self.app, tearoff=0)
self.commands = []
menus = {}
self.hide_nf_menu_pos = {}
for tag in self.annotation_tags:
if self.hide_nf_menu_pos.get(self.annotation_menu[tag]) == None:
                # start at 2: the first command sits at item 0 and a separator
                # will be inserted before the "Remove hide_nf" entry
self.hide_nf_menu_pos[self.annotation_menu[tag]] = 2
else:
self.hide_nf_menu_pos[self.annotation_menu[tag]] += 1
self.commands.append(lambda tag=tag: self.change_ann(tag))
menus[self.annotation_menu[tag]] = 1
self.menu[self.annotation_menu[tag]].add_command(label=tag,
foreground=map[tag],
command=self.commands[-1],
underline=0)
        # STEVE: should perhaps be restricted to only a few menus...
for m in menus:
menu = self.menu[m]
menu.add_separator()
menu.add_command(label="Remove hide_nf",
command=self.remove_hidenf)
self.colours_changed(map)
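    # Example of the bookkeeping above (hypothetical group): for a menu group
    # containing three tags, the three commands are added first, then a
    # separator, then the "Remove hide_nf" entry, and hide_nf_menu_pos for
    # that group ends at 2 + (3 - 1) = 4, which highlight_tag() later uses to
    # enable/disable the "Remove hide_nf" entry via menu.entryconfig().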
def new_hidenf(self):
sel = self.text.tag_ranges("sel")
if sel != ():
(start, stop) = sel
#h2 = self.get_tag_position("head", stop)
h2 = self.text.tag_prevrange("head", stop)
if h2 == ():
# attempting to annotate before any head tags, which means
# nothing of use is being annotated!
print "annotation is pointless as before all heads"
return
elif self.text.compare(h2[1], ">", start):
print "annotation encompasses head"
return
h1 = self.get_prev_ann(stop, self.annotation_tags)
hidenf_stop = h1[2]
(_, s1, e1) = self.get_prev_ann(start, self.annotation_tags)
if self.text.compare(start, ">=", s1) and \
self.text.compare(start, "<", e1):
hidenf_start = s1
else:
(_, hidenf_start, _) = self.get_next_ann(start, self.annotation_tags)
if self.text.compare(hidenf_start, ">", hidenf_stop) or \
hidenf_start == 0.0:
print "no clauses selected"
return
#print hidenf_start, hidenf_stop
self.text.tag_add("hide_nf", hidenf_start, hidenf_stop)
self.text.tag_remove("sel", start, stop)
self.text.ann_changed = True
def remove_hidenf(self):
# should never be called if there is no hide_nf tag or error will occur
(_, (start, _)) = self.selectedAnn
(start, stop) = self.get_tag_position("hide_nf", start)
self.text.tag_remove("hide_nf", start, stop)
def change_ann(self, new_ann, selected=None):
if selected is None:
(ann, (start, stop)) = self.selectedAnn
else:
(ann, (start, stop)) = selected
if ann != new_ann:
self.text.tag_remove(ann, start, stop)
self.text.tag_add(new_ann, start, stop)
self.ann_changed = True
def keyb_change_ann(self,next=True):
(ann,(start,end)) = self.get_annotation_at(index="insert")
if ann in self.annotation_tags:
group = self.annotation_menu[ann]
poss = []
for i in self.annotation_menu:
if self.annotation_menu[i] == group:
poss += [i]
if next:
next = self.get_next_from_list(ann,poss)
else:
next = self.get_prev_from_list(ann,poss)
self.change_ann(next,(ann,(start,end)))
self.app.status.message('help', next)
def keyb_next_ann(self, unused):
self.keyb_change_ann(next=True)
def keyb_prev_ann(self, unused):
self.keyb_change_ann(next=False)
def get_next_from_list(self, item, list):
for i in xrange(0, len(list)):
if list[i] == item:
if i < len(list) - 1:
return list[i + 1]
else:
return list[0]
#if not found just give first
return list[0]
def get_prev_from_list(self, item, list):
for i in xrange(0, len(list)):
if list[i] == item:
if i == 0:
return list[-1]
else:
return list[i - 1]
#if not found just give first
return list[0]
def mouse_over_tag(self, event):
self.highlight_tag(event, False)
def mouse_leave_tag(self, unused):
self.text.tag_remove("hilite", "1.0", "end")
self.last_annotation = ""
self.app.status.clear()
def mouse_click_tag(self, event):
self.highlight_tag(event, True)
def hidenf_click(self, event):
        # not used.
        # If hide_nf should be removable by clicking on code annotated only
        # with hide_nf (eg with no unfold), add the code here.
print "hidenf_click"
(ann, (start, stop)) = self.get_annotation_at(event.x, event.y)
print ann, start, stop
return "break"
def highlight_tag(self, event, show_menu):
(ann, (start, stop)) = self.get_annotation_at(event.x, event.y)
if self.last_annotation != "":
(s,e) = self.last_annotation
self.text.tag_remove("hilite", s, e)
self.text.tag_add("hilite", start, stop)
self.last_annotation = (start, stop)
if self.ErrMsg != None and start in self.ErrMsg:
self.app.status.set(self.ErrMsg[start] + ":::" + ann + " - " + str(start) + " -> " + str(stop))
else:
self.app.status.set(ann + " - " + str(start) + " -> " + str(stop))
if show_menu:
self.selectedAnn = (ann, (start, stop))
menu = self.menu[self.annotation_menu[ann]]
menu.tk_popup(event.x_root, event.y_root)
hide_nf = self.get_tag_position("hide_nf", start)
state = NORMAL
if hide_nf == ():
state = DISABLED
else:
(hstart, hend) = hide_nf
if self.text.compare(hend, "<", start):
state = DISABLED
menu.entryconfig(self.hide_nf_menu_pos[self.annotation_menu[ann]],
state=state)
def get_annotation_at(self, x=None, y=None,index=None):
if index is None:
index = self.text.index("@" + str(x) + "," + str(y))
curann = self.text.tag_names(index)
for ann in curann:
if self.annotation_tags.count(ann) > 0:
return (ann, self.get_tag_position(ann, index))
return ("", (0.0, 0.0))
def get_tag_position(self, ann, index):
newindex = self.text.index(index + " + 1 char")
return self.text.tag_prevrange(ann, newindex)
def mouse_over_filter_tag(self, filterTag):
self.app.status.set("Argument annotated as %s" % filterTag)
def mouse_leave_filter_tag(self, unused):
self.app.status.clear()
def getHtmlTag(self, text, ann):
if text.tag_cget(ann,"underline") != "":
under = "text-decoration:underline;"
else:
under = ""
openTag = '<p class="code" style="color:%s;%s">'%(self.convertColour(text.tag_cget(ann,"foreground")),under)
closeTag = '</p>'
if ann in self.annotation_tags:
openTag+= '<a onmouseover="window.status=\'%s\';" onmouseout="window.status=\'\';" >' % ann
closeTag = '</a>' +closeTag
return (openTag,closeTag)
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these, the connection to the RPC server is attempted without credentials
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Facoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Facoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# -*- coding: utf-8 -*-
# pylint: disable=fixme
"""Sonos Music Services interface.
This module provides the MusicService class and related functionality.
"""
from __future__ import absolute_import, unicode_literals
import logging
import requests
from xmltodict import parse
from .. import discovery
from ..compat import parse_qs, quote_url, urlparse
from ..exceptions import MusicServiceException
from ..music_services.accounts import Account
from .data_structures import parse_response, MusicServiceItem
from ..soap import SoapFault, SoapMessage
from ..xml import XML
log = logging.getLogger(__name__) # pylint: disable=C0103
# pylint: disable=too-many-instance-attributes, protected-access
class MusicServiceSoapClient(object):
"""A SOAP client for accessing Music Services.
This class handles all the necessary authentication for accessing
third party music services. You are unlikely to need to use it
yourself.
"""
def __init__(self, endpoint, timeout, music_service):
"""
Args:
endpoint (str): The SOAP endpoint. A url.
timeout (int): Timeout the connection after this number of
seconds.
music_service (MusicService): The MusicService object to which
this client belongs.
"""
self.endpoint = endpoint
self.timeout = timeout
self.music_service = music_service
self.namespace = 'http://www.sonos.com/Services/1.1'
self._cached_soap_header = None
# Spotify uses gzip. Others may do so as well. Unzipping is handled
# for us by the requests library. Google Play seems to be very fussy
# about the user-agent string. The firmware release number (after
# 'Sonos/') has to be '26' for some reason to get Google Play to
# work. Although we have access to a real SONOS user agent
# string (one is returned, eg, in the SERVER header of discovery
# packets and looks like this: Linux UPnP/1.0 Sonos/29.5-91030 (
# ZPS3)) it is a bit too much trouble here to access it, and Google
# Play does not like it anyway.
self.http_headers = {
'Accept-Encoding': 'gzip, deflate',
'User-Agent': 'Linux UPnP/1.0 Sonos/26.99-12345'
}
self._device = discovery.any_soco()
self._device_id = self._device.systemProperties.GetString(
[('VariableName', 'R_TrialZPSerial')])['StringValue']
def get_soap_header(self):
"""Generate the SOAP authentication header for the related service.
This header contains all the necessary authentication details.
Returns:
str: A string representation of the XML content of the SOAP
header.
"""
# According to the SONOS SMAPI, this header must be sent with all
# SOAP requests. Building this is an expensive operation (though
        # occasionally necessary), so if we have a cached value, return it
if self._cached_soap_header is not None:
return self._cached_soap_header
music_service = self.music_service
credentials_header = XML.Element(
"credentials", {'xmlns': "http://www.sonos.com/Services/1.1"})
device_id = XML.SubElement(credentials_header, 'deviceId')
device_id.text = self._device_id
device_provider = XML.SubElement(credentials_header, 'deviceProvider')
device_provider.text = 'Sonos'
if music_service.account.oa_device_id:
# OAuth account credentials are present. We must use them to
# authenticate.
login_token = XML.Element('loginToken')
token = XML.SubElement(login_token, 'token')
token.text = music_service.account.oa_device_id
key = XML.SubElement(login_token, 'key')
key.text = music_service.account.key
household_id = XML.SubElement(login_token, 'householdId')
household_id.text = self._device.household_id
credentials_header.append(login_token)
# otherwise, perhaps use DeviceLink or UserId auth
elif music_service.auth_type in ['DeviceLink', 'UserId']:
# We need a session ID from Sonos
session_id = self._device.musicServices.GetSessionId([
('ServiceId', music_service.service_id),
('Username', music_service.account.username)
])['SessionId']
session_elt = XML.Element('sessionId')
session_elt.text = session_id
credentials_header.append(session_elt)
# Anonymous auth. No need for anything further.
self._cached_soap_header = XML.tostring(
credentials_header,
encoding='utf-8').decode(encoding='utf-8')
return self._cached_soap_header
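    # For reference, the header built above for an OAuth-style account has
    # roughly this shape (values are placeholders):
    #
    #   <credentials xmlns="http://www.sonos.com/Services/1.1">
    #     <deviceId>...</deviceId>
    #     <deviceProvider>Sonos</deviceProvider>
    #     <loginToken>
    #       <token>...</token>
    #       <key>...</key>
    #       <householdId>...</householdId>
    #     </loginToken>
    #   </credentials>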
def call(self, method, args=None):
"""Call a method on the server.
Args:
method (str): The name of the method to call.
args (List[Tuple[str, str]] or None): A list of (parameter,
value) pairs representing the parameters of the method.
Defaults to `None`.
Returns:
~collections.OrderedDict: An OrderedDict representing the response.
Raises:
`MusicServiceException`: containing details of the error
returned by the music service.
"""
message = SoapMessage(
endpoint=self.endpoint,
method=method,
parameters=[] if args is None else args,
http_headers=self.http_headers,
soap_action="http://www.sonos.com/Services/1"
".1#{0}".format(method),
soap_header=self.get_soap_header(),
namespace=self.namespace,
timeout=self.timeout)
try:
result_elt = message.call()
except SoapFault as exc:
if 'Client.TokenRefreshRequired' in exc.faultcode:
log.debug('Token refresh required. Trying again')
# Remove any cached value for the SOAP header
self._cached_soap_header = None
# <detail>
# <refreshAuthTokenResult>
# <authToken>xxxxxxx</authToken>
# <privateKey>zzzzzz</privateKey>
# </refreshAuthTokenResult>
# </detail>
auth_token = exc.detail.findtext('.//authToken')
private_key = exc.detail.findtext('.//privateKey')
# We have new details - update the account
self.music_service.account.oa_device_id = auth_token
self.music_service.account.key = private_key
message = SoapMessage(
endpoint=self.endpoint,
method=method,
parameters=args,
http_headers=self.http_headers,
soap_action="http://www.sonos.com/Services/1"
".1#{0}".format(method),
soap_header=self.get_soap_header(),
namespace=self.namespace,
timeout=self.timeout)
result_elt = message.call()
else:
raise MusicServiceException(exc.faultstring, exc.faultcode)
# The top key in the OrderedDict will be the methodResult. Its
# value may be None if no results were returned.
result = list(parse(
XML.tostring(result_elt), process_namespaces=True,
namespaces={'http://www.sonos.com/Services/1.1': None}
).values())[0]
return result if result is not None else {}
# pylint: disable=too-many-instance-attributes
class MusicService(object):
"""The MusicService class provides access to third party music services.
Example:
List all the services Sonos knows about:
>>> from soco.music_services import MusicService
>>> print(MusicService.get_all_music_services_names())
['Spotify', 'The Hype Machine', 'Saavn', 'Bandcamp',
'Stitcher SmartRadio', 'Concert Vault',
...
]
Or just those to which you are subscribed:
>>> print(MusicService.get_subscribed_services_names())
['Spotify', 'radioPup', 'Spreaker']
Interact with TuneIn:
>>> tunein = MusicService('TuneIn')
>>> print (tunein)
<MusicService 'TuneIn' at 0x10ad84e10>
Browse an item. By default, the root item is used. An
:class:`~collections.OrderedDict` is returned:
>>> from json import dumps # Used for pretty printing ordereddicts
>>> print(dumps(tunein.get_metadata(), indent=4))
{
"index": "0",
"count": "7",
"total": "7",
"mediaCollection": [
{
"id": "featured:c100000150",
"title": "Blue Note on SONOS",
"itemType": "container",
"authRequired": "false",
"canPlay": "false",
"canEnumerate": "true",
"canCache": "true",
"homogeneous": "false",
"canAddToFavorite": "false",
"canScroll": "false",
"albumArtURI":
"http://cdn-albums.tunein.com/sonos/channel_legacy.png"
},
{
"id": "y1",
"title": "Music",
"itemType": "container",
"authRequired": "false",
"canPlay": "false",
"canEnumerate": "true",
"canCache": "true",
"homogeneous": "false",
"canAddToFavorite": "false",
"canScroll": "false",
"albumArtURI": "http://cdn-albums.tunein.com/sonos...
.png"
},
...
]
}
Interact with Spotify (assuming you are subscribed):
>>> spotify = MusicService('Spotify')
Get some metadata about a specific track:
>>> response = spotify.get_media_metadata(
... item_id='spotify:track:6NmXV4o6bmp704aPGyTVVG')
>>> print(dumps(response, indent=4))
{
"mediaMetadata": {
"id": "spotify:track:6NmXV4o6bmp704aPGyTVVG",
"itemType": "track",
"title": "B\u00f8n Fra Helvete (Live)",
"mimeType": "audio/x-spotify",
"trackMetadata": {
"artistId": "spotify:artist:1s1DnVoBDfp3jxjjew8cBR",
"artist": "Kaizers Orchestra",
"albumId": "spotify:album:6K8NUknbPh5TGaKeZdDwSg",
"album": "Mann Mot Mann (Ep)",
"duration": "317",
"albumArtURI":
"http://o.scdn.co/image/7b76a5074416e83fa3f3cd...9",
"canPlay": "true",
"canSkip": "true",
"canAddToFavorites": "true"
}
}
}
or even a playlist:
>>> response = spotify.get_metadata(
... item_id='spotify:user:spotify:playlist:0FQk6BADgIIYd3yTLCThjg')
Find the available search categories, and use them:
>>> print(spotify.available_search_categories)
['albums', 'tracks', 'artists']
>>> result = spotify.search(category='artists', term='miles')
Note:
Some of this code is still unstable, and in particular the data
structures returned by methods such as `get_metadata` may change in
future.
"""
_music_services_data = None
def __init__(self, service_name, account=None):
"""
Args:
service_name (str): The name of the music service, as returned by
`get_all_music_services_names()`, eg 'Spotify', or 'TuneIn'
account (Account): The account to use to access this service.
If none is specified, one will be chosen automatically if
possible. Defaults to `None`.
Raises:
`MusicServiceException`
"""
self.service_name = service_name
# Look up the data for this service
data = self.get_data_for_name(service_name)
self.uri = data['Uri']
self.secure_uri = data['SecureUri']
self.capabilities = data['Capabilities']
self.version = data['Version']
self.container_type = data['ContainerType']
self.service_id = data['Id']
        # Auth_type can be 'Anonymous', 'UserId', 'DeviceLink'
self.auth_type = data['Auth']
self.presentation_map_uri = data.get('PresentationMapUri', None)
self._search_prefix_map = None
self.service_type = data['ServiceType']
if account is not None:
self.account = account
else:
# try to find an account for this service
for acct in Account.get_accounts().values():
if acct.service_type == self.service_type:
self.account = acct
break
else:
raise MusicServiceException(
"No account found for service: '%s'" % service_name)
self.soap_client = MusicServiceSoapClient(
endpoint=self.secure_uri,
timeout=9, # The default is 60
music_service=self
)
def __repr__(self):
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
self.service_name,
hex(id(self)))
def __str__(self):
return self.__repr__()
@staticmethod
def _get_music_services_data_xml(soco=None):
"""Fetch the music services data xml from a Sonos device.
Args:
soco (SoCo): a SoCo instance to query. If none is specified, a
random device will be used. Defaults to `None`.
Returns:
str: a string containing the music services data xml
"""
device = soco or discovery.any_soco()
log.debug("Fetching music services data from %s", device)
available_services = device.musicServices.ListAvailableServices()
descriptor_list_xml = available_services[
'AvailableServiceDescriptorList']
log.debug("Services descriptor list: %s", descriptor_list_xml)
return descriptor_list_xml
@classmethod
def _get_music_services_data(cls):
"""Parse raw account data xml into a useful python datastructure.
Returns:
dict: Each key is a service_type, and each value is a
`dict` containing relevant data.
"""
# Return from cache if we have it.
if cls._music_services_data is not None:
return cls._music_services_data
result = {}
root = XML.fromstring(
cls._get_music_services_data_xml().encode('utf-8')
)
# <Services SchemaVersion="1">
# <Service Id="163" Name="Spreaker" Version="1.1"
# Uri="http://sonos.spreaker.com/sonos/service/v1"
# SecureUri="https://sonos.spreaker.com/sonos/service/v1"
# ContainerType="MService"
# Capabilities="513"
# MaxMessagingChars="0">
# <Policy Auth="Anonymous" PollInterval="30" />
# <Presentation>
# <Strings
# Version="1"
# Uri="https:...string_table.xml" />
# <PresentationMap Version="2"
# Uri="https://...presentation_map.xml" />
# </Presentation>
# </Service>
# ...
        # </Services>
# Ideally, the search path should be './/Service' to find Service
# elements at any level, but Python 2.6 breaks with this if Service
# is a child of the current element. Since 'Service' works here, we use
# that instead
services = root.findall('Service')
for service in services:
result_value = service.attrib.copy()
name = service.get('Name')
result_value['Name'] = name
auth_element = (service.find('Policy'))
auth = auth_element.attrib
result_value.update(auth)
presentation_element = (service.find('.//PresentationMap'))
if presentation_element is not None:
result_value['PresentationMapUri'] = \
presentation_element.get('Uri')
result_value['ServiceID'] = service.get('Id')
# ServiceType is used elsewhere in Sonos, eg to form tokens,
# and get_subscribed_music_services() below. It is also the
# 'Type' used in account_xml (see above). Its value always
# seems to be (ID*256) + 7. Some serviceTypes are also
# listed in available_services['AvailableServiceTypeList']
# but this does not seem to be comprehensive
service_type = str(int(service.get('Id')) * 256 + 7)
result_value['ServiceType'] = service_type
result[service_type] = result_value
# Cache this so we don't need to do it again.
cls._music_services_data = result
return result
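    # Worked example of the ServiceType arithmetic above, using the Spreaker
    # entry shown in the XML comment (Id="163"):
    #   service_type = 163 * 256 + 7 = 41735
    # so the returned dict is keyed by '41735' for that service.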
@classmethod
def get_all_music_services_names(cls):
"""Get a list of the names of all available music services.
These services have not necessarily been subscribed to.
Returns:
list: A list of strings.
"""
return [
service['Name'] for service in
cls._get_music_services_data().values()
]
@classmethod
def get_subscribed_services_names(cls):
"""Get a list of the names of all subscribed music services.
Returns:
list: A list of strings.
"""
# This is very inefficient - loops within loops within loops, and
# many network requests
# Optimise it?
accounts_for_service = Account.get_accounts_for_service
service_data = cls._get_music_services_data().values()
return [
service['Name'] for service in service_data
if len(
accounts_for_service(service['ServiceType'])
) > 0
]
@classmethod
def get_data_for_name(cls, service_name):
"""Get the data relating to a named music service.
Args:
service_name (str): The name of the music service for which data
is required.
Returns:
dict: Data relating to the music service.
Raises:
`MusicServiceException`: if the music service cannot be found.
"""
for service in cls._get_music_services_data().values():
if service_name == service["Name"]:
return service
raise MusicServiceException(
"Unknown music service: '%s'" % service_name)
def _get_search_prefix_map(self):
"""Fetch and parse the service search category mapping.
Standard Sonos search categories are 'all', 'artists', 'albums',
'tracks', 'playlists', 'genres', 'stations', 'tags'. Not all are
available for each music service
"""
        # TuneIn does not have a pmap. Its search keys are search:station,
# search:show, search:host
# Presentation maps can also define custom categories. See eg
# http://sonos-pmap.ws.sonos.com/hypemachine_pmap.6.xml
# <SearchCategories>
# ...
# <CustomCategory mappedId="SBLG" stringId="Blogs"/>
# </SearchCategories>
# Is it already cached? If so, return it
if self._search_prefix_map is not None:
return self._search_prefix_map
# Not cached. Fetch and parse presentation map
self._search_prefix_map = {}
# Tunein is a special case. It has no pmap, but supports searching
if self.service_name == "TuneIn":
self._search_prefix_map = {
'stations': 'search:station',
'shows': 'search:show',
'hosts': 'search:host',
}
return self._search_prefix_map
if self.presentation_map_uri is None:
# Assume not searchable?
return self._search_prefix_map
log.info('Fetching presentation map from %s',
self.presentation_map_uri)
pmap = requests.get(self.presentation_map_uri, timeout=9)
pmap_root = XML.fromstring(pmap.content)
# Search translations can appear in Category or CustomCategory elements
categories = pmap_root.findall(".//SearchCategories/Category")
if categories is None:
return self._search_prefix_map
for cat in categories:
self._search_prefix_map[cat.get('id')] = cat.get('mappedId')
custom_categories = pmap_root.findall(
".//SearchCategories/CustomCategory")
for cat in custom_categories:
self._search_prefix_map[cat.get('stringId')] = cat.get('mappedId')
return self._search_prefix_map
@property
def available_search_categories(self):
"""list: The list of search categories (each a string) supported.
May include ``'artists'``, ``'albums'``, ``'tracks'``, ``'playlists'``,
``'genres'``, ``'stations'``, ``'tags'``, or others depending on the
service. Some services, such as Spotify, support ``'all'``, but do not
advertise it.
Any of the categories in this list may be used as a value for
``category`` in :meth:`search`.
Example:
>>> print(spotify.available_search_categories)
['albums', 'tracks', 'artists']
>>> result = spotify.search(category='artists', term='miles')
"""
return self._get_search_prefix_map().keys()
def sonos_uri_from_id(self, item_id):
"""Get a uri which can be sent for playing.
Args:
item_id (str): The unique id of a playable item for this music
service, such as that returned in the metadata from
`get_metadata`, eg ``spotify:track:2qs5ZcLByNTctJKbhAZ9JE``
Returns:
str: A URI of the form: ``soco://spotify%3Atrack
%3A2qs5ZcLByNTctJKbhAZ9JE?sid=2311&sn=1`` which encodes the
``item_id``, and relevant data from the account for the music
service. This URI can be sent to a Sonos device for playing,
and the device itself will retrieve all the necessary metadata
such as title, album etc.
"""
# Real Sonos URIs look like this:
# x-sonos-http:tr%3a92352286.mp3?sid=2&flags=8224&sn=4 The
# extension (.mp3) presumably comes from the mime-type returned in a
# MusicService.get_metadata() result (though for Spotify the mime-type
# is audio/x-spotify, and there is no extension. See
# http://musicpartners.sonos.com/node/464 for supported mime-types and
# related extensions). The scheme (x-sonos-http) presumably
# indicates how the player is to obtain the stream for playing. It
# is not clear what the flags param is used for (perhaps bitrate,
# or certain metadata such as canSkip?). Fortunately, none of these
# seems to be necessary. We can leave them out, (or in the case of
# the scheme, use 'soco' as dummy text, and the players still seem
# to do the right thing.
# quote_url will break if given unicode on Py2.6, and early 2.7. So
# we need to encode.
item_id = quote_url(item_id.encode('utf-8'))
# Add the account info to the end as query params
account = self.account
result = "soco://{0}?sid={1}&sn={2}".format(
item_id, self.service_id,
account.serial_number
)
return result
@property
def desc(self):
"""str: The Sonos descriptor to use for this service.
The Sonos descriptor is used as the content of the <desc> tag in
DIDL metadata, to indicate the relevant music service id and username.
"""
desc = "SA_RINCON{0}_{1}".format(
self.account.service_type, self.account.username
)
return desc
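    # For example, an account with service_type '2311' and username
    # 'user@example.com' (hypothetical values) would yield the descriptor
    # 'SA_RINCON2311_user@example.com'.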
########################################################################
# #
# SOAP METHODS. #
# #
########################################################################
# Looking at various services, we see that the following SOAP methods
# are implemented, but not all in each service. Probably, the
# Capabilities property indicates which features are implemented, but
# it is not clear precisely how. Some of the more common/useful
# features have been wrapped into instance methods, below.
# See generally: http://musicpartners.sonos.com/node/81
# createItem(xs:string favorite)
# createTrialAccount(xs:string deviceId)
# deleteItem(xs:string favorite)
# getAccount()
# getExtendedMetadata(xs:string id)
# getExtendedMetadataText(xs:string id, xs:string Type)
# getLastUpdate()
# getMediaMetadata(xs:string id)
# getMediaURI(xs:string id)
# getMetadata(xs:string id, xs:int index, xs:int count,xs:boolean
# recursive)
# getScrollIndices(xs:string id)
# getSessionId(xs:string username, xs:string password)
    #  mergeTrialAccount(xs:string deviceId)
# rateItem(id id, xs:integer rating)
# search(xs:string id, xs:string term, xs:string index, xs:int count)
# setPlayedSeconds(id id, xs:int seconds)
def get_metadata(
self, item='root', index=0, count=100, recursive=False):
"""Get metadata for a container or item.
Args:
item (str or MusicServiceItem): The container or item to browse
given either as a MusicServiceItem instance or as a str.
Defaults to the root item.
index (int): The starting index. Default 0.
count (int): The maximum number of items to return. Default 100.
recursive (bool): Whether the browse should recurse into sub-items
(Does not always work). Defaults to `False`.
Returns:
~collections.OrderedDict: The item or container's metadata,
or `None`.
See also:
The Sonos `getMetadata API
<http://musicpartners.sonos.com/node/83>`_.
"""
if isinstance(item, MusicServiceItem):
item_id = item.id # pylint: disable=no-member
else:
item_id = item
response = self.soap_client.call(
'getMetadata', [
('id', item_id),
('index', index), ('count', count),
('recursive', 1 if recursive else 0)]
)
return parse_response(self, response, 'browse')
def search(self, category, term='', index=0, count=100):
"""Search for an item in a category.
Args:
category (str): The search category to use. Standard Sonos search
categories are 'artists', 'albums', 'tracks', 'playlists',
'genres', 'stations', 'tags'. Not all are available for each
music service. Call available_search_categories for a list for
this service.
term (str): The term to search for.
index (int): The starting index. Default 0.
count (int): The maximum number of items to return. Default 100.
Returns:
~collections.OrderedDict: The search results, or `None`.
See also:
The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
"""
search_category = self._get_search_prefix_map().get(category, None)
if search_category is None:
raise MusicServiceException(
"%s does not support the '%s' search category" % (
self.service_name, category))
response = self.soap_client.call(
'search',
[
('id', search_category), ('term', term), ('index', index),
('count', count)])
return parse_response(self, response, category)
def get_media_metadata(self, item_id):
"""Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_
"""
response = self.soap_client.call(
'getMediaMetadata',
[('id', item_id)])
return response.get('getMediaMetadataResult', None)
def get_media_uri(self, item_id):
"""Get a streaming URI for an item.
Note:
You should not need to use this directly. It is used by the Sonos
players (not the controllers) to obtain the uri of the media
stream. If you want to have a player play a media item,
            you should add it to the queue using its id and let the
player work out where to get the stream from (see `On Demand
Playback <http://musicpartners.sonos.com/node/421>`_ and
`Programmed Radio <http://musicpartners.sonos.com/node/422>`_)
Args:
item_id (str): The item for which the URI is required
Returns:
str: The item's streaming URI.
"""
response = self.soap_client.call(
'getMediaURI',
[('id', item_id)])
return response.get('getMediaURIResult', None)
def get_last_update(self):
"""Get last_update details for this music service.
Returns:
~collections.OrderedDict: A dict with keys 'catalog',
and 'favorites'. The value of each is a string which changes
each time the catalog or favorites change. You can use this to
detect when any caches need to be updated.
"""
# TODO: Maybe create a favorites/catalog cache which is invalidated
# TODO: when these values change?
response = self.soap_client.call('getLastUpdate')
return response.get('getLastUpdateResult', None)
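    # A minimal cache-invalidation sketch (cached_last_update and
    # refresh_catalog_cache are hypothetical names):
    #
    #     last = service.get_last_update()
    #     if last != cached_last_update:
    #         refresh_catalog_cache()
    #         cached_last_update = last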
def get_extended_metadata(self, item_id):
"""Get extended metadata for a media item, such as related items.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's extended metadata or None.
See also:
The Sonos `getExtendedMetadata API
<http://musicpartners.sonos.com/node/128>`_
"""
response = self.soap_client.call(
'getExtendedMetadata',
[('id', item_id)])
return response.get('getExtendedMetadataResult', None)
def get_extended_metadata_text(self, item_id, metadata_type):
"""Get extended metadata text for a media item.
Args:
item_id (str): The item for which metadata is required
metadata_type (str): The type of text to return, eg
``'ARTIST_BIO'``, or ``'ALBUM_NOTES'``. Calling
`get_extended_metadata` for the item will show which extended
metadata_types are available (under relatedBrowse and relatedText).
Returns:
str: The item's extended metadata text or None
See also:
The Sonos `getExtendedMetadataText API
<http://musicpartners.sonos.com/node/127>`_
"""
response = self.soap_client.call(
'getExtendedMetadataText',
[('id', item_id), ('type', metadata_type)])
return response.get('getExtendedMetadataTextResult', None)
def desc_from_uri(uri):
"""Create the content of DIDL desc element from a uri.
Args:
uri (str): A uri, eg:
``'x-sonos-http:track%3a3402413.mp3?sid=2&flags=32&sn=4'``
Returns:
str: The content of a desc element for that uri, eg
            ``'SA_RINCON<service_type>_<username>'``, built from the matching
            account.
"""
#
# If there is an sn parameter (which is the serial number of an account),
# we can obtain all the information we need from that, because we can find
# the relevant service_id in the account database (it is the same as the
# service_type). Consequently, the sid parameter is unneeded. But if sn is
# missing, we need the sid (service_type) parameter to find a relevant
# account
# urlparse does not work consistently with custom URI schemes such as
# those used by Sonos. This is especially broken in Python 2.6 and
# early versions of 2.7: http://bugs.python.org/issue9374
# As a workaround, we split off the scheme manually, and then parse
# the uri as if it were http
if ":" in uri:
_, uri = uri.split(":", 1)
query_string = parse_qs(urlparse(uri, 'http').query)
# Is there an account serial number?
if query_string.get('sn'):
account_serial_number = query_string['sn'][0]
try:
account = Account.get_accounts()[account_serial_number]
desc = "SA_RINCON{0}_{1}".format(
account.service_type, account.username)
return desc
except KeyError:
# There is no account matching this serial number. Fall back to
# using the service id to find an account
pass
if query_string.get('sid'):
service_id = query_string['sid'][0]
for service in MusicService._get_music_services_data().values():
if service_id == service["ServiceID"]:
service_type = service["ServiceType"]
account = Account.get_accounts_for_service(service_type)
if not account:
break
# Use the first account we find
account = account[0]
desc = "SA_RINCON{0}_{1}".format(
account.service_type, account.username)
return desc
# Nothing found. Default to the standard desc value. Is this the right
# thing to do?
desc = 'RINCON_AssociatedZPUDN'
return desc
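# A minimal usage sketch for desc_from_uri (the URI is the one from the
# docstring above; the service type and username depend on the accounts known
# to the local Sonos system, so the output shown is only indicative):
#
#     uri = 'x-sonos-http:track%3a3402413.mp3?sid=2&flags=32&sn=4'
#     desc_from_uri(uri)
#     # -> 'SA_RINCON<service_type>_<username>' if a matching account is found
#     # -> 'RINCON_AssociatedZPUDN' otherwise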
|
|
# Test fixture methods to be called from app context so we can access the db
from pebbles.models import (
User, Group, GroupUserAssociation, BlueprintTemplate, Blueprint,
Plugin, Notification, Instance)
from pebbles.tests.base import db
def primary_test_setup(namespace):
""" Setup taken from FlaskApiTestCase to re-use it elsewhere as well.
db.create_all is left to the caller.
    namespace is a descendant of unittest.TestCase and we store things on
    it for easy access during tests.
ToDo: store test vars inside a namespace on the parent object, e.g.
namespace.vars to avoid cluttering.
"""
namespace.known_admin_eppn = "[email protected]"
namespace.known_admin_password = "admin"
namespace.known_user_eppn = "[email protected]"
namespace.known_user_password = "user"
u1 = User(namespace.known_admin_eppn, namespace.known_admin_password, is_admin=True)
u2 = User(namespace.known_user_eppn, namespace.known_user_password, is_admin=False)
u3 = User("[email protected]", "group_owner")
u4 = User("[email protected]", "group_owner2")
# Fix user IDs to be the same for all tests, in order to reuse the same token
# for multiple tests
u1.id = 'u1'
u2.id = 'u2'
u3.id = 'u3'
u3.is_group_owner = True
u4.id = 'u4'
u4.is_group_owner = True
namespace.known_admin_id = u1.id
namespace.known_user_id = u2.id
namespace.known_group_owner_id = u3.id
namespace.known_group_owner_id_2 = u4.id
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.add(u4)
g1 = Group('Group1')
g2 = Group('Group2')
g3 = Group('Group3')
g4 = Group('Group4')
g5 = Group('System.default')
g1.id = 'g1'
g1u2 = GroupUserAssociation(user=u2)
g1u3 = GroupUserAssociation(user=u3, manager=True, owner=True)
g1u4 = GroupUserAssociation(user=u4, manager=True)
g1.users.append(g1u2)
g1.users.append(g1u3)
g1.users.append(g1u4)
g2.id = 'g2'
g2u3 = GroupUserAssociation(user=u3)
g2u4 = GroupUserAssociation(user=u4, owner=True)
g2.users.append(g2u3)
g2.users.append(g2u4)
g3.id = 'g3'
g3u4 = GroupUserAssociation(user=u4, owner=True)
g3.users.append(g3u4)
g3.banned_users.append(u2)
g3.banned_users.append(u3)
g4.id = 'g4'
g4u1 = GroupUserAssociation(user=u1, owner=True)
g4.users.append(g4u1)
g5.id = 'g5'
g5u1 = GroupUserAssociation(user=u1, owner=True)
g5.users.append(g5u1)
namespace.known_group_id = g1.id
namespace.known_group_id_2 = g2.id
namespace.known_group_id_3 = g3.id
namespace.known_banned_group_join_id = g3.join_code
namespace.known_group_join_id = g4.join_code
namespace.system_default_group_id = g5.id
db.session.add(g1)
db.session.add(g2)
db.session.add(g3)
db.session.add(g4)
db.session.add(g5)
db.session.commit()
p1 = Plugin()
p1.name = "TestPlugin"
p1.schema = {
"type": "object",
"title": "Comment",
"description": "Description",
"properties": {
"name": {
"type": "string"
},
"description": {
"type": "string"
},
"maximum_lifetime": {
"type": "string"
}
}
}
namespace.known_plugin_id = p1.id
db.session.add(p1)
t1 = BlueprintTemplate()
t1.name = 'TestTemplate'
t1.plugin = p1.id
db.session.add(t1)
namespace.known_template_id_disabled = t1.id
t2 = BlueprintTemplate()
t2.name = 'EnabledTestTemplate'
t2.plugin = p1.id
t2.config = {
'cost_multiplier': '1.0',
'maximum_lifetime': '1h',
'memory_limit': '512m',
'allow_update_client_connectivity': False
}
t2.allowed_attrs = [
'maximum_lifetime',
'cost_multiplier',
'preallocated_credits',
'allow_update_client_connectivity'
]
t2.is_enabled = True
db.session.add(t2)
namespace.known_template_id = t2.id
b1 = Blueprint()
b1.name = "TestBlueprint"
b1.template_id = t2.id
b1.group_id = g1.id
db.session.add(b1)
namespace.known_blueprint_id_disabled = b1.id
b2 = Blueprint()
b2.name = "EnabledTestBlueprint"
b2.template_id = t2.id
b2.group_id = g1.id
b2.is_enabled = True
db.session.add(b2)
namespace.known_blueprint_id = b2.id
b3 = Blueprint()
b3.name = "EnabledTestBlueprintClientIp"
b3.template_id = t2.id
b3.group_id = g1.id
b3.is_enabled = True
b3.config = {'allow_update_client_connectivity': True}
db.session.add(b3)
namespace.known_blueprint_id_2 = b3.id
b4 = Blueprint()
b4.name = "EnabledTestBlueprintOtherGroup"
b4.template_id = t2.id
b4.group_id = g2.id
b4.is_enabled = True
db.session.add(b4)
namespace.known_blueprint_id_g2 = b4.id
b5 = Blueprint()
b5.name = "DisabledTestBlueprintOtherGroup"
b5.template_id = t2.id
b5.group_id = g2.id
db.session.add(b5)
namespace.known_blueprint_id_disabled_2 = b5.id
b6 = Blueprint()
b6.name = "TestArchivedBlueprint"
b6.template_id = t2.id
b6.group_id = g2.id
b6.current_status = 'archived'
db.session.add(b6)
b7 = Blueprint()
b7.name = "TestDeletedBlueprint"
b7.template_id = t2.id
b7.group_id = g2.id
b7.current_status = 'deleted'
db.session.add(b7)
n1 = Notification()
n1.subject = "First notification"
n1.message = "First notification message"
namespace.known_notification_id = n1.id
db.session.add(n1)
n2 = Notification()
n2.subject = "Second notification"
n2.message = "Second notification message"
namespace.known_notification2_id = n2.id
db.session.add(n2)
db.session.commit()
i1 = Instance(
Blueprint.query.filter_by(id=b2.id).first(),
User.query.filter_by(eppn="[email protected]").first())
db.session.add(i1)
namespace.known_instance_id = i1.id
i2 = Instance(
Blueprint.query.filter_by(id=b3.id).first(),
User.query.filter_by(eppn="[email protected]").first())
db.session.add(i2)
namespace.known_instance_id_2 = i2.id
i3 = Instance(
Blueprint.query.filter_by(id=b3.id).first(),
User.query.filter_by(eppn="[email protected]").first())
db.session.add(i3)
i3.state = Instance.STATE_DELETED
i4 = Instance(
Blueprint.query.filter_by(id=b3.id).first(),
User.query.filter_by(eppn="[email protected]").first())
db.session.add(i4)
i5 = Instance(
Blueprint.query.filter_by(id=b4.id).first(),
User.query.filter_by(eppn="[email protected]").first())
db.session.add(i5)
db.session.commit()
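# A minimal usage sketch (the test class name is hypothetical; it mirrors how
# FlaskApiTestCase-style tests are expected to call this fixture):
#
#     class ExampleApiTestCase(unittest.TestCase):
#         def setUp(self):
#             db.create_all()            # left to the caller by design
#             primary_test_setup(self)   # populates self.known_* attributes
#
#         def tearDown(self):
#             db.session.remove()
#             db.drop_all()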
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text CategoryEncoding preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import keras_export
TFIDF = "tf-idf"
INT = "int"
BINARY = "binary"
COUNT = "count"
# The string tokens in the extracted vocabulary
_NUM_ELEMENTS_NAME = "num_elements"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
@keras_export("keras.layers.experimental.preprocessing.CategoryEncoding", v1=[])
class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Category encoding layer.
This layer provides options for condensing data into a categorical encoding.
It accepts integer values as inputs and outputs a dense representation
(one sample = 1-index tensor of float values representing data about the
sample's tokens) of those inputs.
Examples:
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... max_tokens=4, output_mode="count")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 0., 0.],
[2., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
Examples with weighted inputs:
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... max_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
<tf.Tensor: shape=(4, 4), dtype=float64, numpy=
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]])>
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
output_mode: Specification for the output of the layer.
Defaults to "binary". Values can
be "binary", "count" or "tf-idf", configuring the layer as follows:
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
`Tensor`. Defaults to `False`.
Call arguments:
inputs: A 2D tensor `(samples, timesteps)`.
count_weights: A 2D tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode. Not used in
`binary` or `tfidf` mode.
"""
def __init__(self,
max_tokens=None,
output_mode=BINARY,
sparse=False,
**kwargs):
# 'output_mode' must be one of (COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(COUNT, BINARY, TFIDF),
layer_name="CategoryEncoding",
arg_name="output_mode")
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
# We need to call super() before we call _add_state_variable().
combiner = _CategoryEncodingCombiner(
max_tokens=max_tokens,
compute_idf=output_mode == TFIDF)
super(CategoryEncoding, self).__init__(combiner=combiner, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell("V2").set("CategoryEncoding")
self._max_tokens = max_tokens
self._output_mode = output_mode
self._sparse = sparse
self._called = False
if self._output_mode == TFIDF:
# The TF-IDF weight may have a (None,) tensorshape. This creates
# a 1D variable with arbitrary shape, which we can assign any weight to
# so long as it has 1 dimension. In order to properly initialize this
# weight in Keras, we need to provide a custom callable initializer which
# does not depend on the shape of the weight (as all other initializers
# do) since the weight is not known. Hence the lambda shape, dtype: [0].
if max_tokens is None:
initializer = lambda shape, dtype: [0]
else:
initializer = init_ops.zeros_initializer
# We are adding these here instead of in build() since they do not depend
# on the input shape at all.
self.tf_idf_weights = self._add_state_variable(
name=_IDF_NAME,
shape=tensor_shape.TensorShape((max_tokens,)),
dtype=K.floatx(),
initializer=initializer)
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = K.floatx() if self._output_mode == TFIDF else dtypes.int64
if self._sparse:
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
else:
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
Raises:
RuntimeError: if the layer cannot be adapted at this time.
"""
if not reset_state:
raise ValueError("CategoryEncoding does not support streaming adapts.")
super(CategoryEncoding, self).adapt(data, reset_state)
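  # A minimal adapt() sketch (illustrative data; adapt() is only needed when
  # the layer is constructed with max_tokens=None, so that the vocabulary size
  # is learned from the data):
  #
  #     layer = CategoryEncoding(output_mode="count")
  #     layer.adapt(np.array([[0, 1], [0, 0], [1, 2]]))
  #     layer([[0, 1, 2]])   # output depth inferred as 3 from the data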
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if _NUM_ELEMENTS_NAME in updates:
if self._max_tokens is None:
self.set_num_elements(updates[_NUM_ELEMENTS_NAME])
elif self._max_tokens != updates[_NUM_ELEMENTS_NAME]:
raise RuntimeError("Cannot update states if you construct the layer "
"with `max_tokens`={}".format(self._max_tokens))
if self._output_mode == TFIDF:
self.set_tfidf_data(updates[_IDF_NAME])
def get_config(self):
config = {
"max_tokens": self._max_tokens,
"output_mode": self._output_mode,
"sparse": self._sparse,
}
base_config = super(CategoryEncoding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _convert_to_ndarray(self, x):
if isinstance(x, ops.Tensor):
return x
else:
return np.array(x)
def _convert_to_sparse_inputs(self, inputs):
if isinstance(inputs, sparse_tensor.SparseTensor):
return inputs
elif isinstance(inputs, ragged_tensor.RaggedTensor):
return inputs.to_sparse()
else:
indices = array_ops.where_v2(
math_ops.greater_equal(inputs, array_ops.constant(0, inputs.dtype)))
values = array_ops.gather_nd(inputs, indices)
shape = array_ops.shape(inputs, out_type=dtypes.int64)
return sparse_tensor.SparseTensor(indices, values, shape)
def set_num_elements(self, num_elements):
if self._max_tokens is not None:
raise RuntimeError(
"In order to dynamically set the number of elements, the "
"layer's 'max_tokens' arg must be set to None.")
if not isinstance(num_elements, numbers.Integral):
raise ValueError("num_elements must be a scalar integer.")
if self._called:
raise RuntimeError("num_elements cannot be changed after the layer is "
"called.")
self._max_tokens = num_elements
def set_tfidf_data(self, tfidf_data):
tfidf_data = self._convert_to_ndarray(tfidf_data)
if self._output_mode != TFIDF:
raise RuntimeError(
"In order to set TF-IDF data, the output mode must be 'tf-idf'.")
if tfidf_data.ndim != 1:
raise ValueError("TF-IDF data must be a 1-index array.")
if self._max_tokens is not None:
input_data_length = tfidf_data.shape[0]
if input_data_length > self._max_tokens:
raise ValueError("The array provided has %d elements. This layer is "
"configured to only allow %d elements." %
(input_data_length, self._max_tokens))
if input_data_length < self._max_tokens:
tfidf_data = np.resize(tfidf_data, (self._max_tokens,))
K.set_value(self.tf_idf_weights, tfidf_data)
def call(self, inputs, count_weights=None):
if isinstance(inputs, (list, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
if inputs.shape.rank == 1:
inputs = array_ops.expand_dims(inputs, 1)
if count_weights is not None and self._output_mode != COUNT:
raise ValueError("count_weights is not used in `output_mode='tf-idf'`, "
"or `output_mode='binary'`. Please pass a single input.")
self._called = True
if self._max_tokens is None:
raise RuntimeError(
"If you construct a `CategoryEncoding` layer with "
"`max_tokens=None`, you need to call `adapt()` "
"on it before using it")
else:
out_depth = self._max_tokens
if self._output_mode == TFIDF:
# If the input is a sparse tensor, we densify it with the default value of
# -1. Because -1 is ignored by one_hot, this effectively drops the non-set
# positions from the output encoding.
if self._sparse:
raise ValueError("`sparse=True` with `output_mode=tfidf` "
"is not supported.")
if isinstance(inputs, sparse_tensor.SparseTensor):
inputs = sparse_ops.sparse_tensor_to_dense(inputs, default_value=-1)
one_hot_data = array_ops.one_hot(inputs, depth=out_depth)
counts = math_ops.reduce_sum(one_hot_data, axis=1)
tf_idf_data = math_ops.multiply(counts, self.tf_idf_weights)
tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))
return tf_idf_data
binary_output = (self._output_mode == BINARY)
if self._sparse:
result = bincount_ops.sparse_bincount(
inputs,
weights=count_weights,
minlength=out_depth,
axis=-1,
binary_output=binary_output)
result = math_ops.cast(result, K.floatx())
batch_size = array_ops.shape(result)[0]
result = sparse_tensor.SparseTensor(
indices=result.indices,
values=result.values,
dense_shape=[batch_size, out_depth])
return result
else:
result = bincount_ops.bincount(
inputs,
weights=count_weights,
minlength=out_depth,
dtype=K.floatx(),
axis=-1,
binary_output=binary_output)
result.set_shape(tensor_shape.TensorShape((None, out_depth)))
return result
class _CategoryEncodingAccumulator(
collections.namedtuple("Accumulator", ["data", "per_doc_count_dict"])):
pass
class _CategoryEncodingCombiner(base_preprocessing_layer.Combiner):
"""Combiner for the CategoryEncoding preprocessing layer.
This class encapsulates the logic for computing the number of elements in the
input dataset and the document frequency for each element.
Attributes:
compute_max_element: (Optional) If set, this combiner will return the
maximum element in this set as part of its `extract()` call.
compute_idf: (Optional) If set, the inverse document frequency will be
computed for each value.
"""
# These are indices into the accumulator's `data` array.
MAX_VALUE_IDX = 0
DOC_ID_IDX = 1
def __init__(self, max_tokens=None, compute_idf=False):
self._max_tokens = max_tokens
self._compute_idf = compute_idf
def compute(self, values, accumulator=None):
"""Computes a step in this computation, returning a new accumulator."""
values = base_preprocessing_layer.convert_to_list(values)
if accumulator is None:
accumulator = self._create_accumulator()
# TODO(momernick): Benchmark improvements to this algorithm.
for element in values:
if not isinstance(element, list):
element = [element]
current_doc_id = accumulator.data[self.DOC_ID_IDX]
for value in element:
if self._max_tokens is None:
current_max_value = accumulator.data[self.MAX_VALUE_IDX]
if value > current_max_value:
accumulator.data[self.MAX_VALUE_IDX] = value
if self._compute_idf:
doc_count = accumulator.per_doc_count_dict[value]
if doc_count["last_doc_id"] != current_doc_id:
doc_count["count"] += 1
doc_count["last_doc_id"] = current_doc_id
accumulator.data[self.DOC_ID_IDX] += 1
return accumulator
def merge(self, accumulators):
"""Merges several accumulators to a single accumulator."""
if not accumulators:
return accumulators
base_accumulator = accumulators[0]
for accumulator in accumulators[1:]:
base_accumulator.data[self.DOC_ID_IDX] += accumulator.data[
self.DOC_ID_IDX]
if self._max_tokens is None:
base_accumulator.data[self.MAX_VALUE_IDX] = max(
base_accumulator.data[self.MAX_VALUE_IDX],
accumulator.data[self.MAX_VALUE_IDX])
if self._compute_idf:
for token, value in accumulator.per_doc_count_dict.items():
# Any newly created token counts in 'base_accumulator''s
# per_doc_count_dict will have a last_doc_id of -1. This is always
# less than the next doc id (which are strictly positive), so any
# future occurrences are guaranteed to be counted.
base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
return base_accumulator
def _inverse_document_frequency(self, document_counts, num_documents):
"""Computes the inverse-document-frequency (IDF) component of TFIDF.
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
document_counts: An array of the # of documents each token appears in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return np.log(1 + num_documents / (1 + np.array(document_counts)))
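  # Worked example for _inverse_document_frequency (illustrative numbers):
  # with num_documents=4 and document_counts=[2, 0], the weights are
  # log(1 + 4/(1 + 2)) ≈ 0.847 for the first token and log(1 + 4/1) ≈ 1.609
  # for a token that never appeared in any document.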
def extract(self, accumulator):
"""Converts an accumulator into a dict of output values.
Args:
accumulator: An accumulator aggregating over the full dataset.
Returns:
A dict of:
"num_elements": The number of unique elements in the data set. Only
returned if `compute_max_element` is True.
"idf": The inverse-document-frequency for each index, where idf[i] is
the IDF value for index i. Only returned if `compute_idf` is True.
"""
data, document_counts = accumulator
if data[self.MAX_VALUE_IDX] is not None:
max_element = data[self.MAX_VALUE_IDX] + 1
else:
max_element = self._max_tokens
output_dict = {}
if self._max_tokens is None:
output_dict[_NUM_ELEMENTS_NAME] = max_element
if self._compute_idf:
num_documents = data[self.DOC_ID_IDX]
# Here, we need to get the doc_counts for every token value, including
# values we have not yet seen (and are not in the document_counts dict).
# However, because document_counts is a defaultdict (see below), querying
# the dict directly for those values gives us meaningful counts (of 0).
# However, this also means we can't just extract the values in
# document_counts - we need to do a deliberate indexing using range().
doc_counts = [document_counts[i]["count"] for i in range(max_element)]
idf = self._inverse_document_frequency(doc_counts, num_documents)
output_dict[_IDF_NAME] = idf
return output_dict
def restore(self, output):
"""Creates an accumulator based on 'output'."""
raise NotImplementedError(
"CategoryEncoding does not restore or support streaming updates.")
def serialize(self, accumulator):
"""Serializes an accumulator for a remote call."""
output_dict = {}
output_dict["data"] = accumulator.data
if self._compute_idf:
output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
output_dict["idf_counts"] = [
counter["count"]
for counter in accumulator.per_doc_count_dict.values()
]
return compat.as_bytes(json.dumps(output_dict))
def deserialize(self, encoded_accumulator):
"""Deserializes an accumulator received from 'serialize()'."""
accumulator_dict = json.loads(compat.as_text(encoded_accumulator))
accumulator = self._create_accumulator()
for i, value in enumerate(accumulator_dict["data"]):
accumulator.data[i] = value
if self._compute_idf:
create_dict = lambda x: {"count": x, "last_doc_id": -1}
idf_count_dicts = [
create_dict(count) for count in accumulator_dict["idf_counts"]
]
idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
accumulator.per_doc_count_dict.update(idf_dict)
return accumulator
def _create_accumulator(self):
"""Accumulates a sorted array of vocab tokens and corresponding counts."""
if self._compute_idf:
create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
per_doc_count_dict = collections.defaultdict(create_default_dict)
else:
per_doc_count_dict = None
if self._max_tokens is None:
data = [0, 0]
else:
data = [None, 0]
return _CategoryEncodingAccumulator(data, per_doc_count_dict)
|
|
from copy import deepcopy
import os
from collections import defaultdict
from classytags.tests import DummyParser, DummyTokens
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory
from django.utils.html import escape
from django.utils.timezone import now
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from sekizai.data import UniqueSequence
from sekizai.helpers import get_varname
import cms
from cms.api import create_page, create_title, add_plugin
from cms.middleware.toolbar import ToolbarMiddleware
from cms.models import Page, Placeholder
from cms.templatetags.cms_tags import (
_get_page_by_untyped_arg,
_show_placeholder_by_id,
RenderPlugin,
)
from cms.templatetags.cms_js_tags import json_filter, render_placeholder_toolbar_js
from cms.test_utils.fixtures.templatetags import TwoPagesFixture
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting, get_site_id
from cms.utils.placeholder import get_placeholders
from sekizai.context import SekizaiContext
class TemplatetagTests(CMSTestCase):
def test_get_site_id_from_nothing(self):
with self.settings(SITE_ID=10):
self.assertEqual(10, get_site_id(None))
def test_get_site_id_from_int(self):
self.assertEqual(10, get_site_id(10))
def test_get_site_id_from_site(self):
site = Site()
site.id = 10
self.assertEqual(10, get_site_id(site))
def test_get_site_id_from_str_int(self):
self.assertEqual(10, get_site_id('10'))
def test_get_site_id_from_str(self):
with self.settings(SITE_ID=10):
self.assertEqual(10, get_site_id("something"))
def test_unicode_placeholder_name_fails_fast(self):
self.assertRaises(ImproperlyConfigured, get_placeholders, 'unicode_placeholder.html')
def test_page_attribute_tag_escapes_content(self):
script = '<script>alert("XSS");</script>'
class FakePage(object):
def get_page_title(self, *args, **kwargs):
return script
class FakeRequest(object):
current_page = FakePage()
GET = {'language': 'en'}
request = FakeRequest()
template = '{% load cms_tags %}{% page_attribute page_title %}'
output = self.render_template_obj(template, {}, request)
self.assertNotEqual(script, output)
self.assertEqual(escape(script), output)
def test_json_encoder(self):
self.assertEqual(json_filter(True), 'true')
self.assertEqual(json_filter(False), 'false')
self.assertEqual(json_filter([1, 2, 3]), '[1, 2, 3]')
self.assertEqual(json_filter((1, 2, 3)), '[1, 2, 3]')
filtered_dict = json_filter({'item1': 1, 'item2': 2, 'item3': 3})
self.assertTrue('"item1": 1' in filtered_dict)
self.assertTrue('"item2": 2' in filtered_dict)
self.assertTrue('"item3": 3' in filtered_dict)
today = now().today()
self.assertEqual('"%s"' % today.isoformat()[:-3], json_filter(today))
def test_static_with_version(self):
expected = '<script src="/static/cms/css/%(version)s/cms.base.css" type="text/javascript"></script>'
expected = expected % {'version': cms.__version__}
template = (
"""{% load cms_static %}<script src="{% static_with_version "cms/css/cms.base.css" %}" """
"""type="text/javascript"></script>"""
)
output = self.render_template_obj(template, {}, None)
self.assertEqual(expected, output)
class TemplatetagDatabaseTests(TwoPagesFixture, CMSTestCase):
def _getfirst(self):
return Page.objects.public().get(title_set__title='first')
def _getsecond(self):
return Page.objects.public().get(title_set__title='second')
def test_get_page_by_untyped_arg_none(self):
control = self._getfirst()
request = self.get_request('/')
request.current_page = control
page = _get_page_by_untyped_arg(None, request, 1)
self.assertEqual(page, control)
def test_get_page_by_pk_arg_edit_mode(self):
control = self._getfirst()
request = self.get_request('/')
request.GET = {"edit": ''}
user = self._create_user("admin", True, True)
request.current_page = control
request.user = user
middleware = ToolbarMiddleware()
middleware.process_request(request)
page = _get_page_by_untyped_arg(control.pk, request, 1)
self.assertEqual(page, control.publisher_draft)
def test_get_page_by_untyped_arg_page(self):
control = self._getfirst()
request = self.get_request('/')
page = _get_page_by_untyped_arg(control, request, 1)
self.assertEqual(page, control)
def test_get_page_by_untyped_arg_reverse_id(self):
second = self._getsecond()
request = self.get_request('/')
page = _get_page_by_untyped_arg("myreverseid", request, 1)
self.assertEqual(page, second)
def test_get_page_by_untyped_arg_dict(self):
second = self._getsecond()
request = self.get_request('/')
page = _get_page_by_untyped_arg({'pk': second.pk}, request, 1)
self.assertEqual(page, second)
def test_get_page_by_untyped_arg_dict_fail_debug(self):
with self.settings(DEBUG=True):
request = self.get_request('/')
self.assertRaises(Page.DoesNotExist,
_get_page_by_untyped_arg, {'pk': 1003}, request, 1
)
self.assertEqual(len(mail.outbox), 0)
def test_get_page_by_untyped_arg_dict_fail_nodebug_do_email(self):
with self.settings(SEND_BROKEN_LINK_EMAILS=True, DEBUG=False,
MANAGERS=[("Jenkins", "[email protected]")]):
request = self.get_request('/')
page = _get_page_by_untyped_arg({'pk': 1003}, request, 1)
self.assertEqual(page, None)
self.assertEqual(len(mail.outbox), 1)
def test_get_page_by_untyped_arg_dict_fail_nodebug_no_email(self):
with self.settings(SEND_BROKEN_LINK_EMAILS=False, DEBUG=False,
MANAGERS=[("Jenkins", "[email protected]")]):
request = self.get_request('/')
page = _get_page_by_untyped_arg({'pk': 1003}, request, 1)
self.assertEqual(page, None)
self.assertEqual(len(mail.outbox), 0)
def test_get_page_by_untyped_arg_fail(self):
request = self.get_request('/')
self.assertRaises(TypeError, _get_page_by_untyped_arg, [], request, 1)
def test_show_placeholder_for_page_placeholder_does_not_exist(self):
"""
Verify ``show_placeholder`` correctly handles being given an
invalid identifier.
"""
with self.settings(DEBUG=True):
context = self.get_context('/')
self.assertRaises(Placeholder.DoesNotExist, _show_placeholder_by_id,
context, 'does_not_exist', 'myreverseid')
with self.settings(DEBUG=False):
content = _show_placeholder_by_id(context, 'does_not_exist', 'myreverseid')
self.assertEqual(content, '')
def test_untranslated_language_url(self):
""" Tests page_language_url templatetag behavior when used on a page
without the requested translation, both when CMS_HIDE_UNTRANSLATED is
True and False.
When True it should return the root page URL if the current page is
untranslated (PR #1125)
"""
page_1 = create_page('Page 1', 'nav_playground.html', 'en', published=True,
in_navigation=True, reverse_id='page1')
create_title("de", "Seite 1", page_1, slug="seite-1")
page_1.publish('en')
page_1.publish('de')
page_2 = create_page('Page 2', 'nav_playground.html', 'en', page_1, published=True,
in_navigation=True, reverse_id='page2')
create_title("de", "Seite 2", page_2, slug="seite-2")
page_2.publish('en')
page_2.publish('de')
page_3 = create_page('Page 3', 'nav_playground.html', 'en', page_2, published=True,
in_navigation=True, reverse_id='page3')
tpl = "{% load menu_tags %}{% page_language_url 'de' %}"
lang_settings = deepcopy(get_cms_setting('LANGUAGES'))
lang_settings[1][1]['hide_untranslated'] = False
with self.settings(CMS_LANGUAGES=lang_settings):
context = self.get_context(page_2.get_absolute_url())
context['request'].current_page = page_2
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/seite-2/")
# Default configuration has CMS_HIDE_UNTRANSLATED=False
context = self.get_context(page_2.get_absolute_url())
context['request'].current_page = page_2.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/seite-2/")
context = self.get_context(page_3.get_absolute_url())
context['request'].current_page = page_3.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/page-3/")
lang_settings[1][1]['hide_untranslated'] = True
with self.settings(CMS_LANGUAGES=lang_settings):
context = self.get_context(page_2.get_absolute_url())
context['request'].current_page = page_2.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/seite-2/")
context = self.get_context(page_3.get_absolute_url())
context['request'].current_page = page_3.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/")
def test_create_placeholder_if_not_exist_in_template(self):
"""
        Tests that adding a new placeholder to an existing page's template
creates the placeholder.
"""
page = create_page('Test', 'col_two.html', 'en')
# I need to make it seem like the user added another placeholder to the SAME template.
page._template_cache = 'col_three.html'
class FakeRequest(object):
current_page = page
GET = {'language': 'en'}
context = self.get_context(page=page)
content_renderer = context['cms_content_renderer']
placeholder = content_renderer._get_page_placeholder(context, page, 'col_right')
page.placeholders.get(slot='col_right')
self.assertEqual(placeholder.slot, 'col_right')
def test_render_plugin_toolbar_config(self):
"""
Ensures that the render_plugin_toolbar_config tag
sets the correct values in the sekizai context.
"""
page = self._getfirst()
placeholder = page.placeholders.get(slot='body')
parent_plugin = add_plugin(placeholder, 'SolarSystemPlugin', 'en')
child_plugin_1 = add_plugin(placeholder, 'PlanetPlugin', 'en', target=parent_plugin)
child_plugin_2 = add_plugin(placeholder, 'PlanetPlugin', 'en', target=parent_plugin)
parent_plugin.child_plugin_instances = [
child_plugin_1,
child_plugin_2,
]
plugins = [
parent_plugin,
child_plugin_1,
child_plugin_2,
]
with self.login_user_context(self.get_superuser()):
context = self.get_context(path=page.get_absolute_url(), page=page)
context['request'].toolbar = CMSToolbar(context['request'])
context['request'].toolbar.edit_mode = True
context[get_varname()] = defaultdict(UniqueSequence)
content_renderer = context['cms_content_renderer']
output = content_renderer.render_plugin(
instance=parent_plugin,
context=context,
placeholder=placeholder,
editable=True
)
tag_format = '<template class="cms-plugin cms-plugin-start cms-plugin-{}">'
for plugin in plugins:
start_tag = tag_format.format(plugin.pk)
self.assertIn(start_tag, output)
def test_render_placeholder_toolbar_js_with_no_plugins(self):
page = self._getfirst()
request = self.get_request(language='en', page=page)
renderer = self.get_content_renderer(request)
placeholder = page.placeholders.get(slot='body')
content = render_placeholder_toolbar_js(
placeholder,
render_language='en',
content_renderer=renderer,
)
expected_bits = [
'"addPluginHelpTitle": "Add plugin to placeholder \\"Body\\""',
'"name": "Body"',
'"placeholder_id": "{}"'.format(placeholder.pk),
'"plugin_language": "en"',
'"page_language": "en"',
]
for bit in expected_bits:
self.assertIn(bit, content)
class NoFixtureDatabaseTemplateTagTests(CMSTestCase):
def test_cached_show_placeholder_sekizai(self):
from django.core.cache import cache
cache.clear()
from cms.test_utils import project
template_dir = os.path.join(os.path.dirname(project.__file__), 'templates', 'alt_plugin_templates',
'show_placeholder')
page = create_page('Test', 'col_two.html', 'en')
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='HIDDEN')
request = RequestFactory().get('/')
request.user = self.get_staff_user_with_no_permissions()
request.current_page = page
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['DIRS'] = [template_dir]
with self.settings(**override):
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('JAVASCRIPT', output)
def test_show_placeholder_lang_parameter(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en')
create_title('fr', 'Fr Test', page)
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='<b>En Test</b>')
add_plugin(placeholder, TextPlugin, 'fr', body='<b>Fr Test</b>')
request = RequestFactory().get('/')
request.user = AnonymousUser()
request.current_page = page
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>En Test</b>', output)
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'fr' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Fr Test</b>', output)
# Cache is now primed for both languages
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>En Test</b>', output)
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'fr' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Fr Test</b>', output)
def test_show_placeholder_for_page_marks_output_safe(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en')
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
request = RequestFactory().get('/')
request.user = AnonymousUser()
request.current_page = page
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
with self.assertNumQueries(4):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test</b>', output)
with self.assertNumQueries(1):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test</b>', output)
def test_cached_show_placeholder_preview(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
request = RequestFactory().get('/')
user = self._create_user("admin", True, True)
request.current_page = page.publisher_public
request.user = user
template = "{% load cms_tags %}{% show_placeholder slot page 'en' 1 %}"
with self.assertNumQueries(3):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test</b>', output)
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test2</b>')
request = RequestFactory().get('/?preview')
request.current_page = page
request.user = user
with self.assertNumQueries(3):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test2</b>', output)
def test_render_plugin(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
placeholder = page.placeholders.all()[0]
plugin = add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
template = "{% load cms_tags %}{% render_plugin plugin %}"
request = RequestFactory().get('/')
user = self._create_user("admin", True, True)
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'plugin': plugin}, request)
self.assertIn('<b>Test</b>', output)
def test_render_plugin_no_context(self):
placeholder = Placeholder.objects.create(slot='test')
plugin = add_plugin(placeholder, TextPlugin, 'en', body='Test')
parser = DummyParser()
tokens = DummyTokens(plugin)
tag = RenderPlugin(parser, tokens)
superuser = self.get_superuser()
request = RequestFactory().get('/')
request.current_page = None
request.user = superuser
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
context = SekizaiContext({
'request': request,
'cms_content_renderer': request.toolbar.content_renderer,
})
output = tag.render(context)
self.assertEqual(
output,
'<template class="cms-plugin cms-plugin-start cms-plugin-{0}"></template>Test<template class="cms-plugin cms-plugin-end cms-plugin-{0}"></template>'.format(
plugin.pk
)
)
def test_render_placeholder_with_no_page(self):
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% placeholder test or %}< --- empty --->{% endplaceholder %}"
request = RequestFactory().get('/asdadsaasd/')
user = self.get_superuser()
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(2):
output = self.render_template_obj(template, {}, request)
self.assertEqual(output, '< --- empty --->')
def test_render_placeholder_as_var(self):
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% placeholder test or %}< --- empty --->{% endplaceholder %}"
request = RequestFactory().get('/asdadsaasd/')
user = self.get_superuser()
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(2):
output = self.render_template_obj(template, {}, request)
self.assertEqual(output, '< --- empty --->')
def test_render_model_with_deferred_fields(self):
from django.core.cache import cache
from cms.test_utils.project.sampleapp.models import Category
Category.objects.create(name='foo', depth=1)
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% render_model category 'name' %}"
user = self._create_user("admin", True, True)
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
category = Category.objects.only('name').get()
output = self.render_template_obj(template, {'category': category}, request)
expected = "cms-plugin cms-plugin-start cms-plugin-sampleapp-category-name-%d cms-render-model" % category.pk
self.assertIn(expected, output)
# Now test that it does NOT render when not in edit mode
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': category}, request)
expected = 'foo'
self.assertEqual(expected, output)
def test_render_model_add(self):
from django.core.cache import cache
from cms.test_utils.project.sampleapp.models import Category
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% render_model_add category %}"
user = self._create_user("admin", True, True)
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(2):
output = self.render_template_obj(template, {'category': Category()}, request)
expected_start = '<template class="cms-plugin cms-plugin-start cms-plugin-sampleapp-category-add-0 cms-render-model-add"></template>'
expected_end = '<template class="cms-plugin cms-plugin-end cms-plugin-sampleapp-category-add-0 cms-render-model-add"></template>'
self.assertIn(expected_start, output)
self.assertIn(expected_end, output)
# Now test that it does NOT render when not in edit mode
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': Category()}, request)
expected = ''
self.assertEqual(expected, output)
def test_render_model_add_block(self):
from django.core.cache import cache
from cms.test_utils.project.sampleapp.models import Category
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% render_model_add_block category %}wrapped{% endrender_model_add_block %}"
user = self._create_user("admin", True, True)
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(2):
output = self.render_template_obj(template, {'category': Category()}, request)
        expected_start = (
            '<template class="cms-plugin cms-plugin-start cms-plugin-sampleapp-category-add-0 '
            'cms-render-model-add"></template>'
        )
        expected_end = (
            '<template class="cms-plugin cms-plugin-end cms-plugin-sampleapp-category-add-0 '
            'cms-render-model-add"></template>'
        )
self.assertIn(expected_start, output)
self.assertIn(expected_end, output)
# Now test that it does NOT render when not in edit mode
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': Category()}, request)
expected = 'wrapped'
self.assertEqual(expected, output)
|
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright (c) 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Hyper-V Nova Compute driver.
"""
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
def __init__(self, virtapi):
super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._snapshotops = snapshotops.SnapshotOps()
self._livemigrationops = livemigrationops.LiveMigrationOps()
self._migrationops = migrationops.MigrationOps()
self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
def init_host(self, host):
self._vmops.restart_vm_log_writers()
def list_instances(self):
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def get_info(self, instance):
return self._vmops.get_info(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self._volumeops.attach_volume(connection_info,
instance['name'])
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self._volumeops.detach_volume(connection_info,
instance['name'])
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector(instance)
def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_host_stats(self, refresh=False):
return self._hostops.get_host_stats(refresh)
def host_power_action(self, host, action):
return self._hostops.host_power_action(host, action)
def snapshot(self, context, instance, image_id, update_task_state):
self._snapshotops.snapshot(context, instance, image_id,
update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
def unpause(self, instance):
self._vmops.unpause(instance)
def suspend(self, instance):
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
# TODO(PhilDay): Add support for timeout (clean shutdown)
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance)
def live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
self._livemigrationops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
self.destroy(context, instance, network_info, block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
self._livemigrationops.pre_live_migration(context, instance,
block_device_info,
network_info)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(
context,
instance,
network_info,
block_migration)
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return self._livemigrationops.check_can_live_migrate_destination(
context, instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
self._livemigrationops.check_can_live_migrate_destination_cleanup(
context, dest_check_data)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
return self._livemigrationops.check_can_live_migrate_source(
context, instance, dest_check_data)
def get_instance_disk_info(self, instance_name, block_device_info=None):
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
msg = _("VIF plugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
msg = _("VIF unplugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def ensure_filtering_rules_for_instance(self, instance, network_info):
LOG.debug("ensure_filtering_rules_for_instance called",
instance=instance)
def unfilter_instance(self, instance, network_info):
LOG.debug("unfilter_instance called", instance=instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
# TODO(PhilDay): Add support for timeout (clean shutdown)
return self._migrationops.migrate_disk_and_power_off(context,
instance, dest,
flavor,
network_info,
block_device_info)
def confirm_migration(self, migration, instance, network_info):
self._migrationops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self._migrationops.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
block_device_info, power_on)
def get_host_ip_addr(self):
return self._hostops.get_host_ip_addr()
def get_host_uptime(self, host):
return self._hostops.get_host_uptime()
def get_rdp_console(self, context, instance):
return self._rdpconsoleops.get_rdp_console(instance)
def get_console_output(self, context, instance):
return self._vmops.get_console_output(instance)
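# A minimal usage sketch (illustrative only; in a real deployment Nova loads
# this class through its 'compute_driver' configuration option rather than by
# direct construction, and a real VirtAPI instance is supplied by the compute
# manager, so the None below is just a placeholder):
#
#     driver = HyperVDriver(virtapi=None)
#     driver.init_host('hyperv-host-01')
#     print(driver.list_instances())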
|
|
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, cos, sin, pi
from time import clock
from copy import deepcopy
class Blob:
def __init__(self,x,y,r):
self.x = x
self.y = y
self.r = r
self.path = [(x,y)]
def addToPath(self,nx,ny):
self.path.append( (nx,ny) )
self.x = nx
self.y = ny
def predict(self):
if len(self.path) == 1:
return self.path[0]
else:
#(x1-x0) + x1 predicts x2 reasonably well for human movement
return (2*self.path[-1][0]-self.path[-2][0],
2*self.path[-1][1]-self.path[-2][1])
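# Worked example of Blob.predict(): a blob whose path is [(0, 0), (2, 1)]
# predicts (4, 2) for the next frame, i.e. constant-velocity extrapolation
# 2*(2, 1) - (0, 0).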
class Video:
def __init__(self, pathname):
self.pathname = pathname
self.pipe = getVideoPipe(self.pathname)
self.frame = 0
self.blobs = []
def finish(self):
self.pipe.terminate()
def getVideoPipe(VIDEO_PATH):
FFMPEG_BIN = r"C:\Python33\Lib\ffmpeg-20140713-git-42c1cc3-win64-static\bin\ffmpeg.exe"
### get video info
import subprocess as sp
command = [FFMPEG_BIN, '-i', VIDEO_PATH, '-']
pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
pipe.stdout.readline()
pipe.terminate()
infos = str(pipe.stderr.read())[2:-1]
## ### get the frames per second
## end = infos.index("fps")-1
## beg = end-1
## while infos[beg-1] != " ":
## beg -= 1
## fps = float(infos[beg:end])
##
## ### calculate timecode from frame and fps
## s = (frame // fps) % 60
## m = (frame // fps) // 60 % 60
## h = (frame // fps) // 3600
## f = (frame % fps) * fps
## timecode = "%02d:%02d:%02d.%03d" % (h,m,s,f)
### set up pipe to get single video frame
command = [ FFMPEG_BIN,
## '-ss', timecode,
'-i', VIDEO_PATH,
'-f', 'image2pipe',
'-pix_fmt', 'rgb24',
'-vcodec', 'rawvideo', '-']
pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
return pipe
def getVideoFrame(pipe, xdim=800, ydim=600):
import numpy as np
### get frame image
raw_image = pipe.stdout.read(xdim*ydim*3)
image = np.fromstring(raw_image, dtype='uint8')
image = image.reshape((ydim,xdim,3))
pipe.stdout.flush()
## pipe.terminate()
return image, xdim,ydim
def colorDistance(target,actual): return sqrt(sum([(a-b)**2 for a,b in zip(target,actual)]))
def colDis(targetColor): return lambda col: colorDistance(targetColor,col)
def gray(z): return [np.uint8(z)]*3
def col(r,g,b): return list(map(np.uint8,[r,g,b]))
def mean(X): return sum(X)/len(X)
def circumcircle(p1,p2,p3): #each point is expected to be [x,y]
#taken from https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_from_cross-_and_dot-products
diff1 = [p1[0]-p2[0], p1[1]-p2[1]]
diff2 = [p1[0]-p3[0], p1[1]-p3[1]]
diff3 = [p2[0]-p3[0], p2[1]-p3[1]]
magSquared1 = diff1[0]**2 + diff1[1]**2
magSquared2 = diff2[0]**2 + diff2[1]**2
magSquared3 = diff3[0]**2 + diff3[1]**2
mag1 = sqrt(magSquared1)
mag2 = sqrt(magSquared2)
mag3 = sqrt(magSquared3)
dot1 = diff1[0]*diff2[0] + diff1[1]*diff2[1]
dot2 = -diff1[0]*diff3[0] + -diff1[1]*diff3[1]
dot3 = -diff2[0]*-diff3[0] + -diff2[1]*-diff3[1]
#working in 2D simplifies the cross product and its magnitude a lot
crossMag = diff1[0]*diff3[1] - diff1[1]*diff3[0]
crossMagSquared = crossMag**2
r = mag1*mag2*mag3/(2*crossMag)
alpha = magSquared3*dot1/(2*crossMagSquared)
beta = magSquared2*dot2/(2*crossMagSquared)
gamma = magSquared1*dot3/(2*crossMagSquared)
x = alpha*p1[0] + beta*p2[0] + gamma*p3[0]
y = alpha*p1[1] + beta*p2[1] + gamma*p3[1]
return [x,y,r]
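# Quick sanity check (illustrative only): the right triangle with vertices
# (0, 0), (2, 0) and (0, 2) has its circumcircle centred at (1, 1) with radius
# sqrt(2), which is what the barycentric formula above should return.
def _demo_circumcircle():
    cx, cy, cr = circumcircle([0, 0], [2, 0], [0, 2])
    assert abs(cx - 1) < 1e-9 and abs(cy - 1) < 1e-9
    assert abs(cr - sqrt(2)) < 1e-9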
def squareSpiral(startX,startY, spacing, limit):
sx = startX
sy = startY
yield sx,sy
level = 0
dx = -1
dy = 0
while level < limit:
if dx == -1 and dy == level:
level += 1
dx = 0
dy = level
else:
if dy == level and dx < level:
dx += 1
elif dx == level and dy > -level:
dy -= 1
elif dy == -level and dx > -level:
dx -= 1
elif dx == -level and dy < level:
dy += 1
yield sx+dx*spacing, sy+dy*spacing
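# Illustrative sketch: with spacing 1 the generator walks outward ring by ring,
# so the first nine points cover the 3x3 square centred on the start position.
def _demo_squareSpiral():
    pts = list(squareSpiral(0, 0, 1, 2))
    assert set(pts[:9]) == {(x, y) for x in (-1, 0, 1) for y in (-1, 0, 1)}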
def identifyTriangles(blobs, debug=0):
#precondition: all points are accounted for in exactly two triangles
if len(blobs) != 6:
raise ValueError("Too many or too few points. Need exactly 6.")
points = [(blob.x,blob.y) for blob in blobs]
tris = []
n = len(points)
def D(x1,y1, x2,y2): return sqrt((x2-x1)**2+(y2-y1)**2)
for k1 in range(n-2):
p1 = points[k1]
for k2 in range(k1+1,n-1):
if k1 != k2:
p2 = points[k2]
for k3 in range(k2+1,n):
if k3 != k1 and k3 != k2:
p3 = points[k3]
d1 = D(p1[0],p1[1], p2[0],p2[1])
d2 = D(p1[0],p1[1], p3[0],p3[1])
d3 = D(p2[0],p2[1], p3[0],p3[1])
mean = round((d1+d2+d3)/3, 3) #arithmetic mean
var = round(sum([(d-mean)**2 for d in [d1,d2,d3]])/3, 3) #variance
tris.append([var, mean, [p1,p2,p3]])
tris.sort()
if debug:
for t in tris: print(t)
def overlap(L1, L2):
L1s = sorted(L1)
L2s = sorted(L2)
i,j = 0,0
while i < len(L1s) and j < len(L2s):
if L1s[i] == L2s[j]: return 1
if L1s[i] < L2s[j]: i += 1
elif L1s[i] > L2s[j]: j += 1
return 0
m = 0
mi,mj = 0, 0
def mean(X): return sum(X)/len(X)
def dot(X,Y): return sum(a*b for a,b in zip(X,Y))
def diff(X,Y): return [y-x for x,y in zip(X,Y)] #\vec{Y} - \vec{X}
for i in range(5):
for j in range(1,6):
if not overlap(tris[i][2], tris[j][2]):
triCenter1 = list(map(mean, zip(*tris[i][2])))
triCenter2 = list(map(mean, zip(*tris[j][2])))
centerDiff = diff(triCenter1, triCenter2)
tri1m = max([abs(dot(centerDiff, diff(triCenter1, p))) for p in tris[i][2]])
tri2m = max([abs(dot(centerDiff, diff(triCenter2, p))) for p in tris[j][2]])
if tri1m + tri2m > m:
m = tri1m + tri2m
mi,mj = i,j
return [tris[mi], tris[mj]]
### SEARCH ###
def search_squareSpiral(blobs, xdim, ydim, startX=-1, startY=-1, spacing=1, debug=0):
if startX == -1 or startY == -1:
startX = xdim//2
startY = ydim//2
spiral = squareSpiral(startX,startY, spacing, min(xdim,ydim)//(2*spacing)-1)
## if debug: print(startX,startY)
while 1:
try:
centerX, centerY = next(spiral)
## if debug: print(centerX,centerY)
except StopIteration:
return
if 0 <= centerX <= xdim and 0 <= centerY <= ydim:
if debug: image[centerY][centerX] = col(255,0,0)
goodEnough = (cD(image[centerY][centerX]) <= maxColDis)
if goodEnough:
## if debug: print('???')
dupe = 0
for blob in blobs:
if (blob.x-centerX)**2 + (blob.y-centerY)**2 <= blob.r**2*1.5:
dupe = 1
break
if not dupe:
yield centerX, centerY
### SURVEY ###
def survey_floodFill(sX,sY):
cX,cY,cR = (None,None,None)
queue = [[sX,sY]]
q = 0
while q < len(queue):
qx,qy = queue[q]
for dx,dy in [[1,0],[0,1],[-1,0],[0,-1]]:
x2, y2 = qx+dx, qy+dy
if cD(image[y2][x2]) <= maxColDis:
image[y2][x2] = col(255,0,0)
queue.append([x2,y2])
q += 1
sumX,sumY = list(map(sum, zip(*queue)))
cX, cY = round(sumX/q), round(sumY/q)
cR = round(sqrt( q/pi ))
return (cX,cY,cR)
def survey_circumcircle(sX,sY, numIterations=3, showVerts=0): #showVerts is also "minRadius"
cX,cY,cR = (None,None,None)
for k in range(numIterations): #refines circumcircle estimate by using prior guess
vertices = []
for dx,dy in [[1,0],[0,1],[-1,0],[0,-1]]:
pX = sX+dx
pY = sY+dy
while cD(image[pY][pX]) <= maxColDis:
pX += dx
pY += dy
vertices.append( [pX,pY] )
v = vertices
#get four possible circle centers/radii
c1 = circumcircle(v[0],v[1],v[2])
c2 = circumcircle(v[0],v[1],v[3])
c3 = circumcircle(v[0],v[2],v[3])
c4 = circumcircle(v[1],v[2],v[3])
#average them
avgX = (c1[0]+c2[0]+c3[0]+c4[0])/4
avgY = (c1[1]+c2[1]+c3[1]+c4[1])/4
avgR = (c1[2]+c2[2]+c3[2]+c4[2])/4
c5 = [avgX,avgY,avgR]
cX,cY,cR = c5
sX = cX
sY = cY
if showVerts and cR >= showVerts:
image[cY][cX] = col(0,0,255)
for vert in vertices:
image[vert[1]][vert[0]] = col(255,0,0)
return (cX,cY,cR)
### The actual start of the program ###
targetColor = [0,255,0]
cD = colDis(targetColor)
minRadius = 6 #pixels
maxColDis = 40
### Search algorithm entries go like this:
### ['name', function_name, <optional parameter 1>, <opt. param. 2>, <etc>]
### Functions are expected to take -blobs, xdim, ydim- as their first three args
### They must -yield- some -sx, sy- to survey
search_algs = [['square spiral', search_squareSpiral, minRadius-1],
]
### Survey algorithm entries go like this:
### ['name', function_name, <optional parameter 1>, <opt. param. 2>, <etc>]
### Functions are expected to take -sx, sy- as their first two args
### They must -return- a tuple: -(cX,cY,cR)-; that is, blob x,y, and radius
survey_algs = [#['flood fill', survey_floodFill],
['circumcircle', survey_circumcircle],
]
n = 0
VIDEO_PATHs = [r".\Demo vids 1\demovid1_left0001-0075.mp4",
r".\Demo vids 1\demovid1_top0001-0075.mp4",
r".\Demo vids 1\demovid1_right0001-0075.mp4",
]
videos = []
for pathname in VIDEO_PATHs:
videos.append( Video(pathname) )
search_alg = search_algs[0]
survey_alg = survey_algs[0]
firstFrameTimeTotal = 0
getFrameTimeTotal = 0
startTime = clock()
times = counter = 50
while counter > 0:
counter -= 1
for v in videos:
st = clock()
origImage, xdim,ydim = getVideoFrame(v.pipe)
image = deepcopy(origImage) #ensures that flood fill doesn't mess up other algorithms
et = clock()
getFrameTimeTotal += (et-st)
if v.frame == 0:
st = clock()
blobs = []
search = search_alg[1](blobs, xdim,ydim, spacing = search_alg[2])
while 1:
try:
sx,sy = next(search)
except StopIteration:
break
tempblob = survey_alg[1](sx,sy, *survey_alg[2:])
if tempblob[2] >= minRadius: blobs.append(Blob( *tempblob ))
if len(blobs) != 6:
print("Uh oh! Not enough or too many blobs were detected!")
break
v.blobs = blobs
et = clock()
print("Time for first frame: %.3f seconds." % (et-st))
firstFrameTimeTotal += (et-st)
else:
tempblobs = []
for blob in v.blobs:
bx, by = blob.predict()
search = search_alg[1](tempblobs, xdim,ydim,
startX = round(bx), startY = round(by),
spacing = 2, debug=0)
while 1:
try:
sx,sy = next(search)
except StopIteration:
break
tempblob = survey_alg[1](sx,sy)
if tempblob[2] >= minRadius:
blob.addToPath(tempblob[0],tempblob[1])
tempblobs.append(blob)
break
## tris = identifyTriangles(blobs)
## for t in tris:
## for k in range(3):
## sub.plot([t[2][k-1][0],t[2][k][0]],
## [t[2][k-1][1],t[2][k][1]],
## 'b-')
if counter == 0:
n += 1
fig = plt.figure(n)
sub = fig.add_subplot(111)
sub.imshow(image)
for blob in v.blobs:
pathx, pathy = list(zip(*blob.path))
sub.plot(pathx, pathy, 'b-')
sub.set_xlim([0,xdim])
sub.set_ylim([ydim,0])
for v in videos:
v.frame += 1
for v in videos:
v.finish()
endTime = clock()
print("(Time spent reading in frame image data (%.3f seconds) is excluded.)" % getFrameTimeTotal)
totalTime = endTime-startTime - getFrameTimeTotal
print("Total time for %d frames: %.3f seconds." % (times, totalTime))
print("Time spent on first frames only: %.3f seconds." % firstFrameTimeTotal)
print("Remaining time: %.3f seconds." % (totalTime - firstFrameTimeTotal))
print("Average time per non-first frame: %.3f seconds." % ((totalTime - firstFrameTimeTotal)/(3*times-3)))
plt.show()
|
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2010 Jonathan Bowman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Pure Python implementation of the Skein 512-bit hashing algorithm"""
import array
import binascii
import os
import struct
from .threefish import (add64, bigint, bytes2words, Threefish512, words,
words2bytes, words_format, xrange,
zero_bytes, zero_words)
# An empty bytestring that behaves itself whether in Python 2 or 3
empty_bytes = array.array('B').tostring()
class Skein512(object):
"""Skein 512-bit hashing algorithm
The message to be hashed may be set as `msg` when initialized, or
passed in later using the ``update`` method.
Use `key` (a bytestring with arbitrary length) for MAC
functionality.
`block_type` will typically be "msg", but may also be one of:
"key", "nonce", "cfg_final", or "out_final". These will affect the
tweak value passed to the underlying Threefish block cipher. Again,
if you don't know which one to choose, "msg" is probably what you
want.
Example:
>>> geesefly.Skein512("Hello, world!").hexdigest()
'8449f597f1764274f8bf4a03ead22e0404ea2dc63c8737629e6e282303aebfd5dd96f07e21ae2e7a8b2bdfadd445bd1d71dfdd9745c95b0eb05dc01f289ad765'
"""
block_size = 64
block_bits = 512
block_type = {'key': 0,
'nonce': 0x5400000000000000,
'msg': 0x7000000000000000,
'cfg_final': 0xc400000000000000,
'out_final': 0xff00000000000000}
def __init__(self, msg='', digest_bits=512, key=None,
block_type='msg'):
self.tf = Threefish512()
if key:
self.digest_bits = 512
self._start_new_type('key')
self.update(key)
self.tf.key = bytes2words(self.final(False))
self.digest_bits = digest_bits
self.digest_size = (digest_bits + 7) >> 3
self._start_new_type('cfg_final')
b = words2bytes((0x133414853,digest_bits,0,0,0,0,0,0))
self._process_block(b,32)
self._start_new_type(block_type)
if msg:
self.update(msg)
def _start_new_type(self, block_type):
"""Setup new tweak values and internal buffer.
Primarily for internal use.
"""
self.buf = empty_bytes
self.tf.tweak = words([0, self.block_type[block_type]])
def _process_block(self, block, byte_count_add):
"""Encrypt internal state using Threefish.
Primarily for internal use.
"""
block_len = len(block)
for i in xrange(0,block_len,64):
w = bytes2words(block[i:i+64])
self.tf.tweak[0] = add64(self.tf.tweak[0], byte_count_add)
self.tf.prepare_tweak()
self.tf.prepare_key()
self.tf.key = self.tf.encrypt_block(w)
self.tf._feed_forward(self.tf.key, w)
# set second tweak value to ~SKEIN_T1_FLAG_FIRST:
self.tf.tweak[1] &= bigint(0xbfffffffffffffff)
def update(self, msg):
"""Update internal state with new data to be hashed.
`msg` is a bytestring, and should be a bytes object in Python 3
and up, or simply a string in Python 2.5 and 2.6.
"""
self.buf += msg
buflen = len(self.buf)
if buflen > 64:
end = -(buflen % 64) or (buflen-64)
data = self.buf[0:end]
self.buf = self.buf[end:]
try:
self._process_block(data, 64)
except:
print(len(data))
print(binascii.b2a_hex(data))
def final(self, output=True):
"""Return hashed data as bytestring.
`output` is primarily for internal use. It should only be False
if you have a clear reason for doing so.
This function can be called as either ``final`` or ``digest``.
"""
self.tf.tweak[1] |= bigint(0x8000000000000000) # SKEIN_T1_FLAG_FINAL
buflen = len(self.buf)
self.buf += zero_bytes[:64-buflen]
self._process_block(self.buf, buflen)
if not output:
hash_val = words2bytes(self.tf.key)
else:
hash_val = empty_bytes
self.buf = zero_bytes[:]
key = self.tf.key[:] # temporary copy
i=0
while i*64 < self.digest_size:
self.buf = words_format[1].pack(i) + self.buf[8:]
self.tf.tweak = [0, self.block_type['out_final']]
self._process_block(self.buf, 8)
n = self.digest_size - i*64
if n >= 64:
n = 64
hash_val += words2bytes(self.tf.key)[0:n]
self.tf.key = key
i+=1
return hash_val
digest = final
def hexdigest(self):
"""Return a hexadecimal representation of the hashed data"""
return binascii.b2a_hex(self.digest())
class Skein512Random(Skein512):
"""A Skein-based pseudo-random bytestring generator.
If `seed` is unspecified, ``os.urandom`` will be used to provide the
seed.
In case you are using this as an iterator, rather than generating
new data at each iteration, a pool of length `queue_size` is
generated periodically.
"""
def __init__(self, seed=None, queue_size=512):
Skein512.__init__(self, block_type='nonce')
self.queue = []
self.queue_size = queue_size
self.tf.key = zero_words[:]
if not seed:
seed = os.urandom(100)
self.reseed(seed)
def reseed(self, seed):
"""(Re)seed the generator."""
self.digest_size = 64
self.update(words2bytes(self.tf.key) + seed)
self.tf.key = bytes2words(self.final())
def getbytes(self, request_bytes):
"""Return random bytestring of length `request_bytes`."""
self.digest_size = 64 + request_bytes
self.update(words2bytes(self.tf.key))
output = self.final()
self.tf.key = bytes2words(output[0:64])
return output[64:]
def __iter__(self):
return self
def next(self):
if not self.queue:
self.queue = array.array('B', self.getbytes(self.queue_size))
        return self.queue.pop()
    __next__ = next  # Python 3 spells the iterator protocol method __next__
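# Illustrative usage sketch (mirrors the docstrings above; the demo name and the
# sample message are only for this example, and b'' literals assume Python 2.6+):
def _demo_skein_usage():
    digest_hex = Skein512(b'Hello, world!').hexdigest()  # 512-bit digest as hex
    rng = Skein512Random()                               # seeded from os.urandom
    sixteen_bytes = rng.getbytes(16)                     # arbitrary-length output
    return digest_hex, sixteen_bytes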
|
|
import traceback
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import get_current_thread_id, IS_IRONPYTHON, NO_FTRACE, IS_WINDOWS
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from pydevd_tracing import SetTrace
# IFDEF CYTHON
# # In Cython, set_additional_thread_info is bundled in the file.
# from cpython.object cimport PyObject
# from cpython.ref cimport Py_INCREF, Py_XDECREF
# ELSE
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_frame import PyDBFrame
# ENDIF
from os.path import basename, splitext
from _pydevd_bundle.pydevd_breakpoints import stop_on_unhandled_exception
from _pydevd_bundle.pydevd_collect_try_except_info import collect_try_except_info
threadingCurrentThread = threading.currentThread
get_file_type = DONT_TRACE.get
# Note: this is different from pydevd_constants.thread_get_ident because we want
# this to be None on Jython, which also doesn't have threading._active.
try:
threading_get_ident = threading.get_ident # Python 3
except:
try:
threading_get_ident = threading._get_ident # Python 2
except:
threading_get_ident = None # Jython
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
# cdef dict global_cache_skips
# cdef dict global_cache_frame_skips
# ELSE
# ENDIF
# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}
# IFDEF CYTHON
# cdef class SafeCallWrapper:
# cdef method_object
# def __init__(self, method_object):
# self.method_object = method_object
# def __call__(self, *args):
# #Cannot use 'self' once inside the delegate call since we are borrowing the self reference f_trace field
# #in the frame, and that reference might get destroyed by set trace on frame and parents
# cdef PyObject* method_obj = <PyObject*> self.method_object
# Py_INCREF(<object>method_obj)
# ret = (<object>method_obj)(*args)
# Py_XDECREF (method_obj)
# return SafeCallWrapper(ret) if ret is not None else None
# ELSE
# ENDIF
def fix_top_level_trace_and_get_trace_func(py_db, frame):
# IFDEF CYTHON
# cdef str filename;
# cdef str name;
# cdef tuple args;
# ENDIF
# Note: this is always the first entry-point in the tracing for any thread.
# After entering here we'll set a new tracing function for this thread
# where more information is cached (and will also setup the tracing for
# frames where we should deal with unhandled exceptions).
thread = None
# Cache the frame which should be traced to deal with unhandled exceptions.
# (i.e.: thread entry-points).
f_unhandled = frame
# print('called at', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
force_only_unhandled_tracer = False
while f_unhandled is not None:
filename = f_unhandled.f_code.co_filename
name = splitext(basename(filename))[0]
if name == 'threading':
if f_unhandled.f_code.co_name in ('__bootstrap', '_bootstrap'):
# We need __bootstrap_inner, not __bootstrap.
return None, False
elif f_unhandled.f_code.co_name in ('__bootstrap_inner', '_bootstrap_inner'):
# Note: be careful not to use threading.currentThread to avoid creating a dummy thread.
t = f_unhandled.f_locals.get('self')
force_only_unhandled_tracer = True
if t is not None and isinstance(t, threading.Thread):
thread = t
break
elif name == 'pydev_monkey':
if f_unhandled.f_code.co_name == '__call__':
force_only_unhandled_tracer = True
break
elif name == 'pydevd':
if f_unhandled.f_code.co_name in ('run', 'main'):
# We need to get to _exec
return None, False
if f_unhandled.f_code.co_name == '_exec':
force_only_unhandled_tracer = True
break
elif f_unhandled.f_back is None:
break
f_unhandled = f_unhandled.f_back
if thread is None:
# Important: don't call threadingCurrentThread if we're in the threading module
# to avoid creating dummy threads.
if threading_get_ident is not None:
thread = threading._active.get(threading_get_ident())
if thread is None:
if IS_WINDOWS and f_unhandled and not f_unhandled.f_code.co_filename.startswith('threading'):
                    # When attaching to a process on Windows, its main thread ID may not be in `threading._active`
                    # unless the process imports `threading` on its own.
thread = threadingCurrentThread()
else:
return None, False
else:
# Jython does not have threading.get_ident().
thread = threadingCurrentThread()
if getattr(thread, 'pydev_do_not_trace', None):
SetTrace(None)
return None, False
try:
additional_info = thread.additional_info
if additional_info is None:
raise AttributeError()
except:
additional_info = set_additional_thread_info(thread)
# print('enter thread tracer', thread, get_current_thread_id(thread))
args = (py_db, thread, additional_info, global_cache_skips, global_cache_frame_skips)
if f_unhandled is not None:
if f_unhandled.f_back is None and not force_only_unhandled_tracer:
# Happens when we attach to a running program.
top_level_thread_tracer = TopLevelThreadTracerNoBackFrame(ThreadTracer(args), args)
else:
# Stop in some internal place to report about unhandled exceptions
top_level_thread_tracer = TopLevelThreadTracerOnlyUnhandledExceptions(args)
# IFDEF CYTHON
# thread._top_level_thread_tracer = top_level_thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
# ELSE
# ENDIF
# print(' --> found to trace unhandled', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
f_trace = top_level_thread_tracer.get_trace_dispatch_func()
# IFDEF CYTHON
# f_unhandled.f_trace = SafeCallWrapper(f_trace)
# ELSE
f_unhandled.f_trace = f_trace
# ENDIF
if frame is f_unhandled:
return f_unhandled.f_trace, False
thread_tracer = ThreadTracer(args)
# IFDEF CYTHON
# thread._tracer = thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
# ELSE
# ENDIF
return thread_tracer, True
# IFDEF CYTHON
# def trace_dispatch(py_db, frame, str event, arg):
# ELSE
def trace_dispatch(py_db, frame, event, arg):
# ENDIF
if fix_top_level_trace_and_get_trace_func is None or threadingCurrentThread is None or splitext is None:
# When the application is being exited with live daemon threads, it's possible that some
# of the names we require are already None, so, check that tokens we need are there.
# Code to diagnose where this happens below.
# msg = ''
# msg += 'fix_top_level_trace_and_get_trace_func: %s\n' % (fix_top_level_trace_and_get_trace_func,)
# msg += 'threadingCurrentThread: %s\n' % (threadingCurrentThread,)
# msg += 'splitext: %s\n' % (splitext,)
# while frame is not None:
        # msg += 'location 1: %s %s %s\n' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename)
# if 't' in frame.f_locals:
# t = frame.f_locals['t']
# if hasattr(t, 'run'):
# msg += 'Error 1 in thread with function: %s %s %s\n' % (t._Thread__target, t.run, t.__class__)
# t = None
#
# frame = frame.f_back
# print(msg)
return None
thread_trace_func, apply_to_settrace = fix_top_level_trace_and_get_trace_func(py_db, frame)
if thread_trace_func is None:
if event != 'call': frame.f_trace = NO_FTRACE
return None
if apply_to_settrace:
py_db.enable_tracing(thread_trace_func)
return thread_trace_func(frame, event, arg)
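# Minimal illustration (not pydevd code; all names below are hypothetical):
# trace_dispatch returns a callable and also assigns frame.f_trace because of the
# sys.settrace contract -- whatever the global trace function returns for a 'call'
# event becomes the local trace function for that frame.
def _demo_settrace_contract():
    import sys
    seen = []
    def local_trace(frame, event, arg):
        seen.append(event)           # receives 'line'/'return' events for the frame
        return local_trace
    def global_trace(frame, event, arg):
        return local_trace if event == 'call' else None
    def traced():
        x = 1 + 1
        return x
    sys.settrace(global_trace)
    try:
        traced()
    finally:
        sys.settrace(None)
    return seen                      # e.g. ['line', 'line', 'return']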
# IFDEF CYTHON
# cdef class TopLevelThreadTracerOnlyUnhandledExceptions:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class TopLevelThreadTracerOnlyUnhandledExceptions:
def __init__(self, args):
self._args = args
# ENDIF
def trace_unhandled_exceptions(self, frame, event, arg):
# Note that we ignore the frame as this tracing method should only be put in topmost frames already.
# print('trace_unhandled_exceptions', event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)
if event == 'exception' and arg is not None:
py_db, t, additional_info = self._args[0:3]
if arg is not None:
if not additional_info.suspended_at_unhandled:
additional_info.suspended_at_unhandled = True
stop_on_unhandled_exception(py_db, t, additional_info, arg)
# No need to reset frame.f_trace to keep the same trace function.
# IFDEF CYTHON
# return SafeCallWrapper(self.trace_unhandled_exceptions)
# ELSE
return self.trace_unhandled_exceptions
# ENDIF
def get_trace_dispatch_func(self):
return self.trace_unhandled_exceptions
# IFDEF CYTHON
# cdef class TopLevelThreadTracerNoBackFrame:
#
# cdef public object _frame_trace_dispatch;
# cdef public tuple _args;
# cdef public object _try_except_info;
# cdef public object _last_exc_arg;
# cdef public set _raise_lines;
# cdef public int _last_raise_line;
#
# def __init__(self, frame_trace_dispatch, tuple args):
# self._frame_trace_dispatch = frame_trace_dispatch
# self._args = args
# self._try_except_info = None
# self._last_exc_arg = None
# self._raise_lines = set()
# self._last_raise_line = -1
# ELSE
class TopLevelThreadTracerNoBackFrame:
'''
This tracer is pretty special in that it's dealing with a frame without f_back (i.e.: top frame
on remote attach or QThread).
This means that we have to carefully inspect exceptions to discover whether the exception will
be unhandled or not (if we're dealing with an unhandled exception we need to stop as unhandled,
otherwise we need to use the regular tracer -- unfortunately the debugger has little info to
work with in the tracing -- see: https://bugs.python.org/issue34099, so, we inspect bytecode to
determine if some exception will be traced or not... note that if this is not available -- such
    as on Jython -- we consider any top-level exception to be unhandled).
'''
def __init__(self, frame_trace_dispatch, args):
self._frame_trace_dispatch = frame_trace_dispatch
self._args = args
self._try_except_info = None
self._last_exc_arg = None
self._raise_lines = set()
self._last_raise_line = -1
# ENDIF
def trace_dispatch_and_unhandled_exceptions(self, frame, event, arg):
# print('trace_dispatch_and_unhandled_exceptions', event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)
if self._frame_trace_dispatch is not None:
self._frame_trace_dispatch = self._frame_trace_dispatch(frame, event, arg)
if event == 'exception':
self._last_exc_arg = arg
self._raise_lines.add(frame.f_lineno)
self._last_raise_line = frame.f_lineno
elif event == 'return' and self._last_exc_arg is not None:
# For unhandled exceptions we actually track the return when at the topmost level.
try:
py_db, t, additional_info = self._args[0:3]
if not additional_info.suspended_at_unhandled: # Note: only check it here, don't set.
if frame.f_lineno in self._raise_lines:
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
else:
if self._try_except_info is None:
self._try_except_info = collect_try_except_info(frame.f_code)
if not self._try_except_info:
# Consider the last exception as unhandled because there's no try..except in it.
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
else:
# Now, consider only the try..except for the raise
valid_try_except_infos = []
for try_except_info in self._try_except_info:
if try_except_info.is_line_in_try_block(self._last_raise_line):
valid_try_except_infos.append(try_except_info)
if not valid_try_except_infos:
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
else:
# Note: check all, not only the "valid" ones to cover the case
# in "pydev_tests_python.test_tracing_on_top_level.raise_unhandled10"
# where one try..except is inside the other with only a raise
# and it's gotten in the except line.
for try_except_info in self._try_except_info:
if try_except_info.is_line_in_except_block(frame.f_lineno):
if (
frame.f_lineno == try_except_info.except_line or
frame.f_lineno in try_except_info.raise_lines_in_except
):
# In a raise inside a try..except block or some except which doesn't
# match the raised exception.
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
break
else:
break # exited during the except block (no exception raised)
finally:
# Remove reference to exception after handling it.
self._last_exc_arg = None
# IFDEF CYTHON
# ret = SafeCallWrapper(self.trace_dispatch_and_unhandled_exceptions)
# ELSE
ret = self.trace_dispatch_and_unhandled_exceptions
# ENDIF
# Need to reset (the call to _frame_trace_dispatch may have changed it).
frame.f_trace = ret
return ret
def get_trace_dispatch_func(self):
return self.trace_dispatch_and_unhandled_exceptions
# IFDEF CYTHON
# cdef class ThreadTracer:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class ThreadTracer:
def __init__(self, args):
self._args = args
# ENDIF
def __call__(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
:param PyDB py_db:
This is the global debugger (this method should actually be added as a method to it).
'''
# IFDEF CYTHON
# cdef str filename;
# cdef str base;
# cdef int pydev_step_cmd;
# cdef tuple frame_cache_key;
# cdef dict cache_skips;
# cdef bint is_stepping;
# cdef tuple abs_path_real_path_and_base;
# cdef PyDBAdditionalThreadInfo additional_info;
# ENDIF
# print('ENTER: trace_dispatch', frame.f_code.co_filename, frame.f_lineno, event, frame.f_code.co_name)
py_db, t, additional_info, cache_skips, frame_skips_cache = self._args
pydev_step_cmd = additional_info.pydev_step_cmd
is_stepping = pydev_step_cmd != -1
try:
if py_db._finish_debugging_session:
if not py_db._termination_event_set:
# that was not working very well because jython gave some socket errors
try:
if py_db.output_checker_thread is None:
kill_all_pydev_threads()
except:
traceback.print_exc()
py_db._termination_event_set = True
if event != 'call': frame.f_trace = NO_FTRACE
return None
# if thread is not alive, cancel trace_dispatch processing
if not is_thread_alive(t):
py_db.notify_thread_not_alive(get_current_thread_id(t))
if event != 'call': frame.f_trace = NO_FTRACE
return None # suspend tracing
if py_db.thread_analyser is not None:
py_db.thread_analyser.log_event(frame)
if py_db.asyncio_analyser is not None:
py_db.asyncio_analyser.log_event(frame)
# Note: it's important that the context name is also given because we may hit something once
# in the global context and another in the local context.
frame_cache_key = (frame.f_code.co_firstlineno, frame.f_code.co_name, frame.f_code.co_filename)
if not is_stepping and frame_cache_key in cache_skips:
# print('skipped: trace_dispatch (cache hit)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
if event != 'call': frame.f_trace = NO_FTRACE
return None
try:
# Make fast path faster!
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
filename = abs_path_real_path_and_base[1]
file_type = get_file_type(abs_path_real_path_and_base[-1]) # we don't want to debug threading or anything related to pydevd
if file_type is not None:
if file_type == 1: # inlining LIB_FILE = 1
if not py_db.in_project_scope(filename):
# print('skipped: trace_dispatch (not in scope)', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[frame_cache_key] = 1
if event != 'call': frame.f_trace = NO_FTRACE
return None
else:
# print('skipped: trace_dispatch', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[frame_cache_key] = 1
if event != 'call': frame.f_trace = NO_FTRACE
return None
if is_stepping:
if py_db.is_filter_enabled and py_db.is_ignored_by_filters(filename):
# ignore files matching stepping filters
if event != 'call': frame.f_trace = NO_FTRACE
return None
if py_db.is_filter_libraries and not py_db.in_project_scope(filename):
# ignore library files while stepping
if event != 'call': frame.f_trace = NO_FTRACE
return None
# print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name, file_type)
if additional_info.is_tracing:
if event != 'call': frame.f_trace = NO_FTRACE
                return None # we don't want to trace code invoked from pydevd_frame.trace_dispatch
# Just create PyDBFrame directly (removed support for Python versions < 2.5, which required keeping a weak
# reference to the frame).
ret = PyDBFrame(
(
py_db, filename, additional_info, t, frame_skips_cache, frame_cache_key,
)
).trace_dispatch(frame, event, arg)
if ret is None:
cache_skips[frame_cache_key] = 1
if event != 'call': frame.f_trace = NO_FTRACE
return None
# IFDEF CYTHON
# ret = SafeCallWrapper(ret)
# ENDIF
frame.f_trace = ret # Make sure we keep the returned tracer.
return ret
except SystemExit:
if event != 'call': frame.f_trace = NO_FTRACE
return None
except Exception:
if py_db._finish_debugging_session:
if event != 'call': frame.f_trace = NO_FTRACE
return None # Don't log errors when we're shutting down.
# Log it
try:
if traceback is not None:
# This can actually happen during the interpreter shutdown in Python 2.7
traceback.print_exc()
except:
# Error logging? We're really in the interpreter shutdown...
# (https://github.com/fabioz/PyDev.Debugger/issues/8)
pass
if event != 'call': frame.f_trace = NO_FTRACE
return None
if IS_IRONPYTHON:
# This is far from ideal, as we'll leak frames (we'll always have the last created frame, not really
# the last topmost frame saved -- this should be Ok for our usage, but it may leak frames and things
# may live longer... as IronPython is garbage-collected, things should live longer anyways, so, it
    # shouldn't be an issue as big as it is in CPython -- it may still be annoying, but this should
# be a reasonable workaround until IronPython itself is able to provide that functionality).
#
# See: https://github.com/IronLanguages/main/issues/1630
from _pydevd_bundle.pydevd_additional_thread_info_regular import _tid_to_last_frame
_original_call = ThreadTracer.__call__
def __call__(self, frame, event, arg):
_tid_to_last_frame[self._args[1].ident] = frame
return _original_call(self, frame, event, arg)
ThreadTracer.__call__ = __call__
|
|
from functools import partial
from collections import namedtuple, defaultdict
import struct
from zorro.util import cached_property
try:
from .shm import ShmPixbuf
except ImportError:
import warnings
warnings.warn('Shm is not available, expect poor performance.')
try:
from .pixbuf import Pixbuf
except ImportError:
import warnings
    warnings.warn('Cairo is not available, no drawing will work')
fmtlen = {
0: 0,
8: 1,
16: 2,
32: 4,
}
fmtchar = {
8: 'B',
16: 'H',
32: 'L',
}
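# Illustrative sketch (standalone; only the stdlib struct module is used): these
# tables map an X11 property "format" (bits per item) to a byte length and a
# struct code. Core.get_property() below builds its unpack format the same way.
def _demo_unpack_property_value():
    raw = struct.pack('<3L', 1, 2, 3)        # pretend reply: three CARD32 items
    fmt = 32                                  # result['format'] from the server
    count = len(raw) // fmtlen[fmt]           # 12 bytes / 4 bytes per item == 3
    return struct.unpack('<{}{}'.format(count, fmtchar[fmt]), raw)   # (1, 2, 3)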
class Rectangle(namedtuple('_Rectangle', 'x y width height')):
__slots__ = ()
class Const(int):
def __new__(cls, val, name):
return super().__new__(cls, val)
def __init__(self, val, name):
self.name = name
def __repr__(self):
return '<{} {}:{}>'.format(self.__class__.__name__, self.name, self)
class Atom(Const):
pass
class AtomWrapper(object):
def __init__(self, connection, proto):
self._conn = connection
self.proto = proto
self._atoms = {}
for k, v in self.proto.enums['Atom'].items():
atom = Atom(v, k)
self._atoms[v] = atom
setattr(self, k, atom)
def __getattr__(self, name):
assert name.isidentifier()
props = self._conn.do_request(self.proto.requests['InternAtom'],
only_if_exists=False,
name=name,
)
atom = Atom(props['atom'], name)
self._atoms[props['atom']] = atom
setattr(self, name, atom)
return atom
def __getitem__(self, value):
try:
return self._atoms[value]
except KeyError:
props = self._conn.do_request(self.proto.requests['GetAtomName'],
atom=value)
atom = Atom(value, props['name'])
self._atoms[value] = atom
setattr(self, props['name'], atom)
return atom
class EnumWrapper(object):
def __init__(self, enums):
for k, v in enums.items():
setattr(self, k, Const(v, k))
class RawWrapper(object):
def __init__(self, conn, proto, opcode=None):
self._conn = conn
self._proto = proto
self._opcode = opcode
def __getattr__(self, name):
return partial(self._conn.do_request,
self._proto.requests[name], _opcode=self._opcode)
class Core(object):
def __init__(self, connection):
self._conn = connection
self._conn.connection()
self.proto = connection.proto.subprotos['xproto']
self.atom = AtomWrapper(connection, self.proto)
self.raw = RawWrapper(connection, self.proto)
for k, lst in self.proto.enums.items():
setattr(self, k, EnumWrapper(lst))
for k, v in connection.proto.subprotos.items():
if not v.extension:
continue
ext = connection.query_extension(k)
if not ext['present']:
continue
rw = RawWrapper(self._conn, v, ext['major_opcode'])
setattr(self, k, rw)
for ename, lst in v.enums.items():
setattr(rw, ename, EnumWrapper(lst))
self.root = self._conn.init_data['roots'][0]
self.root_window = self.root['root']
pad = self._conn.init_data['bitmap_format_scanline_pad']
assert pad % 8 == 0
self.bitmap_stride = pad//8
self.current_event = None
self.last_event = None
self.last_time = 0
self._event_iterator = self._events()
def init_keymap(self):
self.keycode_to_keysym = {}
self.shift_keycode_to_keysym = {}
self.keysym_to_keycode = defaultdict(list)
idata = self._conn.init_data
mapping = self.raw.GetKeyboardMapping(
first_keycode=idata['min_keycode'],
count=idata['max_keycode'] - idata['min_keycode'],
)
mapiter = iter(mapping['keysyms'])
for row in zip(range(idata['min_keycode'], idata['max_keycode']),
*(mapiter for i in range(mapping['keysyms_per_keycode']))):
self.keycode_to_keysym[row[0]] = row[1]
self.shift_keycode_to_keysym[row[0]] = row[2]
self.keysym_to_keycode[row[1]].append(row[0])
caps = self.ModMask.Lock # caps lock
num = getattr(self.ModMask, '2') # mod2 is usually numlock
mode = getattr(self.ModMask, '5') # mod5 is usually mode_switch
self.modifiers_mask = ~(caps|num|mode)
def create_toplevel(self, bounds, border=0, klass=None, params={}):
return self.create_window(bounds,
border=border,
klass=klass,
parent=self.root_window,
params=params)
def create_window(self, bounds, border=0, klass=None, parent=0, params={}):
wid = self._conn.new_xid()
root = self.root
self.raw.CreateWindow(**{
'wid': wid,
'root': root['root'],
'depth': 0,
'parent': parent or root['root'],
'visual': 0,
'x': bounds.x,
'y': bounds.y,
'width': bounds.width,
'height': bounds.height,
'border_width': border,
'class': klass,
'params': params,
})
return wid
def send_event(self, event_type, event_mask, dest, **kw):
etype = self.proto.events[event_type]
buf = bytearray([etype.number])
etype.write_to(buf, kw)
buf[2:2] = b'\x00\x00'
buf += b'\x00'*(32 - len(buf))
self.raw.SendEvent(
propagate=False,
destination=dest,
event_mask=event_mask,
event=buf,
)
def get_property(self, win, name):
result = self.raw.GetProperty(
delete=False,
window=win,
property=name,
type=self.atom.Any,
long_offset=0,
long_length=65536)
typ = self.atom[result['type']]
if result['format'] == 0:
return typ, None
elif typ in (self.atom.STRING, self.atom.UTF8_STRING):
return typ, result['value'].decode('utf-8', 'replace')
return typ, struct.unpack('<{}{}'.format(
len(result['value']) // fmtlen[result['format']],
fmtchar[result['format']]),
result['value'])
def _events(self):
for i in self._conn.get_events():
try:
self.current_event = i
self.last_event = i
if hasattr(i, 'time'):
self.last_time = i.time
yield i
finally:
self.current_event = None
def get_events(self):
return self._event_iterator
def pixbuf(self, width, height):
if width*height < 1024:
return Pixbuf(width, height, self)
elif hasattr(self, 'shm') and ShmPixbuf:
return ShmPixbuf(width, height, self)
elif hasattr(self, 'bigreq') or width*height*4 < 260000:
return Pixbuf(width, height, self)
@cached_property
def pixbuf_gc(self):
res = self._conn.new_xid()
self.raw.CreateGC(
cid=res,
drawable=self.root_window,
params={},
)
return res
|
|
from visual import *
from visual.graph import *
from scipy.integrate import odeint
from fuzzy import *
from cart import *
from world import *
import fuzzy.storage.fcl.Reader
import numpy as np
import scipy as sp
import visual.controls as cntrl
global angles
global position
isPaused = False
isStarted = False
def uhol(y, x):
y0 = y[0]
y1 = y[1]
y2 = (-a12/z)*y0 - Fk/mk
return y1, y2
def poloha(y, x):
global i
i = 0
x0 = y[0]
x1 = y[1]
x2 = (a11/z)*angles[i,0] + Fk/mk
i = i + 1
return x1, x2
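# Illustrative sketch: uhol() and poloha() above follow the usual odeint
# convention of returning the derivatives of a 2-element state (value, velocity).
# A self-contained harmonic-oscillator example with the same shape:
def _demo_odeint_convention():
    def oscillator(y, t):
        return y[1], -y[0]                       # (d position, d velocity)
    t = np.linspace(0, 1, 5)
    states = odeint(oscillator, (1.0, 0.0), t)   # rows are (position, velocity)
    return states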
def initSystem():
global cart1,distance,l,mk,mg,s1,s2,s3,s4
s1.value = 50
s2.value = 50
s3.value = 50
s4.value = 50
cart1.setPos(50/100 * 15 - 8)
cart1.setLength(50/200*5+5)
cart1.setCartMass(50/100)
cart1.setMass(75/100 * 1.9 + 0.1)
distance = s1.value/100 * 15 + 5
l = s2.value/100 * 5 + 5
mk = s3.value/100 * 850 + 150
mg = s4.value/1000 * 2500 + 500
def setStart():
global isPaused, isStarted
if not(isPaused):
isStarted = True
if isPaused:
isPaused = False
def setReset():
global isPaused,isStarted
global a,b,x,Fk,init1,init2
isPaused = False
isStarted = False
a = 0
b = 0
x = 0
Fk = 0
init1 = 0.0, 0.0
init2 = 0.0, 0.0
def setPause():
global isPaused
isPaused = True
def setExit():
exit()
# init parameters
mk = 150.0
mg = 500.0
l = 10
g = 9.81
a11 = mg / mk
a12 = 1 + mg / mk
z = l / g
dest = 3
lengthL = 2
# create fuzzy regulator
system = fuzzy.storage.fcl.Reader.Reader().load_from_file("modifiedRules.fcl")
#system = fuzzy.storage.fcl.Reader.Reader().load_from_file("diplomovkaRules.fcl")
#control window
c = cntrl.controls(x=0, y=600, width=920, height=210, background = color.black, range=60)
s1 = slider(pos=(-25,6), width=3, length=30, axis=(1,0,0), color = color.blue)
s2 = slider(pos=(-25,2), width=3, length=30, axis=(1,0,0), color = color.blue)
s3 = slider(pos=(-25,-2), width=3, length=30, axis=(1,0,0), color = color.blue)
s4 = slider(pos=(-25, -6), width=3, length=30, axis=(1,0,0), color = color.blue)
m1 = menu(pos=(-43,6), width=30, height=3, text = 'DESTINATION:')
m2 = menu(pos=(-43,2), width=30, height=3, text = 'ROPE LENGTH:')
m3 = menu(pos=(-43,-2), width=30, height=3, text = 'CART MASS:')
m4 = menu(pos=(-43,-6), width=30, height=3, text = 'LOAD MASS:')
b1 = button(pos=(15,5), height=8, width=13, text='START', action=lambda: setStart())
b2 = button(pos=(15,-5), height=8, width=13, text='RESET', action=lambda: setReset())
b3 = button(pos=(30,5), height=8, width=13, text='PAUSE', action=lambda: setPause())
b4 = button(pos=(30,-5), height=8, width=13, text='EXIT', action=lambda: setExit())
#main scene
scene = display(title='Fuzzy GantryCrane Simulator',
x=0, y=0, width=920, height=600, center=(0,0,0), background=color.black)
scene.fov = 1.5
#create objects in scene
world1 = World()
cart1 = Cart()
cart1.setPos(-13)
cart1.setLength(l/2)
# set scene camera position
scene.forward = (10,-8,10)
# graph for position of cart
gd1 = gdisplay(x = 920, y = 0, width = 920, height = 270,
title = 'Position of cart', xtitle = 'time',ytitle = 'position', ymax = 25)
f1 = gcurve(color = color.red)
f1a = gcurve(color = color.yellow)
# graph of angle of rope
gd2 = gdisplay(x = 920, y = 270, width = 920, height = 270,
title = 'Angle', xtitle = 'time',ytitle = 'angle(degrees)', ymax = 30, ymin = -30)
f2 = gcurve(color = color.cyan)
# graph for power output
gd3 = gdisplay(x = 920, y = 270*2, width = 920, height = 270,
title = 'Power for pushing the cart', xtitle = 'time',ytitle = 'Power')
f3 = gcurve(color = color.green)
while 1:
# preallocate input and output values
my_input = {
"Range" : 0.0,
"Speed" :0.0,
"Alfa" : 0.0,
"AngularVelocity" : 0.0
}
my_output = {
"Power" : 0.0
}
initSystem()
while isStarted == False:
rate(20)
#set graphic model
cart1.setPos(s1.value/100 * 15 - 8)
cart1.setLength(s2.value/200*5+5)
cart1.setMass(s4.value/100 + 1)
cart1.setCartMass(s3.value/80 + 0.2)
#set physical model parameters
distance = s1.value/100 * 15 + 5
l = s2.value/100 * 5 + 5
mk = s3.value/100 * 850 + 150
mg = s4.value/1000 * 2500 + 500
a = 0
b = 0
x = 0
Fk = 0
init1 = 0.0, 0.0
init2 = 0.0, 0.0
goback = 0
while goback < distance:
cart1.setPos(distance-goback-13)
goback = goback + 0.2
rate(20)
while isStarted:
rate(50)
if not(isPaused):
x = np.linspace(a, b, 2)
a = b
b = b + 0.025
angles = odeint(uhol, init1, x)
position = odeint(poloha, init2, x)
init1 = angles[1, 0], angles[1, 1]
init2 = position[1, 0], position[1, 1]
#set cart position
cart1.setPos(position[1, 0]-13)
            cart1.setRot(-angles[1, 0])
my_input = {
"Range" : distance - position[1,0],
"Speed" :position[1,1],
"Alfa" : angles[1,0],
"AngularVelocity" : angles[1,1]
}
my_output = {
"Power" : Fk
}
temp = angles[1,0]*180/3.14
f2.plot(pos = (b,temp))
f1.plot(pos = (b,position[1,0]))
f1a.plot(pos = (b, distance))
f3.plot(pos = (b,Fk))
system.calculate(my_input, my_output)
# if position[1,0] < KAM + 0.5 and position[1,0] > KAM - 0.5 : gain = 20
# else : gain=2220
Fk= my_output["Power"]*125
|
|
from datetime import timedelta
from django.urls import reverse
from django.utils import timezone
from workshops.models import Person
from workshops.tests.base import TestBase
class TestEmptyDuplicates(TestBase):
"""Tests to return empty context variables when no matches found."""
def setUp(self):
self._setUpUsersAndLogin()
self.harry = Person.objects.create(
personal="Harry",
family="Potter",
username="potter_harry",
email="[email protected]",
)
self.kira = Person.objects.create(
personal="Light",
family="Yagami",
username="light_yagami",
email="[email protected]",
)
self.batman = Person.objects.create(
personal="Bruce",
family="Wayne",
username="bruce_wayne",
email="[email protected]",
)
self.ironman = Person.objects.create(
personal="Tony",
family="Stark",
username="tony_stark",
email="[email protected]",
)
self.url = reverse("duplicate_persons")
def test_switched_names_persons(self):
"""Ensure none of the above persons are in `switched_persons`."""
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
self.assertNotIn(self.harry, switched)
self.assertNotIn(self.kira, switched)
self.assertNotIn(self.batman, switched)
self.assertNotIn(self.ironman, switched)
def test_duplicate_persons(self):
"""Ensure none of the above persons are in `duplicate_persons`."""
rv = self.client.get(self.url)
switched = rv.context["duplicate_persons"]
self.assertNotIn(self.harry, switched)
self.assertNotIn(self.kira, switched)
self.assertNotIn(self.batman, switched)
self.assertNotIn(self.ironman, switched)
class TestFindingDuplicates(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
self.harry = Person.objects.create(
personal="Harry",
family="Potter",
username="potter_harry",
email="[email protected]",
)
self.potter = Person.objects.create(
personal="Potter",
family="Harry",
username="harry_potter",
email="[email protected]",
)
self.ron = Person.objects.create(
personal="Ron",
family="Weasley",
username="weasley_ron",
email="[email protected]",
)
self.ron2 = Person.objects.create(
personal="Ron",
family="Weasley",
username="weasley_ron_2",
email="[email protected]",
)
self.url = reverse("duplicate_persons")
def test_switched_names_persons(self):
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
self.assertIn(self.harry, switched)
self.assertIn(self.potter, switched)
self.assertNotIn(self.ron, switched)
self.assertNotIn(self.ron2, switched)
def test_duplicate_persons(self):
rv = self.client.get(self.url)
duplicated = rv.context["duplicate_persons"]
self.assertIn(self.ron, duplicated)
self.assertIn(self.ron2, duplicated)
self.assertNotIn(self.harry, duplicated)
self.assertNotIn(self.potter, duplicated)
class TestFindingReviewedDuplicates(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
self.harry = Person.objects.create(
personal="Harry",
family="Potter",
username="potter_harry",
email="[email protected]",
)
self.potter = Person.objects.create(
personal="Potter",
family="Harry",
username="harry_potter",
email="[email protected]",
)
self.ron = Person.objects.create(
personal="Ron",
family="Weasley",
username="weasley_ron",
email="[email protected]",
)
self.ron2 = Person.objects.create(
personal="Ron",
family="Weasley",
username="weasley_ron_2",
email="[email protected]",
)
self.url = reverse("duplicate_persons")
self.review_url = reverse("review_duplicate_persons")
def test_finding_unreviewed_duplicates(self):
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
duplicates = rv.context["duplicate_persons"]
self.assertEqual(self.harry.duplication_reviewed_on, None)
self.assertEqual(self.potter.duplication_reviewed_on, None)
self.assertEqual(self.ron.duplication_reviewed_on, None)
self.assertEqual(self.ron2.duplication_reviewed_on, None)
self.assertIn(self.harry, switched)
self.assertIn(self.potter, switched)
self.assertNotIn(self.ron, switched)
self.assertNotIn(self.ron2, switched)
self.assertIn(self.ron, duplicates)
self.assertIn(self.ron2, duplicates)
self.assertNotIn(self.harry, duplicates)
self.assertNotIn(self.potter, duplicates)
def test_not_finding_reviewed_duplicates(self):
"""Ensure records with `last_changed_at` timestamp close to their
`duplication_reviewed_on` timestamp don't show up in the results."""
# modify duplication_reviewed_on to point to the
# same timestamp (or very close) that last_updated_at will
# after save() so that these records don't show up in results
review_date = timezone.now()
self.harry.duplication_reviewed_on = review_date
self.harry.save()
self.potter.duplication_reviewed_on = review_date
self.potter.save()
self.ron.duplication_reviewed_on = review_date
self.ron.save()
self.ron2.duplication_reviewed_on = review_date
self.ron2.save()
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
duplicates = rv.context["duplicate_persons"]
self.assertNotIn(self.harry, switched)
self.assertNotIn(self.potter, switched)
self.assertNotIn(self.ron, switched)
self.assertNotIn(self.ron2, switched)
self.assertNotIn(self.ron, duplicates)
self.assertNotIn(self.ron2, duplicates)
self.assertNotIn(self.harry, duplicates)
self.assertNotIn(self.potter, duplicates)
def test_finding_duplicates_changed_soon_after_reviewed(self):
# make sure after changing the timestamp difference between
# `last_updated_at` and `duplication_reviewed_on` to couple minutes,
# the records show up
review_date = timezone.now() - timedelta(minutes=2)
self.harry.duplication_reviewed_on = review_date
self.harry.save()
self.potter.duplication_reviewed_on = review_date
self.potter.save()
self.ron.duplication_reviewed_on = review_date
self.ron.save()
self.ron2.duplication_reviewed_on = review_date
self.ron2.save()
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
duplicates = rv.context["duplicate_persons"]
self.assertIn(self.harry, switched)
self.assertIn(self.potter, switched)
self.assertNotIn(self.ron, switched)
self.assertNotIn(self.ron2, switched)
self.assertIn(self.ron, duplicates)
self.assertIn(self.ron2, duplicates)
self.assertNotIn(self.harry, duplicates)
self.assertNotIn(self.potter, duplicates)
def test_finding_reviewed_changed_duplicates(self):
# modify last_updated_at and duplication_reviewed_on
# so that these records don't show up in results
change_timestamp = timezone.now()
review_date = change_timestamp - timedelta(days=1)
self.harry.duplication_reviewed_on = review_date
self.harry.last_updated_at = change_timestamp
self.harry.save()
self.potter.duplication_reviewed_on = review_date
self.potter.last_updated_at = change_timestamp
self.potter.save()
self.ron.duplication_reviewed_on = review_date
self.ron.last_updated_at = change_timestamp
self.ron.save()
self.ron2.duplication_reviewed_on = review_date
self.ron2.last_updated_at = change_timestamp
self.ron2.save()
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
duplicates = rv.context["duplicate_persons"]
self.assertIn(self.harry, switched)
self.assertIn(self.potter, switched)
self.assertNotIn(self.ron, switched)
self.assertNotIn(self.ron2, switched)
self.assertIn(self.ron, duplicates)
self.assertIn(self.ron2, duplicates)
self.assertNotIn(self.harry, duplicates)
self.assertNotIn(self.potter, duplicates)
def test_not_finding_partially_reviewed_duplicates(self):
"""Ensure that if some records from the duplicated/switched
names pair don't show up in the results, the other records won't
either."""
# modify duplication_reviewed_on to point to the
# same date that last_updated_at will after save()
# so that these records don't show up in results
review_date = timezone.now()
self.harry.duplication_reviewed_on = review_date
self.harry.save()
# self.potter.duplication_reviewed_on = review_date
# self.potter.save()
self.ron.duplication_reviewed_on = review_date
self.ron.save()
# self.ron2.duplication_reviewed_on = review_date
# self.ron2.save()
rv = self.client.get(self.url)
switched = rv.context["switched_persons"]
duplicates = rv.context["duplicate_persons"]
self.assertNotIn(self.harry, switched)
self.assertNotIn(self.potter, switched)
self.assertNotIn(self.ron, switched)
self.assertNotIn(self.ron2, switched)
self.assertNotIn(self.ron, duplicates)
self.assertNotIn(self.ron2, duplicates)
self.assertNotIn(self.harry, duplicates)
self.assertNotIn(self.potter, duplicates)
def test_reviewing_persons(self):
self.assertFalse(self.harry.duplication_reviewed_on)
self.assertFalse(self.ron.duplication_reviewed_on)
self.client.post(self.review_url, {"person_id": [self.harry.pk, self.ron.pk]})
self.harry.refresh_from_db()
self.ron.refresh_from_db()
self.assertTrue(self.harry.duplication_reviewed_on)
self.assertTrue(self.ron.duplication_reviewed_on)
|
|
import copy
import datetime as dt
import io
import sys
import numpy as np
import thalesians.tsa.checks as checks
import thalesians.tsa.conversions as conversions
import thalesians.tsa.utils as utils
def format_q_time(thing):
if isinstance(thing, dt.datetime) or isinstance(thing, dt.time):
hour = thing.hour
minute = thing.minute
second = thing.second
microsecond = thing.microsecond
else:
raise ValueError('Cannot create a q time string representation of "%s"' % repr(thing))
    millisecond = microsecond // 1000
return '%02d:%02d:%02d.%03d' % (hour, minute, second, millisecond)
def format_q_date(thing):
if isinstance(thing, dt.datetime) or isinstance(thing, dt.date):
year = thing.year
month = thing.month
day = thing.day
else:
raise ValueError('Cannot create a q date string representation of "%s"' % repr(thing))
return '%04d.%02d.%02d' % (year, month, day)
def format_q_datetime(thing1, thing2=None):
if thing2 is not None:
if isinstance(thing1, dt.date) and isinstance(thing2, dt.time):
year = thing1.year
month = thing1.month
day = thing1.day
hour = thing2.hour
minute = thing2.minute
second = thing2.second
microsecond = thing2.microsecond
elif isinstance(thing1, dt.time) and isinstance(thing2, dt.date):
year = thing2.year
month = thing2.month
day = thing2.day
hour = thing1.hour
minute = thing1.minute
second = thing1.second
microsecond = thing1.microsecond
else:
raise ValueError('Cannot create a q datetime string representation of "%s" and "%s"' % (repr(thing1), repr(thing2)))
else:
if isinstance(thing1, dt.datetime):
year = thing1.year
month = thing1.month
day = thing1.day
hour = thing1.hour
minute = thing1.minute
second = thing1.second
microsecond = thing1.microsecond
elif isinstance(thing1, dt.date):
year = thing1.year
month = thing1.month
day = thing1.day
hour = 0
minute = 0
second = 0
microsecond = 0
else:
raise ValueError('Cannot create a q datetime string representation of "%s"' % repr(thing1))
    millisecond = microsecond // 1000
return '%04d.%02d.%02dT%02d:%02d:%02d.%03d' % (year, month, day, hour, minute, second, millisecond)
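# Illustrative sketch of the three helpers above (the sample timestamp is arbitrary):
def _demo_q_formatting():
    moment = dt.datetime(2020, 1, 2, 3, 4, 5, 678000)
    assert format_q_date(moment) == '2020.01.02'
    assert format_q_time(moment) == '03:04:05.678'
    assert format_q_datetime(moment) == '2020.01.02T03:04:05.678'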
class QType(object):
def __init__(self, aggr, name, symbol, char, num, null_value, size, maker):
self.__aggr = aggr
self.__name = name
self.__symbol = symbol
self.__char = char
self.__num = num
self.__null_value = null_value
self.__size = size
self.__maker = maker
def __is_aggr(self):
return self.__aggr
aggr = property(fget=__is_aggr)
def __get_name(self):
return self.__name
name = property(fget=__get_name)
def __get_symbol(self):
return self.__symbol
symbol = property(fget=__get_symbol)
def __get_char(self):
return self.__char
char = property(fget=__get_char)
def __get_num(self):
return self.__num
num = property(fget=__get_num)
def __get_null_value(self):
return self.__null_value
null_value = property(fget=__get_null_value)
def __get_size(self):
return self.__size
size = property(fget=__get_size)
def __get_maker(self):
return self.__maker
maker = property(fget=__get_maker)
def __str__(self):
return self.__name
def __repr__(self):
return self.__name
def __eq__(self, other):
return isinstance(other, QType) and self.num == other.num
def __hash__(self):
return hash(self.__name)
class QValue(object):
def __init__(self, value, q_type):
self.__value = copy.copy(value)
self.__q_type = q_type
def __get_value(self):
return self.__value
value = property(fget=__get_value)
def __get_q_type(self):
return self.__q_type
q_type = property(fget=__get_q_type)
def to_literal(self):
raise NotImplementedError('Pure virtual method')
def __str__(self):
return self.to_literal()
def __repr__(self):
return self.to_literal()
class QBooleanValue(QValue):
def __init__(self, value=None):
super(QBooleanValue, self).__init__(value, QTypes.BOOLEAN)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '1b' if self.value else '0b'
def make_q_boolean_value(value=None):
return QBooleanValue(value)
class QCharValue(QValue):
def __init__(self, value=None):
super(QCharValue, self).__init__(value, QTypes.CHAR)
    def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '"%s"' % str(self.value)
def make_q_char_value(value=None):
return QCharValue(value)
def to_q_string_literal(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
s = s.replace('\n', '\\n')
return '"%s"' % s
def to_q_symbol_literal(s):
return '(`$%s)' % to_q_string_literal(s)
class QSymbolValue(QValue):
def __init__(self, value=None):
super(QSymbolValue, self).__init__(value, QTypes.SYMBOL)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return to_q_symbol_literal(str(self.value))
def make_q_symbol_value(value=None):
return QSymbolValue(value)
class QStringValue(QValue):
def __init__(self, value=None):
super(QStringValue, self).__init__(value, QTypes.CHAR_LIST)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return to_q_string_literal(str(self.value))
def make_q_string_value(value=None):
return QStringValue(value)
class QDateTimeValue(QValue):
def __init__(self, value=None):
if isinstance(value, dt.datetime):
# round down the microseconds
milliseconds = int(value.microsecond / 1000)
timeTuple = value.timetuple()
value = (timeTuple[0], timeTuple[1], timeTuple[2], timeTuple[3], \
timeTuple[4], timeTuple[5], milliseconds)
super(QDateTimeValue, self).__init__(value, QTypes.DATETIME)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '%04d.%02d.%02dT%02d:%02d:%02d.%03d' % self.value[0:7]
def make_q_datetime_value(value=None):
return QDateTimeValue(value)
class QDateValue(QValue):
def __init__(self, value=None):
if isinstance(value, dt.date):
value = value.timetuple()[0:3]
super(QDateValue, self).__init__(value, QTypes.DATE)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '%04d.%02d.%02d' % self.value[0:3]
def make_q_date_value(value=None):
return QDateValue(value)
class QTimeValue(QValue):
def __init__(self, value=None):
if isinstance(value, dt.time):
# round down the microseconds
milliseconds = int(value.microsecond / 1000)
value = (value.hour, value.minute, value.second, milliseconds)
super(QTimeValue, self).__init__(value, QTypes.TIME)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '%02d:%02d:%02d.%03d' % self.value[0:4]
def make_q_time_value(value=None):
return QTimeValue(value)
class QNumericValue(QValue):
def __init__(self, value, q_type):
super(QNumericValue, self).__init__(value, q_type)
def to_literal(self):
if self.value is None or np.isnan(self.value):
return self.q_type.null_value
return '%s%s' % (str(self.value), self.q_type.char)
class QShortValue(QNumericValue):
def __init__(self, value=None):
super(QShortValue, self).__init__(value, QTypes.SHORT)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '%s%s' % (str(int(self.value)), self.q_type.char)
def make_q_short_value(value=None):
return QShortValue(value)
class QIntValue(QNumericValue):
def __init__(self, value=None):
super(QIntValue, self).__init__(value, QTypes.INT)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
return '%s%s' % (str(int(self.value)), self.q_type.char)
def make_q_int_value(value=None):
return QIntValue(value)
class QLongValue(QNumericValue):
def __init__(self, value=None):
super(QLongValue, self).__init__(value, QTypes.LONG)
def to_literal(self):
if self.value is None:
return self.q_type.null_value
if sys.version_info >= (3, 0):
result = '%s%s' % (str(int(self.value)), self.q_type.char)
else:
result = '%s%s' % (str(long(self.value)), self.q_type.char)
return result
def make_q_long_value(value=None):
return QLongValue(value)
class QRealValue(QNumericValue):
def __init__(self, value=None):
super(QRealValue, self).__init__(value, QTypes.REAL)
def make_q_real_value(value=None):
return QRealValue(value)
class QFloatValue(QNumericValue):
def __init__(self, value=None):
super(QFloatValue, self).__init__(value, QTypes.FLOAT)
def make_q_float_value(value=None):
return QFloatValue(value)
class QUntypedListValue(QValue):
def __init__(self, value):
if not isinstance(value, QValue):
if not hasattr(value, '__iter__'):
raise ValueError('Cannot construct a q untyped list value from a Python object of type %s, which does not seem to be iterable' % str(type(value)))
value = [make_q_value(element) for element in value]
super(QUntypedListValue, self).__init__(value, QTypes.UNTYPED_LIST)
def to_literal(self):
literal = io.StringIO()
if len(self.value) == 1:
literal.write('enlist')
literal.write('(')
for index, element in enumerate(self.value):
if index > 0:
literal.write(';')
if hasattr(element, 'to_literal'):
literal.write(element.to_literal())
else:
literal.write(str(element))
literal.write(')')
return literal.getvalue()
def make_q_untyped_list_value(value):
return QUntypedListValue(value)
class QIdentifierValue(QValue):
def __init__(self, value):
super(QIdentifierValue, self).__init__(value, QTypes.IDENTIFIER)
def to_literal(self):
return self.value
def make_q_identifier_value(value):
return QIdentifierValue(value)
def infer_q_type(value, prefer_strings_to_symbols=False):
if isinstance(value, QValue):
return value.q_type
elif isinstance(value, bool):
return QTypes.BOOLEAN
elif isinstance(value, int):
return QTypes.INT if sys.version_info < (3, 0) else QTypes.LONG
elif sys.version_info < (3, 0) and isinstance(value, long):
return QTypes.LONG
elif isinstance(value, float):
return QTypes.FLOAT
elif isinstance(value, str) or (sys.version_info < (3, 0) and isinstance(value, unicode)):
return QTypes.CHAR_LIST if prefer_strings_to_symbols else QTypes.SYMBOL
elif isinstance(value, dt.datetime):
return QTypes.DATETIME
elif isinstance(value, dt.date):
return QTypes.DATE
elif isinstance(value, dt.time):
return QTypes.TIME
elif hasattr(value, '__iter__'):
return QTypes.UNTYPED_LIST
else:
raise ValueError('Unable to infer the q type corresponding to the Python type %s' % str(type(value)))
def make_q_value(value, q_type=None, prefer_strings_to_symbols=False):
if isinstance(value, QValue):
return value
else:
q_type = infer_q_type(value, prefer_strings_to_symbols) if q_type is None else q_type
maker = q_type.maker
if maker is None:
raise ValueError('Unable to make the q type %s from Python type %s' % (q_type.name, str(type(value))))
return maker(value)
class QTypes(object):
# This isn't really a type
IDENTIFIER = QType( False, None , None , None, None, None , None, None )
UNTYPED_LIST = QType( True , None , None , None, 0 , None , None, make_q_untyped_list_value )
# Primitive types
BOOLEAN = QType( False, 'boolean' , '`boolean' , 'b' , -1 , '0b' , 1 , make_q_boolean_value )
BYTE = QType( False, 'byte' , '`byte' , 'x' , -4 , '0x00' , 1 , None )
SHORT = QType( False, 'short' , '`short' , 'h' , -5 , '0Nh' , 2 , make_q_short_value )
INT = QType( False, 'int' , '`int' , 'i' , -6 , '0N' , 4 , make_q_int_value )
LONG = QType( False, 'long' , '`long' , 'j' , -7 , '0Nj' , 8 , make_q_long_value )
REAL = QType( False, 'real' , '`real' , 'e' , -8 , '0Ne' , 4 , make_q_real_value )
FLOAT = QType( False, 'float' , '`float' , 'f' , -9 , '0n' , 8 , make_q_float_value )
CHAR = QType( False, 'char' , '`char' , 'c' , -10 , '""' , 1 , make_q_char_value )
SYMBOL = QType( False, 'symbol' , '`' , 's' , -11 , '`' , None, make_q_symbol_value )
MONTH = QType( False, 'month' , '`month' , 'm' , -13 , '0Nm' , 4 , None )
DATE = QType( False, 'date' , '`date' , 'd' , -14 , '0Nd' , 4 , make_q_date_value )
DATETIME = QType( False, 'datetime', '`datetime', 'z' , -15 , '0Nz' , 4 , make_q_datetime_value )
MINUTE = QType( False, 'minute' , '`minute' , 'u' , -17 , '0Nu' , 4 , None )
SECOND = QType( False, 'second' , '`second' , 'v' , -18 , '0Nv' , 4 , None )
TIME = QType( False, 'time' , '`time' , 't' , -19 , '0Nt' , 4 , make_q_time_value )
# Typed lists
BOOLEAN_LIST = QType( True , 'boolean' , '`boolean' , 'b' , 1 , '0b' , 1 , None )
BYTE_LIST = QType( True , 'byte' , '`byte' , 'x' , 4 , '0x00' , 1 , None )
SHORT_LIST = QType( True , 'short' , '`short' , 'h' , 5 , '0Nh' , 2 , None )
INT_LIST = QType( True , 'int' , '`int' , 'i' , 6 , '0N' , 4 , None )
LONG_LIST = QType( True , 'long' , '`long' , 'j' , 7 , '0Nj' , 8 , None )
REAL_LIST = QType( True , 'real' , '`real' , 'e' , 8 , '0Ne' , 4 , None )
FLOAT_LIST = QType( True , 'float' , '`float' , 'f' , 9 , '0n' , 8 , None )
CHAR_LIST = QType( True , 'char' , '`char' , 'c' , 10 , '""' , 1 , make_q_string_value )
SYMBOL_LIST = QType( True , 'symbol' , '`' , 's' , 11 , '`' , None, None )
MONTH_LIST = QType( True , 'month' , '`month' , 'm' , 13 , '0Nm' , 4 , None )
DATE_LIST = QType( True , 'date' , '`date' , 'd' , 14 , '0Nd' , 4 , None )
DATETIME_LIST = QType( True , 'datetime', '`datetime', 'z' , 15 , '0Nz' , 4 , None )
MINUTE_LIST = QType( True , 'minute' , '`minute' , 'u' , 17 , '0Nu' , 4 , None )
SECOND_LIST = QType( True , 'second' , '`second' , 'v' , 18 , '0Nv' , 4 , None )
TIME_LIST = QType( True , 'time' , '`time' , 't' , 19 , '0Nt' , 4 , None )
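# --- Illustrative usage sketch (not part of the original module) ---
# Shows how make_q_value renders a few Python values as q literals using the
# type table above. The expected literals in the comments are assumptions
# derived from the formatting code in this module.
def _example_q_literals():
    import datetime as _dt
    print(make_q_value(True).to_literal())                                    # 1b
    print(make_q_value(3.5).to_literal())                                     # 3.5f
    print(make_q_value('abc').to_literal())                                   # (`$"abc")
    print(make_q_value('abc', prefer_strings_to_symbols=True).to_literal())   # "abc"
    print(make_q_value(_dt.date(2020, 1, 31)).to_literal())                   # 2020.01.31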
class QCreateTableStatementBuilder(object):
def __init__(self, overwrite=False):
self.__table = None
self.__columns = []
self.__appended_non_key_columns = False
self.__key_column_count = 0
self.overwrite = overwrite
def set_table(self, table):
self.__table = table
return self
def append_column(self, name, q_type=None, key=False):
assert not (key and self.__appended_non_key_columns), 'Cannot append a key column after non-key columns'
self.__columns.append((name, q_type))
if key:
self.__key_column_count += 1
return self
def to_string(self):
assert self.__table is not None, 'Table is not set'
statement = io.StringIO()
statement.write(self.__table)
statement.write(':')
if not self.overwrite:
statement.write('$[')
statement.write(to_q_symbol_literal(self.__table))
statement.write(' in value["\\\\v"]; ')
statement.write(self.__table)
statement.write('; ')
statement.write('([')
for column_index in range(self.__key_column_count):
column = self.__columns[column_index]
statement.write(column[0])
statement.write(':')
if column[1] is not None and not column[1] == QTypes.CHAR_LIST:
statement.write(column[1].symbol)
statement.write('$')
statement.write('()')
if column_index < self.__key_column_count - 1:
statement.write(';')
statement.write('];')
column_count = len(self.__columns)
for column_index in range(self.__key_column_count, len(self.__columns)):
column = self.__columns[column_index]
statement.write(column[0])
statement.write(':')
if column[1] is not None and not column[1] == QTypes.CHAR_LIST:
statement.write(column[1].symbol)
statement.write('$')
statement.write('()')
if column_index < column_count - 1:
statement.write(';')
statement.write(')')
if not self.overwrite:
statement.write(']')
return statement.getvalue()
def __str__(self):
return self.to_string()
def __repr__(self):
return self.to_string()
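# --- Illustrative usage sketch (not part of the original module) ---
# Builds a create-table statement for a hypothetical keyed 'trades' table;
# the table and column names are made up for the example.
def _example_create_table_statement():
    builder = QCreateTableStatementBuilder(overwrite=True)
    builder.set_table('trades')
    builder.append_column('sym', QTypes.SYMBOL, key=True)
    builder.append_column('price', QTypes.FLOAT)
    builder.append_column('size', QTypes.LONG)
    # Expected result (assumption based on to_string above):
    # trades:([sym:`$()];price:`float$();size:`long$())
    return builder.to_string()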
class QUpsertStatementBuilder(object):
def __init__(self):
self.__table = None
self.__q_values = []
def set_table(self, table):
self.__table = table
return self
def append(self, q_value):
self.__q_values.append(q_value)
return self
def to_string(self):
assert self.__table is not None, 'Table is not set'
statement = io.StringIO()
statement.write('upsert[')
statement.write(to_q_symbol_literal(self.__table))
statement.write('](')
for index, q_value in enumerate(self.__q_values):
if isinstance(q_value, QValue):
statement.write(q_value.to_literal())
else:
statement.write(q_value)
if index < len(self.__q_values) - 1:
statement.write(';')
statement.write(')')
return statement.getvalue()
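# --- Illustrative usage sketch (not part of the original module) ---
# One upsert row for the hypothetical 'trades' table; the values are made up.
def _example_upsert_statement():
    builder = QUpsertStatementBuilder()
    builder.set_table('trades')
    builder.append(make_q_value('IBM'))             # symbol column
    builder.append(make_q_value(101.5))             # float column
    builder.append(make_q_value(200, QTypes.LONG))  # long column
    # Expected result (assumption): upsert[(`$"trades")]((`$"IBM");101.5f;200j)
    return builder.to_string()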
class QBatchAppendStatementBuilder(object):
def __init__(self, rows_per_batch=100):
self.__table = None
self.__rows_per_batch = rows_per_batch
self.__rows = []
def set_table(self, table):
self.__table = table
return self
def start_new_row(self):
self.__rows.append([])
def append(self, q_value):
assert len(self.__rows) > 0, 'No row has been started'
self.__rows[-1].append(q_value)
def to_list(self):
assert self.__table is not None, 'Table is not set'
batches = []
i = 0
while i < len(self.__rows):
statement = io.StringIO()
statement.write('.[')
statement.write(to_q_symbol_literal(self.__table))
statement.write(';();,;(')
for j in range(self.__rows_per_batch):
if i >= len(self.__rows): break
statement.write('(')
for index, q_value in enumerate(self.__rows[i]):
if isinstance(q_value, QValue):
statement.write(q_value.to_literal())
else:
statement.write(q_value)
if index < len(self.__rows[i]) - 1:
statement.write(';')
statement.write(')')
if j < self.__rows_per_batch - 1 and i < len(self.__rows) - 1:
statement.write(';')
i += 1
statement.write(')]')
batches.append(statement.getvalue())
return batches
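# --- Illustrative usage sketch (not part of the original module) ---
# Appends two made-up rows to the hypothetical 'trades' table in one batch.
def _example_batch_append():
    builder = QBatchAppendStatementBuilder(rows_per_batch=100)
    builder.set_table('trades')
    for sym, price in (('IBM', 101.5), ('MSFT', 202.25)):
        builder.start_new_row()
        builder.append(make_q_value(sym))
        builder.append(make_q_value(price))
    return builder.to_list()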
class QExpression(object):
def __init__(self, operator, lhs, rhs):
self.operator = operator
self.lhs = lhs
self.rhs = rhs
def __str__(self):
s = io.StringIO()
s.write('(')
s.write(self.operator)
s.write(';')
s.write(str(self.lhs))
s.write(';')
s.write(str(self.rhs))
s.write(')')
return s.getvalue()
class QExpressionFactory(object):
def __init__(self):
pass
def make_plus_expression(self, lhs, rhs):
return QExpression('+', lhs, rhs)
class QConstraint(object):
def __init__(self, relation, lhs, rhs):
self.relation = relation
self.lhs = lhs
self.rhs = rhs
def __str__(self):
s = io.StringIO()
s.write('(')
s.write(str(self.relation))
s.write(';')
s.write(str(self.lhs))
s.write(';(')
if not hasattr(self.rhs, '__iter__'):
if isinstance(self.rhs, QValue) and self.rhs.q_type == QTypes.SYMBOL:
s.write('enlist ')
s.write(str(self.rhs))
else:
if len(self.rhs) == 1 and self.rhs[0].q_type == QTypes.SYMBOL:
                s.write('enlist ')
rhsLen = len(self.rhs)
for rhsIndex, rhsItem in enumerate(self.rhs):
if rhsIndex > 0:
s.write(';')
s.write(str(rhsItem))
s.write('))')
return s.getvalue()
class QConstraintFactory(object):
def make_equal_constraint(self, lhs, rhs):
return QConstraint('=', lhs, rhs)
def make_not_equal_constraint(self, lhs, rhs):
return QConstraint('<>', lhs, rhs)
def make_less_than_constraint(self, lhs, rhs):
return QConstraint('<', lhs, rhs)
def make_greater_than_constraint(self, lhs, rhs):
return QConstraint('>', lhs, rhs)
def make_less_than_or_equal_constraint(self, lhs, rhs):
return QConstraint('<=', lhs, rhs)
def make_greater_than_or_equal_constraint(self, lhs, rhs):
return QConstraint('>=', lhs, rhs)
def make_like_constraint(self, lhs, rhs):
return QConstraint('like', lhs, rhs)
def make_in_constraint(self, lhs, rhs):
return QConstraint('in', lhs, make_q_untyped_list_value((rhs,)))
def make_within_constraint(self, lhs, rhs):
return QConstraint('within', lhs, rhs)
class QQueryBuilder(object):
def __init__(self):
self.__table = None
self.__select_columns = []
self.__by_phrase_columns = []
self.__constraints = []
def set_table(self, table):
self.__table = table
return self
def append_select_column(self, value, name=None):
if name is None:
if isinstance(value, QValue) and value.q_type == QTypes.SYMBOL:
name = value
else:
raise ValueError('Cannot deduce the name of the select column "%s"' % str(value))
self.__select_columns.append((name, value))
return self
def append_constraint(self, constraint):
self.__constraints.append(constraint)
return self
def append_by_phrase_column(self, value, name=None):
if name is None:
if isinstance(value, QValue) and value.q_type == QTypes.SYMBOL:
name = value
else:
raise ValueError('Cannot deduce the name of the by-phrase column "%s"' % str(value))
self.__by_phrase_columns.append((name, value))
return self
def to_string(self):
assert self.__table is not None, 'Table is not set'
query = io.StringIO()
query.write('?[')
query.write(self.__table)
query.write(';(')
constraints_len = len(self.__constraints)
if constraints_len == 0:
query.write('enlist 1b')
elif constraints_len == 1:
query.write('enlist ')
for constraint_index, constraint in enumerate(self.__constraints):
if constraint_index > 0:
query.write(';')
query.write(str(constraint))
query.write(');')
if len(self.__by_phrase_columns) == 0:
query.write('0b')
else:
query.write('(')
by_phrase_columns_len = len(self.__by_phrase_columns)
if by_phrase_columns_len == 1:
query.write('enlist ')
for by_phrase_column_index, by_phrase_column in enumerate(self.__by_phrase_columns):
if by_phrase_column_index > 0:
query.write(';')
query.write(str(by_phrase_column[0]))
query.write(')!(')
if by_phrase_columns_len == 1:
query.write('enlist ')
for by_phrase_column_index, by_phrase_column in enumerate(self.__by_phrase_columns):
if by_phrase_column_index > 0:
query.write(';')
query.write(str(by_phrase_column[1]))
query.write(')')
query.write(';(')
select_columns_len = len(self.__select_columns)
if select_columns_len == 1:
query.write('enlist ')
for select_column_index, select_column in enumerate(self.__select_columns):
if select_column_index > 0:
query.write(';')
query.write(str(select_column[0]))
query.write(')!(')
if select_columns_len == 1:
query.write('enlist ')
for select_column_index, select_column in enumerate(self.__select_columns):
if select_column_index > 0:
query.write(';')
query.write(str(select_column[1]))
query.write(')')
query.write(']')
return query.getvalue()
def __str__(self):
return self.to_string()
def __repr__(self):
return self.to_string()
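# --- Illustrative usage sketch (not part of the original module) ---
# A minimal functional-form query built with QQueryBuilder; the table and
# column names are assumptions for the example.
def _example_query():
    constraints = QConstraintFactory()
    builder = QQueryBuilder()
    builder.set_table('trades')
    builder.append_select_column(make_q_value('price'))
    builder.append_constraint(
        constraints.make_equal_constraint(make_q_value('sym'), make_q_value('IBM')))
    return builder.to_string()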
class QBatchQueryBuilder(object):
def __init__(self):
self.__query_template = None
self.__start = None
self.__end = None
self.__delta = None
def append_query_template_constraints(self, q_query_builder):
q_constraint_factory = QConstraintFactory()
q_query_builder.append_constraint(
q_constraint_factory.make_greater_than_or_equal_constraint(make_q_value('date'), QIdentifierValue('${START_DATE}')))
q_query_builder.append_constraint(
q_constraint_factory.make_less_than_or_equal_constraint(make_q_value('date'), QIdentifierValue('${END_DATE}')))
q_expression_factory = QExpressionFactory()
q_query_builder.append_constraint(
q_constraint_factory.make_greater_than_or_equal_constraint(
q_expression_factory.make_plus_expression(make_q_value('date'), make_q_value('time')), QIdentifierValue('${START}')))
q_query_builder.append_constraint(
q_constraint_factory.make_less_than_constraint(
q_expression_factory.make_plus_expression(make_q_value('date'), make_q_value('time')), QIdentifierValue('${END}')))
def set_query_template(self, query_template):
self.__query_template = query_template
return self
def set_start(self, start):
self.__start = start
return self
def set_end(self, end):
self.__end = end
return self
def set_delta(self, delta):
self.__delta = delta
return self
def to_list_of_strings(self):
assert self.__query_template is not None, 'Query template is not set'
assert self.__start is not None, 'Start is not set'
assert self.__end is not None, 'End is not set'
assert self.__delta is not None, 'Delta is not set'
result = []
intervals = utils.intervals(self.__start, self.__end, self.__delta)
for interval in intervals:
query = self.__query_template
query = query.replace('${START}', str(make_q_value(interval.left)))
if checks.is_some_datetime(interval.left):
start_date = conversions.to_python_date(interval.left)
query = query.replace('${START_DATE}', str(make_q_value(start_date)))
query = query.replace('${END}', str(make_q_value(interval.right)))
if checks.is_some_datetime(interval.right):
end_date = conversions.to_python_date(interval.right)
query = query.replace('${END_DATE}', str(make_q_value(end_date)))
result.append(query)
return result
def convert_time_zone(q_result, from_time_zone, to_time_zone, column_indices=((0,1),), implicit_date=None):
if not hasattr(column_indices, '__iter__'):
column_indices = [column_indices]
if implicit_date is None:
implicit_date = dt.datetime.today().date()
processed_q_result = []
for row in q_result:
row_copy = [v for v in row]
for column_index in column_indices:
if hasattr(column_index, '__iter__'):
if row_copy[column_index[0]] is None or row_copy[column_index[1]] is None:
continue
if isinstance(row_copy[column_index[0]], dt.date) and isinstance(row_copy[column_index[1]], dt.time):
datetime = dt.datetime.combine(row_copy[column_index[0]], row_copy[column_index[1]])
elif isinstance(row_copy[column_index[1]], dt.date) and isinstance(row_copy[column_index[0]], dt.time):
datetime = dt.datetime.combine(row_copy[column_index[1]], row_copy[column_index[0]])
else:
raise ValueError('Date and time expected at specified column indices (%s)' % str(column_index))
datetime = from_time_zone.localize(datetime)
datetime = to_time_zone.normalize(datetime.astimezone(to_time_zone))
if isinstance(row_copy[column_index[0]], dt.date):
row_copy[column_index[0]] = datetime.date()
else:
row_copy[column_index[0]] = datetime.time()
if isinstance(row_copy[column_index[1]], dt.date):
row_copy[column_index[1]] = datetime.date()
else:
row_copy[column_index[1]] = datetime.time()
else:
if row_copy[column_index] is None:
continue
if isinstance(row_copy[column_index], dt.time):
datetime = dt.datetime.combine(implicit_date, row_copy[column_index])
elif isinstance(row_copy[column_index], dt.datetime):
datetime = row_copy[column_index]
else:
                    raise ValueError('Time or date-time expected at specified column index (%s)' % str(column_index))
datetime = from_time_zone.localize(datetime)
datetime = to_time_zone.normalize(datetime.astimezone(to_time_zone))
row_copy[column_index] = datetime
processed_q_result.append(row_copy)
return processed_q_result
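# --- Illustrative usage sketch (not part of the original module) ---
# Converts the (date, time) pair in the first two columns of a q result row
# from UTC to US/Eastern. Assumes pytz is available, since convert_time_zone
# expects time zone objects that provide localize()/normalize().
def _example_convert_time_zone():
    import datetime as _dt
    import pytz
    rows = [[_dt.date(2020, 1, 31), _dt.time(14, 30), 101.5]]
    # 14:30 UTC on 2020-01-31 corresponds to 09:30 US/Eastern (EST)
    return convert_time_zone(rows, pytz.utc, pytz.timezone('US/Eastern'))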
dtype_to_qtype = {
'object': None,
'int64': QTypes.LONG,
'float64': QTypes.FLOAT,
'bool': QTypes.BOOLEAN,
'datetime64[ns]': QTypes.DATETIME,
    'timedelta64[ns]': QTypes.TIME,
'category': QTypes.SYMBOL
}
def infer_q_type_for_df_column(df, column, string_columns=set()):
typ = dtype_to_qtype[df.dtypes[column].name]
if typ is None:
if all([isinstance(x, str) for x in df[column].values if x is not None]):
typ = QTypes.SYMBOL
if typ == QTypes.SYMBOL and column in string_columns:
typ = QTypes.UNTYPED_LIST
return typ
def df_to_q_table_schema(df, name, string_columns=set()):
builder = QCreateTableStatementBuilder(overwrite=True)
builder.set_table(name)
for column in df.columns:
typ = infer_q_type_for_df_column(df, column, string_columns)
builder.append_column(column, typ, key=False)
return builder.to_string()
def df_to_upsert_statements(df, name, string_columns=set(), q_types=None):
statements = []
if q_types is None:
q_types = {}
for column in df.columns:
q_types[column] = infer_q_type_for_df_column(df, column, string_columns)
for index, row in df.iterrows():
builder = QUpsertStatementBuilder()
builder.set_table(name)
for column in df.columns:
builder.append(make_q_value(row[column], q_types[column]))
statements.append(builder.to_string())
return statements
def df_to_batch_append_statements(df, name, string_columns=set(), q_types=None, rows_per_batch=100):
builder = QBatchAppendStatementBuilder(rows_per_batch=rows_per_batch)
builder.set_table(name)
if q_types is None:
q_types = {}
for column in df.columns:
q_types[column] = infer_q_type_for_df_column(df, column, string_columns)
for index, row in df.iterrows():
builder.start_new_row()
for column in df.columns:
builder.append(make_q_value(row[column], q_types[column]))
return builder.to_list()
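# --- Illustrative usage sketch (not part of the original module) ---
# Turns a small pandas DataFrame into a q table schema plus per-row upsert
# statements. Assumes pandas is importable; the table name is made up.
def _example_df_to_q():
    import pandas as pd
    df = pd.DataFrame({'sym': ['IBM', 'MSFT'], 'price': [101.5, 202.25]})
    schema = df_to_q_table_schema(df, 'quotes')
    upserts = df_to_upsert_statements(df, 'quotes')
    return schema, upserts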
|
|
#!/usr/bin/env python3
# This script tests different configurations of Ramulator using several traces.
# After making changes to the source code, run this script to make sure your changes do not break Ramulator
# in terms of simulation accuracy (i.e., cycles to complete a given trace), simulation speed, and system memory usage.
import subprocess
import sys, tempfile, psutil, time, random
import gzip, shutil
import colorama
from os import path
MEM_USAGE_THRESHOLD = 5 # Ramulator typically uses a few MiB (may depend on the compiler and compiler options). Give a warning if it exceeds this threshold
RUNTIME_ERROR_MARGIN = 0.2 # Prints a warning message if the simulation time exceeds a trace's 'expected_runtime' by more than this fraction (20%)
# This is not an ideal way to check simulation performance, since the runtime may differ a lot depending on the machine Ramulator is running on
# It might be a good idea to edit the 'expected_runtime' of each trace in case of a mismatch in the runtime
configs = [ # EDIT
'./configs/DDR3-config.cfg',
'./configs/DDR4-config.cfg'
]
traces = [ # EDIT: when adding new traces, you can run the script with 0 'expected_runtime' and later modify it based on how long the simulation took
    {'trace_path': 'random-1m-0.8', 'expected_runtime': 15}, # random-access DRAM trace with 1 million requests and 80% READ requests
    {'trace_path': 'stream-1m-0.8', 'expected_runtime': 13}, # streaming-access DRAM trace with 1 million requests and 80% READ requests
{'trace_path': './cputraces/401.bzip2.gz', 'expected_runtime': 15},
{'trace_path': './cputraces/403.gcc.gz', 'expected_runtime': 7},
]
SYNTHETIC_TRACE_TYPES = ['random', 'stream'] # DO NOT EDIT unless you are adding a new synthetic trace type
RAMULATOR_BIN = './ramulator' # EDIT if the Ramulator binary is in a different directory
def invalidTraceFormat(trace_name):
print(f"ERROR: Invalid synthetic trace format: {trace_name}. Required format <{SYNTHETIC_TRACE_TYPES}>-<NUM_REQUESTS>-<READ_RATIO>. E.g., random-100k-0.8")
sys.exit(-2)
def convert_str_to_number(x):
num = 0
num_map = {'K':1000, 'M':1000000, 'B':1000000000}
if x.isdigit():
num = int(x)
else:
if len(x) > 1:
num = float(x[:-1]) * num_map.get(x[-1].upper(), 1)
return int(num)
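# Illustrative sketch (not part of the original script): how the suffix
# multipliers above are interpreted. The sample values are assumptions.
def _example_convert_str_to_number():
    assert convert_str_to_number('42') == 42
    assert convert_str_to_number('250K') == 250000
    assert convert_str_to_number('1m') == 1000000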
# generate the synthetic trace file if it does not exist
def generateSyntheticTrace(trace_name):
trace_options = trace_name.split('-')
if len(trace_options) != 3:
invalidTraceFormat(trace_name)
trace_type = trace_options[0]
if trace_type not in SYNTHETIC_TRACE_TYPES:
invalidTraceFormat(trace_name)
try:
num_reqs = convert_str_to_number(trace_options[1])
read_ratio = float(trace_options[2])
    except ValueError:
invalidTraceFormat(trace_name)
trace_filename = f'./{trace_type}_{trace_options[1]}_{trace_options[2]}.trace'
if path.exists(trace_filename):
return trace_filename # no need to regenerate the trace if it already exists
print(f"Generating '{trace_type}' synthetic trace: {trace_filename} with {num_reqs} memory requests")
trace_file = open(trace_filename, 'w')
if trace_type == 'random':
        s = 64               # bytes touched per generated request burst (one 64-byte cache line)
        bits = 31            # width of the random address space in bits
        l = int(s/64)        # 64-byte lines per burst
        b = int(num_reqs/l)  # number of bursts needed to reach num_reqs
for i in range(b):
base = random.getrandbits(bits) & 0xffffffffffc0
r = bool(random.random() < read_ratio)
for j in range(l):
trace_file.write('0x%x %s\n' % (base+j*64, 'R' if r else 'W'))
if trace_type == 'stream':
r = int(num_reqs * read_ratio)
w = num_reqs - r
for i in range(r):
trace_file.write('0x%x %s\n' % (i*64, 'R'))
for i in range(w):
trace_file.write('0x%x %s\n' % ((r+i)*64, 'W'))
trace_file.close()
return trace_filename
def extractTrace(trace_path):
uncompressed_path = trace_path.replace('.gz', '')
if path.exists(uncompressed_path):
return uncompressed_path # no need to extract if the .gz is already extracted
print(f"Extracting {trace_path}")
with gzip.open(trace_path, 'rb') as compressed_file:
with open(uncompressed_path, 'wb') as uncompressed_file:
shutil.copyfileobj(compressed_file, uncompressed_file)
return uncompressed_path
def get_stat(stat_file, stat_name):
stat_file.seek(0)
for l in stat_file.readlines():
if stat_name in l:
return int(l.split()[1])
def isSyntheticTrace(trace):
return any(tt in trace for tt in SYNTHETIC_TRACE_TYPES)
def compareAgainstGolden(stats_filename):
STATS_ERROR_MARGIN = 0.1 # Allowing the stats to be up to 10% off wrt the golden stats
stats_to_check = ['ramulator.dram_cycles'] # EDIT to check more statistics
golden_stats_filename = stats_filename.replace('.stat', '.golden_stat')
if not path.exists(golden_stats_filename):
# save the current stat file as golden if no golden stat file exists
        print(f'Saving the current simulation result file {stats_filename} as a golden simulation result')
shutil.copyfile(stats_filename, golden_stats_filename)
return True
stats_file = open(stats_filename, 'r')
golden_stats_file = open(golden_stats_filename, 'r')
mismatch = False
for stat in stats_to_check:
cur_val = get_stat(stats_file, stat)
golden_val = get_stat(golden_stats_file, stat)
if abs(cur_val - golden_val) > golden_val*(STATS_ERROR_MARGIN/2): # dividing by 2 since cur_val can be smaller or greater than the golden value
print(f"WARNING: '{stat}' value is off by more than {int(STATS_ERROR_MARGIN*100)}% compared to the golden simulation results.")
mismatch = True
stats_file.close()
golden_stats_file.close()
return not mismatch
def main():
blackhole = open('/dev/null', 'w')
colorama.init()
ok_str = colorama.Fore.GREEN + 'OK' + colorama.Style.RESET_ALL
fail_str = colorama.Fore.RED + 'FAIL' + colorama.Style.RESET_ALL
warn_str = colorama.Fore.YELLOW + 'WARNING:' + colorama.Style.RESET_ALL
for trace in traces:
trace_path = trace['trace_path']
expected_trace_runtime = trace['expected_runtime']
# Simulate each trace with each Ramulator config
mode = '--mode=cpu'
if isSyntheticTrace(trace_path):
trace_path = generateSyntheticTrace(trace_path)
mode = '--mode=dram' # synthetic traces are for --mode=dram
if ".gz" in trace_path:
trace_path = extractTrace(trace_path)
for config in configs:
trace_name = path.basename(trace_path).replace('.gz', '')
dram_type = path.basename(config).replace('-config', '').replace('.cfg', '')
stats_filename = f"{trace_name.replace('.trace', '')}_{dram_type}.stat"
args = [RAMULATOR_BIN, config, mode, '--stats', stats_filename, trace_path]
tmp = tempfile.NamedTemporaryFile()
print(f"Starting simulation: {' '.join(args)}")
p = subprocess.Popen(args, stdout=tmp.file, stderr=blackhole)
proc = psutil.Process(p.pid)
# monitor execution time and memory usage
execution_time_sec, mem_usage_bytes = 0, 0
while p.poll() is None:
try:
mem_usage_bytes = max(mem_usage_bytes, proc.memory_info()[0])
execution_time_sec = sum(proc.cpu_times())
                except psutil.Error:
                    print(f"======== Oops monitoring PID {p.pid} failed ===============")
time.sleep(0.1)
mem_usage_mib = float(mem_usage_bytes)/2**20
mem_usage_ok = True
if mem_usage_mib > MEM_USAGE_THRESHOLD:
# If you see this warning, it is possible that your changes caused a memory leak or added data structures that made Ramulator use more memory
print(f"{warn_str} Ramulator used {'{:.2f}'.format(mem_usage_mib)} MiB memory, which is more than the pre-defined threshold: {MEM_USAGE_THRESHOLD} MiB.")
mem_usage_ok = False
runtime_ok = True
if execution_time_sec > expected_trace_runtime*(1 + RUNTIME_ERROR_MARGIN):
print(f"{warn_str} Ramulator completed the simulation in {execution_time_sec} seconds, which is more than {int(RUNTIME_ERROR_MARGIN*100)}% higher than the expected runtime: {expected_trace_runtime} seconds.")
runtime_ok = False
stats_ok = compareAgainstGolden(stats_filename)
print(f"Stat Consistency: {ok_str if stats_ok else fail_str}, Runtime: {ok_str if runtime_ok else fail_str}, Memory Usage: {ok_str if mem_usage_ok else fail_str}")
blackhole.close()
if __name__ == '__main__':
main()
|
|
# ext/orderinglist.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`_orm.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`_orm.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
"""
from ..orm.collections import collection
from ..orm.collections import collection_adapter
__all__ = ["ordering_list"]
def ordering_list(attr, count_from=None, **kw):
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
def f(index, collection):
return index + start
try:
f.__name__ = "count_from_%i" % start
except TypeError:
pass
return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop("count_from", None)
if kw.get("ordering_func", None) is None and count_from is not None:
if count_from == 0:
kw["ordering_func"] = count_from_0
elif count_from == 1:
kw["ordering_func"] = count_from_1
else:
kw["ordering_func"] = count_from_n_factory(count_from)
return kw
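# Illustrative sketch (not part of the original module): how ``count_from``
# keywords are translated into an ``ordering_func``. The start value 10 is an
# arbitrary example.
def _example_unsugar_count_from():
    assert _unsugar_count_from(count_from=1)["ordering_func"] is count_from_1
    number_from_10 = _unsugar_count_from(count_from=10)["ordering_func"]
    assert [number_from_10(i, None) for i in range(3)] == [10, 11, 12]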
class OrderingList(list):
"""A custom list that manages position information for its children.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`_orm.relationship` function.
"""
def __init__(
self, ordering_attr=None, ordering_func=None, reorder_on_append=False
):
"""A custom list that manages position information for its children.
``OrderingList`` is a ``collection_class`` list implementation that
syncs position in a Python list with a position attribute on the
mapped objects.
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
If omitted, Python list indexes are used for the attribute values.
Two basic pre-built numbering functions are provided in this module:
``count_from_0`` and ``count_from_1``. For more exotic examples
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
variety of dangerous unexpected database writes.
SQLAlchemy will add instances to the list via append() when your
object loads. If for some reason the result set from the database
skips a step in the ordering (say, row '1' is missing but you get
'2', '3', and '4'), reorder_on_append=True would immediately
renumber the items to '1', '2', '3'. If you have multiple sessions
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error.
Recommend leaving this with the default of False, and just call
``reorder()`` if you're doing ``append()`` operations with
previously ordered instances or when doing some housekeeping after
manual sql operations.
"""
self.ordering_attr = ordering_attr
if ordering_func is None:
ordering_func = count_from_0
self.ordering_func = ordering_func
self.reorder_on_append = reorder_on_append
# More complex serialization schemes (multi column, e.g.) are possible by
# subclassing and reimplementing these two methods.
def _get_order_value(self, entity):
return getattr(entity, self.ordering_attr)
def _set_order_value(self, entity, value):
setattr(entity, self.ordering_attr, value)
def reorder(self):
"""Synchronize ordering for the entire collection.
Sweeps through the list and ensures that each object has accurate
ordering information set.
"""
for index, entity in enumerate(self):
self._order_entity(index, entity, True)
# As of 0.5, _reorder is no longer semi-private
_reorder = reorder
def _order_entity(self, index, entity, reorder=True):
have = self._get_order_value(entity)
# Don't disturb existing ordering if reorder is False
if have is not None and not reorder:
return
should_be = self.ordering_func(index, self)
if have != should_be:
self._set_order_value(entity, should_be)
def append(self, entity):
super(OrderingList, self).append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
super(OrderingList, self).append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
super(OrderingList, self).insert(index, entity)
self._reorder()
def remove(self, entity):
super(OrderingList, self).remove(entity)
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
self._reorder()
return entity
def __setitem__(self, index, entity):
if isinstance(index, slice):
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
super(OrderingList, self).__setitem__(index, entity)
def __delitem__(self, index):
super(OrderingList, self).__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
def _reconstitute(cls, dict_, items):
"""Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
|
|
# Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import secrets
from typing import List, cast, Dict
from urllib.parse import urljoin, urlparse, urlunparse
from bs4 import BeautifulSoup
from packaging import version
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.evidence import Evidence
from yawast.scanner.plugins.http import version_checker, response_scanner
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output, utils
def check_all(url: str) -> List[Result]:
results: List[Result] = []
results += check_asp_net_debug(url)
results += check_aspnet_handlers(url)
return results
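# Illustrative sketch (not part of the original plugin): how these IIS/ASP.NET
# checks might be driven directly. The target URL is a placeholder and the
# call performs live HTTP requests.
def _example_run_iis_checks():
    for result in check_all("http://example.test/"):
        print(result)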
def check_version(banner: str, raw: str, url: str, headers: Dict) -> List[Result]:
results: List[Result] = []
if not banner.startswith("Microsoft-IIS/"):
return results
# we've got an IIS version
results.append(
Result(
f"IIS Version Exposed: {banner}",
Vulnerabilities.HTTP_BANNER_IIS_VERSION,
url,
raw,
)
)
# parse the version, and get the latest version - see if the server is up to date
ver = cast(version.Version, version.parse(banner.split("/")[1]))
curr_version = version_checker.get_latest_version("iis", ver)
if curr_version is not None and curr_version > ver:
results.append(
Result(
f"IIS Outdated: {ver} - Current: {curr_version}",
Vulnerabilities.SERVER_IIS_OUTDATED,
url,
raw,
)
)
# IIS servers may expose a couple other versions, related to ASP.NET, check for those
if "X-AspNetMvc-Version" in headers:
results.append(
Result(
f'ASP.NET MVC Version Exposed: {headers["X-AspNetMvc-Version"]}',
Vulnerabilities.HTTP_HEADER_X_ASPNETMVC_VERSION,
url,
raw,
)
)
ver = cast(version.Version, version.parse(headers["X-AspNetMvc-Version"]))
curr_version = version_checker.get_latest_version("aspnet_mvc", ver)
if curr_version is not None and curr_version > ver:
results.append(
Result(
f"ASP.NET MVC Outdated: {ver} - Current: {curr_version}",
Vulnerabilities.SERVER_ASPNETMVC_OUTDATED,
url,
raw,
)
)
if "X-AspNet-Version" in headers:
results.append(
Result(
f'ASP.NET CLR Version Exposed: {headers["X-AspNet-Version"]}',
Vulnerabilities.HTTP_HEADER_X_ASPNET_VERSION,
url,
raw,
)
)
ver = cast(version.Version, version.parse(headers["X-AspNet-Version"]))
curr_version = version_checker.get_latest_version("aspnet", ver)
if curr_version is not None and curr_version > ver:
results.append(
Result(
f"ASP.NET Outdated: {ver} - Current: {curr_version}",
Vulnerabilities.SERVER_ASPNET_OUTDATED,
url,
raw,
)
)
return results
def check_aspnet_handlers(url: str) -> List[Result]:
results = []
file_name = secrets.token_hex(12)
exts = ["ashx", "aspx", "asmx", "soap", "rem"]
for ext in exts:
target = urljoin(url, f"{file_name}.{ext}")
vuln = False
res = network.http_get(target, False)
body = res.text
if "Location" in res.headers and "aspxerrorpath" in res.headers["Location"]:
vuln = True
elif (
res.status_code >= 400
and "Remoting.RemotingException" in body
or "HttpException" in body
or "FileNotFoundException" in body
):
vuln = True
if vuln:
results.append(
Result.from_evidence(
Evidence.from_response(res, {"handler": ext}),
f"ASP.NET Handler Enumeration: {ext}",
Vulnerabilities.SERVER_ASPNET_HANDLER_ENUM,
)
)
return results
def check_asp_net_debug(url: str) -> List[Result]:
results: List[Result] = []
res = network.http_custom(
"DEBUG", url, additional_headers={"Command": "stop-debug", "Accept": "*/*"}
)
if res.status_code == 200 and "OK" in res.text:
# we've got a hit, but could be a false positive
# try this again, with a different verb
xres = network.http_custom(
"XDEBUG", url, additional_headers={"Command": "stop-debug", "Accept": "*/*"}
)
# if we get a 200 when using an invalid verb, it's a false positive
# if we get something else, then the DEBUG actually did something
if xres.status_code != 200:
results.append(
Result(
"ASP.NET Debugging Enabled",
Vulnerabilities.SERVER_ASPNET_DEBUG_ENABLED,
url,
[
network.http_build_raw_request(res.request),
network.http_build_raw_response(res),
],
)
)
else:
output.debug("Server responds to invalid HTTP verbs with status 200")
results += response_scanner.check_response(url, res)
return results
def check_telerik_rau_enabled(soup: BeautifulSoup, url: str) -> List[Result]:
results: List[Result] = []
parsed = urlparse(url)
domain = utils.get_domain(parsed.netloc)
try:
# get all the scripts
files = [i.get("src") for i in soup.find_all("script") if i.get("src")]
for file in files:
if "Telerik.Web.UI.WebResource.axd" in file:
# ok, they are using Telerik UI for ASP.NET AJAX
# fix-up the URL
if str(file).startswith("//"):
file = f"https:{file}"
if str(file).startswith("/") or (not str(file).startswith("http")):
if parsed.scheme == "https":
file = urljoin(f"https://{domain}", file)
else:
file = urljoin(f"http://{domain}", file)
target = urlparse(file)
target = target._replace(query="type=rau")
if domain in target:
res = network.http_get(urlunparse(target), False)
# NOTE: Typo in "succesfully" is intentional - do not fix
if "RadAsyncUpload handler is registered succesfully" in res.text:
results.append(
Result.from_evidence(
Evidence.from_response(res, {"original_url": url}),
f"Telerik UI for ASP.NET AJAX RadAsyncUpload Enabled "
f"(Check for CVE-2019-18935) at {target}",
Vulnerabilities.APP_TELERIK_UI_RAD_ASYNC_UPLOAD_ENABLED,
)
)
break
except Exception:
output.debug_exception()
return results
|
|
# coding=utf-8
import json
import logging
import sys
import networkx
import pytest
from bireus.client.download_service import DownloadError
from bireus.client.repository import ClientRepository, CheckoutError
from bireus.server.repository_manager import RepositoryManager
from bireus.shared import *
from bireus.shared.repository import ProtocolException
from tests import assert_file_equals, assert_zip_file_equals
from tests.create_test_server_data import create_test_server_data
from tests.mocks.mock_download_service import MockDownloadService
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)-25s - %(levelname)-5s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
server_path = Path.cwd().joinpath("example-server")
client_path = Path.cwd().joinpath("example-client")
test_url = "http://localhost:12345/subfolder/subsub"
@pytest.fixture()
def prepare_server():
# create demo repo
create_test_server_data(server_path, "inst-bi")
RepositoryManager(server_path).full_update()
if client_path.exists():
remove_folder(client_path)
yield prepare_server
# teardown
def get_latest_version(mocker, downloader) -> ClientRepository:
global client_repo
downloader.add_read_action(lambda url: server_path.joinpath("repo_demo", "info.json").read_bytes())
version_graph = server_path.joinpath("repo_demo", "versions.gml")
server_latest = server_path.joinpath("repo_demo", "latest.tar.xz")
downloader.add_download_action(lambda path_from, path_to: copy_file(version_graph, path_to))
downloader.add_download_action(lambda path_from, path_to: copy_file(server_latest, path_to))
return ClientRepository.get_from_url(client_path, test_url, downloader, file_logging=False)
def test_get_from_url_folder_exists():
Path("example-client").mkdir(exist_ok=True)
with pytest.raises(FileExistsError):
ClientRepository.get_from_url(client_path, test_url, MockDownloadService(lambda: None))
def test_get_from_url_http_error():
remove_folder(client_path)
with pytest.raises(DownloadError):
ClientRepository.get_from_url(client_path, test_url,
MockDownloadService(lambda: None, lambda url: (_ for _ in ()).throw(
DownloadError(None, url))))
def test_get_from_url_success(mocker, prepare_server):
downloader = MockDownloadService()
get_latest_version(mocker, downloader)
assert len(downloader.urls_called) == 3
assert downloader.urls_called[0] == test_url + "/info.json"
assert downloader.urls_called[1] == test_url + "/versions.gml"
assert downloader.urls_called[2] == test_url + "/latest.tar.xz"
assert client_path.joinpath(".bireus", "info.json").exists()
assert client_path.joinpath(".bireus", "versions.gml").exists()
original_source_path = server_path.joinpath("repo_demo", "v2")
assert not original_source_path.joinpath("removed_folder").joinpath("obsolete.txt").exists()
assert_file_equals(client_path, original_source_path, Path("new_folder", "new_file.txt"))
assert_file_equals(client_path, original_source_path, Path("zip_sub", "changed-subfolder.test"))
assert_file_equals(client_path, original_source_path, "changed.txt")
assert_file_equals(client_path, original_source_path, "changed.zip")
assert_file_equals(client_path, original_source_path, "unchanged.txt")
def test_checkout_version_success(mocker, prepare_server):
downloader = MockDownloadService()
client_repo = get_latest_version(mocker, downloader)
server_update = server_path.joinpath("repo_demo", "__patches__", "v2_to_v1.tar.xz")
downloader.add_download_action(lambda path_from, path_to: copy_file(server_update, path_to))
client_repo.checkout_version("v1")
assert len(downloader.urls_called) == 4
# repo initialization
assert downloader.urls_called[0] == test_url + "/info.json"
assert downloader.urls_called[1] == test_url + "/versions.gml"
assert downloader.urls_called[2] == test_url + "/latest.tar.xz"
# checkout version -> download patch
assert downloader.urls_called[3] == test_url + "/__patches__/v2_to_v1.tar.xz"
original_source_path = server_path.joinpath("repo_demo", "v1")
assert not client_path.joinpath("new_folder").joinpath("new_file.txt").exists()
assert_file_equals(client_path, original_source_path, Path("removed_folder", "obsolete.txt"))
assert_file_equals(client_path, original_source_path, "changed.txt")
assert_file_equals(client_path, original_source_path, "unchanged.txt")
assert_zip_file_equals(client_path, original_source_path, Path("zip_sub", "changed-subfolder.test"))
assert_zip_file_equals(client_path, original_source_path, "changed.zip")
def test_checkout_version_unknown(mocker, prepare_server):
downloader = MockDownloadService()
client_repo = get_latest_version(mocker, downloader)
downloader.add_read_action(lambda url: server_path.joinpath("repo_demo", "info.json").read_bytes())
with pytest.raises(CheckoutError):
client_repo.checkout_version("unknown_version")
assert len(downloader.urls_called) == 4
# repo initialization
assert downloader.urls_called[0] == test_url + "/info.json"
assert downloader.urls_called[1] == test_url + "/versions.gml"
assert downloader.urls_called[2] == test_url + "/latest.tar.xz"
# unknown version -> check whether we know the latest version
assert downloader.urls_called[3] == test_url + "/info.json"
def test_checkout_version_twice_success(mocker, prepare_server):
downloader = MockDownloadService()
client_repo = get_latest_version(mocker, downloader)
server_patch_2_to_1_zip = str(server_path.joinpath("repo_demo", "__patches__", "v2_to_v1.tar.xz"))
downloader.add_download_action(lambda path_from, path_to: copy_file(server_patch_2_to_1_zip, path_to))
client_repo.checkout_version("v1")
server_patch_1_to_2_zip = str(server_path.joinpath("repo_demo", "__patches__", "v1_to_v2.tar.xz"))
downloader.add_download_action(lambda path_from, path_to: copy_file(server_patch_1_to_2_zip, path_to))
downloader.add_read_action(lambda url: server_path.joinpath("repo_demo", "info.json").read_bytes())
client_repo.checkout_version("v2")
assert len(downloader.urls_called) == 5
# repo initialization
assert downloader.urls_called[0] == test_url + "/info.json"
assert downloader.urls_called[1] == test_url + "/versions.gml"
assert downloader.urls_called[2] == test_url + "/latest.tar.xz"
# checkout version -> download patch
assert downloader.urls_called[3] == test_url + "/__patches__/v2_to_v1.tar.xz"
assert downloader.urls_called[4] == test_url + "/__patches__/v1_to_v2.tar.xz"
original_source_path = server_path.joinpath("repo_demo", "v2")
assert not client_path.joinpath("removed_folder").joinpath("obsolete.txt").exists()
assert_file_equals(client_path, original_source_path, Path("new_folder", "new_file.txt"))
assert_file_equals(client_path, original_source_path, "changed.txt")
assert_file_equals(client_path, original_source_path, "unchanged.txt")
assert_zip_file_equals(client_path, original_source_path, Path("zip_sub", "changed-subfolder.test"))
assert_zip_file_equals(client_path, original_source_path, "changed.zip")
def test_checkout_version_crc_mismatch_before_patching(mocker, prepare_server):
downloader = MockDownloadService()
client_repo = get_latest_version(mocker, downloader)
with client_path.joinpath("changed.txt").open("wb") as file:
file.write("test".encode("utf-8"))
server_update = server_path.joinpath("repo_demo", "__patches__", "v2_to_v1.tar.xz")
downloader.add_download_action(lambda path_from, path_to: copy_file(server_update, path_to))
server_single_file = server_path.joinpath("repo_demo", "v1", "changed.txt")
downloader.add_download_action(lambda path_from, path_to: copy_file(server_single_file, path_to))
client_repo.checkout_version("v1")
assert len(downloader.urls_called) == 5
# repo initialization
assert downloader.urls_called[0] == test_url + "/info.json"
assert downloader.urls_called[1] == test_url + "/versions.gml"
assert downloader.urls_called[2] == test_url + "/latest.tar.xz"
# checkout version -> download patch
assert downloader.urls_called[3] == test_url + "/__patches__/v2_to_v1.tar.xz"
# version mismatch -> download file from original repo instead
assert downloader.urls_called[4] == test_url + "/v1/changed.txt"
original_source_path = server_path.joinpath("repo_demo", "v1")
assert not client_path.joinpath("new_folder").joinpath("new_file.txt").exists()
assert_file_equals(client_path, original_source_path, Path("removed_folder", "obsolete.txt"))
assert_file_equals(client_path, original_source_path, "changed.txt")
assert_file_equals(client_path, original_source_path, "unchanged.txt")
assert_zip_file_equals(client_path, original_source_path, Path("zip_sub", "changed-subfolder.test"))
assert_zip_file_equals(client_path, original_source_path, "changed.zip")
def test_protocol_exception(tmpdir):
repo_folder = tmpdir.mkdir("repo_demo")
bireus_folder = repo_folder.mkdir(".bireus")
info_json = bireus_folder.join("info.json")
with info_json.open("w") as file:
json.dump(
{
"name": "repo_demo",
"first_version": "v1",
"latest_version": "v1",
"current_version": "v1",
"strategy": "inst-bi",
"protocol": 999
},
file
)
version_graph = networkx.DiGraph()
version_graph.add_node("v1")
networkx.write_gml(version_graph, str(bireus_folder.join("versions.gml")))
with pytest.raises(ProtocolException):
ClientRepository(Path(repo_folder.strpath))
|
|
import inspect, textwrap
import codegen as cg
import os
_rank = 0
namespaces = {}
RESERVED = frozenset(['None'])
def makedir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
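# SubModule is the shared container for everything that ends up in one
# generated binding unit: functions/methods, enums, classes, attributes and
# nested namespaces. Namespace and Class below both build on it.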
class SubModule(object):
def __init__(self):
self.methods = []
self.enums = []
self.classes = []
self.namespaces = []
self.attrs = []
self.includes = set()
def aggregate_includes(self):
includes = set(self.includes)
for unit in self.iter_all():
if isinstance(unit, SubModule):
includes |= unit.aggregate_includes()
else:
includes |= unit.includes
return includes
def aggregate_downcast(self):
dclist = []
for cls in self.classes:
for bcls in cls.downcastables:
from_to = bcls.fullname, cls.fullname
name = 'downcast_%s_to_%s' % tuple(map(cg.mangle, from_to))
fn = Function(namespaces[''], name, ptr(cls), ptr(bcls))
dclist.append((from_to, fn))
for ns in self.namespaces:
dclist.extend(ns.aggregate_downcast())
return dclist
def iter_all(self):
for fn in self.methods:
yield fn
for cls in self.classes:
yield cls
for enum in self.enums:
yield enum
for attr in self.attrs:
yield attr
for ns in self.namespaces:
yield ns
def generate_method_table(self, println):
writer = cg.CppCodeWriter(println)
writer.println('static')
writer.println('PyMethodDef meth_%s[] = {' % cg.mangle(self.fullname))
with writer.indent():
fmt = '{ "%(name)s", (PyCFunction)%(func)s, METH_VARARGS, NULL },'
for meth in self.methods:
name = meth.name
func = meth.c_name
writer.println(fmt % locals())
for enumkind in self.enums:
for enum in enumkind.value_names:
name = enum
func = enumkind.c_name(enum)
writer.println(fmt % locals())
for attr in self.attrs:
# getter
name = attr.getter_name
func = attr.getter_c_name
writer.println(fmt % locals())
# setter
name = attr.setter_name
func = attr.setter_c_name
writer.println(fmt % locals())
writer.println('{ NULL },')
writer.println('};')
writer.println()
# def generate_downcasts(self, println):
# for ((fromty, toty), fn) in self.downcastlist:
# name = fn.name
# fmt = '''
#static
#%(toty)s* %(name)s(%(fromty)s* arg)
#{
# return typecast< %(toty)s >::from(arg);
#}
# '''
# println(fmt % locals())
#
# fn.generate_cpp(println)
def generate_cpp(self, println, extras=()):
for unit in self.iter_all():
unit.generate_cpp(println)
self.generate_method_table(println)
self.generate_submodule_table(println, extras=extras)
def generate_submodule_table(self, println, extras=()):
writer = cg.CppCodeWriter(println)
writer.println('static')
name = cg.mangle(self.fullname)
writer.println('SubModuleEntry submodule_%(name)s[] = {' % locals())
with writer.indent():
for cls in self.classes:
name = cls.name
table = cg.mangle(cls.fullname)
writer.println('{ "%(name)s", meth_%(table)s, NULL },' %
locals())
for ns in self.namespaces:
name = ns.localname
table = cg.mangle(ns.fullname)
fmt = '{ "%(name)s", meth_%(table)s, submodule_%(table)s },'
writer.println(fmt % locals())
for name, table in extras:
writer.println('{ "%(name)s", %(table)s, NULL },' % locals())
writer.println('{ NULL }')
writer.println('};')
writer.println('')
def generate_py(self, rootdir='.', name=''):
name = name or self.localname
if self.namespaces: # should make new directory
path = os.path.join(rootdir, name)
makedir(path)
filepath = os.path.join(path, '__init__.py')
else:
filepath = os.path.join(rootdir, '%s.py' % name)
with open(filepath, 'w') as pyfile:
println = cg.wrap_println_from_file(pyfile)
println('from llvmpy import _api, capsule')
for ns in self.namespaces:
println('from . import %s' % ns.localname)
println()
for unit in self.iter_all():
if not isinstance(unit, Namespace):
writer = cg.PyCodeWriter(println)
unit.compile_py(writer)
for ns in self.namespaces:
ns.generate_py(rootdir=path)
class Namespace(SubModule):
def __init__(self, name):
SubModule.__init__(self)
self.name = name = name.lstrip(':')
namespaces[name] = self
def Class(self, *bases):
cls = Class(self, *bases)
self.classes.append(cls)
return cls
def Function(self, *args):
fn = Function(self, *args)
self.methods.append(fn)
return fn
def CustomFunction(self, *args):
fn = CustomFunction(self, *args)
self.methods.append(fn)
return fn
def Enum(self, name, *value_names):
enum = Enum(*value_names)
enum.parent = self
enum.name = name
self.enums.append(enum)
assert name not in vars(self), 'Duplicated'
setattr(self, name, enum)
return enum
def Namespace(self, name):
ns = Namespace('::'.join([self.name, name]))
self.namespaces.append(ns)
return ns
@property
def fullname(self):
return self.name
@property
def py_name(self):
return self.name.replace('::', '.')
@property
def localname(self):
return self.name.rsplit('::', 1)[-1]
def __str__(self):
return self.name
class _Type(object):
pass
class BuiltinTypes(_Type):
def __init__(self, name):
self.name = name
@property
def fullname(self):
return self.name
def wrap(self, writer, var):
return var
def unwrap(self, writer, var):
return var
Void = BuiltinTypes('void')
Unsigned = BuiltinTypes('unsigned')
UnsignedLongLong = BuiltinTypes('unsigned long long') # used in llvm-3.2
LongLong = BuiltinTypes('long long')
Float = BuiltinTypes('float')
Double = BuiltinTypes('double')
Uint64 = BuiltinTypes('uint64_t')
Int64 = BuiltinTypes('int64_t')
Int = BuiltinTypes('int')
Size_t = BuiltinTypes('size_t')
VoidPtr = BuiltinTypes('void*')
Bool = BuiltinTypes('bool')
StdString = BuiltinTypes('std::string')
ConstStdString = BuiltinTypes('const std::string')
ConstCharPtr = BuiltinTypes('const char*')
PyObjectPtr = BuiltinTypes('PyObject*')
PyObjectPtr.format = 'O'
class Class(SubModule, _Type):
format = 'O'
def __init__(self, ns, *bases):
SubModule.__init__(self)
self.ns = ns
self.bases = bases
self._is_defined = False
self.pymethods = []
self.downcastables = set()
def __call__(self, defn):
assert not self._is_defined
# process the definition in "defn"
self.name = getattr(defn, '_name_', defn.__name__)
for k, v in defn.__dict__.items():
if isinstance(v, Method):
self.methods.append(v)
if isinstance(v, Constructor):
for sig in v.signatures:
sig[0] = ptr(self)
v.name = k
v.parent = self
elif isinstance(v, Enum):
self.enums.append(v)
v.name = k
v.parent = self
assert k not in vars(self), "Duplicated: %s" % k
setattr(self, k, v)
elif isinstance(v, Attr):
self.attrs.append(v)
v.name = k
v.parent = self
elif isinstance(v, CustomPythonMethod):
self.pymethods.append(v)
elif k == '_include_':
if isinstance(v, str):
self.includes.add(v)
else:
for i in v:
self.includes.add(i)
elif k == '_realname_':
self.realname = v
elif k == '_downcast_':
if isinstance(v, Class):
self.downcastables.add(v)
else:
for i in v:
self.downcastables.add(i)
return self
def compile_py(self, writer):
clsname = self.name
bases = 'capsule.Wrapper'
if self.bases:
bases = ', '.join(x.name for x in self.bases)
writer.println('@capsule.register_class("%s")' % self.fullname)
with writer.block('class %(clsname)s(%(bases)s):' % locals()):
writer.println('_llvm_type_ = "%s"' % self.fullname)
if self.bases:
writer.println('__slots__ = ()')
else:
writer.println('__slots__ = "__weakref__"')
for enum in self.enums:
enum.compile_py(writer)
for meth in self.methods:
meth.compile_py(writer)
for meth in self.pymethods:
meth.compile_py(writer)
for attr in self.attrs:
attr.compile_py(writer)
writer.println()
@property
def capsule_name(self):
if self.bases:
return self.bases[-1].capsule_name
else:
return self.fullname
@property
def fullname(self):
try:
name = self.realname
except AttributeError:
name = self.name
return '::'.join([self.ns.fullname, name])
@property
def py_name(self):
ns = self.ns.name.split('::')
return '.'.join(ns + [self.name])
def __str__(self):
return self.fullname
def unwrap(self, writer, val):
fmt = 'PyCapsule_GetPointer(%(val)s, "%(name)s")'
name = self.capsule_name
raw = writer.declare('void*', fmt % locals())
writer.die_if_false(raw, verbose=name)
ptrty = ptr(self).fullname
ty = self.fullname
fmt = 'unwrap_as<%(ty)s, %(name)s >::from(%(raw)s)'
casted = writer.declare(ptrty, fmt % locals())
writer.die_if_false(casted)
return casted
def wrap(self, writer, val):
copy = 'new %s(%s)' % (self.fullname, val)
return writer.pycapsule_new(copy, self.capsule_name, self.fullname)
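# Illustrative sketch of the intended declaration style (assumed usage, not
# taken from this file): a Class object handed out by a Namespace is used as a
# decorator on a plain definition class, and Class.__call__ above harvests the
# Method/Enum/Attr members plus the optional _include_/_realname_/_downcast_
# markers from it.
#
#   llvm = Namespace('llvm')          # hypothetical namespace
#   Value = llvm.Class()
#
#   @Value
#   class Value:
#       _include_ = 'llvm/Value.h'    # hypothetical header
#       getName = Method(ConstStdString)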
class Enum(object):
format = 'O'
def __init__(self, *value_names):
self.parent = None
if len(value_names) == 1:
value_names = list(filter(bool, value_names[0].replace(',', ' ').split()))
self.value_names = value_names
self.includes = set()
@property
def fullname(self):
try:
name = self.realname
except AttributeError:
name = self.name
return '::'.join([self.parent.fullname, name])
def __str__(self):
return self.fullname
def wrap(self, writer, val):
ret = writer.declare('PyObject*', 'PyInt_FromLong(%s)' % val)
return ret
def unwrap(self, writer, val):
convert_long_to_enum = '(%s)PyInt_AsLong(%s)' % (self.fullname, val)
ret = writer.declare(self.fullname, convert_long_to_enum)
return ret
def c_name(self, enum):
return cg.mangle("%s_%s_%s" % (self.parent, self.name, enum))
def generate_cpp(self, println):
self.compile_cpp(cg.CppCodeWriter(println))
def compile_cpp(self, writer):
for enum in self.value_names:
with writer.py_function(self.c_name(enum)):
ret = self.wrap(writer, '::'.join([self.parent.fullname, enum]))
writer.return_value(ret)
def compile_py(self, writer):
with writer.block('class %s:' % self.name):
writer.println('_llvm_type_ = "%s"' % self.fullname)
for v in self.value_names:
if v in RESERVED:
k = '%s_' % v
fmt = '%(k)s = getattr(%(p)s, "%(v)s")()'
else:
k = v
fmt = '%(k)s = %(p)s.%(v)s()'
p = '.'.join(['_api'] + self.parent.fullname.split('::'))
writer.println(fmt % locals())
writer.println()
class Method(object):
_kind_ = 'meth'
def __init__(self, return_type=Void, *args):
self.parent = None
self.signatures = []
self.includes = set()
self._add_signature(return_type, *args)
self.disowning = False
def _add_signature(self, return_type, *args):
prev_lens = set(map(len, self.signatures))
cur_len = len(args) + 1
if cur_len in prev_lens:
raise Exception('Only support overloading with different number'
' of arguments')
self.signatures.append([return_type] + list(args))
def __ior__(self, method):
assert type(self) is type(method)
for sig in method.signatures:
self._add_signature(sig[0], *sig[1:])
return self
@property
def fullname(self):
return '::'.join([self.parent.fullname, self.realname]).lstrip(':')
@property
def realname(self):
try:
return self.__realname
except AttributeError:
return self.name
@realname.setter
def realname(self, v):
self.__realname = v
@property
def c_name(self):
return cg.mangle("%s_%s" % (self.parent, self.name))
def __str__(self):
return self.fullname
def generate_cpp(self, println):
self.compile_cpp(cg.CppCodeWriter(println))
def compile_cpp(self, writer):
with writer.py_function(self.c_name):
if len(self.signatures) == 1:
sig = self.signatures[0]
retty = sig[0]
argtys = sig[1:]
self.compile_cpp_body(writer, retty, argtys)
else:
nargs = writer.declare('Py_ssize_t', 'PyTuple_Size(args)')
for sig in self.signatures:
retty = sig[0]
argtys = sig[1:]
expect = len(argtys)
if (not isinstance(self, StaticMethod) and
isinstance(self.parent, Class)):
                    # Is an instance method, add 1 for "this".
expect += 1
with writer.block('if (%(expect)d == %(nargs)s)' % locals()):
self.compile_cpp_body(writer, retty, argtys)
writer.raises(TypeError, 'Invalid number of args')
def compile_cpp_body(self, writer, retty, argtys):
args = writer.parse_arguments('args', ptr(self.parent), *argtys)
ret = writer.method_call(self.realname, retty.fullname, *args)
writer.return_value(retty.wrap(writer, ret))
def compile_py(self, writer):
decl = writer.function(self.name, args=('self',), varargs='args')
with decl as (this, varargs):
unwrap_this = writer.unwrap(this)
if self.disowning:
writer.release_ownership(unwrap_this)
unwrapped = writer.unwrap_many(varargs)
self.process_ownedptr_args(writer, unwrapped)
func = '.'.join([self.parent.py_name, self.name])
ret = writer.call('_api.%s' % func,
args=(unwrap_this,), varargs=unwrapped)
wrapped = writer.wrap(ret, self.is_return_ownedptr())
writer.return_value(wrapped)
writer.println()
def require_only(self, num):
        '''Require only the first "num" arguments.
        The remaining trailing arguments become optional: an extra, shorter
        signature is registered for each of them.
        '''
assert len(self.signatures) == 1
sig = self.signatures[0]
ret = sig[0]
args = sig[1:]
arg_ct = len(args)
for i in range(num, arg_ct):
self._add_signature(ret, *args[:i])
return self
def is_return_ownedptr(self):
retty = self.signatures[0][0]
return isinstance(retty, ownedptr)
def process_ownedptr_args(self, writer, unwrapped):
argtys = self.signatures[0][1:]
for i, ty in enumerate(argtys):
if isinstance(ty, ownedptr):
with writer.block('if len(%s) > %d:' % (unwrapped, i)):
writer.release_ownership('%s[%d]' % (unwrapped, i))
class CustomMethod(Method):
def __init__(self, methodname, retty, *argtys):
super(CustomMethod, self).__init__(retty, *argtys)
self.methodname = methodname
def compile_cpp_body(self, writer, retty, argtys):
args = writer.parse_arguments('args', ptr(self.parent), *argtys)
ret = writer.call(self.methodname, retty.fullname, *args)
writer.return_value(retty.wrap(writer, ret))
class StaticMethod(Method):
def compile_cpp_body(self, writer, retty, argtys):
assert isinstance(self.parent, Class)
args = writer.parse_arguments('args', *argtys)
ret = self.compile_cpp_call(writer, retty, args)
writer.return_value(retty.wrap(writer, ret))
def compile_cpp_call(self, writer, retty, args):
ret = writer.call(self.fullname, retty.fullname, *args)
return ret
def compile_py(self, writer):
writer.println('@staticmethod')
decl = writer.function(self.name, varargs='args')
with decl as varargs:
unwrapped = writer.unwrap_many(varargs)
self.process_ownedptr_args(writer, unwrapped)
func = '.'.join([self.parent.py_name, self.name])
ret = writer.call('_api.%s' % func, varargs=unwrapped)
wrapped = writer.wrap(ret, self.is_return_ownedptr())
writer.return_value(wrapped)
writer.println()
class CustomStaticMethod(StaticMethod):
def __init__(self, methodname, retty, *argtys):
super(CustomStaticMethod, self).__init__(retty, *argtys)
self.methodname = methodname
def compile_cpp_body(self, writer, retty, argtys):
args = writer.parse_arguments('args', *argtys)
ret = writer.call(self.methodname, retty.fullname, *args)
writer.return_value(retty.wrap(writer, ret))
class Function(Method):
_kind_ = 'func'
def __init__(self, parent, name, return_type=Void, *args):
super(Function, self).__init__(return_type, *args)
self.parent = parent
self.name = name
def compile_cpp_body(self, writer, retty, argtys):
args = writer.parse_arguments('args', *argtys)
ret = writer.call(self.fullname, retty.fullname, *args)
writer.return_value(retty.wrap(writer, ret))
def compile_py(self, writer):
with writer.function(self.name, varargs='args') as varargs:
unwrapped = writer.unwrap_many(varargs)
self.process_ownedptr_args(writer, unwrapped)
func = '.'.join([self.parent.py_name, self.name]).lstrip('.')
ret = writer.call('_api.%s' % func, varargs=unwrapped)
wrapped = writer.wrap(ret, self.is_return_ownedptr())
writer.return_value(wrapped)
writer.println()
class CustomFunction(Function):
def __init__(self, parent, name, realname, return_type=Void, *args):
super(CustomFunction, self).__init__(parent, name, return_type, *args)
self.realname = realname
@property
def fullname(self):
return self.realname
class Destructor(Method):
_kind_ = 'dtor'
def __init__(self):
super(Destructor, self).__init__()
def compile_cpp_body(self, writer, retty, argtys):
assert isinstance(self.parent, Class)
assert not argtys
args = writer.parse_arguments('args', ptr(self.parent), *argtys)
writer.println('delete %s;' % args[0])
writer.return_value(None)
def compile_py(self, writer):
func = '.'.join([self.parent.py_name, self.name])
writer.println('_delete_ = _api.%s' % func)
class Constructor(StaticMethod):
_kind_ = 'ctor'
def __init__(self, *args):
super(Constructor, self).__init__(Void, *args)
def compile_cpp_call(self, writer, retty, args):
alloctype = retty.fullname.rstrip(' *')
arglist = ', '.join(args)
stmt = 'new %(alloctype)s(%(arglist)s)' % locals()
ret = writer.declare(retty.fullname, stmt)
return ret
class ref(_Type):
def __init__(self, element):
assert isinstance(element, Class), type(element)
self.element = element
self.const = False
def __str__(self):
return self.fullname
@property
def fullname(self):
if self.const:
return 'const %s&' % self.element.fullname
else:
return '%s&' % self.element.fullname
@property
def capsule_name(self):
return self.element.capsule_name
@property
def format(self):
return self.element.format
def wrap(self, writer, val):
p = writer.declare(const(ptr(self.element)).fullname, '&%s' % val)
return writer.pycapsule_new(p, self.capsule_name, self.element.fullname)
def unwrap(self, writer, val):
p = self.element.unwrap(writer, val)
return writer.declare(self.fullname, '*%s' % p)
class ptr(_Type):
def __init__(self, element):
assert isinstance(element, Class)
self.element = element
self.const = False
@property
def fullname(self):
if self.const:
return 'const %s*' % self.element
else:
return '%s*' % self.element
@property
def format(self):
return self.element.format
def unwrap(self, writer, val):
ret = writer.declare(self.fullname, 'NULL')
with writer.block('if (%(val)s != Py_None)' % locals()):
val = self.element.unwrap(writer, val)
writer.println('%(ret)s = %(val)s;' % locals())
return ret
def wrap(self, writer, val):
return writer.pycapsule_new(val, self.element.capsule_name,
self.element.fullname)
class ownedptr(ptr):
pass
def const(ptr_or_ref):
ptr_or_ref.const = True
return ptr_or_ref
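# Type wrappers compose; for a Class C the generated C++ signatures use, e.g.
# (illustrative):
#   ptr(C).fullname        -> C.fullname + '*'
#   ref(C).fullname        -> C.fullname + '&'
#   const(ref(C)).fullname -> 'const ' + C.fullname + '&'
# ownedptr(C) behaves like ptr(C) but marks the value as transferring
# ownership, which the generated Python wrappers honour via release_ownership.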
class cast(_Type):
format = 'O'
def __init__(self, original, target):
self.original = original
self.target = target
@property
def fullname(self):
return self.binding_type.fullname
@property
def python_type(self):
if not isinstance(self.target, _Type):
return self.target
else:
return self.original
@property
def binding_type(self):
if isinstance(self.target, _Type):
return self.target
else:
return self.original
def wrap(self, writer, val):
dst = self.python_type.__name__
if dst == 'int':
unsigned = set([Unsigned, UnsignedLongLong, Uint64,
Size_t, VoidPtr])
signed = set([LongLong, Int64, Int])
assert self.binding_type in unsigned|signed
if self.binding_type in signed:
signflag = 'signed'
else:
signflag = 'unsigned'
fn = 'py_%(dst)s_from_%(signflag)s' % locals()
else:
fn = 'py_%(dst)s_from' % locals()
return writer.call(fn, 'PyObject*', val)
def unwrap(self, writer, val):
src = self.python_type.__name__
dst = self.binding_type.fullname
ret = writer.declare(dst)
fn = 'py_%(src)s_to' % locals()
status = writer.call(fn, 'int', val, ret)
writer.die_if_false(status)
return ret
class CustomPythonMethod(object):
def __init__(self, fn):
src = inspect.getsource(fn)
lines = textwrap.dedent(src).splitlines()
for i, line in enumerate(lines):
if not line.startswith('@'):
break
self.sourcelines = lines[i:]
def compile_py(self, writer):
for line in self.sourcelines:
writer.println(line)
class CustomPythonStaticMethod(CustomPythonMethod):
def compile_py(self, writer):
writer.println('@staticmethod')
super(CustomPythonStaticMethod, self).compile_py(writer)
class Attr(object):
def __init__(self, getter, setter):
self.getter = getter
self.setter = setter
self.includes = set()
@property
def fullname(self):
try:
name = self.realname
except AttributeError:
name = self.name
return '::'.join([self.parent.fullname, name])
def __str__(self):
return self.fullname
@property
def getter_name(self):
return '%s_get' % self.name
@property
def setter_name(self):
return '%s_set' % self.name
@property
def getter_c_name(self):
return cg.mangle('%s_get' % self.fullname)
@property
def setter_c_name(self):
return cg.mangle('%s_set' % self.fullname)
def generate_cpp(self, println):
self.compile_cpp(cg.CppCodeWriter(println))
def compile_cpp(self, writer):
# getter
with writer.py_function(self.getter_c_name):
(this,) = writer.parse_arguments('args', ptr(self.parent))
attr = self.name
ret = writer.declare(self.getter.fullname,
'%(this)s->%(attr)s' % locals())
writer.return_value(self.getter.wrap(writer, ret))
# setter
with writer.py_function(self.setter_c_name):
(this, value) = writer.parse_arguments('args', ptr(self.parent),
self.setter)
attr = self.name
writer.println('%(this)s->%(attr)s = %(value)s;' % locals())
writer.return_value(None)
def compile_py(self, writer):
name = self.name
parent = '.'.join(self.parent.fullname.split('::'))
getter = '.'.join([parent, self.getter_name])
setter = '.'.join([parent, self.setter_name])
writer.println('@property')
with writer.block('def %(name)s(self):' % locals()):
unself = writer.unwrap('self')
ret = writer.new_symbol('ret')
writer.println('%(ret)s = _api.%(getter)s(%(unself)s)' % locals())
is_ownedptr = isinstance(self.getter, ownedptr)
writer.return_value(writer.wrap(ret, is_ownedptr))
writer.println()
writer.println('@%(name)s.setter' % locals())
with writer.block('def %(name)s(self, value):' % locals()):
unself = writer.unwrap('self')
unvalue = writer.unwrap('value')
if isinstance(self.setter, ownedptr):
writer.release_ownership(unvalue)
writer.println('return _api.%(setter)s(%(unself)s, %(unvalue)s)' %
locals())
writer.println()
#
# Pick-up environ var
#
TARGETS_BUILT = os.environ.get('LLVM_TARGETS_BUILT', '').split()
def _parse_llvm_version(ver):
import re
m = re.compile(r'(\d+)\.(\d+)').match(ver)
assert m
major, minor = m.groups()
return int(major), int(minor)
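# For example, _parse_llvm_version('3.2') == (3, 2); anything after the minor
# number (e.g. a patch level in '3.2.1') is ignored by the regex above.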
LLVM_VERSION = _parse_llvm_version(os.environ['LLVMPY_LLVM_VERSION'])
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import pytest
import inspect
import json
import logging
import fnmatch
from osbs.core import Openshift
from osbs.http import HttpResponse
from osbs.conf import Configuration
from osbs.api import OSBS
from osbs.constants import ANNOTATION_SOURCE_REPO, ANNOTATION_INSECURE_REPO
from tests.constants import (TEST_BUILD, TEST_CANCELLED_BUILD, TEST_ORCHESTRATOR_BUILD,
TEST_GIT_BRANCH, TEST_BUILD_CONFIG, TEST_GIT_URI_HUMAN_NAME,
TEST_KOJI_TASK_ID, TEST_IMAGESTREAM, TEST_IMAGESTREAM_NO_TAGS,
TEST_IMAGESTREAM_WITH_ANNOTATION,
TEST_IMAGESTREAM_WITHOUT_IMAGEREPOSITORY, TEST_GIT_URI_SANITIZED)
from tempfile import NamedTemporaryFile
from textwrap import dedent
from six.moves import http_client
from six.moves.urllib.parse import urlparse
logger = logging.getLogger("osbs.tests")
API_VER = Configuration.get_openshift_api_version()
OAPI_PREFIX = "/oapi/{v}/".format(v=API_VER)
API_PREFIX = "/api/{v}/".format(v=API_VER)
class StreamingResponse(object):
def __init__(self, status_code=200, content=b'', headers=None):
self.status_code = status_code
self.content = content
self.headers = headers or {}
def iter_lines(self):
yield self.content
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
class Connection(object):
def __init__(self, version="3.9.41"):
self.version = version
self.response_mapping = ResponseMapping(version,
lookup=self.get_definition_for)
# mapping of urls or tuples of urls to responses; use get_definition_for
# to get values from this dict
#
# The files are captured using the command line tool's
# --capture-dir parameter, and edited as needed.
self.DEFINITION = {
(OAPI_PREFIX + "namespaces/default/builds",
OAPI_PREFIX + "namespaces/default/builds/",
OAPI_PREFIX + "namespaces/default/builds/?labelSelector=koji-task-id%3D123456789"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
},
"post": {
# Contains a single build named test-build-123
"file": "build_test-build-123.json",
},
},
(OAPI_PREFIX + "namespaces/default/builds/?labelSelector=koji-task-id%3D987654321"): {
"get": {
# no build for this koji id
"file": "builds_list_empty.json",
},
},
(OAPI_PREFIX + "namespaces/default/builds/?labelSelector=koji-task-id%3D123459876"): {
"get": {
# one build for this koji id
"file": "builds_list_one.json",
},
},
(OAPI_PREFIX + "namespaces/default/builds?fieldSelector=status%3DRunning",
OAPI_PREFIX + "namespaces/default/builds/?fieldSelector=status%3DRunning"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
(OAPI_PREFIX + "namespaces/default/builds?fieldSelector=status%21%3DFailed%2C"
"status%21%3DComplete%2Cstatus%21%3DError%2Cstatus%21%3DCancelled",
OAPI_PREFIX + "namespaces/default/builds/?fieldSelector=status%21%3DFailed%2C"
"status%21%3DComplete%2Cstatus%21%3DError%2Cstatus%21%3DCancelled"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
(OAPI_PREFIX + "namespaces/default/builds?fieldSelector=foo%3Doof%2C"
"status%21%3DFailed%2Cstatus%21%3DComplete%2Cstatus%21%3DError%2C"
"status%21%3DCancelled",
OAPI_PREFIX + "namespaces/default/builds/?fieldSelector=foo%3Doof%2C"
"status%21%3DFailed%2Cstatus%21%3DComplete%2Cstatus%21%3DError%2C"
"status%21%3DCancelled"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
(OAPI_PREFIX + "namespaces/default/builds?fieldSelector=foo%3Doof",
OAPI_PREFIX + "namespaces/default/builds/?fieldSelector=foo%3Doof"): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
OAPI_PREFIX + "namespaces/default/builds/"
"?labelSelector=koji-task-id%3D{task}".format(task=TEST_KOJI_TASK_ID): {
"get": {
# Contains a list of builds
"file": "builds_list.json",
}
},
# Some 'builds' requests are with a trailing slash, some without:
(OAPI_PREFIX + "namespaces/default/builds/%s" % TEST_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/" % TEST_BUILD): {
"get": {
# Contains a single build in Completed phase
# named test-build-123
"file": "build_test-build-123.json",
},
"put": {
"file": "build_test-build-123.json",
}
},
# Some 'builds' requests are with a trailing slash, some without:
(OAPI_PREFIX + "namespaces/default/builds/%s" % TEST_ORCHESTRATOR_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/" % TEST_ORCHESTRATOR_BUILD): {
"get": {
# Contains a single build in Completed phase
# named test-orchestrator-build-123
"file": "build_test-orchestrator-build-123.json",
},
"put": {
"file": "build_test-orchestrator-build-123.json",
}
},
# Simulate build cancellation
(OAPI_PREFIX + "namespaces/default/builds/%s" % TEST_CANCELLED_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/" % TEST_CANCELLED_BUILD): {
"get": {
                    # Contains a single build named test-build-cancel-123
"file": "build_test-build-cancel-123_get.json",
},
"put": {
"file": "build_test-build-cancel-123_put.json",
}
},
(OAPI_PREFIX + "namespaces/default/builds/%s/log/" % TEST_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=0" % TEST_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=1" % TEST_BUILD): {
"get": {
# Lines of text
"file": "build_test-build-123_logs.txt",
},
},
(OAPI_PREFIX + "namespaces/default/builds/%s/log/" % TEST_ORCHESTRATOR_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=0" % TEST_ORCHESTRATOR_BUILD,
OAPI_PREFIX + "namespaces/default/builds/%s/log/?follow=1"
% TEST_ORCHESTRATOR_BUILD): {
"get": {
# Lines of text
"file": "build_test-orchestrator-build-123_logs.txt",
},
},
("/oauth/authorize",
"/oauth/authorize?client_id=openshift-challenging-client&response_type=token",
"/oauth/authorize?response_type=token&client_id=openshift-challenging-client"): {
"get": {
"file": "authorize.txt",
"custom_callback": self.process_authorize,
}
},
OAPI_PREFIX + "users/~/": {
"get": {
"file": "get_user.json",
}
},
OAPI_PREFIX + "watch/namespaces/default/builds/%s/" % TEST_BUILD: {
"get": {
# Single MODIFIED item, with a Build object in
# Completed phase named test-build-123
"file": "watch_build_test-build-123.json",
}
},
OAPI_PREFIX + "watch/namespaces/default/builds/%s/" % TEST_ORCHESTRATOR_BUILD: {
"get": {
# Single MODIFIED item, with a Build object in
                # Completed phase named test-orchestrator-build-123
"file": "watch_build_test-orchestrator-build-123.json",
}
},
OAPI_PREFIX + "namespaces/default/buildconfigs/": {
"post": {
# Contains a BuildConfig named test-build-config-123
"file": "created_build_config_test-build-config-123.json",
}
},
OAPI_PREFIX + "namespaces/default/buildconfigs/%s/instantiate" % TEST_BUILD_CONFIG: {
"post": {
# A Build named test-build-123 instantiated from a
# BuildConfig named test-build-config-123
"file": "instantiated_test-build-config-123.json",
}
},
# use both version with ending slash and without it
(OAPI_PREFIX + "namespaces/default/buildconfigs/%s" % TEST_BUILD_CONFIG,
OAPI_PREFIX + "namespaces/default/buildconfigs/%s/" % TEST_BUILD_CONFIG,
((OAPI_PREFIX + "namespaces/default/buildconfigs/?labelSelector=" +
"git-repo-name%%3D%s" "%%2C" "git-branch%%3D%s"
) % (TEST_GIT_URI_HUMAN_NAME, TEST_GIT_BRANCH)),
((OAPI_PREFIX + "namespaces/default/buildconfigs/?labelSelector=" +
"git-repo-name%%3D%s" "%%2C" "git-branch%%3D%s" "%%2C" "git-full-repo%%3D%s"
) % (TEST_GIT_URI_HUMAN_NAME, TEST_GIT_BRANCH, TEST_GIT_URI_SANITIZED)),
): {
"get": {
"custom_callback":
self.with_status_code(http_client.NOT_FOUND),
                # Empty file (no response content as the status is 404)
"file": None,
}
},
OAPI_PREFIX + "namespaces/default/builds/?labelSelector=buildconfig%%3D%s" %
TEST_BUILD_CONFIG: {
"get": {
# Contains a BuildList with Builds labeled with
# buildconfig=fedora23-something, none of which
# are running
"file": "builds_list.json"
}
},
OAPI_PREFIX + "namespaces/default/imagestreams/%s" %
TEST_IMAGESTREAM: {
"get": {
# Contains imagestream
# with 3 tags
"file": "imagestream.json"
},
"put": {
# Contains imagestream
# with 3 tags but with different resourceVersion
"file": "imagestream.json",
"custom_callback": self.increment_resource_version
}
},
OAPI_PREFIX + "namespaces/default/imagestreams/%s" %
TEST_IMAGESTREAM_NO_TAGS: {
"get": {
# Contains imagestream with no tags
"file": "imagestream.json",
"custom_callback": self.remove_tags
},
"put": {
# Contains imagestream with no tags
"file": "imagestream.json",
"custom_callback": self.remove_tags
}
},
OAPI_PREFIX + "namespaces/default/imagestreams/%s" %
TEST_IMAGESTREAM_WITH_ANNOTATION: {
"get": {
# Contains imagestream with 3 tags; source repository
# is listed in annotation instead of spec.
"file": "imagestream.json",
"custom_callback": self.move_repo_to_annotation
},
"put": {
# Contains imagestream with 3 tags; source repository
# is listed in annotation instead of spec.
"file": "imagestream.json",
"custom_callback": self.move_repo_to_annotation
}
},
OAPI_PREFIX + "namespaces/default/imagestreams/%s" %
TEST_IMAGESTREAM_WITHOUT_IMAGEREPOSITORY: {
"get": {
# Contains imagestream with 3 tags; source repository
# is listed in annotation instead of spec.
"file": "imagestream.json",
"custom_callback": self.remove_imagerepository
},
"put": {
# Contains imagestream with 3 tags; source repository
# is listed in annotation instead of spec.
"file": "imagestream.json",
"custom_callback": self.remove_imagerepository
}
},
OAPI_PREFIX + "namespaces/default/imagestreamimports/": {
"post": {
"file": "imagestreamimport.json",
}
},
API_PREFIX + "namespaces/default/pods/?labelSelector=openshift.io%%2Fbuild.name%%3D%s" %
TEST_BUILD: {
"get": {
                    # Contains a list of build pods; it just needs
                    # to be non-empty
"file": "pods.json",
},
},
API_PREFIX + "namespaces/default/pods/?labelSelector=openshift.io%%2Fbuild.name%%3D%s" %
TEST_ORCHESTRATOR_BUILD: {
"get": {
                    # Contains a list of build pods; it just needs
                    # to be non-empty
"file": "pods.json",
},
},
API_PREFIX + "namespaces/default/resourcequotas/": {
# Make the POST fail so we can test PUT
"post": {
"custom_callback": self.with_status_code(http_client.CONFLICT),
                    # Response is not really empty, but its content
                    # isn't relevant to the test
"file": None,
},
},
API_PREFIX + "namespaces/default/resourcequotas/pause": {
"put": {
"file": None,
},
"delete": {
"file": None, # not really empty but not relevant
},
},
(API_PREFIX + "namespaces/default/configmaps/",
API_PREFIX + "namespaces/default/configmaps/special-config"): {
"post": {
# Contains a configMap
"file": "create_config_map.json",
},
"get": {
# Contains a configMap
"file": "create_config_map.json",
},
"delete": {
# doesn't return anything
"file": None,
},
},
}
@staticmethod
def process_authorize(key, content):
match = re.findall("[Ll]ocation: (.+)", content.decode("utf-8"))
headers = {
"location": match[0],
}
logger.debug("headers: %s", headers)
return {
"headers": headers
}
@staticmethod
def with_status_code(status_code):
def custom_func(key, content):
return {
"content": content,
"status_code": status_code,
}
return custom_func
@staticmethod
def increment_resource_version(key, content):
content = json.loads(content)
ver = int(content['metadata']['resourceVersion']) + 1
content['metadata']['resourceVersion'] = str(ver)
return {"content": json.dumps(content).encode('utf-8')}
@staticmethod
def remove_tags(key, content):
content = json.loads(content)
content['spec']['tags'] = []
return {"content": json.dumps(content).encode('utf-8')}
@staticmethod
def remove_imagerepository(key, content):
content = json.loads(content)
content['spec'].pop('dockerImageRepository', None)
content['metadata']['annotations'].pop(ANNOTATION_INSECURE_REPO)
content['spec']['tags'] = []
return {"content": json.dumps(content).encode('utf-8')}
@staticmethod
def move_repo_to_annotation(key, content):
content = json.loads(content)
repo = content['spec'].pop('dockerImageRepository', None)
content['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = repo
return {"content": json.dumps(content).encode('utf-8')}
def get_definition_for(self, key):
"""
        Return the key and value associated with the given key in the DEFINITION dict.
        The given key is either an actual dict key in DEFINITION or a member of
        a tuple that serves as a dict key in DEFINITION.
"""
try:
# Try a direct look-up
return key, self.DEFINITION[key]
except KeyError:
# Try all the tuples
for k, v in self.DEFINITION.items():
if isinstance(k, tuple):
for tup in k:
if fnmatch.fnmatch(key, tup):
return k, v
else:
if fnmatch.fnmatch(key, k):
return k, v
raise ValueError("Can't find '%s' in url mapping definition" % key)
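    # Example (illustrative): a request path such as
    #   OAPI_PREFIX + "namespaces/default/builds/"
    # has no exact entry of its own, but it is a member of the first tuple key
    # in DEFINITION, so that whole tuple plus its response spec is returned;
    # fnmatch additionally allows glob-style keys.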
@staticmethod
def response(status_code=200, content=b'', headers=None):
return HttpResponse(status_code, headers or {}, content=content)
def request(self, url, method, stream=None, *args, **kwargs):
parsed_url = urlparse(url)
# fragment = parsed_url.fragment
# parsed_fragment = urllib.parse_qs(fragment)
url_path = parsed_url.path
if parsed_url.query:
url_path += '?' + parsed_url.query
logger.info("URL path is '%s'", url_path)
kwargs = self.response_mapping.response_mapping(url_path, method)
if stream:
return StreamingResponse(**kwargs)
else:
return self.response(**kwargs)
def get(self, url, *args, **kwargs):
return self.request(url, "get", *args, **kwargs)
def post(self, url, *args, **kwargs):
return self.request(url, "post", *args, **kwargs)
def put(self, url, *args, **kwargs):
return self.request(url, "put", *args, **kwargs)
def delete(self, url, *args, **kwargs):
return self.request(url, "delete", *args, **kwargs)
@pytest.fixture(params=["1.0.4", "3.9.41"])
def openshift(request):
os_inst = Openshift(OAPI_PREFIX, API_VER, "/oauth/authorize",
k8s_api_url=API_PREFIX)
os_inst._con = Connection(request.param)
return os_inst
@pytest.fixture(params=[{'kwargs': None, 'additional_config': None, 'platform_descriptors': None}])
def osbs(request, openshift):
kwargs = request.param['kwargs'] or {}
platform_descriptors = request.param['platform_descriptors'] or {}
kwargs.setdefault('build_json_dir', 'inputs')
kwargs.setdefault('registry_uri', 'registry.example.com')
kwargs.setdefault('additional_general', '')
with NamedTemporaryFile(mode="wt") as fp:
config = dedent("""\
[general]
build_json_dir = {build_json_dir}
{additional_general}
[default]
openshift_url = /
registry_uri = {registry_uri}
sources_command = fedpkg sources
vendor = Example, Inc.
build_host = localhost
authoritative_registry = registry.example.com
distribution_scope = authoritative-source-only
koji_root = http://koji.example.com/kojiroot
koji_hub = http://koji.example.com/kojihub
flatpak_base_image = registry.fedoraproject.org/fedora:latest
odcs_url = https://odcs.example.com/odcs/1
pdc_url = https://pdc.example.com/rest_api/v1
use_auth = false
can_orchestrate = true
build_from = image:buildroot:latest
""")
if request.param['additional_config'] is not None:
config += request.param['additional_config']
config += '\n'
for platform, platform_info in platform_descriptors.items():
if not platform_info:
continue
config += '[platform:{}]\n'.format(platform)
for item, value in platform_info.items():
config += '{} = {}\n'.format(item, value)
fp.write(config.format(**kwargs))
fp.flush()
dummy_config = Configuration(fp.name)
osbs = OSBS(dummy_config, dummy_config)
osbs.os = openshift
return osbs
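# Tests typically customise this fixture through indirect parametrization,
# e.g. (illustrative, not taken from this file):
#
#   @pytest.mark.parametrize('osbs', [{
#       'kwargs': None,
#       'additional_config': get_pulp_additional_config(),
#       'platform_descriptors': None,
#   }], indirect=True)
#   def test_something(osbs):
#       ...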
def get_pulp_additional_config(with_group=False):
if with_group:
conf = dedent("""\
pulp_registry_name = pulp
pulp_secret = secret
group_manifests = true""")
else:
conf = dedent("""\
pulp_registry_name = pulp
pulp_secret = secret""")
return conf
@pytest.fixture
def osbs_cant_orchestrate(openshift):
with NamedTemporaryFile(mode="wt") as fp:
fp.write("""
[general]
build_json_dir = {build_json_dir}
[default]
openshift_url = /
registry_uri = registry.example.com
sources_command = fedpkg sources
vendor = Example, Inc.
build_host = localhost
authoritative_registry = registry.example.com
distribution_scope = authoritative-source-only
koji_root = http://koji.example.com/kojiroot
koji_hub = http://koji.example.com/kojihub
use_auth = false
""".format(build_json_dir="inputs"))
fp.flush()
dummy_config = Configuration(fp.name)
osbs = OSBS(dummy_config, dummy_config)
osbs.os = openshift
return osbs
@pytest.fixture
def osbs106(openshift):
with NamedTemporaryFile(mode="wt") as fp:
fp.write("""
[general]
build_json_dir = {build_json_dir}
openshift_required_version = 1.0.6
[default]
openshift_url = /
registry_uri = registry.example.com
sources_command = fedpkg sources
vendor = Example, Inc.
build_host = localhost
authoritative_registry = registry.example.com
distribution_scope = authoritative-source-only
koji_root = http://koji.example.com/kojiroot
koji_hub = http://koji.example.com/kojihub
use_auth = false
build_from = image:buildroot:latest
""".format(build_json_dir="inputs"))
fp.flush()
dummy_config = Configuration(fp.name)
osbs = OSBS(dummy_config, dummy_config)
osbs.os = openshift
return osbs
class ResponseMapping(object):
def __init__(self, version, lookup):
self.version = version
self.lookup = lookup
def get_response_content(self, file_name):
this_file = inspect.getfile(ResponseMapping)
this_dir = os.path.dirname(this_file)
json_path = os.path.join(this_dir, "mock_jsons", self.version, file_name)
logger.debug("File: %s", json_path)
with open(json_path, "rb") as fd:
return fd.read()
def response_mapping(self, url_path, method):
key, value_to_use = self.lookup(url_path)
file_name = value_to_use[method]["file"]
logger.debug("API response content: %s", file_name)
custom_callback = value_to_use[method].get("custom_callback", None)
if file_name is None:
content = b''
else:
content = self.get_response_content(file_name)
if custom_callback:
logger.debug("Custom API callback: %s", custom_callback)
return custom_callback(key, content)
else:
return {"content": content}
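# Minimal usage sketch (illustrative; it relies on the canned files under
# mock_jsons/<version>/ that accompany these tests): the fake Connection
# resolves every request purely from the DEFINITION mapping above.
#
#   conn = Connection()
#   resp = conn.get(OAPI_PREFIX + "users/~/")   # served from get_user.json
#   assert resp.status_code == 200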
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Metacloud, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_group_default_rules
from nova.api.openstack import wsgi
from nova import context
import nova.db
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def security_group_default_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'TCP')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('cidr', '10.10.10.0/24')
return rule
def security_group_default_rule_db(security_group_default_rule, id=None):
attrs = security_group_default_rule.copy()
if id is not None:
attrs['id'] = id
return AttrDict(attrs)
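# For reference, security_group_default_rule_template(to_port=80) yields
# {'ip_protocol': 'TCP', 'from_port': 22, 'to_port': 80, 'cidr': '10.10.10.0/24'},
# and security_group_default_rule_db() wraps such a dict in an AttrDict so the
# values are also reachable as attributes (rule.to_port).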
class TestSecurityGroupDefaultRules(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRules, self).setUp()
self.controller = \
security_group_default_rules.SecurityGroupDefaultRulesController()
def test_create_security_group_default_rule(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
sgr_dict = dict(security_group_default_rule=sgr)
res_dict = self.controller.create(req, sgr_dict)
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
def test_create_security_group_default_rule_with_no_to_port(self):
sgr = security_group_default_rule_template()
del sgr['to_port']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_from_port(self):
sgr = security_group_default_rule_template()
del sgr['from_port']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_ip_protocol(self):
sgr = security_group_default_rule_template()
del sgr['ip_protocol']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_cidr(self):
sgr = security_group_default_rule_template()
del sgr['cidr']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.create(req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEquals(security_group_default_rule['id'], 0)
self.assertEquals(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_with_blank_to_port(self):
sgr = security_group_default_rule_template(to_port='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_from_port(self):
sgr = security_group_default_rule_template(from_port='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_cidr(self):
sgr = security_group_default_rule_template(cidr='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.create(req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEquals(security_group_default_rule['id'], 0)
self.assertEquals(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_non_numerical_to_port(self):
sgr = security_group_default_rule_template(to_port='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_non_numerical_from_port(self):
sgr = security_group_default_rule_template(from_port='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_cidr(self):
sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_to_port(self):
sgr = security_group_default_rule_template(to_port='666666')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_from_port(self):
sgr = security_group_default_rule_template(from_port='666666')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_body(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, None)
def test_create_duplicate_security_group_default_rule(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.controller.create(req, {'security_group_default_rule': sgr})
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_security_group_default_rules_list(self):
self.test_create_security_group_default_rule()
rules = [dict(id=1,
ip_protocol='TCP',
from_port=22,
to_port=22,
ip_range=dict(cidr='10.10.10.0/24'))]
expected = {'security_group_default_rules': rules}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
def test_default_security_group_default_rule_show(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.show(req, '1')
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
def test_delete_security_group_default_rule(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
self.called = False
def security_group_default_rule_destroy(context, id):
self.called = True
def return_security_group_default_rule(context, id):
self.assertEquals(sgr['id'], id)
return security_group_default_rule_db(sgr)
self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
security_group_default_rule_destroy)
self.stubs.Set(nova.db, 'security_group_default_rule_get',
return_security_group_default_rule)
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.controller.delete(req, '1')
self.assertTrue(self.called)
def test_security_group_ensure_default(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
ctxt = context.get_admin_context()
setattr(ctxt, 'project_id', 'new_project_id')
_, sg = nova.db.security_group_ensure_default(ctxt)
rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
security_group_rule = rules[0]
self.assertEqual(sgr['id'], security_group_rule.id)
self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
self.assertEqual(sgr['from_port'], security_group_rule.from_port)
self.assertEqual(sgr['to_port'], security_group_rule.to_port)
self.assertEqual(sgr['cidr'], security_group_rule.cidr)
class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
deserializer = security_group_default_rules.\
SecurityGroupDefaultRulesXMLDeserializer()
self.deserializer = deserializer
def test_create_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_to_port_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_from_port_request(self):
serial_request = """
<security_group_default_rule>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"to_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_ip_protocol_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_cidr_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"ip_protocol": "TCP",
},
}
self.assertEqual(request['body'], expected)
class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
self.namespace = wsgi.XMLNS_V11
self.rule_serializer =\
security_group_default_rules.SecurityGroupDefaultRuleTemplate()
self.index_serializer =\
security_group_default_rules.SecurityGroupDefaultRulesTemplate()
def _tag(self, elem):
tagname = elem.tag
self.assertEqual(tagname[0], '{')
tmp = tagname.partition('}')
namespace = tmp[0][1:]
self.assertEqual(namespace, self.namespace)
return tmp[2]
def _verify_security_group_default_rule(self, raw_rule, tree):
self.assertEqual(raw_rule['id'], tree.get('id'))
seen = set()
expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
'ip_range/cidr'])
for child in tree:
child_tag = self._tag(child)
seen.add(child_tag)
if child_tag == 'ip_range':
for gr_child in child:
gr_child_tag = self._tag(gr_child)
self.assertTrue(gr_child_tag in raw_rule[child_tag])
seen.add('%s/%s' % (child_tag, gr_child_tag))
self.assertEqual(gr_child.text,
raw_rule[child_tag][gr_child_tag])
else:
self.assertEqual(child.text, raw_rule[child_tag])
self.assertEqual(seen, expected)
def test_rule_serializer(self):
raw_rule = dict(id='123',
ip_protocol='TCP',
from_port='22',
to_port='22',
ip_range=dict(cidr='10.10.10.0/24'))
rule = dict(security_group_default_rule=raw_rule)
text = self.rule_serializer.serialize(rule)
tree = etree.fromstring(text)
self.assertEqual('security_group_default_rule', self._tag(tree))
self._verify_security_group_default_rule(raw_rule, tree)
def test_index_serializer(self):
rules = [dict(id='123',
ip_protocol='TCP',
from_port='22',
to_port='22',
ip_range=dict(cidr='10.10.10.0/24')),
dict(id='234',
ip_protocol='UDP',
from_port='23456',
to_port='234567',
ip_range=dict(cidr='10.12.0.0/18')),
dict(id='345',
ip_protocol='tcp',
from_port='3456',
to_port='4567',
ip_range=dict(cidr='192.168.1.0/32'))]
rules_dict = dict(security_group_default_rules=rules)
text = self.index_serializer.serialize(rules_dict)
tree = etree.fromstring(text)
self.assertEqual('security_group_default_rules', self._tag(tree))
self.assertEqual(len(rules), len(tree))
for idx, child in enumerate(tree):
self._verify_security_group_default_rule(rules[idx], child)
|
|
# Copyright 2013-2015 University of Warsaw
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import shutil
import tempfile
import os.path
import itertools
import filecmp
import distutils.dir_util
from avroknife.test.command_line_runner import CommandLineRunner, \
CommandLineRunnerException
from avroknife.test import example_data_stores
class CommandLineTestCaseBase(unittest.TestCase):
__source_data_dir = None
_r = None
__hdfs_tests_env_name = 'AVROKNIFE_HDFS_TESTS'
__hdfs_tests = False
@classmethod
def setUpClass(cls):
cls.__source_data_dir = tempfile.mkdtemp()
distutils.dir_util.copy_tree(
os.path.join(os.path.dirname(__file__), 'data/local_input_dir'),
cls.__source_data_dir)
standard_ds_path = os.path.join(cls.__source_data_dir, 'standard')
nested_ds_path = os.path.join(cls.__source_data_dir, 'nested')
binary_ds_path = os.path.join(cls.__source_data_dir, 'binary')
example_data_stores.create(
standard_ds_path, nested_ds_path, binary_ds_path)
enforce_local = True
env_name = CommandLineTestCaseBase.__hdfs_tests_env_name
        if os.getenv(env_name, 'FALSE') == 'TRUE':
cls.__hdfs_tests = True
enforce_local = False
cls._r = CommandLineRunner(
'./scripts/avroknife', cls.__source_data_dir, enforce_local)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.__source_data_dir)
cls._r.close()
@staticmethod
def _read(file_path):
with open(file_path, 'r') as f:
content = f.read()
return content
@staticmethod
def __bool_to_str(x):
if x:
return ' '
else:
return ' NOT '
@staticmethod
def __get_problem_str(in_local, out_local):
return 'Test failed when inputs are{}local and outputs are{}local.'.\
format(CommandLineTestCaseBase.__bool_to_str(in_local),
CommandLineTestCaseBase.__bool_to_str(out_local))
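    # _iterate runs `fun` over every (input location, output location)
    # combination: local/local only by default, and the full local/HDFS
    # product when the AVROKNIFE_HDFS_TESTS environment variable is 'TRUE'.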
def _iterate(self, fun):
in_local_values = [True]
out_local_values = [True]
if self.__hdfs_tests:
in_local_values = [True, False]
out_local_values = [True, False]
for in_local, out_local in itertools.product(
in_local_values, out_local_values):
try:
fun(in_local, out_local)
except Exception:
print(self.__get_problem_str(in_local, out_local))
raise
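    # Assertion helpers. The '@in:'/'@out:' tokens in command_args are
    # placeholders which the CommandLineRunner helper presumably resolves to
    # concrete input/output paths (local or HDFS) based on in_local/out_local.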
def _check_output(self, command_args, expected_stdout, in_local, out_local):
ret = self._r.run(command_args, in_local, out_local)
self.assertEqual(expected_stdout, ret.get_stdout())
def _check_output_file_raw_content(self, command_args, expected_content,
output_file_name, in_local, out_local):
ret = self._r.run(command_args, in_local, out_local)
actual = self._read(ret.get_output_path(output_file_name))
self.assertEqual(expected_content, actual)
def _check_output_avro_file(self, command_args, expected_avro_json_content,
output_file_name, in_local, out_local):
ret = self._r.run(command_args, in_local, out_local)
output_path = ret.get_output_path(output_file_name)
actual = self._r.run_raw('tojson local:{}'.format(output_path))
self.assertEqual(expected_avro_json_content, actual)
@staticmethod
def _get_expected_standard_schema():
return """\
{
"namespace": "avroknife.test.data",
"type": "record",
"name": "User",
"fields": [
{
"type": "int",
"name": "position"
},
{
"type": "string",
"name": "name"
},
{
"type": [
"int",
"null"
],
"name": "favorite_number"
},
{
"type": [
"string",
"null"
],
"name": "favorite_color"
},
{
"type": [
"bytes",
"null"
],
"name": "secret"
}
]
}
"""
@staticmethod
def _get_expected_standard_contents():
return """\
{"position": 0, "name": "Alyssa", "favorite_number": 256, "favorite_color": null, "secret": null}
{"position": 1, "name": "Ben", "favorite_number": 4, "favorite_color": "red", "secret": null}
{"position": 2, "name": "Alyssa2", "favorite_number": 512, "favorite_color": null, "secret": null}
{"position": 3, "name": "Ben2", "favorite_number": 8, "favorite_color": "blue", "secret": "MDk4NzY1NDMyMQ=="}
{"position": 4, "name": "Ben3", "favorite_number": 2, "favorite_color": "green", "secret": "MTIzNDVhYmNk"}
{"position": 5, "name": "Alyssa3", "favorite_number": 16, "favorite_color": null, "secret": null}
{"position": 6, "name": "Mallet", "favorite_number": null, "favorite_color": "blue", "secret": "YXNkZmdm"}
{"position": 7, "name": "Mikel", "favorite_number": null, "favorite_color": "", "secret": null}
"""
class GetSchemaTestsCase(CommandLineTestCaseBase):
def test_basic(self):
self._iterate(self.subtest_basic)
def subtest_basic(self, in_local, out_local):
self._check_output('getschema @in:standard',
self._get_expected_standard_schema(), in_local, out_local)
def test_output_file(self):
self._iterate(self.subtest_output_file)
def subtest_output_file(self, in_local, out_local):
self._check_output_file_raw_content('getschema @in:standard --output @out:actual.txt',
self._get_expected_standard_schema(), 'actual.txt',
in_local, out_local)
class ToJSONTestsCase(CommandLineTestCaseBase):
def test_basic(self):
self._iterate(self.subtest_basic)
def subtest_basic(self, in_local, out_local):
self._check_output('tojson @in:standard',
self._get_expected_standard_contents(), in_local, out_local)
def test_pretty(self):
self._iterate(self.subtest_pretty)
def subtest_pretty(self, in_local, out_local):
self._check_output('tojson --pretty @in:standard', """\
[{
"position": 0,
"name": "Alyssa",
"favorite_number": 256,
"favorite_color": null,
"secret": null
},
{
"position": 1,
"name": "Ben",
"favorite_number": 4,
"favorite_color": "red",
"secret": null
},
{
"position": 2,
"name": "Alyssa2",
"favorite_number": 512,
"favorite_color": null,
"secret": null
},
{
"position": 3,
"name": "Ben2",
"favorite_number": 8,
"favorite_color": "blue",
"secret": "MDk4NzY1NDMyMQ=="
},
{
"position": 4,
"name": "Ben3",
"favorite_number": 2,
"favorite_color": "green",
"secret": "MTIzNDVhYmNk"
},
{
"position": 5,
"name": "Alyssa3",
"favorite_number": 16,
"favorite_color": null,
"secret": null
},
{
"position": 6,
"name": "Mallet",
"favorite_number": null,
"favorite_color": "blue",
"secret": "YXNkZmdm"
},
{
"position": 7,
"name": "Mikel",
"favorite_number": null,
"favorite_color": "",
"secret": null
}]
""", in_local, out_local)
def test_range_stdout(self):
self._iterate(self.subtest_range_stdout)
def subtest_range_stdout(self, in_local, out_local):
self._check_output('tojson @in:standard --index 3-4', """\
{"position": 3, "name": "Ben2", "favorite_number": 8, "favorite_color": "blue", "secret": "MDk4NzY1NDMyMQ=="}
{"position": 4, "name": "Ben3", "favorite_number": 2, "favorite_color": "green", "secret": "MTIzNDVhYmNk"}
""", in_local, out_local)
def test_range_file_out(self):
self._iterate(self.subtest_range_file_out)
def subtest_range_file_out(self, in_local, out_local):
self._check_output_file_raw_content('tojson @in:standard --index 3-4 --output @out:actual.txt', """\
{"position": 3, "name": "Ben2", "favorite_number": 8, "favorite_color": "blue", "secret": "MDk4NzY1NDMyMQ=="}
{"position": 4, "name": "Ben3", "favorite_number": 2, "favorite_color": "green", "secret": "MTIzNDVhYmNk"}
""", 'actual.txt', in_local, out_local)
def test_range_without_beginning(self):
self._iterate(self.subtest_range_without_beginning)
def subtest_range_without_beginning(self, in_local, out_local):
self._check_output('tojson @in:standard --index -4', """\
{"position": 0, "name": "Alyssa", "favorite_number": 256, "favorite_color": null, "secret": null}
{"position": 1, "name": "Ben", "favorite_number": 4, "favorite_color": "red", "secret": null}
{"position": 2, "name": "Alyssa2", "favorite_number": 512, "favorite_color": null, "secret": null}
{"position": 3, "name": "Ben2", "favorite_number": 8, "favorite_color": "blue", "secret": "MDk4NzY1NDMyMQ=="}
{"position": 4, "name": "Ben3", "favorite_number": 2, "favorite_color": "green", "secret": "MTIzNDVhYmNk"}
""", in_local, out_local)
def test_range_without_end(self):
self._iterate(self.subtest_range_without_end)
def subtest_range_without_end(self, in_local, out_local):
self._check_output('tojson @in:standard --index 4-', """\
{"position": 4, "name": "Ben3", "favorite_number": 2, "favorite_color": "green", "secret": "MTIzNDVhYmNk"}
{"position": 5, "name": "Alyssa3", "favorite_number": 16, "favorite_color": null, "secret": null}
{"position": 6, "name": "Mallet", "favorite_number": null, "favorite_color": "blue", "secret": "YXNkZmdm"}
{"position": 7, "name": "Mikel", "favorite_number": null, "favorite_color": "", "secret": null}
""", in_local, out_local)
def test_single_index(self):
self._iterate(self.subtest_single_index)
def subtest_single_index(self, in_local, out_local):
self._check_output('tojson @in:standard --index 4', """\
{"position": 4, "name": "Ben3", "favorite_number": 2, "favorite_color": "green", "secret": "MTIzNDVhYmNk"}
""", in_local, out_local)
def test_edge_case_without_beginning(self):
self._iterate(self.subtest_edge_case_without_beginning)
def subtest_edge_case_without_beginning(self, in_local, out_local):
self._check_output('tojson @in:standard --index -0', """\
{"position": 0, "name": "Alyssa", "favorite_number": 256, "favorite_color": null, "secret": null}
""", in_local, out_local)
def test_edge_case_without_end(self):
self._iterate(self.subtest_edge_case_without_end)
def subtest_edge_case_without_end(self, in_local, out_local):
self._check_output('tojson @in:standard --index 7-', """\
{"position": 7, "name": "Mikel", "favorite_number": null, "favorite_color": "", "secret": null}
""", in_local, out_local)
class CopyTestsCase(CommandLineTestCaseBase):
def test_basic(self):
self._iterate(self.subtest_basic)
def subtest_basic(self, in_local, out_local):
self._check_output_avro_file('copy @in:standard --output @out:whole_copy',
self._get_expected_standard_contents(), 'whole_copy', in_local, out_local)
class ExtractTestsCase(CommandLineTestCaseBase):
def test_text_fields(self):
self._iterate(self.subtest_text_fields)
def subtest_text_fields(self, in_local, out_local):
ret = self._r.run('extract @in:standard --index 2-3 --value_field name --output @out:extracted_name',
in_local, out_local)
output_path = ret.get_output_path('extracted_name')
actual_2 = self._read(os.path.join(output_path, '2'))
actual_3 = self._read(os.path.join(output_path, '3'))
self.assertEqual('Alyssa2', actual_2)
self.assertEqual('Ben2', actual_3)
def test_create_dirs(self):
self._iterate(self.subtest_create_dirs)
def subtest_create_dirs(self, in_local, out_local):
ret = self._r.run('extract @in:standard --index 2-3 --value_field name --create_dirs --output @out:extracted_name',
in_local, out_local)
output_path = ret.get_output_path('extracted_name')
actual_2 = self._read(os.path.join(output_path, '2', '0'))
self.assertEqual(1, len(os.listdir(os.path.join(output_path, '2'))))
actual_3 = self._read(os.path.join(output_path, '3', '0'))
self.assertEqual(1, len(os.listdir(os.path.join(output_path, '3'))))
self.assertEqual('Alyssa2', actual_2)
self.assertEqual('Ben2', actual_3)
def test_name_field(self):
self._iterate(self.subtest_name_field)
def subtest_name_field(self, in_local, out_local):
ret = self._r.run('extract @in:standard --index 2-3 --value_field name --name_field favorite_color --output @out:extracted_name',
in_local, out_local)
output_path = ret.get_output_path('extracted_name')
null = self._read(os.path.join(output_path, 'null'))
blue = self._read(os.path.join(output_path, 'blue'))
self.assertEqual('Alyssa2', null)
self.assertEqual('Ben2', blue)
def test_empty_name_field(self):
self._iterate(self.subtest_empty_name_field)
def subtest_empty_name_field(self, in_local, out_local):
ret = self._r.run('extract @in:standard --index 7 --value_field name --name_field favorite_color --output @out:extracted_name',
in_local, out_local)
output_path = ret.get_output_path('extracted_name')
null = self._read(os.path.join(output_path, 'null'))
self.assertEqual('Mikel', null)
def test_create_dirs_name_field(self):
self._iterate(self.subtest_create_dirs_name_field)
def subtest_create_dirs_name_field(self, in_local, out_local):
ret = self._r.run('extract @in:standard --index 2-3 --value_field name --name_field favorite_color --create_dirs --output @out:extracted_name',
in_local, out_local)
output_path = ret.get_output_path('extracted_name')
null = self._read(os.path.join(output_path, 'null', '0'))
self.assertEqual(1, len(os.listdir(os.path.join(output_path, 'null'))))
blue = self._read(os.path.join(output_path, 'blue', '0'))
self.assertEqual(1, len(os.listdir(os.path.join(output_path, 'blue'))))
self.assertEqual('Alyssa2', null)
self.assertEqual('Ben2', blue)
def test_name_field_with_repeated_names(self):
self._iterate(self.subtest_name_field_with_repeated_names)
def subtest_name_field_with_repeated_names(self, in_local, out_local):
with self.assertRaises(CommandLineRunnerException):
            ## We suppress stderr because the program uses it to report the
            ## error (the files to be created would have the same name), and
            ## that output would clutter the unit test report printed on the
            ## console.
self._r.run('extract @in:standard --index 3-7 --value_field name --name_field favorite_color --output @out:extracted_name',
in_local, out_local, discard_stderr=True)
def test_create_dirs_name_field_with_repeated_names(self):
self._iterate(self.subtest_create_dirs_name_field_with_repeated_names)
def subtest_create_dirs_name_field_with_repeated_names(self, in_local, out_local):
ret = self._r.run('extract @in:standard --value_field name --name_field favorite_color --create_dirs --output @out:extracted_name',
in_local, out_local)
output_path = ret.get_output_path('extracted_name')
null0 = self._read(os.path.join(output_path, 'null', '0'))
red = self._read(os.path.join(output_path, 'red', '0'))
null1 = self._read(os.path.join(output_path, 'null', '1'))
blue0 = self._read(os.path.join(output_path, 'blue', '0'))
green = self._read(os.path.join(output_path, 'green', '0'))
null2 = self._read(os.path.join(output_path, 'null', '2'))
blue1 = self._read(os.path.join(output_path, 'blue', '1'))
empty = self._read(os.path.join(output_path, 'null', '3'))
self.assertEqual(1, len(os.listdir(os.path.join(output_path, 'red'))))
self.assertEqual(4, len(os.listdir(os.path.join(output_path, 'null'))))
self.assertEqual(2, len(os.listdir(os.path.join(output_path, 'blue'))))
self.assertEqual(1, len(os.listdir(os.path.join(output_path, 'green'))))
self.assertEqual('Alyssa', null0)
self.assertEqual('Ben', red)
self.assertEqual('Alyssa2', null1)
self.assertEqual('Ben2', blue0)
self.assertEqual('Ben3', green)
self.assertEqual('Alyssa3', null2)
self.assertEqual('Mallet', blue1)
self.assertEqual('Mikel', empty)
@staticmethod
def __are_files_identical(path0, path1):
return filecmp.cmp(path0, path1, shallow=False)
def test_binary_fields(self):
self._iterate(self.subtest_binary_fields)
def subtest_binary_fields(self, in_local, out_local):
ret = self._r.run('extract @in:binary --value_field packed_files --output @out:extracted_packed_files',
in_local, out_local)
output_path = ret.get_output_path('extracted_packed_files')
self.assertTrue(self.__are_files_identical(
os.path.join(os.path.dirname(__file__), 'data/binary_stuff/various_stuff.tar.gz'),
os.path.join(output_path, '0')))
self.assertTrue(self.__are_files_identical(
os.path.join(os.path.dirname(__file__), 'data/binary_stuff/greetings.tar.gz'),
os.path.join(output_path, '1')))
def test_text_field_stdout(self):
self._iterate(self.subtest_text_field_stdout)
def subtest_text_field_stdout(self, in_local, out_local):
self._check_output('extract @in:standard --index 2 --value_field name', """\
Alyssa2
""", in_local, out_local)
def test_nested_fields(self):
self._iterate(self.subtest_nested_fields)
def subtest_nested_fields(self, in_local, out_local):
ret = self._r.run('extract @in:nested --value_field sub.level2 --output @out:nested',
in_local, out_local)
output_path = ret.get_output_path('nested')
self.assertEqual('2', open(os.path.join(output_path, '0'), 'r').read())
self.assertEqual('1', open(os.path.join(output_path, '1'), 'r').read())
def test_nested_name_field(self):
self._iterate(self.subtest_nested_name_field)
def subtest_nested_name_field(self, in_local, out_local):
ret = self._r.run('extract @in:nested --value_field sup --name_field sub.level2 --output @out:nested',
in_local, out_local)
output_path = ret.get_output_path('nested')
self.assertEqual('1', open(os.path.join(output_path, '2'), 'r').read())
self.assertEqual('2', open(os.path.join(output_path, '1'), 'r').read())
class SelectTestsCase(CommandLineTestCaseBase):
def test_number(self):
self._iterate(self.subtest_number)
def subtest_number(self, in_local, out_local):
self._check_output('tojson @in:standard --select position=1', """\
{"position": 1, "name": "Ben", "favorite_number": 4, "favorite_color": "red", "secret": null}
""", in_local, out_local)
def test_string(self):
self._iterate(self.subtest_string)
def subtest_string(self, in_local, out_local):
self._check_output('tojson @in:standard --select name=Ben', """\
{"position": 1, "name": "Ben", "favorite_number": 4, "favorite_color": "red", "secret": null}
""", in_local, out_local)
def test_empty_string(self):
self._iterate(self.subtest_empty_string)
def subtest_empty_string(self, in_local, out_local):
self._check_output('tojson @in:standard --select favorite_color=""', """\
{"position": 7, "name": "Mikel", "favorite_number": null, "favorite_color": "", "secret": null}
""", in_local, out_local)
def test_null(self):
self._iterate(self.subtest_null)
def subtest_null(self, in_local, out_local):
self._check_output('tojson @in:standard --select favorite_color=null', """\
{"position": 0, "name": "Alyssa", "favorite_number": 256, "favorite_color": null, "secret": null}
{"position": 2, "name": "Alyssa2", "favorite_number": 512, "favorite_color": null, "secret": null}
{"position": 5, "name": "Alyssa3", "favorite_number": 16, "favorite_color": null, "secret": null}
""", in_local, out_local)
def test_no_records(self):
self._iterate(self.subtest_no_records)
def subtest_no_records(self, in_local, out_local):
self._check_output('tojson @in:standard --select name=Ben --index 2-', '', in_local, out_local)
def test_no_records_pretty(self):
self._iterate(self.subtest_no_records_pretty)
def subtest_no_records_pretty(self, in_local, out_local):
self._check_output('tojson --pretty @in:standard --select name=Ben --index 2-', '[]\n', in_local, out_local)
def test_copy(self):
self._iterate(self.subtest_copy)
def subtest_copy(self, in_local, out_local):
self._check_output_avro_file('copy @in:standard --select name=Ben --output @out:ben_copy',
"""\
{"position": 1, "name": "Ben", "favorite_number": 4, "favorite_color": "red", "secret": null}
""", 'ben_copy', in_local, out_local)
def test_extract(self):
self._iterate(self.subtest_extract)
def subtest_extract(self, in_local, out_local):
ret = self._r.run('extract @in:standard --value_field name --select name=Ben --output @out:ben_name',
in_local, out_local)
output_path = ret.get_output_path('ben_name')
actual_1 = self._read(os.path.join(output_path, '1'))
self.assertEqual('Ben', actual_1)
def test_nested_field(self):
self._iterate(self.subtest_nested_field)
def subtest_nested_field(self, in_local, out_local):
self._check_output('tojson @in:nested --select sub.level2=2', """\
{"sup": 1, "sub": {"level2": 2}}
""", in_local, out_local)
class LimitTestsCase(CommandLineTestCaseBase):
def test_basic(self):
self._iterate(self.subtest_basic)
def subtest_basic(self, in_local, out_local):
self._check_output('tojson @in:standard --limit 1', """\
{"position": 0, "name": "Alyssa", "favorite_number": 256, "favorite_color": null, "secret": null}
""", in_local, out_local)
def test_select_no_output(self):
self._iterate(self.subtest_select_no_output)
def subtest_select_no_output(self, in_local, out_local):
self._check_output('tojson @in:standard --select name=Ben --index 2-', '', in_local, out_local)
def test_select(self):
self._iterate(self.subtest_select)
def subtest_select(self, in_local, out_local):
self._check_output('tojson @in:standard --select favorite_color=blue', """\
{"position": 3, "name": "Ben2", "favorite_number": 8, "favorite_color": "blue", "secret": "MDk4NzY1NDMyMQ=="}
{"position": 6, "name": "Mallet", "favorite_number": null, "favorite_color": "blue", "secret": "YXNkZmdm"}
""", in_local, out_local)
def test_select_with_limit(self):
self._iterate(self.subtest_select_with_limit)
def subtest_select_with_limit(self, in_local, out_local):
self._check_output('tojson @in:standard --select favorite_color=blue --limit 1', """\
{"position": 3, "name": "Ben2", "favorite_number": 8, "favorite_color": "blue", "secret": "MDk4NzY1NDMyMQ=="}
""", in_local, out_local)
class SchemaProjectionTestsCase(CommandLineTestCaseBase):
def test_index(self):
self._iterate(self.subtest_index)
def subtest_index(self, in_local, out_local):
self._check_output('tojson @in:standard --index -4 --schema @in:user_projection.avsc', """\
{"position": 0, "name": "Alyssa"}
{"position": 1, "name": "Ben"}
{"position": 2, "name": "Alyssa2"}
{"position": 3, "name": "Ben2"}
{"position": 4, "name": "Ben3"}
""", in_local, out_local)
def test_copy(self):
self._iterate(self.subtest_copy)
def subtest_copy(self, in_local, out_local):
self._check_output_avro_file('copy @in:standard --schema @in:user_projection.avsc --output @out:projected',
"""\
{"position": 0, "name": "Alyssa"}
{"position": 1, "name": "Ben"}
{"position": 2, "name": "Alyssa2"}
{"position": 3, "name": "Ben2"}
{"position": 4, "name": "Ben3"}
{"position": 5, "name": "Alyssa3"}
{"position": 6, "name": "Mallet"}
{"position": 7, "name": "Mikel"}
""", 'projected', in_local, out_local)
class CountTestsCase(CommandLineTestCaseBase):
def test_basic(self):
self._iterate(self.subtest_basic)
def subtest_basic(self, in_local, out_local):
self._check_output('count @in:standard', '8\n', in_local, out_local)
def test_file_output(self):
self._iterate(self.subtest_file_output)
def subtest_file_output(self, in_local, out_local):
self._check_output_file_raw_content('count @in:standard --output @out:actual.txt' ,
'8\n', 'actual.txt', in_local, out_local)
def test_select(self):
self._iterate(self.subtest_select)
def subtest_select(self, in_local, out_local):
self._check_output('count @in:standard --select name=Ben', '1\n', in_local, out_local)
|
|
# Copyright (c) 2017, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from netCDF4 import Dataset
class YamboStaticScreeningDB():
"""
Class to handle static screening databases from Yambo
    This reads the ``ndb.em1s*`` databases,
    in which :math:`v\chi(\omega=0)` is stored.
To calculate epsilon (static dielectric function) we do:
.. math::
\epsilon^{-1} = 1-v\chi
"""
def __init__(self,save='.',filename='ndb.em1s',db1='ns.db1'):
self.save = save
self.filename = filename
        #read the lattice parameters
        try:
            #possibilities where to find db1
for filename in ['%s/%s'%(save,db1),'%s/../SAVE/%s'%(save,db1)]:
if os.path.isfile(filename):
break
database = Dataset(filename, 'r')
self.alat = database['LATTICE_PARAMETER'][:]
self.lat = database['LATTICE_VECTORS'][:].T
self.volume = np.linalg.det(self.lat)
except:
raise IOError("Error opening %s in YamboStaticScreeningDB"%filename)
#read em1s database
try:
database = Dataset("%s/%s"%(self.save,self.filename), 'r')
except:
raise IOError("Error opening %s/%s in YamboStaticScreeningDB"%(self.save,self.filename))
#read some parameters
size,nbands,eh = database['X_PARS_1'][:3]
self.size = int(size)
self.nbands = int(nbands)
self.eh = eh
#read gvectors
gvectors = np.rint(database['X_RL_vecs'][:].T)
self.gvectors = np.array([g/self.alat for g in gvectors])
self.ngvectors = len(self.gvectors)
#read q-points
qpoints = database['HEAD_QPT'][:].T
self.qpoints = np.array([q/self.alat for q in qpoints])
self.nqpoints = len(self.qpoints)
        #are we using coulomb cutoff?
self.cutoff = "".join(database['CUTOFF'][:][0]).strip()
self.readDBs()
def readDBs(self):
"""
Read the yambo databases
"""
#create database to hold all the X data
self.X = np.zeros([self.nqpoints,self.size,self.size],dtype=np.complex64)
for nq in range(self.nqpoints):
            #open database for each q-point
filename = "%s/%s_fragment_%d"%(self.save,self.filename,nq+1)
try:
db = Dataset(filename)
except:
print "warning: failed to read %s"%filename
#static screening means we have only one frequency
            # this try except is because the way this is stored has changed in yambo
try:
re, im = db['X_Q_%d'%(nq+1)][0,:]
except:
re, im = db['X_Q_%d'%(nq+1)][0,:].T
self.X[nq] = re + 1j*im
#close database
db.close()
def saveDBS(self,path):
"""
Save the database
"""
if os.path.isdir(path): shutil.rmtree(path)
os.mkdir(path)
#copy all the files
oldpath = self.save
filename = self.filename
shutil.copyfile("%s/%s"%(oldpath,filename),"%s/%s"%(path,filename))
for nq in xrange(self.nqpoints):
fname = "%s_fragment_%d"%(filename,nq+1)
shutil.copyfile("%s/%s"%(oldpath,fname),"%s/%s"%(path,fname))
        #overwrite the copied fragments with the current X data
X = self.X
for nq in xrange(self.nqpoints):
fname = "%s_fragment_%d"%(filename,nq+1)
db = Dataset("%s/%s"%(path,fname),'r+')
db['X_Q_%d'%(nq+1)][0,0,:] = X[nq].real
db['X_Q_%d'%(nq+1)][0,1,:] = X[nq].imag
db.close()
def writetxt(self,filename='em1s.dat',ng1=0,ng2=0,volume=False):
"""
        Write \epsilon_{ng1,ng2}(q) as a function of |q| to a text file
volume -> multiply by the volume
"""
x,y = self._geteq(ng1=ng1,ng2=ng2,volume=volume)
np.savetxt(filename,np.array([x,y]).T)
def get_g_index(self,g):
"""
get the index of the gvectors.
If the gvector is not present return None
"""
for ng,gvec in enumerate(self.gvectors):
if np.isclose(g,gvec).all():
return ng
return None
    def _geteq(self,ng1=0,ng2=0,volume=False):
        """
        Get epsilon_{ng1,ng2} = [1/(1+vX)]_{ng1,ng2} as a function of |q|
        vX is a matrix with size equal to the number of local fields components
        In the database we find vX(\omega=0) where:
        v -> coulomb interaction (truncated or not)
        X -> electronic response function
        Arguments:
        ng1, ng2 -> Choose local field components
        volume -> Normalize with the volume of the cell
        """
        x = [np.linalg.norm(q) for q in self.qpoints]
        #invert (1+vX) in the local-field basis and take the requested element
        y = [np.linalg.inv(np.eye(self.ngvectors)+xq)[ng2,ng1] for xq in self.X ]
#order according to the distance
x, y = zip(*sorted(zip(x, y)))
y = np.array(y)
#scale by volume?
if volume: y *= self.volume
return x,y
def _getvxq(self,ng1=0,ng2=0,volume=False):
"""
        Get vX_{ng1,ng2} as a function of |q|
vX is a matrix with size equal to the number of local fields components
In the database we find vX(\omega=0) where:
v -> coulomb interaction (truncated or not)
X -> electronic response function
Arguments:
ng1, ng2 -> Choose local field components
volume -> Normalize with the volume of the cell
"""
x = [np.linalg.norm(q) for q in self.qpoints]
y = [xq[ng2,ng1] for xq in self.X ]
#order according to the distance
x, y = zip(*sorted(zip(x, y)))
y = np.array(y)
#scale by volume?
if volume: y *= self.volume
return x,y
def plot(self,ax,volume=False,**kwargs):
"""
Plot the static screening as a function of |q|
Arguments
ax -> Instance of the matplotlib axes or some other object with the plot method
func -> Function to apply to the dielectric function
"""
#get vX_{00}
x,vX = self._getvxq(volume=volume)
        #when plotting we apply a function to epsilon to represent it (here the real part)
ax.plot(x,(1+vX).real,**kwargs)
ax.set_xlabel('$|q|$')
ax.set_ylabel('$\epsilon^{-1}_{00}(\omega=0)$')
def __str__(self):
s = ""
s += "nqpoints: %d\n"%self.nqpoints
s += "X size: %d\n"%self.size
s += "cutoff: %s\n"%self.cutoff
return s
if __name__ == "__main__":
ys = YamboStaticScreeningDB()
print ys
#plot static screening
ax = plt.gca()
ys.plot(ax)
plt.show()
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.controller.worker.v1.tasks import cert_task
from octavia.controller.worker.v1.tasks import compute_tasks
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import network_tasks
from octavia.controller.worker.v1.tasks import retry_tasks
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class AmphoraFlows(object):
def get_create_amphora_flow(self):
"""Creates a flow to create an amphora.
:returns: The flow for creating the amphora
"""
create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW)
create_amphora_flow.add(database_tasks.CreateAmphoraInDB(
provides=constants.AMPHORA_ID))
create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask(
requires=constants.AMPHORA_ID))
create_amphora_flow.add(cert_task.GenerateServerPEMTask(
provides=constants.SERVER_PEM))
create_amphora_flow.add(
database_tasks.UpdateAmphoraDBCertExpiration(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
create_amphora_flow.add(compute_tasks.CertComputeCreate(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR, constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amphora_flow.add(compute_tasks.ComputeActiveWait(
requires=(constants.COMPUTE_ID, constants.AMPHORA_ID),
provides=constants.COMPUTE_OBJ))
create_amphora_flow.add(database_tasks.UpdateAmphoraInfo(
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
provides=constants.AMPHORA))
create_amphora_flow.add(
amphora_driver_tasks.AmphoraComputeConnectivityWait(
requires=constants.AMPHORA))
create_amphora_flow.add(database_tasks.ReloadAmphora(
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize(
requires=constants.AMPHORA))
create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB(
requires=constants.AMPHORA))
return create_amphora_flow
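    # A minimal usage sketch (an assumption; the real invocation lives in the
    # controller worker, not in this module): flows built here are handed to a
    # TaskFlow engine, e.g.
    #
    #   from taskflow import engines
    #   engines.load(AmphoraFlows().get_create_amphora_flow(),
    #                store={...required inputs...}).run()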
def _get_post_map_lb_subflow(self, prefix, role):
"""Set amphora type after mapped to lb."""
sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW
post_map_amp_to_lb = linear_flow.Flow(
sf_name)
post_map_amp_to_lb.add(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate(
name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK,
requires=(constants.AMPHORA, constants.FLAVOR)))
if role == constants.ROLE_MASTER:
post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB(
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_BACKUP:
post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB(
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_STANDALONE:
post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB(
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
requires=constants.AMPHORA))
return post_map_amp_to_lb
def _get_create_amp_for_lb_subflow(self, prefix, role):
"""Create a new amphora for lb."""
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW
create_amp_for_lb_subflow = linear_flow.Flow(sf_name)
create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB(
name=sf_name + '-' + constants.CREATE_AMPHORA_INDB,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORA_ID))
create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
provides=constants.SERVER_PEM))
create_amp_for_lb_subflow.add(
database_tasks.UpdateAmphoraDBCertExpiration(
name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR, constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId(
name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID,
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB(
name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB,
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait(
name=sf_name + '-' + constants.COMPUTE_WAIT,
requires=(constants.COMPUTE_ID, constants.AMPHORA_ID,
constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_OBJ))
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo(
name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO,
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
provides=constants.AMPHORA))
create_amp_for_lb_subflow.add(
amphora_driver_tasks.AmphoraComputeConnectivityWait(
name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT,
requires=constants.AMPHORA))
create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize(
name=sf_name + '-' + constants.AMPHORA_FINALIZE,
requires=constants.AMPHORA))
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraAllocatedInDB(
name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA_ID,
provides=constants.AMPHORA))
if role == constants.ROLE_MASTER:
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB(
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_BACKUP:
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB(
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_STANDALONE:
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraStandAloneInDB(
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
requires=constants.AMPHORA))
return create_amp_for_lb_subflow
def get_amphora_for_lb_subflow(
self, prefix, role=constants.ROLE_STANDALONE):
return self._get_create_amp_for_lb_subflow(prefix, role)
def get_delete_amphora_flow(
self, amphora,
retry_attempts=CONF.controller_worker.amphora_delete_retries,
retry_interval=(
CONF.controller_worker.amphora_delete_retry_interval)):
"""Creates a subflow to delete an amphora and it's port.
This flow is idempotent and safe to retry.
:param amphora: An amphora object.
:param retry_attempts: The number of times the flow is retried.
:param retry_interval: The time to wait, in seconds, between retries.
:returns: The subflow for deleting the amphora.
:raises AmphoraNotFound: The referenced Amphora was not found.
"""
delete_amphora_flow = linear_flow.Flow(
name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id,
retry=retry_tasks.SleepingRetryTimesController(
name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' +
amphora.id,
attempts=retry_attempts, interval=retry_interval))
delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(
database_tasks.MarkAmphoraPendingDeleteInDB(
name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy(
name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(compute_tasks.ComputeDelete(
name=constants.DELETE_AMPHORA + '-' + amphora.id,
inject={constants.AMPHORA: amphora,
constants.PASSIVE_FAILURE: True}))
delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id,
inject={constants.AMPHORA: amphora}))
if amphora.vrrp_port_id:
delete_amphora_flow.add(network_tasks.DeletePort(
name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' +
str(amphora.vrrp_port_id)),
inject={constants.PORT_ID: amphora.vrrp_port_id,
constants.PASSIVE_FAILURE: True}))
# TODO(johnsom) What about cleaning up any member ports?
# maybe we should get the list of attached ports prior to delete
# and call delete on them here. Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
return delete_amphora_flow
def get_vrrp_subflow(self, prefix, timeout_dict=None,
create_vrrp_group=True):
sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
vrrp_subflow = linear_flow.Flow(sf_name)
# Optimization for failover flow. No reason to call this
# when configuring the secondary amphora.
if create_vrrp_group:
vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
requires=constants.LOADBALANCER_ID))
vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG))
# VRRP update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow')
# We have three tasks to run in order, per amphora
amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow')
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict},
provides=constants.AMP_VRRP_INT))
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID,
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
constants.AMP_VRRP_INT),
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
name=sf_name + '-0-' + constants.AMP_VRRP_START,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow')
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict},
provides=constants.AMP_VRRP_INT))
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID,
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
constants.AMP_VRRP_INT),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
name=sf_name + '-1-' + constants.AMP_VRRP_START,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
update_amps_subflow.add(amp_0_subflow)
update_amps_subflow.add(amp_1_subflow)
vrrp_subflow.add(update_amps_subflow)
return vrrp_subflow
def cert_rotate_amphora_flow(self):
"""Implement rotation for amphora's cert.
        1. Create a new certificate
        2. Upload the cert to the amphora
        3. Update the newly created certificate info in the amphora database
        4. Update the cert_busy flag to be false after rotation
:returns: The flow for updating an amphora
"""
rotated_amphora_flow = linear_flow.Flow(
constants.CERT_ROTATE_AMPHORA_FLOW)
rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA))
# create a new certificate, the returned value is the newly created
# certificate
rotated_amphora_flow.add(cert_task.GenerateServerPEMTask(
provides=constants.SERVER_PEM))
# update it in amphora task
rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload(
requires=(constants.AMPHORA, constants.SERVER_PEM)))
# update the newly created certificate info to amphora
rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
# update the cert_busy flag to be false after rotation
rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse(
requires=constants.AMPHORA))
return rotated_amphora_flow
def update_amphora_config_flow(self):
"""Creates a flow to update the amphora agent configuration.
:returns: The flow for updating an amphora
"""
update_amphora_flow = linear_flow.Flow(
constants.UPDATE_AMPHORA_CONFIG_FLOW)
update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA))
update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate(
requires=(constants.AMPHORA, constants.FLAVOR)))
return update_amphora_flow
def get_amphora_for_lb_failover_subflow(
self, prefix, role=constants.ROLE_STANDALONE,
failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False):
"""Creates a new amphora that will be used in a failover flow.
:requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
:provides: amphora_id, amphora
:param prefix: The flow name prefix to use on the flow and tasks.
:param role: The role this amphora will have in the topology.
:param failed_amp_vrrp_port_id: The base port ID of the failed amp.
:param is_vrrp_ipv6: True if the base port IP is IPv6.
:return: A Taskflow sub-flow that will create the amphora.
"""
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW
amp_for_failover_flow = linear_flow.Flow(sf_name)
# Try to allocate or boot an amphora instance (unconfigured)
amp_for_failover_flow.add(self.get_amphora_for_lb_subflow(
prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
role=role))
# Create the VIP base (aka VRRP) port for the amphora.
amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
requires=(constants.VIP, constants.VIP_SG_ID,
constants.AMPHORA_ID),
provides=constants.BASE_PORT))
# Attach the VIP base (aka VRRP) port to the amphora.
amp_for_failover_flow.add(compute_tasks.AttachPort(
name=prefix + '-' + constants.ATTACH_PORT,
requires=(constants.AMPHORA, constants.PORT),
rebind={constants.PORT: constants.BASE_PORT}))
# Update the amphora database record with the VIP base port info.
amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails(
name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS,
requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT)))
# Make sure the amphora in the flow storage is up to date
# or the vrrp_ip will be empty
amp_for_failover_flow.add(database_tasks.ReloadAmphora(
name=prefix + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA_ID, provides=constants.AMPHORA))
# Update the amphora networking for the plugged VIP port
amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID,
requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
provides=constants.AMPHORAE_NETWORK_CONFIG))
# Disable the base (vrrp) port on the failed amphora
# This prevents a DAD failure when bringing up the new amphora.
# Keepalived will handle this for act/stdby.
if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and
is_vrrp_ipv6):
amp_for_failover_flow.add(network_tasks.AdminDownPort(
name=prefix + '-' + constants.ADMIN_DOWN_PORT,
inject={constants.PORT_ID: failed_amp_vrrp_port_id}))
amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG,
requires=(constants.AMPHORA, constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
# Plug member ports
amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta(
name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA,
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.AVAILABILITY_ZONE, constants.VRRP_PORT),
rebind={constants.VRRP_PORT: constants.BASE_PORT},
provides=constants.DELTA))
amp_for_failover_flow.add(network_tasks.HandleNetworkDelta(
name=prefix + '-' + constants.HANDLE_NETWORK_DELTA,
requires=(constants.AMPHORA, constants.DELTA),
provides=constants.ADDED_PORTS))
amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG,
requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))
return amp_for_failover_flow
def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
"""Get a Taskflow flow to failover an amphora.
1. Build a replacement amphora.
2. Delete the old amphora.
3. Update the amphorae listener configurations.
4. Update the VRRP configurations if needed.
:param failed_amphora: The amphora object to failover.
:param lb_amp_count: The number of amphora on this load balancer.
:returns: The flow that will provide the failover.
"""
failover_amp_flow = linear_flow.Flow(
constants.FAILOVER_AMPHORA_FLOW)
# Revert amphora to status ERROR if this flow goes wrong
failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
if failed_amphora.role in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amphora.role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amphora.id,
"load_balancer_id": failed_amphora.load_balancer_id,
"lb_network_ip": failed_amphora.lb_network_ip,
"compute_id": failed_amphora.compute_id,
"role": amp_role})
failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID(
requires=constants.LOADBALANCER_ID,
provides=constants.VIP_SG_ID))
is_vrrp_ipv6 = False
if failed_amphora.load_balancer_id:
if failed_amphora.vrrp_ip:
is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip)
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the
# compute service failing to boot an instance for other reasons.
# TODO(johnsom) Move this back out to run for spares after
# delete amphora API is available.
failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=failed_amphora.role,
failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id,
is_vrrp_ipv6=is_vrrp_ipv6))
failover_amp_flow.add(
self.get_delete_amphora_flow(
failed_amphora,
retry_attempts=CONF.controller_worker.amphora_delete_retries,
retry_interval=(
CONF.controller_worker.amphora_delete_retry_interval)))
failover_amp_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
if not failed_amphora.load_balancer_id:
# This is an unallocated amphora (bogus), we are done.
return failover_amp_flow
failover_amp_flow.add(database_tasks.GetLoadBalancer(
requires=constants.LOADBALANCER_ID,
inject={constants.LOADBALANCER_ID:
failed_amphora.load_balancer_id},
provides=constants.LOADBALANCER))
failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
name=constants.GET_AMPHORAE_FROM_LB,
requires=constants.LOADBALANCER_ID,
inject={constants.LOADBALANCER_ID:
failed_amphora.load_balancer_id},
provides=constants.AMPHORAE))
# Setup timeouts for our requests to the amphorae
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_retry_interval}
# Listeners update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow(
constants.UPDATE_AMPS_SUBFLOW)
for amp_index in range(0, lb_amp_count):
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
failover_amp_flow.add(update_amps_subflow)
# Configure and enable keepalived in the amphora
if lb_amp_count == 2:
failover_amp_flow.add(
self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
timeout_dict, create_vrrp_group=False))
# Reload the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "U" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "W" cycle
reload_listener_subflow = unordered_flow.Flow(
constants.AMPHORA_LISTENER_RELOAD_SUBFLOW)
for amp_index in range(0, lb_amp_count):
reload_listener_subflow.add(
amphora_driver_tasks.AmphoraIndexListenersReload(
name=(str(amp_index) + '-' +
constants.AMPHORA_RELOAD_LISTENER),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
failover_amp_flow.add(reload_listener_subflow)
# Remove any extraneous ports
# Note: Nova sometimes fails to delete ports attached to an instance.
# For example, if you create an LB with a listener, then
# 'openstack server delete' the amphora, you will see the vrrp
# port attached to that instance will remain after the instance
# is deleted.
# TODO(johnsom) Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
# Mark LB ACTIVE
failover_amp_flow.add(
database_tasks.MarkLBActiveInDB(mark_subobjects=True,
requires=constants.LOADBALANCER))
return failover_amp_flow
|
|
from stomp import exception as exc
from kombu_stomp import transport
from kombu_stomp.utils import mock
from kombu_stomp.utils import unittest
class MessageTests(unittest.TestCase):
def setUp(self):
self.raw_message = {
'content-encoding': 'utf-8',
'content-type': 'application/json',
'properties': {
'body_encoding': u'base64',
u'delivery_info': {
u'priority': 0,
'routing_key': u'simple_queue',
'exchange': u'simple_queue'
},
'delivery_mode': 2,
'delivery_tag': '423e3830-e67a-458d-9aa0-f58df4d01639'
},
            'body': 'eyJoZWxsbyI6ICJ3b3JsZCJ9'  # base64-encoded '{"hello": "world"}'
}
self.channel = mock.Mock(**{
'decode_body.return_value': self.raw_message['body'],
})
self.msg_id = 'msg-id'
def test_init__raw_message_only(self):
message = transport.Message(self.channel, self.raw_message)
# The encode is required in Python 3, since kombu is doing it
self.assertEqual(self.raw_message['body'].encode(), message.body)
self.assertIsNone(message.msg_id)
def test_init__raw_message_and_id(self):
message = transport.Message(
self.channel,
(self.raw_message, self.msg_id),
)
# The encode is required in Python 3, since kombu is doing it
self.assertEqual(self.raw_message['body'].encode(), message.body)
self.assertEqual(message.msg_id, self.msg_id)
class QoSTests(unittest.TestCase):
def setUp(self):
self.channel = mock.MagicMock()
self.qos = transport.QoS(self.channel)
self.msg_id = 'msg-id'
self.msg = mock.Mock(msg_id=self.msg_id)
self.delivery_tag = '423e3830-e67a-458d-9aa0-f58df4d01639'
@mock.patch('kombu.transport.virtual.QoS.append')
def test_append__calls_super(self, append):
self.qos.append(self.msg, self.delivery_tag)
append.assert_called_once_with(self.msg, self.delivery_tag)
def test_append__saves_message_id_reference(self):
self.qos.append(self.msg, self.delivery_tag)
self.assertDictEqual(self.qos.ids, {self.delivery_tag: self.msg_id})
@mock.patch('kombu.transport.virtual.QoS.ack')
def test_ack__calls_super(self, ack):
self.qos.ack(self.delivery_tag)
ack.assert_called_once_with(self.delivery_tag)
@mock.patch('kombu_stomp.transport.QoS._stomp_ack')
def test_ack__delegates_to_stomp_ack(self, stomp_ack):
self.qos.ack(self.delivery_tag)
stomp_ack.assert_called_once_with(self.delivery_tag)
def test_stomp_ack(self):
        # next line is required because we are not calling append first
self.qos.ids[self.delivery_tag] = self.msg_id
self.qos._stomp_ack(self.delivery_tag)
conn = self.channel.conn_or_acquire.return_value.__enter__.return_value
conn.ack.assert_called_once_with(self.msg_id)
    def test_stomp_ack__no_msg_id(self):
self.qos._stomp_ack(self.delivery_tag)
self.assertFalse(self.channel.conn_or_acquire.called)
class ChannelConnectionTests(unittest.TestCase):
def setUp(self):
self.userid = 'user'
self.passcode = 'pass'
self.connection = mock.Mock(**{
'client.transport_options': {},
'client.userid': self.userid,
'client.password': self.passcode,
})
self.channel = transport.Channel(connection=self.connection)
self.queue = 'queue'
@mock.patch('kombu_stomp.stomp.Connection')
def test_conn_or_acquire__return_context_manager(self, Connection):
with self.channel.conn_or_acquire() as conn:
self.assertEqual(conn, Connection.return_value)
self.assertEqual(conn, Connection.return_value)
@mock.patch('kombu_stomp.stomp.Connection')
def test_conn_or_acquire__start_conn_if_not_connected(self, Connection):
Connection.return_value.is_connected.return_value = False
with self.channel.conn_or_acquire() as conn:
pass
conn.start.assert_called_once_with()
#conn.disconnect.assert_called_once_with()
@mock.patch('kombu_stomp.stomp.Connection')
def test_conn_or_acquire__connect_if_not_connected(self, Connection):
Connection.return_value.is_connected.return_value = False
with self.channel.conn_or_acquire() as conn:
pass
conn.connect.assert_called_once_with(
username=self.userid,
passcode=self.passcode,
wait=True,
)
@mock.patch('kombu_stomp.stomp.Connection')
def test_conn_or_acquire__do_not_disconnect(self, Connection):
Connection.return_value.is_connected.return_value = False
with self.channel.conn_or_acquire() as conn:
pass
self.assertFalse(conn.disconnect.called)
@mock.patch('kombu_stomp.stomp.Connection')
    def test_conn_or_acquire__do_disconnect_on_demand(self, Connection):
Connection.return_value.is_connected.return_value = False
with self.channel.conn_or_acquire(True) as conn:
pass
conn.disconnect.assert_called_once_with()
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_get_many(self, conn_or_acquire):
stomp_conn = conn_or_acquire.return_value.__enter__.return_value
iterator = stomp_conn.message_listener.iterator
iterator.return_value = iter([1])
self.channel._get_many([self.queue])
self.assertSetEqual(self.channel._subscriptions, set([self.queue]))
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_get_many__return(self, conn_or_acquire):
stomp_conn = conn_or_acquire.return_value.__enter__.return_value
iterator = stomp_conn.message_listener.iterator
iterator.return_value = iter([1])
self.assertEqual(self.channel._get_many([self.queue]), 1)
iterator.assert_called_once_with()
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_put(self, conn_or_acquire):
message = {'body': 'body'}
stomp_conn = conn_or_acquire.return_value.__enter__.return_value
self.channel._put(self.queue, message)
stomp_conn.send.assert_called_once_with(
'/queue/{0}'.format(self.queue),
'body'
)
@mock.patch('kombu.transport.virtual.Channel.basic_consume')
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_basic_consume__subscribe(self, conn_or_acquire, basic_consume):
stomp_conn = conn_or_acquire.return_value.__enter__.return_value
self.channel.basic_consume(self.queue)
stomp_conn.subscribe.assert_called_once_with(
'/queue/{0}'.format(self.queue),
ack='client-individual',
)
@mock.patch('kombu.transport.virtual.Channel.basic_consume')
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_basic_consume__super(self, conn_or_acquire, basic_consume):
self.channel.basic_consume(self.queue)
basic_consume.assert_called_once_with(self.queue)
def test_subscribe__already_subscribed(self):
self.channel.subscribe(self.connection, self.queue)
self.assertIsNone(self.channel.subscribe(self.connection, self.queue))
self.assertEqual(self.connection.subscribe.call_count, 1)
def test_subscribe__super(self):
self.channel.subscribe(self.connection, self.queue)
self.connection.subscribe.assert_called_once_with(
'/queue/{0}'.format(self.queue),
ack='client-individual',
)
@mock.patch('kombu.transport.virtual.Channel.queue_unbind')
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_queue_unbind__calls_super(self, conn_or_acquire, queue_unbind):
self.channel.queue_unbind(self.queue)
queue_unbind.assert_called_once_with(self.queue, None, '', None)
@mock.patch('kombu.transport.virtual.Channel.queue_unbind')
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_queue_unbind__unsubscribe(self, conn_or_acquire, queue_unbind):
stomp_conn = conn_or_acquire.return_value.__enter__.return_value
self.channel.queue_unbind(self.queue)
stomp_conn.unsubscribe.assert_called_once_with(
'/queue/{0}'.format(self.queue)
)
@mock.patch('kombu.transport.virtual.Channel.queue_unbind')
@mock.patch('kombu_stomp.transport.Channel.conn_or_acquire',
new_callable=mock.MagicMock) # for the context manager
def test_queue_unbind__subscriptions_discard(self,
conn_or_acquire,
queue_unbind):
self.channel.subscribe(mock.MagicMock(), self.queue)
self.assertSetEqual(self.channel._subscriptions, set([self.queue]))
self.channel.queue_unbind(self.queue)
self.assertSetEqual(self.channel._subscriptions, set())
@mock.patch('kombu.transport.virtual.Channel.close')
@mock.patch('kombu_stomp.stomp.Connection')
def test_close__call_super(self, Connection, close):
self.channel.close()
close.assert_called_once_with()
@mock.patch('kombu.transport.virtual.Channel.close')
@mock.patch('kombu_stomp.stomp.Connection')
def test_close__disconnect(self, Connection, close):
self.channel.close()
Connection.return_value.disconnect.assert_called_once_with()
@mock.patch('kombu.transport.virtual.Channel.close')
@mock.patch('kombu_stomp.stomp.Connection')
def test_close__close_closed_connection(self, Connection, close):
        # simulate an already-disconnected STOMP connection
        Connection.return_value.disconnect.side_effect = exc.NotConnectedException
self.channel.close() # just check this doesn't trigger exceptions
def test_queue_destination__prefix(self):
self.connection.client.transport_options = {
'queue_name_prefix': 'prefix.',
}
self.assertEqual(
self.channel.queue_destination(self.queue),
'/queue/prefix.queue',
)
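# Illustrative sketch (not part of the test suite): how the queue_name_prefix
# transport option exercised above would typically be supplied from application
# code. The broker URL, credentials, and 'stomp://' scheme below are hypothetical
# and assume the kombu-stomp transport has been registered with kombu.
#
#     from kombu import Connection
#
#     with Connection(
#         'stomp://guest:guest@localhost:61613',
#         transport_options={'queue_name_prefix': 'prefix.'},
#     ) as conn:
#         # a queue named "queue" is then addressed as "/queue/prefix.queue"
#         conn.SimpleQueue('queue').put({'body': 'body'})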
|
|
from .common import random_str, check_subject_in_rb
from rancher import ApiError
from .conftest import wait_until, wait_for, DEFAULT_TIMEOUT
import pytest
import time
import kubernetes
rb_resource = 'rolebinding'
def test_create_cluster_template_with_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
    create_cluster_template_revision(admin_mc.client, templateId)
    create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
template_reloaded = client.by_id_cluster_template(cluster_template.id)
assert template_reloaded.links.revisions is not None
def test_create_template_revision_k8s_translation(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"kubernetesVersion": "1.15"
}
}
with pytest.raises(ApiError) as e:
client.create_cluster_template_revision(clusterConfig=cconfig,
clusterTemplateId=tId,
enabled="true")
assert e.value.error.status == 422
    # a template question for the k8s version is required when using a
    # generic version like "1.15.x"
cconfig = {
"rancherKubernetesEngineConfig": {
"kubernetesVersion": "1.15.x"
}
}
questions = [{
"variable": "dockerRootDir",
"required": "false",
"type": "string",
"default": "/var/lib/docker"
}]
with pytest.raises(ApiError) as e:
client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
assert e.value.error.status == 422
def test_default_pod_sec(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
enabled="true")
time.sleep(2)
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.defaultPodSecurityPolicyTemplateId == "restricted"
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_check_default_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
first_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
wait_for_default_revision(client, templateId, first_revision.id)
# delete the cluster template revision, it should error out
with pytest.raises(ApiError) as e:
client.delete(first_revision)
assert e.value.error.status == 403
def test_create_cluster_with_template(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
template_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
answers = {
"values": {
"dockerRootDir": "/var/lib/docker123",
"rancherKubernetesEngineConfig.ignoreDockerVersion":
"false"
}
}
revId = template_revision.id
client = admin_mc.client
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=revId,
description="template from cluster",
answers=answers)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.questions is not None
k8s_version = cluster.rancherKubernetesEngineConfig.kubernetesVersion
assert k8s_version != "v1.15.x"
# edit cluster should not fail
client.update(cluster, name=random_str(), clusterTemplateRevisionId=revId)
# edit cluster to remove template must fail
with pytest.raises(ApiError) as e:
client.update(cluster, name=random_str(), clusterTemplateId=None,
clusterTemplateRevisionId=None)
assert e.value.error.status == 422
# delete the cluster template, it should error out
with pytest.raises(ApiError) as e:
client.delete(cluster_template)
assert e.value.error.status == 422
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_create_cluster_validations(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
template_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
revId = template_revision.id
client = admin_mc.client
rConfig = getRKEConfig()
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=revId,
description="template from cluster",
rancherKubernetesEngineConfig=rConfig)
except ApiError as e:
assert e.error.status == 500
@pytest.mark.nonparallel
def test_create_cluster_template_with_members(admin_mc, remove_resource,
user_factory):
client = admin_mc.client
user_member = user_factory()
remove_resource(user_member)
user_not_member = user_factory()
remove_resource(user_not_member)
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
time.sleep(30)
# check who has access to the cluster template
# admin and user_member should be able to list it
id = cluster_template.id
ct = client.by_id_cluster_template(id)
assert ct is not None
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_member.user.id, rb_name),
timeout=60,
fail_handler=lambda: "failed to check rolebinding")
um_client = user_member.client
ct = um_client.by_id_cluster_template(id)
assert ct is not None
# user not added as member shouldn't be able to access
unm_client = user_not_member.client
try:
unm_client.by_id_cluster_template(id)
except ApiError as e:
assert e.error.status == 403
# add * as member to share with all
new_members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}, {"groupPrincipalId": "*",
"accessType": "read-only"}]
client.update(ct, members=new_members)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
'system:authenticated', rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
time.sleep(30)
ct = user_not_member.client.by_id_cluster_template(id)
assert ct is not None
def test_creation_standard_user(admin_mc, remove_resource, user_factory):
user_member = user_factory()
remove_resource(user_member)
um_client = user_member.client
with pytest.raises(ApiError) as e:
um_client.create_cluster_template(name="user template",
description="user template")
assert e.value.error.status == 403
@pytest.mark.nonparallel
def test_check_enforcement(admin_mc, remove_resource,
list_remove_resource, user_factory):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
# turn on the enforcement
client.update_by_id_setting(id='cluster-template-enforcement',
value="true")
    # a global admin can create an RKE cluster without a template
cluster = client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
remove_list.insert(0, cluster)
    # a user cannot create an RKE cluster without a template
user = user_factory()
remove_resource(user)
crtb_owner = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userId=user.user.id)
remove_resource(crtb_owner)
wait_until(rtb_cb(client, crtb_owner))
user_client = user.client
with pytest.raises(ApiError) as e:
user_client.create_cluster(name=random_str(),
rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
assert e.value.error.status == 422
    # a user can create a non-RKE cluster without a template
cluster2 = user_client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd"})
remove_list.insert(0, cluster2)
    # a user can create an RKE cluster with a public template
template_reloaded = client.by_id_cluster_template(templateId)
new_members = [{"groupPrincipalId": "*", "accessType": "read-only"}]
client.update(template_reloaded, members=new_members)
cluster3 = wait_for_cluster_create(user_client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster3)
client.update_by_id_setting(id='cluster-template-enforcement',
value="false")
def test_revision_creation_permission(admin_mc, remove_resource,
user_factory):
user_readonly = user_factory()
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_readonly.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
templateId = cluster_template.id
# user with accessType=owner should be able to create revision
# since a standard user can add revisions to template shared
# with owner access
create_cluster_template_revision(user_owner.client, templateId)
# user with read-only accessType should get Forbidden error
with pytest.raises(ApiError) as e:
create_cluster_template_revision(user_readonly.client, templateId)
assert e.value.error.status == 403
def test_updated_members_revision_access(admin_mc, remove_resource,
user_factory):
# create cluster template without members and a revision
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
# update template to add a user as member
user_member = user_factory()
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}]
admin_mc.client.update(cluster_template, members=members)
# this member should get access to existing revision "rev"
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = rev.id.split(":")
name = split[1]
rb_name = name + "-ctr-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_member.user.id, rb_name),
timeout=120,
fail_handler=fail_handler(rb_resource))
revision = user_member.client.by_id_cluster_template_revision(rev.id)
assert revision is not None
# remove this user from cluster_template members list
admin_mc.client.update(cluster_template, members=[])
# now this user should not be able to see that revision
try:
user_member.client.by_id_cluster_template_revision(rev.id)
except ApiError as e:
assert e.error.status == 403
def test_permissions_removed_on_downgrading_access(admin_mc, remove_resource,
user_factory):
user_owner = user_factory()
remove_resource(user_owner)
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
# create cluster template with one member having "member" accessType
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
# user with accessType=owner should be able to update template
# so adding new member by the user_member should be allowed
new_member = user_factory()
remove_resource(new_member)
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"},
{"userPrincipalId": "local://" + new_member.user.id,
"accessType": "read-only"}]
user_owner.client.update(cluster_template, members=members)
# now change user_owner's accessType to read-only
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + new_member.user.id,
"accessType": "read-only"}]
admin_mc.client.update(cluster_template, members=members)
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
# user_owner should not be allowed to update cluster template now
# test updating members field by removing new_member
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "read-only"}]
try:
user_owner.client.update(cluster_template, members=members)
except ApiError as e:
assert e.error.status == 403
def test_required_template_question(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"variable":
"rancherKubernetesEngineConfig.ignoreDockerVersion",
"required": "false",
"type": "boolean",
"default": "true"
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template with no answer should fail
answers = {
"values": {
"rancherKubernetesEngineConfig.ignoreDockerVersion":
"false"
}
}
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster",
answers=answers)
except ApiError as e:
assert e.error.status == 422
def test_secret_template_answers(admin_mc, remove_resource,
list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
azureClientId = "rancherKubernetesEngineConfig.cloudProvider.\
azureCloudProvider.aadClientId"
azureClientSecret = "rancherKubernetesEngineConfig.cloudProvider.\
azureCloudProvider.aadClientSecret"
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"variable": azureClientId,
"required": "true",
"type": "string",
"default": "abcdClientId"
},
{
"variable": azureClientSecret,
"required": "true",
"type": "string",
"default": ""
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template
answers = {
"values": {
"dockerRootDir": "/var/lib/docker123",
azureClientId: "abcdClientId",
azureClientSecret: "abcdClientSecret"
}
}
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster",
answers=answers)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.answers.values[azureClientId] is not None
assert azureClientSecret not in cluster.answers.values
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_member_accesstype_check(admin_mc, user_factory, remove_resource):
client = admin_mc.client
user_readonly = user_factory()
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "member"}]
# creation with a member with accessType "member" shouldn't be allowed
try:
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
except ApiError as e:
assert e.error.status == 422
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
updated_members = \
[{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "member"}]
    # updating a cluster template to add a user with access type "member"
# shouldn't be allowed
try:
client.update(cluster_template, members=updated_members)
except ApiError as e:
assert e.error.status == 422
def test_create_cluster_with_invalid_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
    # template revision with a question whose default has an invalid format
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"default": "map[enabled:true type:localClusterAuthEndpoint]",
"required": "false",
"type": "string",
"variable": "localClusterAuthEndpoint"
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
except ApiError as e:
assert e.error.status == 422
def test_disable_template_revision(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
rev = \
create_cluster_template_revision(admin_mc.client, tId)
# creating a cluster with this template
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# disable the revision
client.action(obj=rev, action_name="disable")
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id)
except ApiError as e:
assert e.error.status == 500
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_template_delete_by_members(admin_mc, remove_resource,
list_remove_resource, user_factory):
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
templateId = cluster_template.id
rev = create_cluster_template_revision(user_owner.client, templateId)
cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# user with accessType=owner should not be able to delete this
# template since a cluster exists
wait_for_clusterTemplate_update_failure(admin_mc.client, rev)
with pytest.raises(ApiError) as e:
user_owner.client.delete(cluster_template)
assert e.value.error.status == 422
admin_mc.client.delete(cluster)
wait_for_cluster_to_be_deleted(admin_mc.client, cluster.id)
def test_template_access(admin_mc, remove_resource, user_factory):
user = user_factory()
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
rev = create_cluster_template_revision(admin_mc.client, templateId)
wait_for_clusterTemplate_list_failure(user.client, rev)
with pytest.raises(ApiError) as e:
user.client.create_cluster(name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
assert e.value.error.status == 404
def test_save_as_template_action(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = create_cluster_template_revision(admin_mc.client, templateId)
cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
try:
        admin_mc.client.action(obj=cluster, action_name="saveAsTemplate")
except AttributeError as e:
assert e is not None
def test_cluster_desc_update(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
client = admin_mc.client
cname = random_str()
cluster = wait_for_cluster_create(admin_mc.client, name=cname,
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.description == 'template from cluster'
# edit cluster description
updatedC = client.update(cluster, name=cname,
clusterTemplateRevisionId=rev.id,
description="updated desc")
assert updatedC.description == 'updated desc'
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_update_cluster_monitoring(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"enableClusterMonitoring": "true",
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev1 = client.create_cluster_template_revision(clusterConfig=cconfig,
name="v1",
clusterTemplateId=tId,
enabled="true")
cconfig2 = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"enableClusterMonitoring": "false",
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev2 = client.create_cluster_template_revision(clusterConfig=cconfig2,
name="v2",
clusterTemplateId=tId,
enabled="true")
cluster_name = random_str()
cluster = wait_for_cluster_create(client, name=cluster_name,
clusterTemplateRevisionId=rev1.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# update cluster to use rev2 that turns off monitoring
# expect no change to monitoring
client.update(cluster,
name=cluster_name, clusterTemplateRevisionId=rev2.id)
reloaded_cluster = client.by_id_cluster(cluster.id)
assert reloaded_cluster.enableClusterMonitoring is True
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def rtb_cb(client, rtb):
"""Wait for the prtb to have the userId populated"""
def cb():
rt = client.reload(rtb)
return rt.userPrincipalId is not None
return cb
def grb_cb(client, grb):
"""Wait for the grb to have the userId populated"""
def cb():
rt = client.reload(grb)
return rt.userId is not None
return cb
# When calling this function you _must_ remove the cluster_template manually.
# If a cluster is also created, it must be removed after the template.
def create_cluster_template(creator, members, admin_mc):
template_name = random_str()
cluster_template = \
creator.client.create_cluster_template(
name=template_name,
description="demo template",
members=members)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
rb_name = cluster_template.id.split(":")[1] + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
creator.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
return cluster_template
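# For reference, the "members" argument above is a list of dicts shaped like the
# ones used throughout these tests (comment only), e.g.:
#
#     members = [{"userPrincipalId": "local://" + some_user.user.id,
#                 "accessType": "read-only"}]   # or "owner"
#
# where some_user stands for any user fixture created via user_factory().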
def create_cluster_template_revision(client, clusterTemplateId):
rke_config = getRKEConfig()
cluster_config = {
"dockerRootDir": "/var/lib/docker",
"enableClusterAlerting": "false",
"enableClusterMonitoring": "false",
"enableNetworkPolicy": "false",
"type": "clusterSpecBase",
"localClusterAuthEndpoint": {
"enabled": "true",
"type": "localClusterAuthEndpoint"
},
"rancherKubernetesEngineConfig": rke_config
}
questions = [{
"variable": "dockerRootDir",
"required": "false",
"type": "string",
"default": "/var/lib/docker"
},
{
"variable":
"rancherKubernetesEngineConfig.ignoreDockerVersion",
"required": "false",
"type": "boolean",
"default": "true"
},
{
"variable":
"rancherKubernetesEngineConfig.kubernetesVersion",
"required": "false",
"type": "string",
"default": "1.19.x"
}]
revision_name = random_str()
cluster_template_revision = \
client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=clusterTemplateId,
disabled="false",
questions=questions
)
return cluster_template_revision
def getRKEConfig():
rke_config = {
"addonJobTimeout": 30,
"ignoreDockerVersion": "true",
"sshAgentAuth": "false",
"type": "rancherKubernetesEngineConfig",
"kubernetesVersion": "1.15.x",
"authentication": {
"strategy": "x509",
"type": "authnConfig"
},
"network": {
"plugin": "canal",
"type": "networkConfig",
"options": {
"flannel_backend_type": "vxlan"
}
},
"ingress": {
"provider": "nginx",
"type": "ingressConfig"
},
"monitoring": {
"provider": "metrics-server",
"type": "monitoringConfig"
},
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
},
"etcd": {
"creation": "12h",
"extraArgs": {
"heartbeat-interval": 500,
"election-timeout": 5000
},
"retention": "72h",
"snapshot": "false",
"type": "etcdService",
"backupConfig": {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig"
}
}
}
}
return rke_config
def wait_for_cluster_to_be_deleted(client, clusterId, timeout=45):
deleted = False
start = time.time()
interval = 0.5
while not deleted:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clusters")
cluster = client.by_id_cluster(clusterId)
if cluster is None:
deleted = True
time.sleep(interval)
interval *= 2
def wait_for_default_revision(client, templateId, revisionId, timeout=60):
updated = False
interval = 0.5
start = time.time()
while not updated:
if time.time() - start > timeout:
raise Exception('Timeout waiting for clustertemplate to update')
template_reloaded = client.by_id_cluster_template(templateId)
if template_reloaded.defaultRevisionId is not None:
updated = True
time.sleep(interval)
interval *= 2
def fail_handler(resource):
return "failed waiting for clustertemplate" + resource + " to get updated"
def wait_for_cluster_create(client, **kwargs):
timeout = DEFAULT_TIMEOUT
interval = 0.5
start = time.time()
while True:
try:
return client.create_cluster(kwargs)
except ApiError as e:
if e.error.status != 404:
raise e
if time.time() - start > timeout:
exception_msg = 'Timeout waiting for condition.'
raise Exception(exception_msg)
time.sleep(interval)
interval *= 2
def wait_for_clusterTemplate_update_failure(client, revision, timeout=45):
updateWorks = True
start = time.time()
interval = 0.5
cconfig = {
"rancherKubernetesEngineConfig": {
}
}
while updateWorks:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clustertemplate update failure")
try:
client.update(revision, name=random_str(), clusterConfig=cconfig)
except ApiError as e:
if e.error.status == 422:
updateWorks = False
time.sleep(interval)
interval *= 2
def wait_for_clusterTemplate_list_failure(client, revision, timeout=45):
listWorks = True
start = time.time()
interval = 0.5
while listWorks:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clustertemplate list failure")
try:
client.by_id_cluster_template_revision(revision.id)
except ApiError as e:
if e.error.status == 403:
listWorks = False
time.sleep(interval)
interval *= 2
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import hashlib
import inspect
import json
import logging
import os
import re
import time
import unittest
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
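# Typical call site (illustrative only; the exact code and message depend on the
# RPC being exercised, see src/rpc/protocol.h for the codes):
#
#     assert_raises_rpc_error(-8, "Block height out of range",
#                             node.getblockhash, -1)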
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError("String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using the `wait_until()` members
from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
    predicate_source = "'''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
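# Illustrative usage (comment only). As the docstring notes, tests should prefer
# the framework-aware wrappers, e.g.:
#
#     self.wait_until(lambda: node.getblockcount() >= 200, timeout=30)
#
# Calling wait_until_helper() directly looks similar but bypasses timeout scaling.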
def sha256sum_file(filename):
h = hashlib.sha256()
with open(filename, 'rb') as f:
d = f.read(4096)
while len(d) > 0:
h.update(d)
d = f.read(4096)
return h.digest()
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
        coveragedir (str): Directory to write RPC coverage logs to (optional)
    Returns:
        AuthServiceProxy: a convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = int(timeout)
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
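# Worked example (comment only), assuming the default PORT_MIN of 11000 and
# PortSeed.n == 1: p2p_port(0) = 11000 + 0 + (12 * 1) % 4987 = 11012 and
# rpc_port(0) = 11000 + 5000 + 0 + 12 = 16012, so different PortSeed values map
# to different port offsets for concurrently running test processes.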
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
write_config(os.path.join(datadir, "bitcoin.conf"), n=n, chain=chain)
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def write_config(config_path, *, n, chain, extra_config=""):
# Translate chain subdirectory name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(config_path, 'w', encoding='utf8') as f:
if chain_name_conf_arg:
f.write("{}=1\n".format(chain_name_conf_arg))
if chain_name_conf_section:
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("natpmp=0\n")
f.write("shrinkdebugfile=0\n")
f.write(extra_config)
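# For chain='regtest' the file written above looks like the following (comment
# only; the port values vary with the node index and PortSeed):
#
#     regtest=1
#     [regtest]
#     port=<p2p_port(n)>
#     rpcport=<rpc_port(n)>
#     fallbackfee=0.0002
#     server=1
#     ...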
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate 14 transactions of ~66k each,
    # which together come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
# TODO: Change to pow(a, -1, n) available in Python 3.8
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
class TestFrameworkUtil(unittest.TestCase):
def test_modinv(self):
test_vectors = [
[7, 11],
[11, 29],
[90, 13],
[1891, 3797],
[6003722857, 77695236973],
]
for a, n in test_vectors:
self.assertEqual(modinv(a, n), pow(a, n-2, n))
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkProfilesOperations:
"""NetworkProfilesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_profile_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_profile_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the NetworkProfile.
:type network_profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_profile_name=network_profile_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
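    # Illustrative usage sketch (not part of the generated client). Assuming an
    # authenticated async NetworkManagementClient instance named `client` and
    # hypothetical resource names:
    #
    #     poller = await client.network_profiles.begin_delete(
    #         resource_group_name="my-rg",
    #         network_profile_name="my-profile",
    #     )
    #     await poller.result()  # completes when the long-running delete finishes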
async def get(
self,
resource_group_name: str,
network_profile_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkProfile":
"""Gets the specified network profile in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
network_profile_name: str,
parameters: "_models.NetworkProfile",
**kwargs: Any
) -> "_models.NetworkProfile":
"""Creates or updates a network profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param parameters: Parameters supplied to the create or update network profile operation.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.NetworkProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkProfile')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
network_profile_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NetworkProfile":
"""Updates network profile tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param parameters: Parameters supplied to update network profile tags.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NetworkProfileListResult"]:
"""Gets all the network profiles in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_05_01.models.NetworkProfileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkProfiles'} # type: ignore
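# Usage sketch (illustrative; assumes the same async client as above): the
# AsyncItemPaged returned here follows next_link transparently, so callers just
# iterate asynchronously:
#
#     async for profile in client.network_profiles.list_all():
#         print(profile.name)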
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkProfileListResult"]:
"""Gets all network profiles in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_05_01.models.NetworkProfileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles'} # type: ignore
|
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import traceback
import conf
import g
from grax.access_level import Access_Level
from gwis.exception.gwis_nothing_found import GWIS_Nothing_Found
from item import item_base
from item import item_user_access
from item.util import revision
from item.util.item_type import Item_Type
from item.util.watcher_frequency import Watcher_Frequency
log = g.log.getLogger('item_user_watchn')
class One(item_user_access.One):
item_type_id = None
#item_type_id = Item_Type.ITEM_USER_WATCHING
item_type_table = None
item_gwis_abbrev = None # A derived class will override this.
#item_gwis_abbrev = 'iuw'
# Set child_item_types to None since our parent class set it to an
# empty collection, but we don't want to allow callers to specify
# this class's item type to get items.
child_item_types = None
local_defns = [
]
attr_defns = item_user_access.One.attr_defns + local_defns
psql_defns = item_user_access.One.psql_defns + local_defns
gwis_defns = item_base.One.attr_defns_reduce_for_gwis(attr_defns)
#
cols_copy_nok = item_user_access.One.cols_copy_nok + []
__slots__ = [] + [attr_defn[0] for attr_defn in local_defns]
# *** Constructor
def __init__(self, qb=None, row=None, req=None, copy_from=None):
item_user_access.One.__init__(self, qb, row, req, copy_from)
# ***
# ***
class Many(item_user_access.Many):
one_class = One
__slots__ = ()
def __init__(self):
item_user_access.Many.__init__(self)
#
def qb_join_item_event_alert(self, qb):
log.error(
'FIXME: BUG nnnn: qb_join_item_event_alert: not implemented')
# and the table is empty, too.
if qb.sql_clauses.inner.join.find('item_event_read') == -1:
if qb.username != conf.anonymous_username:
qb.sql_clauses.inner.select += (
"""
, ievt.messaging_id AS item_read_id
"""
)
qb.sql_clauses.inner.join += (
"""
LEFT OUTER JOIN item_event_alert AS ievt
ON (gia.item_id = ievt.item_id)
""")
qb.sql_clauses.inner.group_by += (
"""
, ievt.messaging_id
"""
)
else:
qb.sql_clauses.inner.select += (
"""
, NULL AS item_read_id
"""
)
qb.sql_clauses.outer.shared += (
"""
, group_item.item_read_id
"""
)
#
def qb_join_item_event_read(self, qb):
g.assurt(False) # Deprecated.
# See: qb_add_item_event_read. We should just select in the outer.
log.error('qb_join_item_event_read: Deprecated')
if qb.sql_clauses.inner.join.find('item_event_read') == -1:
if qb.username != conf.anonymous_username:
qb.sql_clauses.inner.select += (
"""
, itrd.id AS itrd_event_id
"""
)
qb.sql_clauses.inner.join += (
"""
LEFT OUTER JOIN item_event_read AS itrd
ON ((gia.item_id = itrd.item_id)
AND (itrd.username = %s))
""" % (qb.db.quoted(qb.username),))
qb.sql_clauses.inner.group_by += (
"""
, itrd.id
"""
)
# If we joined using stack_id and not also version and branch_id,
# we'd want to order by revision ID:
# qb.sql_clauses.inner.order_by += (
# # Order by latest read event: we can use event id or rev id.
# # I.e., the following is effectively same as: itrd.id DESC
# """
# , itrd.revision_id DESC
# """
# )
#
# Argh. [lb] wants to deprecate this fcn: it just adds to an
# already long join chain, and since the join adds multiple
# rows for the same system ID, it probably makes more sense
# to do the check in an outer select fcn...
else:
qb.sql_clauses.inner.select += (
"""
, NULL AS itrd_event_id
"""
)
qb.sql_clauses.outer.shared += (
"""
, group_item.itrd_event_id
"""
)
#
def qb_add_item_event_read(self, qb):
qb.sql_clauses.outer.enabled = True
# MAYBE: This seems inefficient. Maybe qb can track what's been added,
# instead of searching strings all the time.
if qb.sql_clauses.outer.group_by.find('user_has_read_item') == -1:
qb.sql_clauses.outer.select += (
"""
, CASE
WHEN EXISTS(SELECT id FROM item_event_read
WHERE item_id = group_item.system_id
AND username = %s
LIMIT 1) THEN TRUE
ELSE NULL END AS user_has_read_item
""" % (qb.db.quoted(qb.username),))
qb.sql_clauses.outer.group_by += (
"""
, user_has_read_item
""")
#
def sql_apply_query_filters(self, qb, where_clause="", conjunction=""):
g.assurt((not conjunction) or (conjunction == "AND"))
if qb.filters.filter_by_unread:
# User must be logged in. Client should prevent this.
g.assurt(qb.username and (qb.username != conf.anonymous_username))
# BUG nnnn: Display alerts in the client.
# Questions: Would we still send digest item watcher emails?
# How would you finish designing item_event_alert table?
# - Define the different msg_type_id types.
#
# For now, we use the item_event_read table, which is basically
# the thread_read_event table from CcpV1, but now it works on
# any item type. The client can ask that we return only items
# that a user has not read, or it can ask us to mark what's
# been read and what's not been read.
#
# The first implementation was a join:
#
# self.qb_join_item_event_read(qb)
# # Or, using the new, unimplemented item_event_alert table:
# # self.qb_join_item_event_alert(qb)
#
# But that creates two problems: 1., we already join a ton of tables,
# which ends up impacting SQL performance, and 2., the server saves
# multiple read events for the same item (same system ID), so the join
# could cause a magnifying effect on the number of rows fetched in the
# inner query. It seems to make more sense to run an EXISTS in the
# outer SELECT. This causes one more SQL statement for every row
# fetched... but how bad can it be?
#
# This is the code used when joining item_event_read:
#
# # Look for items that have no read record, or whose record is old.
# # We checked that the record belongs to the user in the join, so
# # we just check that a record doesn't exist or that it's rev_id is
# # dated.
# #
# # NOTE: Since we're using system IDs, we shouldn't need to look at
# # revision IDs (or versions). So, this is not necessary:
# # overkill: ((itrd.id IS NULL)
# # OR (itrd.revision_id < gia.valid_start_rid))
# where_clause += (
# """
# %s (itrd.id IS NULL)
# """ % (conjunction,))
# conjunction = "AND"
#
# And this is untested code for use when joining item_event_alert:
#
# where_clause += (
# """
# %s
# (ievt.messaging_id IS NOT NULL)
# AND (ievt.username = %s)
# AND (ievt.date_alerted IS NOT NULL)
# AND (ievt.msg_type_id = ??? /* none defined yet... */)
# """ % (conjunction,
# qb.db.quoted(qb.username),))
# conjunction = "AND"
# Add the SELECT and GROUP BY for the EXISTS that tests
# item_event_read for presence of a (username, item_system_id)
# read event record.
self.qb_add_item_event_read(qb)
# Now add a WHERE that says the item must not have been read.
# And we cannot use the where_clause, which is for the inner select.
#
# The column is in the SELECT, so the WHERE must use its own
# calculation.
# qb.sql_clauses.outer.where += (
# """
# AND (user_has_read_item IS NULL)
# """)
qb.sql_clauses.outer.where += (
"""
AND (NOT EXISTS(SELECT id FROM item_event_read
WHERE item_id = group_item.system_id
AND username = %s
LIMIT 1))
""" % (qb.db.quoted(qb.username),))
# We didn't add to where_clauses, so skipping: conjunction = "AND"
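# Rough shape of the SQL produced for filter_by_unread (illustrative only; the
# real statement is assembled piecewise from sql_clauses and includes many more
# columns and joins):
#
#   SELECT ...,
#          CASE WHEN EXISTS(SELECT id FROM item_event_read
#                            WHERE item_id = group_item.system_id
#                              AND username = '<user>'
#                            LIMIT 1) THEN TRUE
#               ELSE NULL END AS user_has_read_item
#     FROM (... inner select ...) AS group_item
#    WHERE NOT EXISTS(SELECT id FROM item_event_read
#                      WHERE item_id = group_item.system_id
#                        AND username = '<user>'
#                      LIMIT 1)
#
# versus the deprecated approach, which LEFT OUTER JOINed item_event_read in the
# inner query and then filtered on itrd.id IS NULL.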
if qb.filters.filter_by_watch_item:
# NOTE: The idea with the filter_by_watch_item enum is/was, if it:
# == 0: Don't use.
# == 1: Use items I'm watching at revision.Current.
# == 2: Use items I was watching at qb.revision (which could
# be revision.Historic).
# But the way it got wired, the feature just uses qb.revision.
# So this is always option 2. Option 1 seems more meaningful,
# otherwise the user cannot make a new set of item watchers
# and then go to a historic revision to see things about those
# watchers. But who cares. That sounds like a silly use case.
# And the user can still use regions they are watching, since
# we always fetch those at revision.Current.
# 2013.10.10: MAYBE delete this comment: Something about:
# Statewide UI: Debugging: We want to skip this if block the second
# time through...
# which means what, exactly? [lb] can only guess that I saw the code
# come through this block twice, but then we'd be adding the same
# column names to the SELECT statement, and SQL would bail. So I had
# to have meant something else... meh.
# 2014.05.04: Is this slow? [lb] seeing joining tables on lhs_stack_id
# or rhs_stack_id taking seconds, and WHEREing on stack_id
# IN (SELECT ...) instead taking less than a second.
# See: recent changes to searching note- and tag-matches when
# geocoding: make temp tables of stack IDs and then WHERE IN
# (SELECT FROM) instead of JOINing.
watched_items_where = self.sql_where_filter_watched(qb)
g.assurt(watched_items_where)
where_clause += " %s %s " % (conjunction, watched_items_where,)
conjunction = "AND"
# NOTE: See qb.filters.only_in_multi_geometry for including watched
# items by geometry.
return item_user_access.Many.sql_apply_query_filters(
self, qb, where_clause, conjunction)
#
def sql_where_filter_watched(self, qb):
# E.g., find user's watched regions.
# MAYBE: If we added new alerts (twitter, sms, etc.) we'd have to
# add more attributes here. For now, there's just the one
# alert: /item/alert_email.
#
# Get the well-known item watcher attribute.
g.assurt(qb.item_mgr is not None)
attr_qb = qb.clone(skip_clauses=True, skip_filtport=True)
internal_name = '/item/alert_email'
# get_system_attr is implemented by attribute.Many but we can't import
# attribute so we jump through object hoops instead.
# 2013.03.29: Using qb.item_mgr.get_system_attr because this fcn. used to
# live in geofeature but now that we're in region maybe we
# can import attribute without causing an infinite import loop?
# Oh, well, this works just as well:
attr_alert_email = attr_qb.item_mgr.get_system_attr(attr_qb,
internal_name)
g.assurt(attr_alert_email is not None)
# It doesn't make sense for the anonymous user to use this filter.
if qb.username == conf.anonymous_username:
# This happens if client specifies 'wgeo=1' but user is anon.
log.error('silly anon client has no watchers')
raise GWIS_Nothing_Found()
join_on_to_self = "gia.stack_id = flv.rhs_stack_id"
where_on_other = "(flv.lhs_stack_id = %d)" % (attr_alert_email.stack_id,)
watched_items_where = self.sql_where_filter_linked(qb, join_on_to_self,
where_on_other)
qb.sql_clauses.inner.select += (
"""
, flv.value_integer AS flv_value_integer
""")
qb.sql_clauses.inner.group_by += (
"""
, flv.value_integer
""")
qb.sql_clauses.outer.where += (
"""
AND (flv_value_integer > %d)
""" % (Watcher_Frequency.never,))
return watched_items_where
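# Conceptually (illustrative pseudo-SQL; the real text comes from
# sql_where_filter_linked plus branch_hier_where_clause), the watched-items
# filter keeps rows whose item is linked to the /item/alert_email attribute
# with a frequency above 'never':
#
#   JOIN link_value AS flv ON (gia.stack_id = flv.rhs_stack_id)
#   ... WHERE (flv.lhs_stack_id = <alert_email attr stack_id>)
#   ... AND (flv_value_integer > <Watcher_Frequency.never>)   -- outer WHERE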
#
# [lb] is not quite sure where this fcn. should live. Here is fine for now.
def sql_where_filter_linked(self, qb, join_on_to_self,
where_on_other,
join_on_temp=""):
linked_items_where = Many.sql_where_filter_linked_impl(
qb, join_on_to_self, where_on_other, join_on_temp)
return linked_items_where
#
@staticmethod
def sql_where_filter_linked_impl(qb, join_on_to_self,
where_on_other,
join_on_temp=""):
# 2014.05.05: This fcn. is slow: specifically, we could set the
# join_collapse_limit to 1 and then select from the
# temp table first ([lb] saw the sql searching for
# items via notes taking 4 secs. instead of 10 secs.
# with better join ordering); more importantly, we
# could use a WHERE rather than JOINing, e.g.,
# WHERE stack_id IN (SELECT stack_id FROM temp_table)
# is usually a lot faster than
# FROM link_value JOIN temp_table
# simply because JOINing a large table like link_value
# is slower than searching for IDs... although [lb]
# would've expected them to be similar in runtimes,
# since the JOIN is an inner join and joins on the
# stack ID, so the results are basically the same...
#log.debug('sql_where_filter_linked_impl')
g.assurt(qb.sql_clauses.inner.join.find('JOIN link_value AS flv') == -1)
# 2013.03.27: [lb] hopes this isn't too costly. We have to check
# the user's permissions on the link-attribute, since
# item watcher links are private (i.e., if we didn't
# check permissions, we'd get all users' watchers).
# 2014.05.04: It is costly, like, ten seconds to look for notes
# matching "gateway fountain". But using explicit join
# ordering and disabling join_collapse_limit, we can
# improve the search to a little under 4 seconds...
# [lb] is not sure why the search is still so costly
# because we're selecting from a temporary table with
# only 21 rows (in the 'gateway fountain' example).
qb.sql_clauses.inner.join += (
"""
JOIN link_value AS flv
ON (%s)
JOIN group_item_access AS flv_gia
ON (flv.system_id = flv_gia.item_id)
%s
""" % (join_on_to_self,
join_on_temp,))
qb.sql_clauses.inner.select += (
"""
, flv.lhs_stack_id AS flv_lhs_stack_id
, flv_gia.deleted AS flv_deleted
, flv_gia.access_level_id AS flv_access_level_id
""")
qb.sql_clauses.inner.group_by += (
"""
, flv_gia.deleted
, flv_gia.access_level_id
""")
qb.sql_clauses.inner.order_by += (
"""
, flv_gia.branch_id DESC
, flv_gia.acl_grouping DESC
, flv_gia.access_level_id ASC
""")
#
g.assurt(qb.branch_hier)
g.assurt(qb.revision.gids)
linked_items_where = ""
conjunction = ""
if where_on_other:
linked_items_where += (
"""
%s %s
"""
% (conjunction,
where_on_other,))
conjunction = "AND"
linked_items_where += (
"""
%s %s
"""
% (conjunction,
# Check user's access to the link. This is similar to:
# no "AND (flv_gia.group_id IN (%s))" % ','.join(qb.revision.gids)
# no qb.revision.sql_where_group_ids(qb.revision.gids, 'flv_gia.')
revision.Revision.branch_hier_where_clause(
qb.branch_hier, 'flv_gia', include_gids=True,
allow_deleted=True),))
conjunction = "AND"
#
linked_items_where = " (%s) " % (linked_items_where,)
#
# NOTE: To avoid deleted links, we have to use an outer join,
# otherwise we'll find the previous undeleted links, since
# link_values use lhs and rhs stack_ids and not system_ids.
# MAYBE: This may be enabled for branches.
qb.sql_clauses.outer.enabled = True
qb.sql_clauses.outer.where += (
"""
AND (NOT group_item.flv_deleted)
AND (group_item.flv_access_level_id <= %d)
""" % (Access_Level.client,))
return linked_items_where
# ***
# ***
|
|
from unittest import TestCase
import simplejson as json
from mock import patch, PropertyMock, Mock
from pyqrllib.pyqrllib import bin2hstr
from qrl.core.misc import logger
from qrl.core.AddressState import AddressState
from qrl.core.txs.TokenTransaction import TokenTransaction
from qrl.core.txs.Transaction import Transaction
from qrl.generated import qrl_pb2
from tests.core.txs.testdata import test_json_Token, test_signature_Token
from tests.misc.helper import get_alice_xmss, get_bob_xmss, get_slave_xmss
logger.initialize_default()
@patch('qrl.core.txs.Transaction.logger')
class TestTokenTransaction(TestCase):
def __init__(self, *args, **kwargs):
super(TestTokenTransaction, self).__init__(*args, **kwargs)
self.alice = get_alice_xmss()
self.bob = get_bob_xmss()
self._decimals = 15
self.alice.set_ots_index(10)
self.maxDiff = None
def setUp(self):
self.initial_balances_valid = [qrl_pb2.AddressAmount(address=self.alice.address, amount=1000),
qrl_pb2.AddressAmount(address=self.bob.address, amount=1000)]
self.params = {"symbol": b'QRL',
"name": b'Quantum Resistant Ledger',
"owner": self.alice.address,
"decimals": self._decimals,
"initial_balances": self.initial_balances_valid,
"fee": 1,
"xmss_pk": self.alice.pk}
def make_tx(self, **kwargs):
self.params.update(kwargs)
tx = TokenTransaction.create(**self.params)
return tx
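# Note: make_tx updates self.params in place, so keyword overrides persist for
# the rest of the current test method (setUp rebuilds self.params before each
# test); e.g. self.make_tx(decimals=4) keeps the default symbol/name/owner but
# uses decimals=4.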
def test_create(self, m_logger):
# Alice creates Token
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=400000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=200000000))
tx = self.make_tx(decimals=4, initial_balances=initial_balances)
self.assertTrue(tx)
def test_create_negative_fee(self, m_logger):
with self.assertRaises(ValueError):
TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=self.alice.address,
decimals=4,
initial_balances=[],
fee=-1,
xmss_pk=self.alice.pk)
def test_to_json(self, m_logger):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=400000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=200000000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=4,
initial_balances=initial_balances,
fee=1,
xmss_pk=self.alice.pk)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_Token), json.loads(txjson))
def test_from_json(self, m_logger):
tx = Transaction.from_json(test_json_Token)
tx.sign(self.alice)
self.assertIsInstance(tx, TokenTransaction)
# Test that common Transaction components were copied over.
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_from))
self.assertEqual('01030038ea6375069f8272cc1a6601b3c76c21519455603d370036b97c779ada356'
'5854e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e',
bin2hstr(tx.PK))
self.assertEqual(b'QRL', tx.symbol)
self.assertEqual(b'Quantum Resistant Ledger', tx.name)
self.assertEqual('010317463dcd581b679b4754f46c6425125489a2826894e3c42a590efb6806450ce6bf52716c',
bin2hstr(tx.owner))
self.assertEqual('ff84da605e9c9cd04d68503be7922110b4cc147837f8687ad18aa54b7bc5632d', bin2hstr(tx.txhash))
self.assertEqual(10, tx.ots_key)
self.assertEqual(test_signature_Token, bin2hstr(tx.signature))
total_supply = 0
for initial_balance in tx.initial_balances:
total_supply += initial_balance.amount
self.assertEqual(600000000, total_supply)
self.assertEqual(1, tx.fee)
def test_validate_tx(self, m_logger):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=400000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=200000000))
tx = self.make_tx(decimals=4, initial_balances=initial_balances)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_validate_tx2(self, m_logger):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=10000000000000000001))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=10000000000000000001))
# Transaction validation should fail because the decimals value is higher than the maximum allowed for these amounts
with self.assertRaises(ValueError):
self.make_tx(decimals=4, initial_balances=initial_balances)
def test_validate_tx3(self, m_logger):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=1000 * 10 ** self._decimals))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=1000 * 10 ** self._decimals))
tx = self.make_tx(initial_balances=initial_balances)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_validate_tx4(self, m_logger):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=1000 * 10 ** self._decimals))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=1000 * 10 ** self._decimals))
tx = self.make_tx(initial_balances=initial_balances)
tx.sign(self.alice)
self.assertTrue(tx.validate_or_raise())
tx._data.transaction_hash = b'abc'
with self.assertRaises(ValueError):
tx.validate_or_raise()
def test_validate_custom(self, m_logger):
# Token symbol too long
with self.assertRaises(ValueError):
tx = self.make_tx(symbol=b'QRLSQRLSQRL')
tx.sign(self.alice)
# Token name too long
with self.assertRaises(ValueError):
tx = self.make_tx(name=b'Quantum Resistant LedgerQuantum')
tx.sign(self.alice)
# Token symbol missing
with self.assertRaises(ValueError):
tx = self.make_tx(symbol=b'')
tx.sign(self.alice)
# Token name missing
with self.assertRaises(ValueError):
tx = self.make_tx(name=b'')
tx.sign(self.alice)
# Empty initial_balances
with self.assertRaises(ValueError):
tx = self.make_tx(initial_balances=[])
tx.sign(self.alice)
# Invalid initial balances... 0!
with self.assertRaises(ValueError):
initial_balances_0_0 = [qrl_pb2.AddressAmount(address=self.alice.address, amount=0),
qrl_pb2.AddressAmount(address=self.bob.address, amount=0)]
tx = self.make_tx(initial_balances=initial_balances_0_0)
tx.sign(self.alice)
# Fee is -1
with patch('qrl.core.txs.TokenTransaction.TokenTransaction.fee', new_callable=PropertyMock) as m_fee:
m_fee.return_value = -1
with self.assertRaises(ValueError):
tx = self.make_tx()
tx.sign(self.alice)
# Invalid initial balances... -1!
# tx = self.make_tx()
# tx.sign(self.alice)
# with patch('qrl.core.txs.TokenTransaction.TokenTransaction.initial_balances', new_callable=PropertyMock) as m_i_balances:
# m_i_balances.return_value = [-1, -1]
# with self.assertRaises(ValueError):
# tx.validate_or_raise()
@patch('qrl.core.txs.Transaction.Transaction.validate_slave', return_value=True)
def test_validate_extended(self, m_validate_slave, m_logger):
"""
TokenTransaction.validate_extended checks for:
1. valid master/slave
2. from address is valid
3. owner address is valid
4. addresses that own the initial balances are valid
5. that the AddressState has enough coins to pay the Transaction fee (because no coins are being transferred)
6. OTS key reuse
"""
tx = TokenTransaction.create(**self.params)
m_addr_from_state = Mock(autospec=AddressState, name='addr_from State', balance=100)
m_addr_from_pk_state = Mock(autospec=AddressState, name='addr_from_pk State')
m_addr_from_pk_state.ots_key_reuse.return_value = False
tx.sign(self.alice)
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertTrue(result)
m_validate_slave.return_value = False
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertFalse(result)
m_validate_slave.return_value = True
with patch('qrl.core.txs.TokenTransaction.TokenTransaction.addr_from',
new_callable=PropertyMock) as m_addr_from:
m_addr_from.return_value = b'Invalid Address'
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertFalse(result)
with patch('qrl.core.txs.TokenTransaction.TokenTransaction.owner', new_callable=PropertyMock) as m_owner:
m_owner.return_value = b'Invalid Address'
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertFalse(result)
with patch('qrl.core.txs.TokenTransaction.TokenTransaction.initial_balances',
new_callable=PropertyMock) as m_address_balance:
m_address_balance.return_value = [qrl_pb2.AddressAmount(address=b'Invalid Address 1', amount=1000),
qrl_pb2.AddressAmount(address=b'Invalid Address 2', amount=1000)]
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertFalse(result)
m_addr_from_state.balance = 0
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertFalse(result)
m_addr_from_state.balance = 100
m_addr_from_pk_state.ots_key_reuse.return_value = True
result = tx.validate_extended(m_addr_from_state, m_addr_from_pk_state)
self.assertFalse(result)
def test_affected_address(self, m_logger):
tx = TokenTransaction.create(**self.params)
# Default params should result in 2 affected addresses
result = set()
tx.set_affected_address(result)
self.assertEqual(2, len(result))
# If the slave is a recipient of tokens, he should be included too.
slave = get_slave_xmss()
result = set()
self.initial_balances_valid.append(qrl_pb2.AddressAmount(address=slave.address, amount=1000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=self.alice.address,
decimals=15,
initial_balances=self.initial_balances_valid,
fee=1,
xmss_pk=self.alice.pk)
tx.set_affected_address(result)
self.assertEqual(3, len(result))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for MobileNet v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from nets import mobilenet_v1
slim = contrib_slim
class MobilenetV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'MobilenetV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'MobilenetV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
def testBuildCustomNetworkUsingConvDefs(self):
batch_size = 5
height, width = 224, 224
conv_defs = [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
]
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 56, 56, 512])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise',
use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 7, 7, 512],
'Conv2d_12_pointwise': [batch_size, 7, 7, 1024],
'Conv2d_13_depthwise': [batch_size, 7, 7, 1024],
'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 16
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 14, 14, 512],
'Conv2d_12_pointwise': [batch_size, 14, 14, 1024],
'Conv2d_13_depthwise': [batch_size, 14, 14, 1024],
'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
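# Informational arithmetic check on the shapes above: output_stride=16 caps the
# total downsampling at 224 / 16 = 14, so the later layers fall back to atrous
# (dilated) convolution and every endpoint from Conv2d_12 onward stays at 14x14
# instead of dropping to 7x7 as in the unconstrained test.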
def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 8
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 28, 28, 256],
'Conv2d_6_pointwise': [batch_size, 28, 28, 512],
'Conv2d_7_depthwise': [batch_size, 28, 28, 512],
'Conv2d_7_pointwise': [batch_size, 28, 28, 512],
'Conv2d_8_depthwise': [batch_size, 28, 28, 512],
'Conv2d_8_pointwise': [batch_size, 28, 28, 512],
'Conv2d_9_depthwise': [batch_size, 28, 28, 512],
'Conv2d_9_pointwise': [batch_size, 28, 28, 512],
'Conv2d_10_depthwise': [batch_size, 28, 28, 512],
'Conv2d_10_pointwise': [batch_size, 28, 28, 512],
'Conv2d_11_depthwise': [batch_size, 28, 28, 512],
'Conv2d_11_pointwise': [batch_size, 28, 28, 512],
'Conv2d_12_depthwise': [batch_size, 28, 28, 512],
'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],
'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],
'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildAndCheckAllEndPointsApproximateFaceNet(self):
batch_size = 5
height, width = 128, 128
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,
use_explicit_padding=True)
# For the Conv2d_0 layer FaceNet has depth=16
endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24],
'Conv2d_1_depthwise': [batch_size, 64, 64, 24],
'Conv2d_1_pointwise': [batch_size, 64, 64, 48],
'Conv2d_2_depthwise': [batch_size, 32, 32, 48],
'Conv2d_2_pointwise': [batch_size, 32, 32, 96],
'Conv2d_3_depthwise': [batch_size, 32, 32, 96],
'Conv2d_3_pointwise': [batch_size, 32, 32, 96],
'Conv2d_4_depthwise': [batch_size, 16, 16, 96],
'Conv2d_4_pointwise': [batch_size, 16, 16, 192],
'Conv2d_5_depthwise': [batch_size, 16, 16, 192],
'Conv2d_5_pointwise': [batch_size, 16, 16, 192],
'Conv2d_6_depthwise': [batch_size, 8, 8, 192],
'Conv2d_6_pointwise': [batch_size, 8, 8, 384],
'Conv2d_7_depthwise': [batch_size, 8, 8, 384],
'Conv2d_7_pointwise': [batch_size, 8, 8, 384],
'Conv2d_8_depthwise': [batch_size, 8, 8, 384],
'Conv2d_8_pointwise': [batch_size, 8, 8, 384],
'Conv2d_9_depthwise': [batch_size, 8, 8, 384],
'Conv2d_9_pointwise': [batch_size, 8, 8, 384],
'Conv2d_10_depthwise': [batch_size, 8, 8, 384],
'Conv2d_10_pointwise': [batch_size, 8, 8, 384],
'Conv2d_11_depthwise': [batch_size, 8, 8, 384],
'Conv2d_11_pointwise': [batch_size, 8, 8, 384],
'Conv2d_12_depthwise': [batch_size, 4, 4, 384],
'Conv2d_12_pointwise': [batch_size, 4, 4, 768],
'Conv2d_13_depthwise': [batch_size, 4, 4, 768],
'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
mobilenet_v1.mobilenet_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(3217920, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]
_, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
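# Worked example of the check above (informational, not an extra assertion):
# with depth_multiplier=0.5 the 32-channel Conv2d_0 endpoint becomes 16 channels
# and the 1024-channel Conv2d_13_pointwise endpoint becomes 512, i.e.
# new_depth == 0.5 * original_depth for every 'Conv' endpoint.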
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.compat.v1.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.compat.v1.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
reuse=True)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random.uniform([1, 224, 224, 3])
logits, _ = mobilenet_v1.mobilenet_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet_v1.mobilenet_v1_arg_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from beeswax import hive_site
from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, BROWSE_PARTITIONED_TABLE_LIMIT
from beeswax.design import hql_query
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import QueryHistory, QUERY_TYPES
from filebrowser.views import location_to_url
from desktop.lib.django_util import format_preserving_redirect
from desktop.lib.i18n import smart_str
LOG = logging.getLogger(__name__)
DBMS_CACHE = {}
DBMS_CACHE_LOCK = threading.Lock()
def get(user, query_server=None):
global DBMS_CACHE
global DBMS_CACHE_LOCK
# Avoid circular dependency
from beeswax.server.hive_server2_lib import HiveServerClientCompatible, HiveServerClient
if query_server is None:
query_server = get_query_server_config()
DBMS_CACHE_LOCK.acquire()
try:
DBMS_CACHE.setdefault(user.username, {})
if query_server['server_name'] not in DBMS_CACHE[user.username]:
DBMS_CACHE[user.username][query_server['server_name']] = HiveServer2Dbms(HiveServerClientCompatible(HiveServerClient(query_server, user)), QueryHistory.SERVER_TYPE[1][0])
return DBMS_CACHE[user.username][query_server['server_name']]
finally:
DBMS_CACHE_LOCK.release()
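# Illustrative helper (not part of the original module): a minimal sketch of how the
# per-user / per-server cache above is typically consumed from a Django view. The
# `request` argument is assumed to carry an authenticated user.
def _example_list_tables(request, database='default'):
  db = get(request.user)          # returns the cached HiveServer2Dbms when available
  return db.get_tables(database)  # e.g. ['customers', 'sample_07', 'web_logs']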
def get_query_server_config(name='beeswax', server=None):
if name == 'impala':
from impala.conf import SERVER_HOST as IMPALA_SERVER_HOST, SERVER_PORT as IMPALA_SERVER_PORT, \
IMPALA_PRINCIPAL, IMPERSONATION_ENABLED, QUERYCACHE_ROWS, QUERY_TIMEOUT_S
query_server = {
'server_name': 'impala',
'server_host': IMPALA_SERVER_HOST.get(),
'server_port': IMPALA_SERVER_PORT.get(),
'principal': IMPALA_PRINCIPAL.get(),
'impersonation_enabled': IMPERSONATION_ENABLED.get(),
'querycache_rows': QUERYCACHE_ROWS.get(),
'QUERY_TIMEOUT_S': QUERY_TIMEOUT_S.get(),
}
else:
kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
query_server = {
'server_name': 'beeswax', # Aka HiveServer2 now
'server_host': HIVE_SERVER_HOST.get(),
'server_port': HIVE_SERVER_PORT.get(),
'principal': kerberos_principal,
'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
'protocol': 'https' if hiveserver2_use_ssl() else 'http',
'host': HIVE_SERVER_HOST.get(),
'port': hive_site.hiveserver2_thrift_http_port(),
'end_point': hive_site.hiveserver2_thrift_http_path()
},
'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
}
LOG.debug("Query Server: %s" % query_server)
return query_server
class QueryServerException(Exception):
# Ideally the query handle will be stored here too.
def __init__(self, e, message=''):
super(QueryServerException, self).__init__(e)
self.message = message
class NoSuchObjectException(Exception): pass
class HiveServer2Dbms(object):
def __init__(self, client, server_type):
self.client = client
self.server_type = server_type
self.server_name = self.client.query_server['server_name']
def get_table(self, database, table_name):
    # SHOW PARTITIONS (needed to build the Table object) does not accept a DB-qualified name, so switch databases first
self.use(database)
return self.client.get_table(database, table_name)
def get_tables(self, database='default', table_names='*'):
hql = "SHOW TABLES IN %s '%s'" % (database, table_names) # self.client.get_tables(database, table_names) is too slow
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=15.0)
if handle:
result = self.fetch(handle, rows=5000)
self.close(handle)
return [name for table in result.rows() for name in table]
else:
return []
def get_databases(self):
return self.client.get_databases()
def execute_query(self, query, design):
return self.execute_and_watch(query, design=design)
def select_star_from(self, database, table):
hql = "SELECT * FROM `%s.%s` %s" % (database, table.name, self._get_browse_limit_clause(table))
return self.execute_statement(hql)
def execute_statement(self, hql):
if self.server_name == 'impala':
query = hql_query(hql, QUERY_TYPES[1])
else:
query = hql_query(hql, QUERY_TYPES[0])
return self.execute_and_watch(query)
def fetch(self, query_handle, start_over=False, rows=None):
no_start_over_support = [config_variable for config_variable in self.get_default_configuration(False)
if config_variable.key == 'support_start_over'
and config_variable.value == 'false']
if no_start_over_support:
start_over = False
return self.client.fetch(query_handle, start_over, rows)
def close_operation(self, query_handle):
return self.client.close_operation(query_handle)
def open_session(self, user):
return self.client.open_session(user)
def close_session(self, session):
return self.client.close_session(session)
def cancel_operation(self, query_handle):
resp = self.client.cancel_operation(query_handle)
if self.client.query_server['server_name'] == 'impala':
resp = self.client.close_operation(query_handle)
return resp
def get_sample(self, database, table):
"""No samples if it's a view (HUE-526)"""
if not table.is_view:
limit = min(100, BROWSE_PARTITIONED_TABLE_LIMIT.get())
partition_query = ""
if table.partition_keys:
partitions = self.get_partitions(database, table, 1)
partition_query = 'WHERE ' + ' AND '.join(["%s='%s'" % (table.partition_keys[idx].name, key) for idx, key in enumerate(partitions[0].values)])
hql = "SELECT * FROM `%s.%s` %s LIMIT %s" % (database, table.name, partition_query, limit)
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
return result
def analyze_table_table(self, database, table):
    hql = 'analyze table `%(database)s.%(table_name)s` compute statistics' % {'database': database, 'table_name': table.name}
query = hql_query(hql, database)
    return self.execute_and_watch(query)  # execute_query() requires a saved design, which this helper does not have
def analyze_table_column(self):
# analyze table <table_name> partition <part_name> compute statistics for columns <col_name1>, <col_name2>...
pass
def drop_table(self, database, table):
if table.is_view:
hql = "DROP VIEW `%s.%s`" % (database, table.name,)
else:
hql = "DROP TABLE `%s.%s`" % (database, table.name,)
return self.execute_statement(hql)
def load_data(self, database, table, form, design):
hql = "LOAD DATA INPATH"
hql += " '%s'" % form.cleaned_data['path']
if form.cleaned_data['overwrite']:
hql += " OVERWRITE"
hql += " INTO TABLE "
hql += "`%s.%s`" % (database, table.name,)
if form.partition_columns:
hql += " PARTITION ("
vals = []
for key, column_name in form.partition_columns.iteritems():
vals.append("%s='%s'" % (column_name, form.cleaned_data[key]))
hql += ", ".join(vals)
hql += ")"
query = hql_query(hql, database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def drop_tables(self, database, tables, design):
hql = []
for table in tables:
if table.is_view:
hql.append("DROP VIEW `%s.%s`" % (database, table.name,))
else:
hql.append("DROP TABLE `%s.%s`" % (database, table.name,))
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def invalidate_tables(self, database, tables):
handle = None
for table in tables:
try:
hql = "INVALIDATE METADATA %s.%s" % (database, table,)
query = hql_query(hql, database, query_type=QUERY_TYPES[1])
handle = self.execute_and_wait(query, timeout_sec=10.0)
except Exception, e:
LOG.warn('Refresh tables cache out of sync: %s' % smart_str(e))
finally:
if handle:
self.close(handle)
def drop_database(self, database):
return self.execute_statement("DROP DATABASE `%s`" % database)
def drop_databases(self, databases, design):
hql = []
for database in databases:
hql.append("DROP DATABASE `%s`" % database)
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def _get_and_validate_select_query(self, design, query_history):
query = design.get_query_statement(query_history.statement_number)
if not query.strip().lower().startswith('select'):
raise Exception(_('Only SELECT statements can be saved. Provided query: %(query)s') % {'query': query})
return query
def insert_query_into_directory(self, query_history, target_dir):
design = query_history.design.get_design()
database = design.query['database']
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = "INSERT OVERWRITE DIRECTORY '%s' %s" % (target_dir, query)
return self.execute_statement(hql)
def create_table_as_a_select(self, request, query_history, target_database, target_table, result_meta):
design = query_history.design.get_design()
database = design.query['database']
# Case 1: Hive Server 2 backend or results straight from an existing table
if result_meta.in_tablename:
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = 'CREATE TABLE %s.%s AS %s' % (target_database, target_table, query)
query_history = self.execute_statement(hql)
else:
# Case 2: The results are in some temporary location
# Beeswax backward compatibility and optimization
# 1. Create table
cols = ''
schema = result_meta.schema
for i, field in enumerate(schema.fieldSchemas):
if i != 0:
cols += ',\n'
cols += '`%s` %s' % (field.name, field.type)
# The representation of the delimiter is messy.
      # It came from Java as a string, which might have been converted from an integer.
# So it could be "1" (^A), or "10" (\n), or "," (a comma literally).
delim = result_meta.delim
if not delim.isdigit():
delim = str(ord(delim))
hql = '''
CREATE TABLE `%s` (
%s
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\%s'
STORED AS TextFile
''' % (target_table, cols, delim.zfill(3))
query = hql_query(hql)
self.execute_and_wait(query)
try:
# 2. Move the results into the table's storage
table_obj = self.get_table('default', target_table)
table_loc = request.fs.urlsplit(table_obj.path_location)[2]
result_dir = request.fs.urlsplit(result_meta.table_dir)[2]
request.fs.rename_star(result_dir, table_loc)
LOG.debug("Moved results from %s to %s" % (result_meta.table_dir, table_loc))
request.info(request, _('Saved query results as new table %(table)s.') % {'table': target_table})
query_history.save_state(QueryHistory.STATE.expired)
except Exception, ex:
query = hql_query('DROP TABLE `%s`' % target_table)
try:
self.execute_and_wait(query)
except Exception, double_trouble:
LOG.exception('Failed to drop table "%s" as well: %s' % (target_table, double_trouble))
raise ex
url = format_preserving_redirect(request, reverse('metastore:index'))
return query_history
def use(self, database):
query = hql_query('USE %s' % database)
return self.client.use(query)
def get_log(self, query_handle, start_over=True):
return self.client.get_log(query_handle, start_over)
def get_state(self, handle):
return self.client.get_state(handle)
def get_operation_status(self, handle):
return self.client.get_operation_status(handle)
def execute_and_wait(self, query, timeout_sec=30.0, sleep_interval=0.5):
"""
    Run the query and poll its status until it finishes or times out.
    Returns the query handle on completion; returns None if the timeout is reached
    (after attempting to cancel the running operation).
"""
handle = self.client.query(query)
curr = time.time()
end = curr + timeout_sec
while curr <= end:
state = self.client.get_state(handle)
if state not in (QueryHistory.STATE.running, QueryHistory.STATE.submitted):
return handle
time.sleep(sleep_interval)
curr = time.time()
try:
self.cancel_operation(handle)
except:
self.close_operation(handle)
return None
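  # Illustrative usage (sketch): the short synchronous helpers in this class follow
  # the same pattern, e.g.
  #
  #   handle = self.execute_and_wait(hql_query('SHOW DATABASES'), timeout_sec=5.0)
  #   if handle:
  #     result = self.fetch(handle, rows=100)
  #     self.close(handle)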
def execute_next_statement(self, query_history, hql_query):
if query_history.is_success() or query_history.is_expired():
# We need to go to the next statement only if the previous one passed
query_history.statement_number += 1
else:
# We need to update the query in case it was fixed
query_history.refresh_design(hql_query)
query_history.last_state = QueryHistory.STATE.submitted.index
query_history.save()
query = query_history.design.get_design()
return self.execute_and_watch(query, query_history=query_history)
def execute_and_watch(self, query, design=None, query_history=None):
"""
Run query and return a QueryHistory object in order to see its progress on a Web page.
"""
hql_query = query.hql_query
if query_history is None:
query_history = QueryHistory.build(
owner=self.client.user,
query=hql_query,
server_host='%(server_host)s' % self.client.query_server,
server_port='%(server_port)d' % self.client.query_server,
server_name='%(server_name)s' % self.client.query_server,
server_type=self.server_type,
last_state=QueryHistory.STATE.submitted.index,
design=design,
notify=query.query.get('email_notify', False),
query_type=query.query['type'],
statement_number=0
)
query_history.save()
LOG.debug("Made new QueryHistory id %s user %s query: %s..." % (query_history.id, self.client.user, query_history.query[:25]))
try:
handle = self.client.query(query, query_history.statement_number)
if not handle.is_valid():
msg = _("Server returning invalid handle for query id %(id)d [%(query)s]...") % {'id': query_history.id, 'query': query[:40]}
raise QueryServerException(msg)
except QueryServerException, ex:
LOG.exception(ex)
# Kind of expected (hql compile/syntax error, etc.)
if hasattr(ex, 'handle') and ex.handle:
query_history.server_id, query_history.server_guid = ex.handle.id, ex.handle.id
query_history.log_context = ex.handle.log_context
query_history.save_state(QueryHistory.STATE.failed)
raise ex
# All good
query_history.server_id, query_history.server_guid = handle.get()
query_history.operation_type = handle.operation_type
query_history.has_results = handle.has_result_set
query_history.modified_row_count = handle.modified_row_count
query_history.log_context = handle.log_context
query_history.query_type = query.query['type']
query_history.set_to_running()
query_history.save()
LOG.debug("Updated QueryHistory id %s user %s statement_number: %s" % (query_history.id, self.client.user, query_history.statement_number))
return query_history
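  # Illustrative flow (sketch): a caller typically submits via execute_query() /
  # execute_and_watch(), stores the returned QueryHistory primary key, then polls
  # get_state()/get_log() with the server handle and fetches results once the
  # query leaves the submitted/running states.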
def get_results_metadata(self, handle):
return self.client.get_results_metadata(handle)
def close(self, handle):
return self.client.close(handle)
def get_partitions(self, db_name, table, max_parts=None):
if max_parts is None or max_parts > BROWSE_PARTITIONED_TABLE_LIMIT.get():
max_parts = BROWSE_PARTITIONED_TABLE_LIMIT.get()
# DB name not supported in SHOW PARTITIONS
self.use(db_name)
return self.client.get_partitions(db_name, table.name, max_parts)
def get_partition(self, db_name, table_name, partition_id):
table = self.get_table(db_name, table_name)
partitions = self.get_partitions(db_name, table, max_parts=None)
partition_query = ""
for idx, key in enumerate(partitions[partition_id].values):
partition_query += (idx > 0 and " AND " or "") + table.partition_keys[idx].name + "='%s'" % key
hql = "SELECT * FROM `%s.%s` WHERE %s" % (db_name, table_name, partition_query)
return self.execute_statement(hql)
def explain(self, query):
return self.client.explain(query)
def getStatus(self):
return self.client.getStatus()
def get_default_configuration(self, include_hadoop):
return self.client.get_default_configuration(include_hadoop)
def _get_browse_limit_clause(self, table):
"""Get the limit clause when browsing a partitioned table"""
if table.partition_keys:
limit = BROWSE_PARTITIONED_TABLE_LIMIT.get()
if limit > 0:
return "LIMIT %d" % (limit,)
return ""
class Table:
"""
Represents the metadata of a Hive Table.
"""
@property
def hdfs_link(self):
return location_to_url(self.path_location)
class DataTable:
"""
Represents the data of a Hive Table.
  If the dataset has more rows, issue another fetch to obtain a new DataTable with the next rows.
"""
pass
# TODO decorator?
def expand_exception(exc, db, handle=None):
try:
if handle is not None:
log = db.get_log(handle)
elif hasattr(exc, 'get_rpc_handle') or hasattr(exc, 'log_context'):
log = db.get_log(exc)
else:
log = ''
except Exception, e:
# Always show something, even if server has died on the job.
log = _("Could not retrieve logs: %s." % e)
if not hasattr(exc, 'message') or not exc.message:
error_message = _("Unknown exception.")
else:
error_message = force_unicode(exc.message, strings_only=True, errors='replace')
return error_message, log
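# Illustrative usage of expand_exception() (sketch): callers generally wrap server
# calls and surface both the error message and the server-side log, e.g.
#
#   try:
#     db.execute_and_watch(query)
#   except QueryServerException, ex:
#     error_message, log = expand_exception(ex, db)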
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
Pbxproj.py
Working with the pbxproj file format is a pain in the ass.
This object provides a couple basic features for parsing pbxproj files:
* Getting a dependency list
* Adding one pbxproj to another pbxproj as a dependency
Version 1.0.
History:
1.0 - October 20, 2010: Initial hacked-together version finished. It is alive!
Created by Jeff Verkoeyen on 2010-10-18.
Copyright 2009-2010 Facebook
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import logging
import os
import re
import sys
import Paths
pbxproj_cache = {}
# The following relative path methods recycled from:
# http://code.activestate.com/recipes/208993-compute-relative-path-from-one-directory-to-anothe/
# Author: Cimarron Taylor
# Date: July 6, 2003
def pathsplit(p, rest=[]):
(h,t) = os.path.split(p)
if len(h) < 1: return [t]+rest
if len(t) < 1: return [h]+rest
return pathsplit(h,[t]+rest)
def commonpath(l1, l2, common=[]):
if len(l1) < 1: return (common, l1, l2)
if len(l2) < 1: return (common, l1, l2)
if l1[0] != l2[0]: return (common, l1, l2)
return commonpath(l1[1:], l2[1:], common+[l1[0]])
def relpath(p1, p2):
(common,l1,l2) = commonpath(pathsplit(p1), pathsplit(p2))
p = []
if len(l1) > 0:
p = [ '../' * len(l1) ]
p = p + l2
return os.path.join( *p )
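# Example: relpath('/Users/me/src/MyApp', '/Users/me/src/three20/Build/Products')
# returns '../three20/Build/Products', i.e. the path from the first argument to
# the second. (The paths here are purely illustrative.)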
class Pbxproj(object):
@staticmethod
def get_pbxproj_by_name(name):
if name not in pbxproj_cache:
pbxproj_cache[name] = Pbxproj(name)
return pbxproj_cache[name]
# Valid names
# Three20
# Three20:Three20-Xcode3.2.5
# /path/to/project.xcodeproj/project.pbxproj
def __init__(self, name):
self._project_data = None
parts = name.split(':')
self.name = parts[0]
if len(parts) > 1:
self.target = parts[1]
else:
if re.match('^[a-zA-Z0-9\.\-:+"]+$', self.name):
self.target = self.name
else:
result = re.search('([a-zA-Z0-9\.\-+"]+)\.xcodeproj', self.name)
if not result:
self.target = self.name
else:
(self.target, ) = result.groups()
match = re.search('([a-zA-Z0-9\.\-+"]+)\.xcodeproj', self.name)
if not match:
self._project_name = self.name
else:
(self._project_name, ) = match.groups()
self._guid = None
self._deps = None
self.guid()
def __str__(self):
return str(self.name)+" target:"+str(self.target)+" guid:"+str(self._guid)+" prodguid: "+self._product_guid+" prodname: "+self._product_name
def uniqueid(self):
return self.name + ':' + self.target
def path(self):
# TODO: No sense calculating this every time, just store it when we get the name.
if re.match('^[a-zA-Z0-9\.\-:+"]+$', self.name):
return os.path.join(Paths.src_dir, self.name.strip('"'), self.name.strip('"')+'.xcodeproj', 'project.pbxproj')
elif not re.match('project.pbxproj$', self.name):
return os.path.join(self.name, 'project.pbxproj')
else:
return self.name
# A pbxproj file is contained within an xcodeproj file.
# This method simply strips off the project.pbxproj part of the path.
def xcodeprojpath(self):
return os.path.dirname(self.path())
def guid(self):
if not self._guid:
self.dependencies()
return self._guid
# Load the project data from disk.
def get_project_data(self):
if self._project_data is None:
if not os.path.exists(self.path()):
logging.info("Couldn't find the project at this path:")
logging.info(self.path())
return None
project_file = open(self.path(), 'r')
self._project_data = project_file.read()
return self._project_data
# Write the project data to disk.
def set_project_data(self, project_data):
if self._project_data != project_data:
self._project_data = project_data
project_file = open(self.path(), 'w')
project_file.write(self._project_data)
# Get and cache the dependencies for this project.
def dependencies(self):
if self._deps is not None:
return self._deps
project_data = self.get_project_data()
if project_data is None:
return None
result = re.search('([A-Z0-9]+) \/\* '+re.escape(self.target)+' \*\/ = {\n[ \t]+isa = PBXNativeTarget;(?:.|\n)+?buildPhases = \(\n((?:.|\n)+?)\);\n(?:.|\n)+?dependencies = \(\n((?:[ \t]+[A-Z0-9]+ \/\* PBXTargetDependency \*\/,\n)*)[ \t]*\);\n(?:.|\n)+?productReference = ([A-Z0-9]+) \/\* (.+?) \*\/;',
project_data)
if not result:
return None
(self._guid, buildPhases, dependency_set, self._product_guid, self._product_name, ) = result.groups()
dependency_guids = re.findall('[ \t]+([A-Z0-9]+) \/\* PBXTargetDependency \*\/,\n', dependency_set)
match = re.search('([A-Z0-9]+) \/\* Resources \*\/', buildPhases)
if match:
(self._resources_guid, ) = match.groups()
else:
self._resources_guid = None
match = re.search('([A-Z0-9]+) \/\* Frameworks \*\/', buildPhases)
if not match:
logging.error("Couldn't find the Frameworks phase.")
return None
(self._frameworks_guid, ) = match.groups()
if not result:
return None
dependency_names = []
for guid in dependency_guids:
result = re.search(guid+' \/\* PBXTargetDependency \*\/ = \{\n[ \t]+isa = PBXTargetDependency;\n[ \t]*name = (["a-zA-Z0-9\.\-]+);',
project_data)
if result:
(dependency_name, ) = result.groups()
dependency_names.append(dependency_name)
self._deps = dependency_names
return self._deps
# Add a line to the PBXBuildFile section.
#
# <default_guid> /* <name> in Frameworks */ = {isa = PBXBuildFile; fileRef = <file_ref_hash> /* <name> */; };
#
# Returns: <default_guid> if a line was added.
# Otherwise, the existing guid is returned.
def add_buildfile(self, name, file_ref_hash, default_guid):
project_data = self.get_project_data()
match = re.search('\/\* Begin PBXBuildFile section \*\/\n((?:.|\n)+?)\/\* End PBXBuildFile section \*\/', project_data)
if not match:
logging.error("Couldn't find PBXBuildFile section.")
return None
(subtext, ) = match.groups()
buildfile_hash = None
match = re.search('([A-Z0-9]+).+?fileRef = '+re.escape(file_ref_hash), subtext)
if match:
(buildfile_hash, ) = match.groups()
logging.info("This build file already exists: "+buildfile_hash)
if buildfile_hash is None:
match = re.search('\/\* Begin PBXBuildFile section \*\/\n', project_data)
buildfile_hash = default_guid
libfiletext = "\t\t"+buildfile_hash+" /* "+name+" in Frameworks */ = {isa = PBXBuildFile; fileRef = "+file_ref_hash+" /* "+name+" */; };\n"
project_data = project_data[:match.end()] + libfiletext + project_data[match.end():]
self.set_project_data(project_data)
return buildfile_hash
# Add a line to the PBXFileReference section.
#
# <default_guid> /* <name> */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.<file_type>"; name = <name>; path = <rel_path>; sourceTree = <source_tree>; };
#
# Returns: <default_guid> if a line was added.
# Otherwise, the existing guid is returned.
def add_filereference(self, name, file_type, default_guid, rel_path, source_tree):
project_data = self.get_project_data()
fileref_hash = None
match = re.search('([A-Z0-9]+) \/\* '+re.escape(name)+' \*\/ = \{isa = PBXFileReference; lastKnownFileType = "wrapper.'+file_type+'"; name = '+re.escape(name)+'; path = '+re.escape(rel_path)+';', project_data)
if match:
logging.info("This file has already been added.")
(fileref_hash, ) = match.groups()
else:
match = re.search('\/\* Begin PBXFileReference section \*\/\n', project_data)
if not match:
logging.error("Couldn't find the PBXFileReference section.")
return False
fileref_hash = default_guid
pbxfileref = "\t\t"+fileref_hash+" /* "+name+" */ = {isa = PBXFileReference; lastKnownFileType = \"wrapper."+file_type+"\"; name = "+name+"; path = "+rel_path+"; sourceTree = "+source_tree+"; };\n"
project_data = project_data[:match.end()] + pbxfileref + project_data[match.end():]
self.set_project_data(project_data)
return fileref_hash
# Add a file to the given PBXGroup.
#
# <guid> /* <name> */,
def add_file_to_group(self, name, guid, group):
project_data = self.get_project_data()
match = re.search('\/\* '+re.escape(group)+' \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n((?:.|\n)+?)\);', project_data)
if not match:
logging.error("Couldn't find the "+group+" children.")
return False
(children,) = match.groups()
match = re.search(re.escape(guid), children)
if match:
logging.info("This file is already a member of the "+name+" group.")
else:
match = re.search('\/\* '+re.escape(group)+' \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n', project_data)
if not match:
logging.error("Couldn't find the "+group+" group.")
return False
pbxgroup = "\t\t\t\t"+guid+" /* "+name+" */,\n"
project_data = project_data[:match.end()] + pbxgroup + project_data[match.end():]
self.set_project_data(project_data)
return True
# Add a file to the Frameworks PBXGroup.
#
# <guid> /* <name> */,
def add_file_to_frameworks(self, name, guid):
return self.add_file_to_group(name, guid, 'Frameworks')
# Add a file to the Resources PBXGroup.
#
# <guid> /* <name> */,
def add_file_to_resources(self, name, guid):
return self.add_file_to_group(name, guid, 'Resources')
def add_file_to_phase(self, name, guid, phase_guid, phase):
project_data = self.get_project_data()
match = re.search(re.escape(phase_guid)+" \/\* "+re.escape(phase)+" \*\/ = {(?:.|\n)+?files = \(((?:.|\n)+?)\);", project_data)
if not match:
logging.error("Couldn't find the "+phase+" phase.")
return False
(files, ) = match.groups()
match = re.search(re.escape(guid), files)
if match:
logging.info("The file has already been added.")
else:
match = re.search(re.escape(phase_guid)+" \/\* "+phase+" \*\/ = {(?:.|\n)+?files = \(\n", project_data)
if not match:
logging.error("Couldn't find the "+phase+" files")
return False
frameworktext = "\t\t\t\t"+guid+" /* "+name+" in "+phase+" */,\n"
project_data = project_data[:match.end()] + frameworktext + project_data[match.end():]
self.set_project_data(project_data)
return True
def get_rel_path_to_products_dir(self):
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
build_path = os.path.join(os.path.join(os.path.dirname(Paths.src_dir), 'Build'), 'Products')
return relpath(project_path, build_path)
def add_file_to_frameworks_phase(self, name, guid):
return self.add_file_to_phase(name, guid, self._frameworks_guid, 'Frameworks')
def add_file_to_resources_phase(self, name, guid):
if self._resources_guid is None:
logging.error("No resources build phase found in the destination project")
return False
return self.add_file_to_phase(name, guid, self._resources_guid, 'Resources')
def add_header_search_path(self, configuration):
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
build_path = os.path.join(os.path.join(os.path.join(os.path.dirname(Paths.src_dir), 'Build'), 'Products'), 'three20')
rel_path = relpath(project_path, build_path)
return self.add_build_setting(configuration, 'HEADER_SEARCH_PATHS', '"'+rel_path+'"')
def add_build_setting(self, configuration, setting_name, value):
project_data = self.get_project_data()
match = re.search('\/\* '+configuration+' \*\/ = {\n[ \t]+isa = XCBuildConfiguration;\n[ \t]+buildSettings = \{\n((?:.|\n)+?)\};', project_data)
if not match:
print "Couldn't find this configuration."
return False
settings_start = match.start(1)
settings_end = match.end(1)
(build_settings, ) = match.groups()
match = re.search(re.escape(setting_name)+' = ((?:.|\n)+?);', build_settings)
if not match:
# Add a brand new build setting. No checking for existing settings necessary.
settingtext = '\t\t\t\t'+setting_name+' = '+value+';\n'
project_data = project_data[:settings_start] + settingtext + project_data[settings_start:]
else:
# Build settings already exist. Is there one or many?
(search_paths,) = match.groups()
if re.search('\(\n', search_paths):
# Many
match = re.search(re.escape(value), search_paths)
if not match:
# If value has any spaces in it, Xcode will split it up into
# multiple entries.
escaped_value = re.escape(value).replace(' ', '",\n[ \t]+"')
match = re.search(escaped_value, search_paths)
if not match:
match = re.search(re.escape(setting_name)+' = \(\n', build_settings)
build_settings = build_settings[:match.end()] + '\t\t\t\t\t'+value+',\n' + build_settings[match.end():]
project_data = project_data[:settings_start] + build_settings + project_data[settings_end:]
else:
# One
if search_paths != value:
existing_path = search_paths
path_set = '(\n\t\t\t\t\t'+value+',\n\t\t\t\t\t'+existing_path+'\n\t\t\t\t)'
build_settings = build_settings[:match.start(1)] + path_set + build_settings[match.end(1):]
project_data = project_data[:settings_start] + build_settings + project_data[settings_end:]
self.set_project_data(project_data)
return True
def get_hash_base(self, uniquename):
examplehash = '320FFFEEEDDDCCCBBBAAA000'
uniquehash = hashlib.sha224(uniquename).hexdigest().upper()
uniquehash = uniquehash[:len(examplehash) - 4]
return '320'+uniquehash
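    # For example, get_hash_base('Three20:Three20') returns '320' plus the first 20
    # hex digits of the name's SHA-224 digest: a deterministic 23-character stem to
    # which callers append a final digit ('0'-'6') to form 24-character pseudo-GUIDs.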
def add_framework(self, framework):
tthash_base = self.get_hash_base(framework)
fileref_hash = self.add_filereference(framework, 'frameworks', tthash_base+'0', 'System/Library/Frameworks/'+framework, 'SDK_ROOT')
libfile_hash = self.add_buildfile(framework, fileref_hash, tthash_base+'1')
if not self.add_file_to_frameworks(framework, fileref_hash):
return False
if not self.add_file_to_frameworks_phase(framework, libfile_hash):
return False
return True
def add_bundle(self):
tthash_base = self.get_hash_base('Three20.bundle')
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
build_path = os.path.join(Paths.src_dir, 'Three20.bundle')
rel_path = relpath(project_path, build_path)
fileref_hash = self.add_filereference('Three20.bundle', 'plug-in', tthash_base+'0', rel_path, 'SOURCE_ROOT')
libfile_hash = self.add_buildfile('Three20.bundle', fileref_hash, tthash_base+'1')
if not self.add_file_to_resources('Three20.bundle', fileref_hash):
return False
if not self.add_file_to_resources_phase('Three20.bundle', libfile_hash):
return False
return True
def add_dependency(self, dep):
project_data = self.get_project_data()
dep_data = dep.get_project_data()
if project_data is None or dep_data is None:
return False
logging.info("\nAdding "+str(dep)+" to "+str(self))
project_path = os.path.dirname(os.path.abspath(self.xcodeprojpath()))
dep_path = os.path.abspath(dep.xcodeprojpath())
rel_path = relpath(project_path, dep_path)
logging.info("")
logging.info("Project path: "+project_path)
logging.info("Dependency path: "+dep_path)
logging.info("Relative path: "+rel_path)
tthash_base = self.get_hash_base(dep.uniqueid())
###############################################
logging.info("")
logging.info("Step 1: Add file reference to the dependency...")
self.set_project_data(project_data)
pbxfileref_hash = self.add_filereference(dep._project_name+'.xcodeproj', 'pb-project', tthash_base+'0', rel_path, 'SOURCE_ROOT')
project_data = self.get_project_data()
logging.info("Done: Added file reference: "+pbxfileref_hash)
###############################################
logging.info("")
logging.info("Step 2: Add file to Frameworks group...")
self.set_project_data(project_data)
if not self.add_file_to_frameworks(dep._project_name+".xcodeproj", pbxfileref_hash):
return False
project_data = self.get_project_data()
logging.info("Done: Added file to Frameworks group.")
###############################################
logging.info("")
logging.info("Step 3: Add dependencies...")
pbxtargetdependency_hash = None
pbxcontaineritemproxy_hash = None
match = re.search('\/\* Begin PBXTargetDependency section \*\/\n((?:.|\n)+?)\/\* End PBXTargetDependency section \*\/', project_data)
if not match:
logging.info("\tAdding a PBXTargetDependency section...")
match = re.search('\/\* End PBXSourcesBuildPhase section \*\/\n', project_data)
if not match:
logging.error("Couldn't find the PBXSourcesBuildPhase section.")
return False
project_data = project_data[:match.end()] + "\n/* Begin PBXTargetDependency section */\n\n/* End PBXTargetDependency section */\n" + project_data[match.end():]
else:
(subtext, ) = match.groups()
match = re.search('([A-Z0-9]+) \/\* PBXTargetDependency \*\/ = {\n[ \t]+isa = PBXTargetDependency;\n[ \t]+name = '+re.escape(dep._project_name)+';\n[ \t]+targetProxy = ([A-Z0-9]+) \/\* PBXContainerItemProxy \*\/;', project_data)
if match:
(pbxtargetdependency_hash, pbxcontaineritemproxy_hash,) = match.groups()
logging.info("This dependency already exists.")
if pbxtargetdependency_hash is None or pbxcontaineritemproxy_hash is None:
match = re.search('\/\* Begin PBXTargetDependency section \*\/\n', project_data)
pbxtargetdependency_hash = tthash_base+'1'
pbxcontaineritemproxy_hash = tthash_base+'2'
pbxtargetdependency = "\t\t"+pbxtargetdependency_hash+" /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\tname = "+dep._project_name+";\n\t\t\ttargetProxy = "+pbxcontaineritemproxy_hash+" /* PBXContainerItemProxy */;\n\t\t};\n"
project_data = project_data[:match.end()] + pbxtargetdependency + project_data[match.end():]
logging.info("Done: Added dependency.")
###############################################
logging.info("")
logging.info("Step 3.1: Add container proxy for dependencies...")
containerExists = False
match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n((?:.|\n)+?)\/\* End PBXContainerItemProxy section \*\/', project_data)
if not match:
logging.info("\tAdding a PBXContainerItemProxy section...")
match = re.search('\/\* End PBXBuildFile section \*\/\n', project_data)
if not match:
logging.error("Couldn't find the PBXBuildFile section.")
return False
project_data = project_data[:match.end()] + "\n/* Begin PBXContainerItemProxy section */\n\n/* End PBXContainerItemProxy section */\n" + project_data[match.end():]
else:
(subtext, ) = match.groups()
match = re.search(re.escape(pbxcontaineritemproxy_hash), subtext)
if match:
logging.info("This container proxy already exists.")
containerExists = True
if not containerExists:
match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n', project_data)
pbxcontaineritemproxy = "\t\t"+pbxcontaineritemproxy_hash+" /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = "+pbxfileref_hash+" /* "+dep._project_name+".xcodeproj */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = "+dep.guid()+";\n\t\t\tremoteInfo = "+dep._project_name+";\n\t\t};\n"
project_data = project_data[:match.end()] + pbxcontaineritemproxy + project_data[match.end():]
logging.info("Done: Added container proxy.")
###############################################
logging.info("")
logging.info("Step 3.2: Add module to the dependency list...")
match = re.search(self.guid()+' \/\* .+? \*\/ = {\n[ \t]+(?:.|\n)+?[ \t]+dependencies = \(\n((?:.|\n)+?)\);', project_data)
dependency_exists = False
if not match:
logging.error("Couldn't find the dependency list.")
return False
else:
(dependencylist, ) = match.groups()
match = re.search(re.escape(pbxtargetdependency_hash), dependencylist)
if match:
logging.info("This dependency has already been added.")
dependency_exists = True
if not dependency_exists:
match = re.search(self.guid()+' \/\* .+? \*\/ = {\n[ \t]+(?:.|\n)+?[ \t]+dependencies = \(\n', project_data)
if not match:
logging.error("Couldn't find the dependency list.")
return False
dependency_item = '\t\t\t\t'+pbxtargetdependency_hash+' /* PBXTargetDependency */,\n'
project_data = project_data[:match.end()] + dependency_item + project_data[match.end():]
logging.info("Done: Added module to the dependency list.")
###############################################
logging.info("")
logging.info("Step 4: Create project references...")
match = re.search('\/\* Begin PBXProject section \*\/\n((?:.|\n)+?)\/\* End PBXProject section \*\/', project_data)
if not match:
logging.error("Couldn't find the project section.")
return False
project_start = match.start(1)
project_end = match.end(1)
(project_section, ) = match.groups()
reference_exists = False
did_change = False
productgroup_hash = None
match = re.search('projectReferences = \(\n((?:.|\n)+?)\n[ \t]+\);', project_section)
if not match:
logging.info("Creating project references...")
match = re.search('projectDirPath = ".*?";\n', project_section)
if not match:
logging.error("Couldn't find project references anchor.")
return False
did_change = True
project_section = project_section[:match.end()] + '\t\t\tprojectReferences = (\n\t\t\t);\n' + project_section[match.end():]
else:
(refs, ) = match.groups()
match = re.search('\{\n[ \t]+ProductGroup = ([A-Z0-9]+) \/\* Products \*\/;\n[ \t]+ProjectRef = '+re.escape(pbxfileref_hash), refs)
if match:
(productgroup_hash, ) = match.groups()
logging.info("This product group already exists: "+productgroup_hash)
reference_exists = True
if not reference_exists:
match = re.search('projectReferences = \(\n', project_section)
if not match:
logging.error("Missing the project references item.")
return False
productgroup_hash = tthash_base+'3'
reference_text = '\t\t\t\t{\n\t\t\t\t\tProductGroup = '+productgroup_hash+' /* Products */;\n\t\t\t\t\tProjectRef = '+pbxfileref_hash+' /* '+dep._project_name+'.xcodeproj */;\n\t\t\t\t},\n'
project_section = project_section[:match.end()] + reference_text + project_section[match.end():]
did_change = True
if did_change:
project_data = project_data[:project_start] + project_section + project_data[project_end:]
logging.info("Done: Created project reference.")
###############################################
logging.info("")
logging.info("Step 4.1: Create product group...")
match = re.search('\/\* Begin PBXGroup section \*\/\n', project_data)
if not match:
logging.error("Couldn't find the group section.")
return False
group_start = match.end()
lib_hash = None
match = re.search(re.escape(productgroup_hash)+" \/\* Products \*\/ = \{\n[ \t]+isa = PBXGroup;\n[ \t]+children = \(\n((?:.|\n)+?)\);", project_data)
if match:
logging.info("This product group already exists.")
(children, ) = match.groups()
match = re.search('([A-Z0-9]+) \/\* '+re.escape(dep._product_name)+' \*\/', children)
if not match:
logging.error("No product found")
return False
# TODO: Add this product.
else:
(lib_hash, ) = match.groups()
else:
lib_hash = tthash_base+'4'
productgrouptext = "\t\t"+productgroup_hash+" /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t"+lib_hash+" /* "+dep._product_name+" */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n"
project_data = project_data[:group_start] + productgrouptext + project_data[group_start:]
logging.info("Done: Created product group: "+lib_hash)
###############################################
logging.info("")
logging.info("Step 4.2: Add container proxy for target product...")
containerExists = False
targetproduct_hash = tthash_base+'6'
match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n((?:.|\n)+?)\/\* End PBXContainerItemProxy section \*\/', project_data)
if not match:
logging.info("\tAdding a PBXContainerItemProxy section...")
match = re.search('\/\* End PBXBuildFile section \*\/\n', project_data)
if not match:
logging.error("Couldn't find the PBXBuildFile section.")
return False
project_data = project_data[:match.end()] + "\n/* Begin PBXContainerItemProxy section */\n\n/* End PBXContainerItemProxy section */\n" + project_data[match.end():]
else:
(subtext, ) = match.groups()
match = re.search(re.escape(targetproduct_hash), subtext)
if match:
logging.info("This container proxy already exists.")
containerExists = True
if not containerExists:
match = re.search('\/\* Begin PBXContainerItemProxy section \*\/\n', project_data)
pbxcontaineritemproxy = "\t\t"+targetproduct_hash+" /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = "+pbxfileref_hash+" /* "+dep._project_name+".xcodeproj */;\n\t\t\tproxyType = 2;\n\t\t\tremoteGlobalIDString = "+dep._product_guid+";\n\t\t\tremoteInfo = "+dep._project_name+";\n\t\t};\n"
project_data = project_data[:match.end()] + pbxcontaineritemproxy + project_data[match.end():]
logging.info("Done: Added target container proxy.")
###############################################
logging.info("")
logging.info("Step 4.3: Create reference proxy...")
referenceExists = False
match = re.search('\/\* Begin PBXReferenceProxy section \*\/\n((?:.|\n)+?)\/\* End PBXReferenceProxy section \*\/', project_data)
if not match:
logging.info("\tAdding a PBXReferenceProxy section...")
match = re.search('\/\* End PBXProject section \*\/\n', project_data)
if not match:
logging.error("Couldn't find the PBXProject section.")
return False
project_data = project_data[:match.end()] + "\n/* Begin PBXReferenceProxy section */\n\n/* End PBXReferenceProxy section */\n" + project_data[match.end():]
else:
(subtext, ) = match.groups()
match = re.search(re.escape(lib_hash), subtext)
if match:
logging.info("This reference proxy already exists.")
referenceExists = True
if not referenceExists:
match = re.search('\/\* Begin PBXReferenceProxy section \*\/\n', project_data)
referenceproxytext = "\t\t"+lib_hash+" /* "+dep._product_name+" */ = {\n\t\t\tisa = PBXReferenceProxy;\n\t\t\tfileType = archive.ar;\n\t\t\tpath = \""+dep._product_name+"\";\n\t\t\tremoteRef = "+targetproduct_hash+" /* PBXContainerItemProxy */;\n\t\t\tsourceTree = BUILT_PRODUCTS_DIR;\n\t\t};\n"
project_data = project_data[:match.end()] + referenceproxytext + project_data[match.end():]
logging.info("Done: Created reference proxy.")
###############################################
logging.info("")
logging.info("Step 5: Add target file...")
self.set_project_data(project_data)
libfile_hash = self.add_buildfile(dep._product_name, lib_hash, tthash_base+'5')
project_data = self.get_project_data()
logging.info("Done: Added target file.")
###############################################
logging.info("")
logging.info("Step 6: Add frameworks...")
self.set_project_data(project_data)
self.add_file_to_frameworks_phase(dep._product_name, libfile_hash)
project_data = self.get_project_data()
logging.info("Done: Adding module.")
self.set_project_data(project_data)
return True
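# Illustrative usage (sketch; the application project path below is hypothetical):
#
#   dep = Pbxproj.get_pbxproj_by_name('Three20')
#   app = Pbxproj.get_pbxproj_by_name('/path/to/MyApp.xcodeproj/project.pbxproj')
#   if app.add_dependency(dep):
#       app.add_framework('QuartzCore.framework')
#       app.add_bundle()
#       app.add_header_search_path('Debug')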
|
|
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.core.exceptions import ValidationError, PermissionDenied
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import permission_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin import tasks, hooks
from wagtail.wagtailcore.models import Page, PageRevision, get_page_types
@permission_required('wagtailadmin.access_admin')
def index(request, parent_page_id=None):
if parent_page_id:
parent_page = get_object_or_404(Page, id=parent_page_id)
else:
parent_page = Page.get_first_root_node()
pages = parent_page.get_children().prefetch_related('content_type')
# Get page ordering
if 'ordering' in request.GET:
ordering = request.GET['ordering']
if ordering in ['title', '-title', 'content_type', '-content_type', 'live', '-live']:
pages = pages.order_by(ordering)
else:
ordering = 'title'
# Pagination
if ordering != 'ord':
p = request.GET.get('p', 1)
paginator = Paginator(pages, 50)
try:
pages = paginator.page(p)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
return render(request, 'wagtailadmin/pages/index.html', {
'parent_page': parent_page,
'ordering': ordering,
'pages': pages,
})
@permission_required('wagtailadmin.access_admin')
def add_subpage(request, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
if not parent_page.permissions_for_user(request.user).can_add_subpage():
raise PermissionDenied
page_types = sorted([ContentType.objects.get_for_model(model_class) for model_class in parent_page.clean_subpage_types()], key=lambda pagetype: pagetype.name.lower())
all_page_types = sorted(get_page_types(), key=lambda pagetype: pagetype.name.lower())
return render(request, 'wagtailadmin/pages/add_subpage.html', {
'parent_page': parent_page,
'page_types': page_types,
'all_page_types': all_page_types,
})
@permission_required('wagtailadmin.access_admin')
def content_type_use(request, content_type_app_name, content_type_model_name):
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
p = request.GET.get("p", 1)
page_class = content_type.model_class()
# page_class must be a Page type and not some other random model
if not issubclass(page_class, Page):
raise Http404
pages = page_class.objects.all()
paginator = Paginator(pages, 10)
try:
pages = paginator.page(p)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
return render(request, 'wagtailadmin/pages/content_type_use.html', {
'pages': pages,
'app_name': content_type_app_name,
'content_type': content_type,
'page_class': page_class,
})
@permission_required('wagtailadmin.access_admin')
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
parent_page_perms = parent_page.permissions_for_user(request.user)
if not parent_page_perms.can_add_subpage():
raise PermissionDenied
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
page_class = content_type.model_class()
# page must be in the list of allowed subpage types for this parent ID
# == Restriction temporarily relaxed so that as superusers we can add index pages and things -
# == TODO: reinstate this for regular editors when we have distinct user types
#
# if page_class not in parent_page.clean_subpage_types():
# messages.error(request, "Sorry, you do not have access to create a page of type '%s' here." % content_type.name)
# return redirect('wagtailadmin_pages_select_type')
page = page_class(owner=request.user)
edit_handler_class = get_page_edit_handler(page_class)
form_class = edit_handler_class.get_form_class(page_class)
if request.POST:
form = form_class(request.POST, request.FILES, instance=page)
# Stick an extra validator into the form to make sure that the slug is not already in use
def clean_slug(slug):
# Make sure the slug isn't already in use
if parent_page.get_children().filter(slug=slug).count() > 0:
raise ValidationError(_("This slug is already in use"))
return slug
form.fields['slug'].clean = clean_slug
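        # Overriding the bound field's clean() here means a duplicate slug is reported
        # as a validation error on the 'slug' field rather than as a non-field error.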
if form.is_valid():
page = form.save(commit=False) # don't save yet, as we need treebeard to assign tree params
is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
is_submitting = bool(request.POST.get('action-submit'))
if is_publishing:
page.live = True
page.has_unpublished_changes = False
else:
page.live = False
page.has_unpublished_changes = True
parent_page.add_child(instance=page) # assign tree parameters - will cause page to be saved
page.save_revision(user=request.user, submitted_for_moderation=is_submitting)
if is_publishing:
messages.success(request, _("Page '{0}' published.").format(page.title))
elif is_submitting:
messages.success(request, _("Page '{0}' submitted for moderation.").format(page.title))
tasks.send_notification.delay(page.get_latest_revision().id, 'submitted', request.user.id)
else:
messages.success(request, _("Page '{0}' created.").format(page.title))
for fn in hooks.get_hooks('after_create_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailadmin_explore', page.get_parent().id)
else:
messages.error(request, _("The page could not be created due to errors."))
edit_handler = edit_handler_class(instance=page, form=form)
else:
form = form_class(instance=page)
edit_handler = edit_handler_class(instance=page, form=form)
return render(request, 'wagtailadmin/pages/create.html', {
'content_type': content_type,
'page_class': page_class,
'parent_page': parent_page,
'edit_handler': edit_handler,
'display_modes': page.get_page_modes(),
})
@permission_required('wagtailadmin.access_admin')
def edit(request, page_id):
latest_revision = get_object_or_404(Page, id=page_id).get_latest_revision()
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
parent = page.get_parent()
page_perms = page.permissions_for_user(request.user)
if not page_perms.can_edit():
raise PermissionDenied
edit_handler_class = get_page_edit_handler(page.__class__)
form_class = edit_handler_class.get_form_class(page.__class__)
errors_debug = None
if request.POST:
form = form_class(request.POST, request.FILES, instance=page)
# Stick an extra validator into the form to make sure that the slug is not already in use
def clean_slug(slug):
# Make sure the slug isn't already in use
if parent.get_children().filter(slug=slug).exclude(id=page_id).count() > 0:
raise ValidationError(_("This slug is already in use"))
return slug
form.fields['slug'].clean = clean_slug
if form.is_valid():
is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish()
is_submitting = bool(request.POST.get('action-submit'))
if is_publishing:
page.live = True
page.has_unpublished_changes = False
form.save()
page.revisions.update(submitted_for_moderation=False)
else:
# not publishing the page
if page.live:
# To avoid overwriting the live version, we only save the page
# to the revisions table
form.save(commit=False)
Page.objects.filter(id=page.id).update(has_unpublished_changes=True)
else:
page.has_unpublished_changes = True
form.save()
page.save_revision(user=request.user, submitted_for_moderation=is_submitting)
if is_publishing:
messages.success(request, _("Page '{0}' published.").format(page.title))
elif is_submitting:
messages.success(request, _("Page '{0}' submitted for moderation.").format(page.title))
tasks.send_notification.delay(page.get_latest_revision().id, 'submitted', request.user.id)
else:
messages.success(request, _("Page '{0}' updated.").format(page.title))
for fn in hooks.get_hooks('after_edit_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailadmin_explore', page.get_parent().id)
else:
messages.error(request, _("The page could not be saved due to validation errors"))
edit_handler = edit_handler_class(instance=page, form=form)
errors_debug = (
repr(edit_handler.form.errors)
+ repr([(name, formset.errors) for (name, formset) in edit_handler.form.formsets.iteritems() if formset.errors])
)
else:
form = form_class(instance=page)
edit_handler = edit_handler_class(instance=page, form=form)
# Check for revisions still undergoing moderation and warn
if latest_revision and latest_revision.submitted_for_moderation:
messages.warning(request, _("This page is currently awaiting moderation"))
return render(request, 'wagtailadmin/pages/edit.html', {
'page': page,
'edit_handler': edit_handler,
'errors_debug': errors_debug,
'display_modes': page.get_page_modes(),
})
@permission_required('wagtailadmin.access_admin')
def delete(request, page_id):
page = get_object_or_404(Page, id=page_id)
if not page.permissions_for_user(request.user).can_delete():
raise PermissionDenied
if request.POST:
parent_id = page.get_parent().id
page.delete()
messages.success(request, _("Page '{0}' deleted.").format(page.title))
for fn in hooks.get_hooks('after_delete_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailadmin_explore', parent_id)
return render(request, 'wagtailadmin/pages/confirm_delete.html', {
'page': page,
'descendant_count': page.get_descendant_count()
})
@permission_required('wagtailadmin.access_admin')
def view_draft(request, page_id):
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
return page.serve(request)
@permission_required('wagtailadmin.access_admin')
def preview_on_edit(request, page_id):
# Receive the form submission that would typically be posted to the 'edit' view. If submission is valid,
# return the rendered page; if not, re-render the edit form
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
edit_handler_class = get_page_edit_handler(page.__class__)
form_class = edit_handler_class.get_form_class(page.__class__)
form = form_class(request.POST, request.FILES, instance=page)
if form.is_valid():
form.save(commit=False)
# This view will generally be invoked as an AJAX request; as such, in the case of
# an error Django will return a plaintext response. This isn't what we want, since
# we will be writing the response back to an HTML page regardless of success or
# failure - as such, we strip out the X-Requested-With header to get Django to return
# an HTML error response
request.META.pop('HTTP_X_REQUESTED_WITH', None)
try:
display_mode = request.GET['mode']
except KeyError:
display_mode = page.get_page_modes()[0][0]
response = page.show_as_mode(display_mode)
response['X-Wagtail-Preview'] = 'ok'
return response
else:
edit_handler = edit_handler_class(instance=page, form=form)
response = render(request, 'wagtailadmin/pages/edit.html', {
'page': page,
'edit_handler': edit_handler,
'display_modes': page.get_page_modes(),
})
response['X-Wagtail-Preview'] = 'error'
return response
@permission_required('wagtailadmin.access_admin')
def preview_on_create(request, content_type_app_name, content_type_model_name, parent_page_id):
# Receive the form submission that would typically be posted to the 'create' view. If submission is valid,
# return the rendered page; if not, re-render the edit form
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
page_class = content_type.model_class()
page = page_class()
edit_handler_class = get_page_edit_handler(page_class)
form_class = edit_handler_class.get_form_class(page_class)
form = form_class(request.POST, request.FILES, instance=page)
if form.is_valid():
form.save(commit=False)
# ensure that our unsaved page instance has a suitable url set
parent_page = get_object_or_404(Page, id=parent_page_id).specific
page.set_url_path(parent_page)
# This view will generally be invoked as an AJAX request; as such, in the case of
# an error Django will return a plaintext response. This isn't what we want, since
# we will be writing the response back to an HTML page regardless of success or
# failure - as such, we strip out the X-Requested-With header to get Django to return
# an HTML error response
request.META.pop('HTTP_X_REQUESTED_WITH', None)
try:
display_mode = request.GET['mode']
except KeyError:
display_mode = page.get_page_modes()[0][0]
response = page.show_as_mode(display_mode)
response['X-Wagtail-Preview'] = 'ok'
return response
else:
edit_handler = edit_handler_class(instance=page, form=form)
parent_page = get_object_or_404(Page, id=parent_page_id).specific
response = render(request, 'wagtailadmin/pages/create.html', {
'content_type': content_type,
'page_class': page_class,
'parent_page': parent_page,
'edit_handler': edit_handler,
'display_modes': page.get_page_modes(),
})
response['X-Wagtail-Preview'] = 'error'
return response
def preview_placeholder(request):
"""
The HTML of a previewed page is written to the destination browser window using document.write.
This overwrites any previous content in the window, while keeping its URL intact. This in turn
means that any content we insert that happens to trigger an HTTP request, such as an image or
stylesheet tag, will report that original URL as its referrer.
In Webkit browsers, a new window opened with window.open('', 'window_name') will have a location
of 'about:blank', causing it to omit the Referer header on those HTTP requests. This means that
any third-party font services that use the Referer header for access control will refuse to
serve us.
So, instead, we need to open the window on some arbitrary URL on our domain. (Provided that's
also the same domain as our editor JS code, the browser security model will happily allow us to
document.write over the page in question.)
This, my friends, is that arbitrary URL.
Since we're going to this trouble, we'll also take the opportunity to display a spinner on the
placeholder page, providing some much-needed visual feedback.
"""
return render(request, 'wagtailadmin/pages/preview_placeholder.html')
@permission_required('wagtailadmin.access_admin')
def unpublish(request, page_id):
page = get_object_or_404(Page, id=page_id)
if not page.permissions_for_user(request.user).can_unpublish():
raise PermissionDenied
if request.POST:
parent_id = page.get_parent().id
page.live = False
page.save()
messages.success(request, _("Page '{0}' unpublished.").format(page.title))
return redirect('wagtailadmin_explore', parent_id)
return render(request, 'wagtailadmin/pages/confirm_unpublish.html', {
'page': page,
})
@permission_required('wagtailadmin.access_admin')
def move_choose_destination(request, page_to_move_id, viewed_page_id=None):
page_to_move = get_object_or_404(Page, id=page_to_move_id)
page_perms = page_to_move.permissions_for_user(request.user)
if not page_perms.can_move():
raise PermissionDenied
if viewed_page_id:
viewed_page = get_object_or_404(Page, id=viewed_page_id)
else:
viewed_page = Page.get_first_root_node()
viewed_page.can_choose = page_perms.can_move_to(viewed_page)
child_pages = []
for target in viewed_page.get_children():
# can't move the page into itself or its descendants
target.can_choose = page_perms.can_move_to(target)
target.can_descend = not(target == page_to_move or target.is_child_of(page_to_move)) and target.get_children_count()
child_pages.append(target)
return render(request, 'wagtailadmin/pages/move_choose_destination.html', {
'page_to_move': page_to_move,
'viewed_page': viewed_page,
'child_pages': child_pages,
})
@permission_required('wagtailadmin.access_admin')
def move_confirm(request, page_to_move_id, destination_id):
page_to_move = get_object_or_404(Page, id=page_to_move_id)
destination = get_object_or_404(Page, id=destination_id)
if not page_to_move.permissions_for_user(request.user).can_move_to(destination):
raise PermissionDenied
if request.POST:
# any invalid moves *should* be caught by the permission check above,
# so don't bother to catch InvalidMoveToDescendant
page_to_move.move(destination, pos='last-child')
messages.success(request, _("Page '{0}' moved.").format(page_to_move.title))
return redirect('wagtailadmin_explore', destination.id)
return render(request, 'wagtailadmin/pages/confirm_move.html', {
'page_to_move': page_to_move,
'destination': destination,
})
@permission_required('wagtailadmin.access_admin')
def set_page_position(request, page_to_move_id):
page_to_move = get_object_or_404(Page, id=page_to_move_id)
parent_page = page_to_move.get_parent()
if not parent_page.permissions_for_user(request.user).can_reorder_children():
raise PermissionDenied
if request.POST:
# Get position parameter
position = request.GET.get('position', None)
        # Find the page that's already in this position
position_page = None
if position is not None:
try:
position_page = parent_page.get_children()[int(position)]
except IndexError:
pass # No page in this position
# Move page
# any invalid moves *should* be caught by the permission check above,
# so don't bother to catch InvalidMoveToDescendant
if position_page:
# Move page into this position
page_to_move.move(position_page, pos='left')
else:
# Move page to end
page_to_move.move(parent_page, pos='last-child')
return HttpResponse('')
PAGE_EDIT_HANDLERS = {}
def get_page_edit_handler(page_class):
if page_class not in PAGE_EDIT_HANDLERS:
PAGE_EDIT_HANDLERS[page_class] = TabbedInterface([
ObjectList(page_class.content_panels, heading='Content'),
ObjectList(page_class.promote_panels, heading='Promote')
])
return PAGE_EDIT_HANDLERS[page_class]
@permission_required('wagtailadmin.access_admin')
@vary_on_headers('X-Requested-With')
def search(request):
pages = []
q = None
is_searching = False
if 'q' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
q = form.cleaned_data['q']
# page number
p = request.GET.get("p", 1)
is_searching = True
pages = Page.search(q, show_unpublished=True, search_title_only=True, prefetch_related=['content_type'])
# Pagination
paginator = Paginator(pages, 20)
try:
pages = paginator.page(p)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
else:
form = SearchForm()
if request.is_ajax():
return render(request, "wagtailadmin/pages/search_results.html", {
'pages': pages,
'is_searching': is_searching,
'query_string': q,
})
else:
return render(request, "wagtailadmin/pages/search.html", {
'search_form': form,
'pages': pages,
'is_searching': is_searching,
'query_string': q,
})
@permission_required('wagtailadmin.access_admin')
def approve_moderation(request, revision_id):
revision = get_object_or_404(PageRevision, id=revision_id)
if not revision.page.permissions_for_user(request.user).can_publish():
raise PermissionDenied
if not revision.submitted_for_moderation:
messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.title))
return redirect('wagtailadmin_home')
if request.POST:
revision.publish()
messages.success(request, _("Page '{0}' published.").format(revision.page.title))
tasks.send_notification.delay(revision.id, 'approved', request.user.id)
return redirect('wagtailadmin_home')
@permission_required('wagtailadmin.access_admin')
def reject_moderation(request, revision_id):
revision = get_object_or_404(PageRevision, id=revision_id)
if not revision.page.permissions_for_user(request.user).can_publish():
raise PermissionDenied
if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.title))
return redirect('wagtailadmin_home')
if request.POST:
revision.submitted_for_moderation = False
revision.save(update_fields=['submitted_for_moderation'])
messages.success(request, _("Page '{0}' rejected for publication.").format(revision.page.title))
tasks.send_notification.delay(revision.id, 'rejected', request.user.id)
return redirect('wagtailadmin_home')
@permission_required('wagtailadmin.access_admin')
def preview_for_moderation(request, revision_id):
revision = get_object_or_404(PageRevision, id=revision_id)
if not revision.page.permissions_for_user(request.user).can_publish():
raise PermissionDenied
if not revision.submitted_for_moderation:
messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.title))
return redirect('wagtailadmin_home')
page = revision.as_page_object()
request.revision_id = revision_id
return page.serve(request)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
from time import time
from typing import Any, Callable
from supvisors.strategy import conciliate_conflicts
from supvisors.ttypes import AddressStates, SupvisorsStates, Payload
from supvisors.utils import supvisors_shortcuts
class AbstractState(object):
""" Base class for a state with simple entry / next / exit actions.
Attributes are:
- supvisors: the reference to the global Supvisors structure,
- *shortcuts*: the references to Supvisors attributes used here,
- address_name: the name of the local node.
"""
def __init__(self, supvisors: Any) -> None:
""" Initialization of the attributes.
:param supvisors: the global Supvisors structure
"""
self.supvisors = supvisors
supvisors_shortcuts(self, ['context', 'failure_handler', 'logger', 'options',
'starter', 'stopper'])
self.address_name = supvisors.address_mapper.local_address
def enter(self) -> None:
""" Actions performed when entering the state.
May be redefined in subclasses.
:return: None
"""
def next(self) -> None:
""" Actions performed upon reception of an event.
May be redefined in subclasses.
:return: None
"""
def exit(self) -> None:
""" Actions performed when leaving the state.
May be redefined in subclasses.
:return: None
"""
def apply_addresses_func(self, func: Callable[[str], None]) -> None:
""" Perform the action func on all addresses.
        The action on the local address is performed last.
:param func: the function callable using a node name as parameter
:return: None
"""
# send func request to all locals (but self address)
for status in self.context.addresses.values():
if status.address_name != self.address_name:
if status.state == AddressStates.RUNNING:
func(status.address_name)
self.logger.warn('supervisord {} on {}'.format(func.__name__, status.address_name))
else:
self.logger.info('cannot {} supervisord on {}: Remote state is {}'
.format(func.__name__, status.address_name, status.state_string()))
# send request to self supervisord
func(self.address_name)
class InitializationState(AbstractState):
""" In the INITIALIZATION state, Supvisors synchronizes to all known instances.
Attributes are:
- start_date: the date when entering this state.
"""
def __init__(self, supvisors: Any):
""" Initialization of the attributes.
:param supvisors: the global Supvisors structure
"""
AbstractState.__init__(self, supvisors)
self.start_date = 0
def enter(self):
""" When entering in the INITIALIZATION state, reset the status of addresses. """
self.context.master_address = ''
self.start_date = int(time())
# clear any existing job
self.failure_handler.clear_jobs()
# re-init addresses that are not isolated
for status in self.context.addresses.values():
if not status.in_isolation():
# do NOT use state setter as transition may be rejected
status._state = AddressStates.UNKNOWN
def next(self) -> int:
""" Wait for nodes to publish until:
- all are active,
- or all defined in the optional *force_synchro_if* option are active,
- or timeout is reached.
:return: the new Supvisors state
"""
# cannot get out of this state without local supervisor RUNNING
addresses = self.context.running_addresses()
if self.address_name in addresses:
# synchro done if the state of all addresses is known
if len(self.context.unknown_addresses()) == 0:
self.logger.info('InitializationState.next: all nodes are RUNNING')
return SupvisorsStates.DEPLOYMENT
# synchro done if the state of all forced addresses is known
if self.context.forced_addresses and len(self.context.unknown_forced_addresses()) == 0:
self.logger.info('InitializationState.next: all forced nodes are RUNNING')
return SupvisorsStates.DEPLOYMENT
# if synchro timeout reached, stop synchro and work with known addresses
if (time() - self.start_date) > self.supvisors.options.synchro_timeout:
self.logger.warn('InitializationState.next: synchro timed out')
return SupvisorsStates.DEPLOYMENT
self.logger.debug('InitializationState.next: still waiting for remote Supvisors instances to publish')
else:
self.logger.debug('InitializationState.next: local node {} still not RUNNING'.format(self.address_name))
return SupvisorsStates.INITIALIZATION
def exit(self):
""" When leaving the INITIALIZATION state, the working addresses are defined.
One of them is elected as the MASTER. """
# force state of missing Supvisors instances
self.context.end_synchro()
        # arbitrary choice: the master address is the 'lowest' address among the running addresses
addresses = self.context.running_addresses()
self.logger.info('InitializationState.exit: working with nodes {}'.format(addresses))
# elect master node among working addresses only if not fixed before
if not self.context.master_address:
self.context.master_address = min(addresses)
class DeploymentState(AbstractState):
""" In the DEPLOYMENT state, Supvisors starts automatically the applications having a starting model. """
def enter(self):
""" When entering in the DEPLOYMENT state, define the start and stop sequences.
Only the MASTER can perform the automatic start and stop. """
for application in self.context.applications.values():
application.update_sequences()
application.update_status()
# only the Supvisors master starts applications
if self.context.master:
self.starter.start_applications()
def next(self):
""" Wait for applications to be started. """
if not self.context.master or self.starter.check_starting():
return SupvisorsStates.CONCILIATION if self.context.conflicting() else SupvisorsStates.OPERATION
return SupvisorsStates.DEPLOYMENT
class OperationState(AbstractState):
""" In the OPERATION state, Supvisors is waiting for requests. """
def next(self):
""" Check that all addresses are still active.
        Check for possible conflicts due to multiple running instances of the same program. """
        # check for any jobs still in progress
if self.starter.check_starting() and self.stopper.check_stopping():
# check if master and local are still RUNNING
if self.context.addresses[self.address_name].state != AddressStates.RUNNING:
return SupvisorsStates.INITIALIZATION
if self.context.addresses[self.context.master_address].state != AddressStates.RUNNING:
return SupvisorsStates.INITIALIZATION
# check duplicated processes
if self.context.conflicting():
return SupvisorsStates.CONCILIATION
return SupvisorsStates.OPERATION
class ConciliationState(AbstractState):
""" In the CONCILIATION state, Supvisors conciliates the conflicts. """
def enter(self):
""" When entering in the CONCILIATION state, conciliate automatically the conflicts.
Only the MASTER can conciliate conflicts. """
if self.context.master:
conciliate_conflicts(self.supvisors,
self.supvisors.options.conciliation_strategy,
self.context.conflicts())
def next(self):
""" Check that all addresses are still active.
Wait for all conflicts to be conciliated. """
        # check for any jobs still in progress
if self.starter.check_starting() and self.stopper.check_stopping():
# check if local is still RUNNING
local_status = self.context.addresses[self.address_name]
if local_status.state != AddressStates.RUNNING:
return SupvisorsStates.INITIALIZATION
# check if master is still RUNNING
master_status = self.context.addresses[self.context.master_address]
if master_status.state != AddressStates.RUNNING:
return SupvisorsStates.INITIALIZATION
# back to OPERATION when there is no conflict anymore
if not self.context.conflicting():
return SupvisorsStates.OPERATION
# new conflicts may happen while conciliation is in progress
# call enter again to trigger a new conciliation
self.enter()
return SupvisorsStates.CONCILIATION
class RestartingState(AbstractState):
""" In the RESTARTING state, Supvisors stops all applications before triggering a full restart. """
def enter(self):
""" When entering in the RESTARTING state, stop all applications. """
self.failure_handler.clear_jobs()
self.starter.abort()
self.stopper.stop_applications()
def next(self):
""" Wait for all processes to be stopped. """
        # check for any jobs still in progress
if self.stopper.check_stopping():
return SupvisorsStates.SHUTDOWN
return SupvisorsStates.RESTARTING
def exit(self):
""" When leaving the RESTARTING state, request the full restart. """
self.apply_addresses_func(self.supvisors.zmq.pusher.send_restart)
class ShuttingDownState(AbstractState):
""" In the SHUTTING_DOWN state, Supvisors stops all applications before
triggering a full shutdown. """
def enter(self):
""" When entering in the SHUTTING_DOWN state, stop all applications. """
self.failure_handler.clear_jobs()
self.starter.abort()
self.stopper.stop_applications()
def next(self):
""" Wait for all processes to be stopped. """
        # check for any jobs still in progress
if self.stopper.check_stopping():
return SupvisorsStates.SHUTDOWN
return SupvisorsStates.SHUTTING_DOWN
def exit(self):
""" When leaving the SHUTTING_DOWN state, request the full shutdown. """
self.apply_addresses_func(self.supvisors.zmq.pusher.send_shutdown)
class ShutdownState(AbstractState):
""" This is the final state. """
class FiniteStateMachine:
""" This class implements a very simple behaviour of FiniteStateMachine based on a single event.
A state is able to evaluate itself for transitions. """
def __init__(self, supvisors):
""" Reset the state machine and the associated context """
self.supvisors = supvisors
supvisors_shortcuts(self, ['context', 'failure_handler', 'starter', 'stopper', 'logger'])
self.state = SupvisorsStates.INITIALIZATION
self.instance = None
# Trigger first state / INITIALIZATION
self.update_instance(SupvisorsStates.INITIALIZATION)
def state_string(self):
""" Return the supvisors state as a string. """
return SupvisorsStates.to_string(self.state)
def next(self):
""" Send the event to the state and transitions if possible.
The state machine re-sends the event as long as it transitions. """
self.set_state(self.instance.next())
def set_state(self, next_state):
""" Send the event to the state and transitions if possible.
The state machine re-sends the event as long as it transitions. """
while next_state != self.state and next_state in self.__Transitions[self.state]:
self.instance.exit()
self.update_instance(next_state)
next_state = self.instance.next()
def update_instance(self, state):
""" Change the current state.
The method also triggers the publication of the change. """
self.state = state
self.logger.info('FiniteStateMachine.update_instance: Supvisors in {}'.format(self.state_string()))
self.instance = self.__StateInstances[state](self.supvisors)
self.instance.enter()
# publish SupvisorsStatus event
# the zmq does not exist yet for the first occurrence here
if self.supvisors.zmq:
self.supvisors.zmq.publisher.send_supvisors_status(self.serial())
def on_timer_event(self):
""" Periodic task used to check if remote Supvisors instances are still active.
This is also the main event on this state machine. """
self.context.on_timer_event()
self.next()
# fix failures if any (can happen after a node invalidation, a process crash or a conciliation request)
self.failure_handler.trigger_jobs()
        # check for newly isolating remotes and return the list of newly isolated addresses
# TODO: create an internal event to confirm that socket has been disconnected ?
return self.context.handle_isolation()
def on_tick_event(self, address, when):
""" This event is used to refresh the data related to the address. """
self.context.on_tick_event(address, when)
# could call the same behaviour as on_timer_event if necessary
def on_process_event(self, address, event):
""" This event is used to refresh the process data related to the event and address.
This event also triggers the application starter and/or stopper. """
process = self.context.on_process_event(address, event)
if process:
# check if event is related to a starting or stopping application
starting = self.starter.has_application(process.application_name)
stopping = self.stopper.has_application(process.application_name)
# feed starter with event
self.starter.on_event(process)
# feed stopper with event
self.stopper.on_event(process)
# only the master is allowed to trigger an automatic behaviour for a running failure
if self.context.master and process.crashed() and not (starting or stopping):
self.failure_handler.add_default_job(process)
self.failure_handler.trigger_jobs()
def on_process_info(self, address_name: str, info) -> None:
""" This event is used to fill the internal structures with processes available on node. """
self.context.load_processes(address_name, info)
def on_authorization(self, address_name: str, authorized: bool, master_address: str) -> None:
""" This event is used to finalize the port-knocking between Supvisors instances.
        When a new node that has not been part of Supvisors joins the group, go back to INITIALIZATION
        for a new Master election and a possible deployment.
:param address_name: the node name from which the event comes
:param authorized: the authorization status as seen by the remote node
:param master_address: the master node perceived by the remote node
:return: None
"""
self.logger.info('FiniteStateMachine.on_authorization: address_name={} authorized={} master_address={}'
.format(address_name, authorized, master_address))
if self.context.on_authorization(address_name, authorized):
if master_address:
if not self.context.master_address:
# local Supvisors doesn't know about a master yet but remote Supvisors does
# this is likely due to the fact that the local Supervisor has just been started whereas
# a Supvisors group was already operating
# so accept remote perception in order to avoid going back to INITIALIZATION state for all
self.logger.warn('FiniteStateMachine.on_authorization: accept master={} declared by node={}'
.format(master_address, address_name))
self.context.master_address = master_address
elif master_address != self.context.master_address:
# 2 different perceptions of the master, likely due to a split-brain situation
# so going back to INITIALIZATION to fix
self.logger.warn('FiniteStateMachine.on_authorization: master discrepancy. '
' local declares {} - remote ({}) declares {}'
.format(self.context.master_address, address_name, master_address))
self.set_state(SupvisorsStates.INITIALIZATION)
def on_restart(self) -> None:
""" This event is used to transition the state machine to the RESTARTING state.
:return: None
"""
self.set_state(SupvisorsStates.RESTARTING)
def on_shutdown(self) -> None:
""" This event is used to transition the state machine to the SHUTTING_DOWN state.
:return: None
"""
self.set_state(SupvisorsStates.SHUTTING_DOWN)
# serialization
def serial(self) -> Payload:
""" Return a serializable form of the SupvisorsState.
:return: the Supvisors state as a dictionary
"""
return {'statecode': self.state, 'statename': self.state_string()}
# Map between state enumerations and classes
__StateInstances = {
SupvisorsStates.INITIALIZATION: InitializationState,
SupvisorsStates.DEPLOYMENT: DeploymentState,
SupvisorsStates.OPERATION: OperationState,
SupvisorsStates.CONCILIATION: ConciliationState,
SupvisorsStates.RESTARTING: RestartingState,
SupvisorsStates.SHUTTING_DOWN: ShuttingDownState,
SupvisorsStates.SHUTDOWN: ShutdownState
}
# Transitions allowed between states
__Transitions = {
SupvisorsStates.INITIALIZATION: [SupvisorsStates.DEPLOYMENT],
SupvisorsStates.DEPLOYMENT: [SupvisorsStates.OPERATION,
SupvisorsStates.CONCILIATION,
SupvisorsStates.RESTARTING,
SupvisorsStates.SHUTTING_DOWN],
SupvisorsStates.OPERATION: [SupvisorsStates.CONCILIATION,
SupvisorsStates.INITIALIZATION,
SupvisorsStates.RESTARTING,
SupvisorsStates.SHUTTING_DOWN],
SupvisorsStates.CONCILIATION: [SupvisorsStates.OPERATION,
SupvisorsStates.INITIALIZATION,
SupvisorsStates.RESTARTING,
SupvisorsStates.SHUTTING_DOWN],
SupvisorsStates.RESTARTING: [SupvisorsStates.SHUTDOWN],
SupvisorsStates.SHUTTING_DOWN: [SupvisorsStates.SHUTDOWN],
SupvisorsStates.SHUTDOWN: []
}
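# Illustrative sketch (added for clarity, not part of the original module): how the
# transition table above is typically exercised through set_state(). The names 'fsm'
# and 'supvisors' below are hypothetical placeholders for a fully built Supvisors
# context; this is a hedged usage outline, not code meant to run as-is.
#
#   fsm = FiniteStateMachine(supvisors)   # starts in INITIALIZATION
#   fsm.on_timer_event()                  # calls next(); may move to DEPLOYMENT
#   fsm.on_restart()                      # requests RESTARTING; set_state() only follows
#                                         # edges listed in __Transitions, so the request
#                                         # is ignored while still in INITIALIZATION
#
# set_state() keeps looping while instance.next() returns a new state that the table
# allows, which is how DEPLOYMENT can fall straight through to OPERATION or
# CONCILIATION within a single call.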
|
|
# Documented in https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
import logging
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Union
from django.conf import settings
from django.db import transaction
from django.db.models import Max
from django.utils.timezone import now as timezone_now
from sentry_sdk import capture_exception
from zerver.lib.logging_util import log_to_file
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Recipient,
Subscription,
UserActivity,
UserMessage,
UserProfile,
)
logger = logging.getLogger("zulip.soft_deactivation")
log_to_file(logger, settings.SOFT_DEACTIVATION_LOG_PATH)
BULK_CREATE_BATCH_SIZE = 10000
def filter_by_subscription_history(
user_profile: UserProfile,
all_stream_messages: DefaultDict[int, List[Message]],
all_stream_subscription_logs: DefaultDict[int, List[RealmAuditLog]],
) -> List[UserMessage]:
user_messages_to_insert: List[UserMessage] = []
def store_user_message_to_insert(message: Message) -> None:
        user_message = UserMessage(user_profile=user_profile, message_id=message["id"], flags=0)
        user_messages_to_insert.append(user_message)
for (stream_id, stream_messages_raw) in all_stream_messages.items():
stream_subscription_logs = all_stream_subscription_logs[stream_id]
# Make a copy of the original list of messages, which we will
# mutate in the loop below.
stream_messages = list(stream_messages_raw)
for log_entry in stream_subscription_logs:
# For each stream, we iterate through all of the changes
# to the user's subscription to that stream, ordered by
# event_last_message_id, to determine whether the user was
# subscribed to the target stream at that time.
#
# For each message, we're looking for the first event for
# the user's subscription to the target stream after the
# message was sent.
# * If it's an unsubscribe, we know the user was subscribed
# when the message was sent, and create a UserMessage
# * If it's a subscribe, we know the user was not, and we
# skip the message by mutating the stream_messages list
# to skip that message.
if len(stream_messages) == 0:
# Because stream_messages gets mutated below, this
# check belongs in this inner loop, not the outer loop.
break
if log_entry.event_type == RealmAuditLog.SUBSCRIPTION_DEACTIVATED:
# If the event shows the user was unsubscribed after
# event_last_message_id, we know they must have been
# subscribed immediately before the event.
for stream_message in stream_messages:
if stream_message["id"] <= log_entry.event_last_message_id:
store_user_message_to_insert(stream_message)
else:
break
elif log_entry.event_type in (
RealmAuditLog.SUBSCRIPTION_ACTIVATED,
RealmAuditLog.SUBSCRIPTION_CREATED,
):
initial_msg_count = len(stream_messages)
for i, stream_message in enumerate(stream_messages):
if stream_message["id"] > log_entry.event_last_message_id:
stream_messages = stream_messages[i:]
break
final_msg_count = len(stream_messages)
if initial_msg_count == final_msg_count:
if stream_messages[-1]["id"] <= log_entry.event_last_message_id:
stream_messages = []
else:
raise AssertionError(f"{log_entry.event_type} is not a subscription event.")
if len(stream_messages) > 0:
            # We check the last event because, if the final subscription event
            # was a deactivation, we don't want to create UserMessage rows for
            # any of the remaining messages.
if stream_subscription_logs[-1].event_type in (
RealmAuditLog.SUBSCRIPTION_ACTIVATED,
RealmAuditLog.SUBSCRIPTION_CREATED,
):
for stream_message in stream_messages:
store_user_message_to_insert(stream_message)
return user_messages_to_insert
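# Worked example (added for clarity, not in the original module), assuming a
# hypothetical stream whose messages have ids [10, 20, 30] and subscription logs
# ordered as required above:
#
#   SUBSCRIPTION_DEACTIVATED at event_last_message_id=15
#   SUBSCRIPTION_ACTIVATED   at event_last_message_id=25
#
# The DEACTIVATED entry creates a UserMessage for id 10 (sent while still
# subscribed) and stops at id 20. The ACTIVATED entry then trims the list to
# [30], and since the final log entry is a subscribe event, id 30 also gets a
# UserMessage. Message 20, sent while the user was unsubscribed, is skipped.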
def add_missing_messages(user_profile: UserProfile) -> None:
"""This function takes a soft-deactivated user, and computes and adds
to the database any UserMessage rows that were not created while
the user was soft-deactivated. The end result is that from the
perspective of the message database, it should be impossible to
tell that the user was soft-deactivated at all.
At a high level, the algorithm is as follows:
* Find all the streams that the user was at any time a subscriber
of when or after they were soft-deactivated (`recipient_ids`
below).
* Find all the messages sent to those streams since the user was
soft-deactivated. This will be a superset of the target
UserMessages we need to create in two ways: (1) some UserMessage
rows will have already been created in do_send_messages because
the user had a nonzero set of flags (the fact that we do so in
do_send_messages simplifies things considerably, since it means
we don't need to inspect message content to look for things like
mentions here), and (2) the user might not have been subscribed
to all of the streams in recipient_ids for the entire time
window.
    * Correct the list from the previous step by excluding those with
existing UserMessage rows.
    * Correct the list from the previous step by excluding those
where the user wasn't subscribed at the time, using the
RealmAuditLog data to determine exactly when the user was
subscribed/unsubscribed.
* Create the UserMessage rows.
For further documentation, see:
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
"""
assert user_profile.last_active_message_id is not None
all_stream_subs = list(
Subscription.objects.filter(
user_profile=user_profile, recipient__type=Recipient.STREAM
).values("recipient_id", "recipient__type_id")
)
# For stream messages we need to check messages against data from
# RealmAuditLog for visibility to user. So we fetch the subscription logs.
stream_ids = [sub["recipient__type_id"] for sub in all_stream_subs]
events = [
RealmAuditLog.SUBSCRIPTION_CREATED,
RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
RealmAuditLog.SUBSCRIPTION_ACTIVATED,
]
# Important: We order first by event_last_message_id, which is the
# official ordering, and then tiebreak by RealmAuditLog event ID.
# That second tiebreak is important in case a user is subscribed
# and then unsubscribed without any messages being sent in the
# meantime. Without that tiebreak, we could end up incorrectly
# processing the ordering of those two subscription changes.
subscription_logs = list(
RealmAuditLog.objects.select_related("modified_stream")
.filter(
modified_user=user_profile, modified_stream_id__in=stream_ids, event_type__in=events
)
.order_by("event_last_message_id", "id")
)
all_stream_subscription_logs: DefaultDict[int, List[RealmAuditLog]] = defaultdict(list)
for log in subscription_logs:
all_stream_subscription_logs[log.modified_stream_id].append(log)
recipient_ids = []
for sub in all_stream_subs:
stream_subscription_logs = all_stream_subscription_logs[sub["recipient__type_id"]]
if stream_subscription_logs[-1].event_type == RealmAuditLog.SUBSCRIPTION_DEACTIVATED:
assert stream_subscription_logs[-1].event_last_message_id is not None
if (
stream_subscription_logs[-1].event_last_message_id
<= user_profile.last_active_message_id
):
                # Short-circuit this iteration: it's no use iterating further
                # since the user unsubscribed before soft-deactivation.
continue
recipient_ids.append(sub["recipient_id"])
all_stream_msgs = list(
Message.objects.filter(
recipient_id__in=recipient_ids, id__gt=user_profile.last_active_message_id
)
.order_by("id")
.values("id", "recipient__type_id")
)
already_created_ums = set(
UserMessage.objects.filter(
user_profile=user_profile,
message__recipient__type=Recipient.STREAM,
message_id__gt=user_profile.last_active_message_id,
).values_list("message_id", flat=True)
)
    # Exclude messages for which UserMessage rows have already been created
all_stream_msgs = [msg for msg in all_stream_msgs if msg["id"] not in already_created_ums]
stream_messages: DefaultDict[int, List[Message]] = defaultdict(list)
for msg in all_stream_msgs:
stream_messages[msg["recipient__type_id"]].append(msg)
    # Filter the stream messages against the subscription logs and collect the
    # UserMessage objects to insert in bulk. filter_by_subscription_history()
    # performs no SQL queries; all the data it needs is passed in via its parameters.
user_messages_to_insert = filter_by_subscription_history(
user_profile, stream_messages, all_stream_subscription_logs
)
# Doing a bulk create for all the UserMessage objects stored for creation.
while len(user_messages_to_insert) > 0:
messages, user_messages_to_insert = (
user_messages_to_insert[0:BULK_CREATE_BATCH_SIZE],
user_messages_to_insert[BULK_CREATE_BATCH_SIZE:],
)
UserMessage.objects.bulk_create(messages)
user_profile.last_active_message_id = messages[-1].message_id
user_profile.save(update_fields=["last_active_message_id"])
def do_soft_deactivate_user(user_profile: UserProfile) -> None:
try:
user_profile.last_active_message_id = (
UserMessage.objects.filter(user_profile=user_profile)
.order_by("-message_id")[0]
.message_id
)
except IndexError: # nocoverage
# In the unlikely event that a user somehow has never received
# a message, we just use the overall max message ID.
user_profile.last_active_message_id = Message.objects.last().id
user_profile.long_term_idle = True
user_profile.save(update_fields=["long_term_idle", "last_active_message_id"])
logger.info("Soft deactivated user %s", user_profile.id)
def do_soft_deactivate_users(users: List[UserProfile]) -> List[UserProfile]:
BATCH_SIZE = 100
users_soft_deactivated = []
while True:
(user_batch, users) = (users[0:BATCH_SIZE], users[BATCH_SIZE:])
if len(user_batch) == 0:
break
with transaction.atomic():
realm_logs = []
for user in user_batch:
do_soft_deactivate_user(user)
event_time = timezone_now()
log = RealmAuditLog(
realm=user.realm,
modified_user=user,
event_type=RealmAuditLog.USER_SOFT_DEACTIVATED,
event_time=event_time,
)
realm_logs.append(log)
users_soft_deactivated.append(user)
RealmAuditLog.objects.bulk_create(realm_logs)
logger.info(
"Soft-deactivated batch of %s users; %s remain to process", len(user_batch), len(users)
)
return users_soft_deactivated
def do_auto_soft_deactivate_users(
inactive_for_days: int, realm: Optional[Realm]
) -> List[UserProfile]:
filter_kwargs: Dict[str, Realm] = {}
if realm is not None:
filter_kwargs = dict(user_profile__realm=realm)
users_to_deactivate = get_users_for_soft_deactivation(inactive_for_days, filter_kwargs)
users_deactivated = do_soft_deactivate_users(users_to_deactivate)
if not settings.AUTO_CATCH_UP_SOFT_DEACTIVATED_USERS:
logger.info("Not catching up users since AUTO_CATCH_UP_SOFT_DEACTIVATED_USERS is off")
return users_deactivated
if realm is not None:
filter_kwargs = dict(realm=realm)
users_to_catch_up = get_soft_deactivated_users_for_catch_up(filter_kwargs)
do_catch_up_soft_deactivated_users(users_to_catch_up)
return users_deactivated
def reactivate_user_if_soft_deactivated(user_profile: UserProfile) -> Union[UserProfile, None]:
if user_profile.long_term_idle:
add_missing_messages(user_profile)
user_profile.long_term_idle = False
user_profile.save(update_fields=["long_term_idle"])
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=RealmAuditLog.USER_SOFT_ACTIVATED,
event_time=timezone_now(),
)
logger.info("Soft reactivated user %s", user_profile.id)
return user_profile
return None
def get_users_for_soft_deactivation(
inactive_for_days: int, filter_kwargs: Any
) -> List[UserProfile]:
users_activity = list(
UserActivity.objects.filter(
user_profile__is_active=True,
user_profile__is_bot=False,
user_profile__long_term_idle=False,
**filter_kwargs,
)
.values("user_profile_id")
.annotate(last_visit=Max("last_visit"))
)
user_ids_to_deactivate = []
today = timezone_now()
for user_activity in users_activity:
if (today - user_activity["last_visit"]).days > inactive_for_days:
user_ids_to_deactivate.append(user_activity["user_profile_id"])
users_to_deactivate = list(UserProfile.objects.filter(id__in=user_ids_to_deactivate))
return users_to_deactivate
def do_soft_activate_users(users: List[UserProfile]) -> List[UserProfile]:
users_soft_activated = []
for user_profile in users:
user_activated = reactivate_user_if_soft_deactivated(user_profile)
if user_activated:
users_soft_activated.append(user_activated)
return users_soft_activated
def do_catch_up_soft_deactivated_users(users: List[UserProfile]) -> List[UserProfile]:
users_caught_up = []
failures = []
for user_profile in users:
if user_profile.long_term_idle:
try:
add_missing_messages(user_profile)
users_caught_up.append(user_profile)
except Exception: # nocoverage
capture_exception() # nocoverage
failures.append(user_profile) # nocoverage
logger.info("Caught up %d soft-deactivated users", len(users_caught_up))
if failures:
logger.error("Failed to catch up %d soft-deactivated users", len(failures)) # nocoverage
return users_caught_up
def get_soft_deactivated_users_for_catch_up(filter_kwargs: Any) -> List[UserProfile]:
users_to_catch_up = UserProfile.objects.select_related().filter(
long_term_idle=True,
is_active=True,
is_bot=False,
**filter_kwargs,
)
return users_to_catch_up
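# Hedged usage sketch (not part of the original module): the typical lifecycle glues
# the helpers above together roughly as follows. 'realm' and 'user_profile' are
# hypothetical objects supplied by the caller.
#
#   # cron-style job: deactivate users idle for 90+ days, then backfill rows
#   do_auto_soft_deactivate_users(inactive_for_days=90, realm=realm)
#
#   # on the next request from such a user, restore normal behaviour
#   reactivate_user_if_soft_deactivated(user_profile)
#
# reactivate_user_if_soft_deactivated() calls add_missing_messages() first, so the
# user's message history is complete before long_term_idle is cleared.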
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
You'll need:
sudo apt-get install help2man fakeroot python-twitter python-simplejson
You'll also need ~/.netrc ~/.ssh/<fname>
"""
__author__ = 'Scott Kirkwood ([email protected])'
__version__ = '0.3.1'
import codecs
import getpass
import glob
import httplib
import netrc
import os
import re
import release
import shutil
import simplejson
import subprocess
import twitter
import debian
import documents
import googlecode_update
import i18n
import mailinglist
import mercurial
import pypi_list
import rst_check
import spell_check
import update_file
class PyBdistException(Exception):
pass
def fixup_setup(setup):
"""Fill in any missing pieces from setup."""
if not hasattr(setup, 'PY_NAME'):
setup.PY_NAME = setup.NAME
if not hasattr(setup, 'PY_SRC'):
setup.PY_SRC = '%s.py' % setup.PY_NAME
if not hasattr(setup, 'DEB_NAME'):
setup.DEB_NAME = setup.NAME
if not hasattr(setup, 'AUTHOR_NAME'):
setup.AUTHOR_NAME = setup.SETUP['author']
if not hasattr(setup, 'GOOGLE_CODE_EMAIL'):
setup.GOOGLE_CODE_EMAIL = setup.SETUP['author_email']
return setup
def _run_or_die(args, err_mess=None, output=True):
"""Run the `args` (a list) or die.
Args:
args: list of arguments to pass to call
err_mess: Extra hint what went wrong.
output: output the command before running
"""
if output:
print ' '.join(args)
try:
ret = subprocess.call(args)
except OSError, oserr:
mess = 'Error running: %r: %r' % (' '.join(args), oserr)
if err_mess:
mess += '\n' + err_mess
    raise PyBdistException(mess)
if ret:
raise PyBdistException('Error running: code %r\n%r' % (ret, ' '.join(args)))
def _get_py_source_version(setup):
fname = os.path.join(setup.DIR, setup.PY_SRC)
re_py_ver = re.compile(r'__version__\s*=\s*[\'"](.*)[\'"]')
grps = re_py_ver.search(open(fname).read())
if not grps:
raise PyBdistException('Unable to find __version__ in %r' % fname)
source_ver = grps.group(1)
return source_ver
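# For reference (comment added for clarity): the regex above matches the usual
# module-level version line with either quote style, e.g.
#   __version__ = '0.3.1'
#   __version__ = "0.3.1"
# and captures the version string itself in group(1).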
def get_and_verify_versions(setup):
"""Get the version and make sure all versions are synched."""
setup_ver = setup.VER
source_ver = _get_py_source_version(setup)
rel_ver, _, _ = _parse_last_release(setup)
changelog_ver, _, _ = release.parse_deb_changelog(
'debian/changelog')
if (setup_ver != source_ver or setup_ver != rel_ver
or setup_ver != changelog_ver):
print '** Local setup versions don\'t agree'
print '** setup.py = %r' % setup_ver
print '** %s/%s = %r' % (setup.DIR, setup.PY_SRC, source_ver)
print '** %s = %r' % (setup.RELEASE_FILE, rel_ver)
print '** %s = %r' % ('debian/changelog', changelog_ver)
raise PyBdistException('Setup versions don\'t agree')
print ' Local setup versions agree'
return setup_ver
def verify_remote_versions(setup):
"""Examine the remote versions."""
setup_ver = setup.VER
gc_ver, _, _ = release.get_last_google_code_version(setup.NAME)
pypi_ver, _, _ = pypi_list.get_latest_version(setup.NAME)
if gc_ver and gc_ver == setup_ver:
print ' code.google.com version is up-to-date'
else:
print '** Note: code.google.com version is at %r and needs to be uploaded' % gc_ver
if pypi_ver and pypi_ver == setup_ver:
print ' pypi version is up-to-date'
else:
print '** Note: pypi.python.org version is at %r and needs to be uploaded' % pypi_ver
def _parse_last_release(setup):
"""Parse the release file from setup information.
Returns:
    rel_ver, release_date, rel_lines
"""
release_regex = _get_var(setup, 'RELEASE_FORMAT')
return release.parse_last_release(
setup.RELEASE_FILE, release_regex)
def parse_last_release(setup):
_, rel_date, rel_lines = _parse_last_release(setup)
return rel_date, rel_lines
def build_zip_tar(unused_setup):
args = [
'python', 'setup.py', 'sdist', '--formats=gztar,zip']
_run_or_die(args, 'Error building sdist')
print 'Built zip and tar'
def upload_to_pypi(unused_setup):
args = [
'python', 'setup.py', 'sdist', '--formats=zip', 'upload',]
_run_or_die(args, '\n'.join([
'Error uploading to pypi',
'If it\'s the first time, run "python setup.py register"']))
print 'Upload to pypi'
def build_man(setup):
if not hasattr(setup, 'MAN_FILE') or not setup.MAN_FILE:
return
dest_dir = os.path.dirname(setup.MAN_FILE)
if not os.path.isdir(dest_dir):
print 'Making directory %r' % dest_dir
os.makedirs(dest_dir)
langs = ['']
if hasattr(setup, 'LANGS'):
langs += setup.LANGS
for lang in langs:
if not lang:
lang_dot = ''
else:
lang_dot = '%s.' % lang
cur_manfile = setup.MAN_FILE.replace('.1', '.%s1' % lang_dot)
include_file = cur_manfile.replace('.1', '.include')
if not lang:
locale = 'C'
else:
locale = lang
args = [
'help2man',
'%s/%s' % (setup.DIR, setup.PY_SRC),
#'%s' % setup.NAME,
'--locale', locale,
'-N', # no pointer to TextInfo
'-i', include_file,
'-o', cur_manfile]
_run_or_die(args, '\n'.join([
'Failed to build manfile',
'You may need to install help2man']))
print 'Built %s.1' % setup.NAME
def _get_var(setup, var):
if hasattr(setup, var):
return getattr(setup, var)
return None
def build_deb(setup):
debian.build_deb(setup)
def get_deb_filenames(setup):
"""Returns the list of debian files found in dist/ folder.
Args:
setup: information used to filter only some versions.
Returns:
list of fnames without the folder name.
"""
debs = 'dist/%s_%s*all.deb' % (setup.DEB_NAME, setup.VER)
ret = []
for deb in glob.glob(debs):
ret.append(deb.replace('dist/', ''))
return ret
def clean_config(setup):
config_file = os.path.expanduser('~/.config/%s/config' % setup.NAME)
if os.path.exists(config_file):
os.unlink(config_file)
def _clean_doc(setup):
if not setup.NAME:
raise PyBdistException('Missing setup.NAME')
docs = '/usr/share/doc/%s' % setup.NAME
if os.path.exists(docs) and os.path.isdir(docs):
print 'rm -r %s' % docs
shutil.rmtree(docs)
def _clean_man(setup):
if not setup.NAME:
raise PyBdistException('Missing setup.NAME')
man = '/usr/share/man/man1/%s.1.gz' % setup.NAME
  if os.path.exists(man):
    print 'rm %s' % man
    os.unlink(man)
def _clean_scripts(setup):
if 'scripts' not in setup.SETUP:
return
for script in setup.SETUP['scripts']:
if not script.strip():
raise PyBdistException('Missing setup.SETUP.scripts')
bin_script = '/usr/local/bin/%s' % os.path.basename(script)
if os.path.exists(bin_script):
print 'rm %s' % bin_script
os.unlink(bin_script)
def _clean_packages(setup):
dist_dirs = ['/usr/share/pyshared',
'/usr/local/lib/python2.4/dist-packages',
'/usr/local/lib/python2.5/dist-packages',
'/usr/local/lib/python2.6/dist-packages']
base_dir = os.path.basename(setup.DIR)
if not base_dir.strip():
raise PyBdistException('%r is not a good name' % setup.DIR)
for dist_dir in dist_dirs:
if not os.path.exists(dist_dir):
continue
dist_packages = '%s/%s' % (dist_dir, base_dir)
if os.path.exists(dist_packages):
print 'rm -r %s' % dist_packages
shutil.rmtree(dist_packages)
_clean_eggs(dist_dir, setup)
def _clean_eggs(dist_dir, setup):
dist_egg = '%s/%s-*.egg-info' % (dist_dir, setup.PY_NAME)
for fname in glob.glob(dist_egg):
if os.path.exists(fname):
if os.path.isdir(fname):
print 'rm -r %s' % fname
shutil.rmtree(fname)
else:
print 'rm %s' % fname
os.unlink(fname)
def clean_all(setup):
clean_config(setup)
_clean_packages(setup)
_clean_doc(setup)
_clean_man(setup)
_clean_scripts(setup)
def print_release_info(setup):
rel_date, rel_lines = parse_last_release(setup)
print 'Local version is %r, date %r' % (setup.VER, rel_date)
print 'Release notes'
print '-------------'
print '\n'.join(rel_lines)
print
def test_code(setup):
"""Run tests with nosetests."""
dirs = [setup.DIR]
for root, cur_dirs, unused_files in os.walk(setup.DIR):
for dirname in cur_dirs:
if dirname in ['.svn', '.hg', '.ropeproject']:
continue
dirs.append(os.path.join(root, dirname))
fname = '.noserc'
if os.path.exists(fname):
noserc = os.path.abspath(fname)
else:
noserc = None
args = ['nosetests']
if noserc:
args += ['--config', noserc]
args += dirs
_run_or_die(args, 'You may need to install python-nose')
def check_code(setup):
"""Check the source code for errors."""
if os.path.exists('.pycheckrc'):
pycheckrc = os.path.abspath('.pycheckrc')
else:
pycheckrc = None
olddir = os.getcwd()
os.chdir(setup.DIR)
files = glob.glob('*.py') + glob.glob('*/*.py') + glob.glob('*/*/*.py')
args = ['pychecker', '--quiet', '--limit', '30']
if pycheckrc:
args += ['--config', pycheckrc]
args += files
_run_or_die(args, 'You may need to install pychecker')
print 'Passed pychecker'
os.chdir(olddir)
def check_rst(setup):
"""Check the release reStructuredText for errors."""
rst_check.check_file(setup.RELEASE_FILE)
rst_check.check_text(setup.SETUP['long_description'])
def check_spelling(setup):
"""Check the release reStructuredText for errors."""
dictionary = '.aspell.en.pws'
spell_check.check_file(setup.RELEASE_FILE, dictionary)
spell_check.check_code_file('setup.py', dictionary)
def _maybe_update_file(old_fname, old_ver, new_fname, new_ver, replace_text, regex, del_lines=0):
print '%r has version %r and %r has version %r' % (old_fname, old_ver, new_fname, new_ver)
prompt = 'Update %r?: ' % old_fname
yn = raw_input(prompt)
if yn.lower() == 'y':
if regex:
update_file.update_lines(old_fname, regex, replace_text)
else:
update_file.insert_before(old_fname, replace_text, del_lines)
def _ver_lines_different(lines1, lines2):
if len(lines1) != len(lines2):
return True
for l1, l2 in zip(lines1, lines2):
if l1.strip() != l2.strip():
return True
return False
def _fix_versions_notes(setup):
ver, date, lines = _parse_last_release(setup)
setup_ver = setup.VER
source_file = os.path.join(setup.DIR, setup.PY_SRC)
source_ver = _get_py_source_version(setup)
setup_file = 'setup.py'
release_file = setup.RELEASE_FILE
changelog_file = 'debian/changelog'
changelog_ver, _, cl_lines = release.parse_deb_changelog(changelog_file)
STRING_GROUP = '["\']([^"\']+)["\']'
EQ = '\s*=\s*'
if ver != setup.VER:
_maybe_update_file(setup_file, setup_ver, release_file, ver,
ver, r'^VER' + EQ + STRING_GROUP)
if ver != changelog_ver or _ver_lines_different(lines, cl_lines):
setup.VER = ver
if ver == changelog_ver:
del_lines = len(cl_lines) + 5
else:
del_lines = 0
new_text = '\n'.join(release.out_debian_changelog(setup, lines))
_maybe_update_file(changelog_file, changelog_ver, release_file, ver,
new_text, None, del_lines)
if ver != source_ver:
_maybe_update_file(source_file, source_ver, release_file, ver,
ver, r'^\s*__version__' + EQ + STRING_GROUP)
def check_for_errors(setup):
_fix_versions_notes(setup)
check_code(setup)
check_rst(setup)
check_spelling(setup)
if mercurial.needs_hg_commit(verbose=False):
print '** Mercurial needs commit'
elif mercurial.needs_hg_push(verbose=False):
print '** Mercurial needs push'
get_and_verify_versions(setup)
if hasattr(setup, 'LANGS'):
i18n.count_untranslated(_get_locale_dir(setup), setup.LANGS)
def get_pass_from(fname):
"""Retrieves the password from this file.
Verifies that the password is not visible by others on the machine.
Args:
fname: ex. ~/.ssh/myuser
Returns:
None or the password. May output stuff too.
"""
fname = os.path.expanduser(fname)
if os.path.exists(fname):
mode = os.stat(fname).st_mode
if mode & 0077:
print 'Change permissions on file first, chmod 600 %r' % fname
return None
dirname = os.path.dirname(fname)
mode = os.stat(dirname).st_mode
if mode & 0077:
print 'Change permission on directory first, chmod 700 %r' % dirname
return None
return file(fname).read().rstrip()
else:
print '%r not found' % fname
return None
def upload_to_google_code(setup):
print 'Using user %r' % setup.GOOGLE_CODE_EMAIL
password = get_pass_from('~/.ssh/%s' % setup.GOOGLE_CODE_EMAIL)
if not password:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
username = setup.GOOGLE_CODE_EMAIL
files = [
'%s-%s.zip' % (setup.NAME, setup.VER),
'%s-%s.tar.gz' % (setup.NAME, setup.VER),
] + get_deb_filenames(setup)
  # remove the 'Featured' label from any downloads that aren't in the `files` list
googlecode_update.remove_featured_labels(
setup.NAME, username, password, files)
for fname in files:
if fname.endswith('.zip') or fname.endswith('.tar.gz'):
labels = ['Type-Source', 'OpSys-Linux', 'Featured']
elif fname.endswith('.deb'):
labels = ['Type-Package', 'OpSys-Linux', 'Featured']
else:
labels = None
summary = fname
googlecode_update.maybe_upload_file(
setup.NAME, 'dist', fname, summary, labels, username, password)
def announce_on_freshmeat(setup):
"""Announce launch on freshmeat."""
print 'Announcing on Freshmeat...'
_, _, rel_lines = _parse_last_release(setup)
rcinfo = netrc.netrc(os.path.expanduser('~/.netrc'))
# Storing the auth_code as the account in the .netrc file
# ex. chmod 600 ~/.netrc
# machine freshmeat
# login myname
# account auth_code_given_by_freshmeat
# password mypassword
auth_code = rcinfo.authenticators('freshmeat')[1]
name = setup.NAME
if hasattr(setup, 'FRESHMEAT'):
name = setup.FRESHMEAT
tag = 'Bug fixes'
if setup.VER.endswith('.0'):
tag = 'Feature enhancements'
changelog = ['Changes: '] + rel_lines
release_dict = dict(version=setup.VER, changelog='\n'.join(changelog), tag_list=tag)
path = '/projects/%s/releases.json' % name
body = codecs.encode(simplejson.dumps(dict(auth_code=auth_code, release=release_dict)))
connection = httplib.HTTPConnection('freshmeat.net')
connection.request('POST', path, body, {'Content-Type': 'application/json'})
response = connection.getresponse()
if response.status == 404:
print 'Project %r not found, may have to add FRESHMEAT to setup.py' % name
raise PyBdistException('Freshmeat project not found, please register.')
elif response.status != 201:
print 'Request failed: %d %s' % (response.status, response.reason)
raise PyBdistException('Freshmeat upload failed')
print 'Done announcing on Freshmeat.'
def announce_on_twitter(setup):
print 'Announcing on twitter...'
rcinfo = netrc.netrc(os.path.expanduser('~/.netrc'))
auth = rcinfo.authenticators('twitter')
username = auth[0]
password = auth[2]
metadata = dict(version=setup.VER, name=setup.NAME, url=setup.SETUP['url'])
api = twitter.Api(username=username, password=password)
api.PostUpdate('Release %(version)s of %(name)s is available from %(url)s' % metadata)
print 'Done announcing on twitter.'
def _get_pot_filename(setup):
return os.path.join(_get_locale_dir(setup), '%s.pot' % setup.NAME)
def _get_locale_dir(setup):
return '%s/locale' % setup.DIR
def build_get_text(setup):
# TODO(scottkirkwood): sub-directories
dirs = [ 'setup.py', os.path.join(setup.DIR, '*.py')]
i18n.build_get_text(_get_pot_filename(setup), dirs)
def update_po_files(setup):
missing = i18n.update_po_files(_get_pot_filename(setup), _get_locale_dir(setup), setup.LANGS)
for lang, fname in missing:
print 'Creating %r' % fname
i18n.make_empty_po_file(fname, lang, setup)
def compile_po_files(setup):
i18n.compile_po_files(_get_locale_dir(setup), setup.LANGS)
def handle_standard_options(options, setup):
"""Handle options added by add_standard_options().
Args:
options: OptParser set of options.
setup: the setup file module.
Returns:
    True if handled, False otherwise."""
fixup_setup(setup)
if options.doclean:
clean_all(setup)
elif options.check:
check_for_errors(setup)
print
print_release_info(setup)
elif options.check_remote:
verify_remote_versions(setup)
elif options.test:
test_code(setup)
elif options.git:
debian.git_import_orig(setup)
elif options.dist:
build_man(setup)
build_zip_tar(setup)
build_deb(setup)
print_release_info(setup)
elif options.upload:
print_release_info(setup)
upload_to_google_code(setup)
elif options.pypi:
print_release_info(setup)
upload_to_pypi(setup)
elif options.mail:
mailinglist.mail(setup)
elif options.freshmeat:
print_release_info(setup)
announce_on_freshmeat(setup)
elif options.twitter:
print_release_info(setup)
announce_on_twitter(setup)
elif options.missing_docs:
documents.out_license(setup)
documents.out_readme(setup)
documents.out_install(setup)
elif options.gettext:
build_get_text(setup)
update_po_files(setup)
compile_po_files(setup)
else:
return False
return True
def add_standard_options(parser, setup=None):
parser.add_option('--clean', dest='doclean', action='store_true',
help='Uninstall things')
parser.add_option('--missing-docs', dest='missing_docs', action='store_true',
help='Create missing docs.')
parser.add_option('--check', dest='check', action='store_true',
help='Check for errors.')
parser.add_option('--check-remote', dest='check_remote', action='store_true',
help='Check remote versions.')
parser.add_option('--test', dest='test', action='store_true',
help='Run nose tests.')
parser.add_option('--git', dest='git', action='store_true',
help='Push to git using gimp-import-orig.')
parser.add_option('--dist', dest='dist', action='store_true',
help='Only build distributions.')
parser.add_option('--upload', dest='upload', action='store_true',
help='Only upload to google code.')
parser.add_option('--pypi', dest='pypi', action='store_true',
help='Only upload to pypi')
if setup and hasattr(setup, 'MAILING_LIST'):
parser.add_option('--mail', dest='mail', action='store_true',
help='Announce to mailing list.')
parser.add_option('--freshmeat', dest='freshmeat', action='store_true',
help='Announce on freshmeat')
parser.add_option('--twitter', dest='twitter', action='store_true',
help='Announce on Twitter')
if not parser.has_option('--gettext') and setup and hasattr(setup, 'LANGS'):
parser.add_option('--gettext', dest='gettext', action='store_true',
help='Build gettext files.')
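# Hedged usage sketch (not part of the original module): a project's release driver
# script might wire the two entry points above together roughly like this. 'pybdist'
# and 'setup' are illustrative names for this module and the project's own setup
# module; they are assumptions, not an API guarantee.
#
#   import optparse
#   import pybdist
#   import setup   # the project's setup module, exposing NAME, VER, DIR, etc.
#
#   parser = optparse.OptionParser()
#   pybdist.add_standard_options(parser, setup)
#   (options, unused_args) = parser.parse_args()
#   if not pybdist.handle_standard_options(options, setup):
#     parser.print_help()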
|
|
#!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:[email protected]
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import threading
import time
import token
import tokenize
import types
import gc
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
def usage(outfile):
outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
Meta-options:
--help Display this help then exit.
--version Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace Print each line to sys.stdout before it is executed.
-c, --count Count the number of times each line is executed
and write the counts to <module>.cover for each
module executed, in the module's directory.
See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs Keep track of which functions are executed at least
once and write the results to sys.stdout after the
program exits.
-T, --trackcalls Keep track of caller/called pairs and write the
results to sys.stdout after the program exits.
-r, --report Generate a report from a counts file; do not execute
any code. `--file' must specify the results file to
read, which must have been created in a previous run
with `--count --file=FILE'.
Modifiers:
-f, --file=<file> File to accumulate counts over several runs.
-R, --no-report Do not generate the coverage report files.
Useful if you want to accumulate over several runs.
-C, --coverdir=<dir> Directory where the report files go. The coverage
report for <package>.<module> is written to file
<dir>/<package>/<module>.cover.
-m, --missing Annotate executable lines that were not executed
with '>>>>>> '.
-s, --summary Write a brief summary on stdout for each file.
(Can only be used with --count or --report.)
-g, --timing Prefix each line with the time since the program started.
Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
(if it is a package). Accepts comma separated
list of module names
--ignore-dir=<dir> Ignore files in the given directory (multiple
directories can be joined by os.pathsep).
""" % sys.argv[0])
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class Ignore:
def __init__(self, modules = None, dirs = None):
self._mods = modules or []
self._dirs = dirs or []
self._dirs = map(os.path.normpath, self._dirs)
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list. Need to take some care since ignoring
# "cmp" musn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
for mod in self._mods:
if mod == modulename: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
n = len(mod)
# (will not overflow since if the first n characters are the
# same and the name has not already occurred, then the size
# of "name" is greater than that of "mod")
if mod == modulename[:n] and modulename[n] == '.':
self._ignore[modulename] = 1
return 1
# Now check that __file__ isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
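# A small illustration of the rules implemented in Ignore.names() above
# (module and directory names are made up; POSIX-style paths assumed):
def _example_ignore_rules():
    ign = Ignore(modules=['Spam', 'cmp'], dirs=['/usr/local'])
    assert ign.names('/opt/spam.py', 'Spam') == 1         # exact module match
    assert ign.names('/opt/eggs.py', 'Spam.Eggs') == 1     # submodule of Spam
    assert ign.names('/opt/cmpcache.py', 'cmpcache') == 0  # 'cmp' does not match
    assert ign.names('/usr/local/x.py', 'x') == 1          # under an ignored dir
    return ign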
def modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename
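# Example of the difference between the two helpers above (the path is
# hypothetical, and fullmodname() depends on what happens to be on sys.path):
#   modname('/usr/lib/python2.7/email/mime/text.py')     -> 'text'
#   fullmodname('/usr/lib/python2.7/email/mime/text.py') -> 'email.mime.text'
#     (assuming '/usr/lib/python2.7' is the longest matching sys.path entry)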
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
callers=None, outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
self.counter = self.counts.copy() # map (filename, lineno) to count
self.calledfuncs = calledfuncs
if self.calledfuncs is None:
self.calledfuncs = {}
self.calledfuncs = self.calledfuncs.copy()
self.callers = callers
if self.callers is None:
self.callers = {}
self.callers = self.callers.copy()
self.infile = infile
self.outfile = outfile
if self.infile:
# Try to merge existing counts file.
try:
counts, calledfuncs, callers = \
pickle.load(open(self.infile, 'rb'))
self.update(self.__class__(counts, calledfuncs, callers))
except (IOError, EOFError, ValueError), err:
print >> sys.stderr, ("Skipping counts file %r: %s"
% (self.infile, err))
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts.keys():
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs.keys():
calledfuncs[key] = 1
for key in other_callers.keys():
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
@param coverdir
"""
if self.calledfuncs:
print
print "functions called:"
calls = self.calledfuncs.keys()
calls.sort()
for filename, modulename, funcname in calls:
print ("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname))
if self.callers:
print
print "calling relationships:"
calls = self.callers.keys()
calls.sort()
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
if pfile != lastfile:
print
print "***", pfile, "***"
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print " -->", cfile
lastcfile = cfile
print " %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts.keys():
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.iteritems():
# skip some "files" we don't care about...
if filename == "<string>":
continue
if filename.startswith("<doctest "):
continue
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if coverdir is None:
dir = os.path.dirname(os.path.abspath(filename))
modulename = modname(filename)
else:
dir = coverdir
if not os.path.exists(dir):
os.makedirs(dir)
modulename = fullmodname(filename)
# If desired, get a list of the line numbers which represent
# executable content (returned as a dict for better lookup speed)
if show_missing:
lnotab = find_executable_linenos(filename)
else:
lnotab = {}
source = linecache.getlines(filename)
coverpath = os.path.join(dir, modulename + ".cover")
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count)
if summary and n_lines:
percent = int(100 * n_hits / n_lines)
sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
mods = sums.keys()
mods.sort()
print "lines cov% module (path)"
for m in mods:
n_lines, percent, modulename, filename = sums[m]
print "%5d %3d%% %s (%s)" % sums[m]
if self.outfile:
# try and store counts and module info into self.outfile
try:
pickle.dump((self.counts, self.calledfuncs, self.callers),
open(self.outfile, 'wb'), 1)
except IOError, err:
print >> sys.stderr, "Can't save counts files because %s" % err
def write_results_file(self, path, lines, lnotab, lines_hit):
"""Return a coverage results file in path."""
try:
outfile = open(path, "w")
except IOError, err:
print >> sys.stderr, ("trace: Could not open %r for writing: %s"
"- skipping" % (path, err))
return 0, 0
n_lines = 0
n_hits = 0
for i, line in enumerate(lines):
lineno = i + 1
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
elif rx_blank.match(line):
outfile.write(" ")
else:
# lines preceded by no marks weren't hit
# Highlight them if so indicated, unless the line contains
# #pragma: NO COVER
if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(lines[i].expandtabs(8))
outfile.close()
return n_hits, n_lines
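    # For reference, lines in a generated <module>.cover file look roughly like
    # this (counts and source text below are made-up examples):
    #     3: x = compute()           <- executed 3 times
    #        # a comment line        <- blank/comment, no marker
    # >>>>>> return fallback()       <- executable but never hit (show_missing)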
def find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lineno = code.co_firstlineno
for li in line_increments:
lineno += li
if lineno not in strs:
linenos[lineno] = 1
return linenos
def find_lines(code, strs):
"""Return lineno dict for all code objects reachable from code."""
# get all of the lineno information from the code of this scope level
linenos = find_lines_from_code(code, strs)
# and check the constants for references to other code objects
for c in code.co_consts:
if isinstance(c, types.CodeType):
# find another code object, so recurse into it
linenos.update(find_lines(c, strs))
return linenos
def find_strings(filename):
"""Return a dict of possible docstring positions.
    The dict maps line numbers to 1. There is an entry for each
    line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
f = open(filename)
for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
f.close()
return d
def find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
prog = open(filename, "rU").read()
except IOError, err:
print >> sys.stderr, ("Not printing coverage data for %r: %s"
% (filename, err))
return {}
code = compile(prog, filename, "exec")
strs = find_strings(filename)
return find_lines(code, strs)
class Trace:
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
ignoremods=(), ignoredirs=(), infile=None, outfile=None,
timing=False):
"""
@param count true iff it should count number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
                     `count' and `trace'
        @param countcallers true iff it should keep track of caller/callee
                     pairs and write them to the results; this overrides
                     `count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
        @param timing true iff timing information should be displayed
"""
self.infile = infile
self.outfile = outfile
self.ignore = Ignore(ignoremods, ignoredirs)
self.counts = {} # keys are (filename, linenumber)
self.blabbed = {} # for debugging
self.pathtobasename = {} # for memoizing os.path.basename
self.donothing = 0
self.trace = trace
self._calledfuncs = {}
self._callers = {}
self._caller_cache = {}
self.start_time = None
if timing:
self.start_time = time.time()
if countcallers:
self.globaltrace = self.globaltrace_trackcallers
elif countfuncs:
self.globaltrace = self.globaltrace_countfuncs
elif trace and count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace_and_count
elif trace:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace
elif count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_count
else:
# Ahem -- do nothing? Okay.
self.donothing = 1
def run(self, cmd):
import __main__
dict = __main__.__dict__
if not self.donothing:
sys.settrace(self.globaltrace)
threading.settrace(self.globaltrace)
try:
exec cmd in dict, dict
finally:
if not self.donothing:
sys.settrace(None)
threading.settrace(None)
def runctx(self, cmd, globals=None, locals=None):
if globals is None: globals = {}
if locals is None: locals = {}
if not self.donothing:
sys.settrace(self.globaltrace)
threading.settrace(self.globaltrace)
try:
exec cmd in globals, locals
finally:
if not self.donothing:
sys.settrace(None)
threading.settrace(None)
def runfunc(self, func, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
try:
result = func(*args, **kw)
finally:
if not self.donothing:
sys.settrace(None)
return result
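    # e.g.  result = tracer.runfunc(some_function, arg1, kw=value)
    # traces/counts only while `some_function` runs, then returns its result
    # (the callable and arguments here are placeholders).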
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if hasattr(f, "func_doc")]
            # require len(funcs) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = str(classes[0])
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
# final hack - module name shows up in str(cls), but we've already
# computed module name, so remove it
clsname = clsname.split(".")[1:]
clsname = ".".join(clsname)
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def globaltrace_trackcallers(self, frame, why, arg):
"""Handler for call events.
Adds information about who called who to the self._callers dict.
"""
if why == 'call':
# XXX Should do a better job of identifying methods
this_func = self.file_module_function_of(frame)
parent_func = self.file_module_function_of(frame.f_back)
self._callers[(parent_func, this_func)] = 1
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
this_func = self.file_module_function_of(frame)
self._calledfuncs[this_func] = 1
def globaltrace_lt(self, frame, why, arg):
"""Handler for call events.
If the code block being entered is to be ignored, returns `None',
else returns self.localtrace.
"""
if why == 'call':
code = frame.f_code
filename = frame.f_globals.get('__file__', None)
if filename:
# XXX modname() doesn't work right for packages, so
# the ignore support won't work right for packages
modulename = modname(filename)
if modulename is not None:
ignore_it = self.ignore.names(filename, modulename)
if not ignore_it:
if self.trace:
print (" --- modulename: %s, funcname: %s"
% (modulename, code.co_name))
return self.localtrace
else:
return None
def localtrace_trace_and_count(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
if self.start_time:
print '%.2f' % (time.time() - self.start_time),
bname = os.path.basename(filename)
print "%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)),
return self.localtrace
def localtrace_trace(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if self.start_time:
print '%.2f' % (time.time() - self.start_time),
bname = os.path.basename(filename)
print "%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)),
return self.localtrace
def localtrace_count(self, frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
return self.localtrace
def results(self):
return CoverageResults(self.counts, infile=self.infile,
outfile=self.outfile,
calledfuncs=self._calledfuncs,
callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
import getopt
if argv is None:
argv = sys.argv
try:
opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
["help", "version", "trace", "count",
"report", "no-report", "summary",
"file=", "missing",
"ignore-module=", "ignore-dir=",
"coverdir=", "listfuncs",
"trackcalls", "timing"])
except getopt.error, msg:
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Try `%s --help' for more information\n"
% sys.argv[0])
sys.exit(1)
trace = 0
count = 0
report = 0
no_report = 0
counts_file = None
missing = 0
ignore_modules = []
ignore_dirs = []
coverdir = None
summary = 0
listfuncs = False
countcallers = False
timing = False
for opt, val in opts:
if opt == "--help":
usage(sys.stdout)
sys.exit(0)
if opt == "--version":
sys.stdout.write("trace 2.0\n")
sys.exit(0)
if opt == "-T" or opt == "--trackcalls":
countcallers = True
continue
if opt == "-l" or opt == "--listfuncs":
listfuncs = True
continue
if opt == "-g" or opt == "--timing":
timing = True
continue
if opt == "-t" or opt == "--trace":
trace = 1
continue
if opt == "-c" or opt == "--count":
count = 1
continue
if opt == "-r" or opt == "--report":
report = 1
continue
if opt == "-R" or opt == "--no-report":
no_report = 1
continue
if opt == "-f" or opt == "--file":
counts_file = val
continue
if opt == "-m" or opt == "--missing":
missing = 1
continue
if opt == "-C" or opt == "--coverdir":
coverdir = val
continue
if opt == "-s" or opt == "--summary":
summary = 1
continue
if opt == "--ignore-module":
for mod in val.split(","):
ignore_modules.append(mod.strip())
continue
if opt == "--ignore-dir":
for s in val.split(os.pathsep):
s = os.path.expandvars(s)
# should I also call expanduser? (after all, could use $HOME)
s = s.replace("$prefix",
os.path.join(sys.prefix, "lib",
"python" + sys.version[:3]))
s = s.replace("$exec_prefix",
os.path.join(sys.exec_prefix, "lib",
"python" + sys.version[:3]))
s = os.path.normpath(s)
ignore_dirs.append(s)
continue
assert 0, "Should never get here"
if listfuncs and (count or trace):
_err_exit("cannot specify both --listfuncs and (--trace or --count)")
if not (count or trace or report or listfuncs or countcallers):
_err_exit("must specify one of --trace, --count, --report, "
"--listfuncs, or --trackcalls")
if report and no_report:
_err_exit("cannot specify both --report and --no-report")
if report and not counts_file:
_err_exit("--report requires a --file")
if no_report and len(prog_argv) == 0:
_err_exit("missing name of file to run")
# everything is ready
if report:
results = CoverageResults(infile=counts_file, outfile=counts_file)
results.write_results(missing, summary=summary, coverdir=coverdir)
else:
sys.argv = prog_argv
progname = prog_argv[0]
sys.path[0] = os.path.split(progname)[0]
t = Trace(count, trace, countfuncs=listfuncs,
countcallers=countcallers, ignoremods=ignore_modules,
ignoredirs=ignore_dirs, infile=counts_file,
outfile=counts_file, timing=timing)
try:
t.run('execfile(%r)' % (progname,))
except IOError, err:
_err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
except SystemExit:
pass
results = t.results()
if not no_report:
results.write_results(missing, summary=summary, coverdir=coverdir)
if __name__ == '__main__':
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generates Python proto modules and grpc stubs for Beam protos."""
import contextlib
import glob
import inspect
import logging
import multiprocessing
import os
import platform
import re
import shutil
import subprocess
import sys
import time
import warnings
from importlib import import_module
import pkg_resources
BEAM_PROTO_PATHS = [
os.path.join('..', '..', 'model', 'pipeline', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'job-management', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'fn-execution', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'interactive', 'src', 'main', 'proto'),
]
PYTHON_OUTPUT_PATH = os.path.join('apache_beam', 'portability', 'api')
MODEL_RESOURCES = [
os.path.normpath('../../model/fn-execution/src/main/resources'\
+ '/org/apache/beam/model/fnexecution/v1/standard_coders.yaml'),
]
def generate_urn_files(log, out_dir):
"""
Create python files with statically defined URN constants.
  Creates a <proto>_pb2_urns.py file for each <proto>_pb2.py file that contains
an enum type.
This works by importing each api.<proto>_pb2 module created by `protoc`,
inspecting the module's contents, and generating a new side-car urn module.
This is executed at build time rather than dynamically on import to ensure
that it is compatible with static type checkers like mypy.
"""
import google.protobuf.message as message
import google.protobuf.pyext._message as pyext_message
class Context(object):
INDENT = ' '
CAP_SPLIT = re.compile('([A-Z][^A-Z]*|^[a-z]+)')
def __init__(self, indent=0):
self.lines = []
self.imports = set()
self.empty_types = set()
self._indent = indent
@contextlib.contextmanager
def indent(self):
self._indent += 1
yield
self._indent -= 1
def prepend(self, s):
if s:
self.lines.insert(0, (self.INDENT * self._indent) + s + '\n')
else:
self.lines.insert(0, '\n')
def line(self, s):
if s:
self.lines.append((self.INDENT * self._indent) + s + '\n')
else:
self.lines.append('\n')
def import_type(self, typ):
modname = typ.__module__
      if modname in ('__builtin__', 'builtins'):
return typ.__name__
else:
self.imports.add(modname)
return modname + '.' + typ.__name__
@staticmethod
def is_message_type(obj):
return isinstance(obj, type) and \
issubclass(obj, message.Message)
@staticmethod
def is_enum_type(obj):
return type(obj).__name__ == 'EnumTypeWrapper'
def python_repr(self, obj):
if isinstance(obj, message.Message):
return self.message_repr(obj)
elif isinstance(obj, (list,
pyext_message.RepeatedCompositeContainer, # pylint: disable=c-extension-no-member
pyext_message.RepeatedScalarContainer)): # pylint: disable=c-extension-no-member
return '[%s]' % ', '.join(self.python_repr(x) for x in obj)
else:
return repr(obj)
def empty_type(self, typ):
name = ('EMPTY_' +
'_'.join(x.upper()
for x in self.CAP_SPLIT.findall(typ.__name__)))
self.empty_types.add('%s = %s()' % (name, self.import_type(typ)))
return name
def message_repr(self, msg):
parts = []
for field, value in msg.ListFields():
parts.append('%s=%s' % (field.name, self.python_repr(value)))
if parts:
return '%s(%s)' % (self.import_type(type(msg)), ', '.join(parts))
else:
return self.empty_type(type(msg))
def write_enum(self, enum_name, enum, indent):
ctx = Context(indent=indent)
with ctx.indent():
for v in enum.DESCRIPTOR.values:
extensions = v.GetOptions().Extensions
prop = (
extensions[beam_runner_api_pb2.beam_urn],
extensions[beam_runner_api_pb2.beam_constant],
extensions[metrics_pb2.monitoring_info_spec],
extensions[metrics_pb2.label_props],
)
reprs = [self.python_repr(x) for x in prop]
if all(x == "''" or x.startswith('EMPTY_') for x in reprs):
continue
ctx.line('%s = PropertiesFromEnumValue(%s)' %
(v.name, ', '.join(self.python_repr(x) for x in prop)))
if ctx.lines:
ctx.prepend('class %s(object):' % enum_name)
ctx.prepend('')
ctx.line('')
return ctx.lines
def write_message(self, message_name, message, indent=0):
ctx = Context(indent=indent)
with ctx.indent():
for obj_name, obj in inspect.getmembers(message):
if self.is_message_type(obj):
ctx.lines += self.write_message(obj_name, obj, ctx._indent)
elif self.is_enum_type(obj):
ctx.lines += self.write_enum(obj_name, obj, ctx._indent)
if ctx.lines:
ctx.prepend('class %s(object):' % message_name)
ctx.prepend('')
return ctx.lines
pb2_files = [path for path in glob.glob(os.path.join(out_dir, '*_pb2.py'))]
api_path = os.path.dirname(pb2_files[0])
sys.path.insert(0, os.path.dirname(api_path))
def _import(m):
return import_module('api.%s' % m)
try:
beam_runner_api_pb2 = _import('beam_runner_api_pb2')
metrics_pb2 = _import('metrics_pb2')
for pb2_file in pb2_files:
modname = os.path.splitext(pb2_file)[0]
out_file = modname + '_urns.py'
modname = os.path.basename(modname)
mod = _import(modname)
ctx = Context()
for obj_name, obj in inspect.getmembers(mod):
if ctx.is_message_type(obj):
ctx.lines += ctx.write_message(obj_name, obj)
if ctx.lines:
for line in reversed(sorted(ctx.empty_types)):
ctx.prepend(line)
for modname in reversed(sorted(ctx.imports)):
ctx.prepend('from . import %s' % modname)
ctx.prepend('from ..utils import PropertiesFromEnumValue')
log.info("Writing urn stubs: %s" % out_file)
with open(out_file, 'w') as f:
f.writelines(ctx.lines)
finally:
sys.path.pop(0)
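# For orientation, a generated <proto>_pb2_urns.py side-car produced by the
# Context logic above is expected to look roughly like this (the message, enum
# and value names below are placeholders, not actual Beam protos):
#
#   from ..utils import PropertiesFromEnumValue
#   from . import some_other_pb2
#   EMPTY_SOME_PAYLOAD = some_other_pb2.SomePayload()
#   class SomeMessage(object):
#     class SomeEnum(object):
#       SOME_VALUE = PropertiesFromEnumValue(<urn>, <constant>, <spec>, <labels>)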
def _find_protoc_gen_mypy():
# NOTE: this shouldn't be necessary if the virtualenv's environment
# is passed to tasks below it, since protoc will search the PATH itself
fname = 'protoc-gen-mypy'
if platform.system() == 'Windows':
fname += ".exe"
pathstr = os.environ.get('PATH')
search_paths = pathstr.split(os.pathsep) if pathstr else []
# should typically be installed into the venv's bin dir
search_paths.insert(0, os.path.dirname(sys.executable))
for path in search_paths:
fullpath = os.path.join(path, fname)
if os.path.exists(fullpath):
return fullpath
raise RuntimeError("Could not find %s in %s" %
(fname, ', '.join(search_paths)))
def generate_proto_files(force=False, log=None):
try:
import grpc_tools # pylint: disable=unused-import
except ImportError:
warnings.warn('Installing grpcio-tools is recommended for development.')
if log is None:
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
py_sdk_root = os.path.dirname(os.path.abspath(__file__))
common = os.path.join(py_sdk_root, '..', 'common')
proto_dirs = [os.path.join(py_sdk_root, path) for path in BEAM_PROTO_PATHS]
proto_files = sum(
[glob.glob(os.path.join(d, '*.proto')) for d in proto_dirs], [])
out_dir = os.path.join(py_sdk_root, PYTHON_OUTPUT_PATH)
out_files = [path for path in glob.glob(os.path.join(out_dir, '*_pb2.py'))]
if out_files and not proto_files and not force:
# We have out_files but no protos; assume they're up to date.
# This is actually the common case (e.g. installation from an sdist).
log.info('No proto files; using existing generated files.')
return
elif not out_files and not proto_files:
if not os.path.exists(common):
raise RuntimeError(
'Not in apache git tree; unable to find proto definitions.')
else:
raise RuntimeError(
'No proto files found in %s.' % proto_dirs)
if force:
regenerate = 'forced'
elif not out_files:
regenerate = 'no output files'
elif len(out_files) < len(proto_files):
regenerate = 'not enough output files'
elif (
min(os.path.getmtime(path) for path in out_files)
<= max(os.path.getmtime(path)
for path in proto_files + [os.path.realpath(__file__)])):
regenerate = 'output files are out-of-date'
elif len(out_files) > len(proto_files):
regenerate = 'output files without corresponding .proto files'
# too many output files: probably due to switching between git branches.
# remove them so they don't trigger constant regeneration.
for out_file in out_files:
os.remove(out_file)
else:
regenerate = None
if regenerate:
try:
from grpc_tools import protoc
except ImportError:
if platform.system() == 'Windows':
# For Windows, grpcio-tools has to be installed manually.
raise RuntimeError(
'Cannot generate protos for Windows since grpcio-tools package is '
'not installed. Please install this package manually '
'using \'pip install grpcio-tools\'.')
# Use a subprocess to avoid messing with this process' path and imports.
# Note that this requires a separate module from setup.py for Windows:
# https://docs.python.org/2/library/multiprocessing.html#windows
p = multiprocessing.Process(
target=_install_grpcio_tools_and_generate_proto_files,
kwargs={'force': force})
p.start()
p.join()
if p.exitcode:
raise ValueError("Proto generation failed (see log for details).")
else:
log.info('Regenerating Python proto definitions (%s).' % regenerate)
builtin_protos = pkg_resources.resource_filename('grpc_tools', '_proto')
protoc_gen_mypy = _find_protoc_gen_mypy()
log.info('Found protoc_gen_mypy at %s' % protoc_gen_mypy)
args = (
[sys.executable] + # expecting to be called from command line
['--proto_path=%s' % builtin_protos] +
['--proto_path=%s' % d for d in proto_dirs] +
['--python_out=%s' % out_dir] +
['--plugin=protoc-gen-mypy=%s' % protoc_gen_mypy] +
['--mypy_out=%s' % out_dir] +
# TODO(robertwb): Remove the prefix once it's the default.
['--grpc_python_out=grpc_2_0:%s' % out_dir] +
proto_files)
ret_code = protoc.main(args)
if ret_code:
raise RuntimeError(
'Protoc returned non-zero status (see logs for details): '
'%s' % ret_code)
# copy resource files
for path in MODEL_RESOURCES:
shutil.copy2(os.path.join(py_sdk_root, path), out_dir)
ret_code = subprocess.call(
["futurize", "--both-stages", "--write", "--no-diff", out_dir])
if ret_code:
raise RuntimeError(
'Error applying futurize to generated protobuf python files.')
generate_urn_files(log, out_dir)
else:
log.info('Skipping proto regeneration: all files up to date')
# Though wheels are available for grpcio-tools, setup_requires uses
# easy_install which doesn't understand them. This means that it is
# compiled from scratch (which is expensive as it compiles the full
# protoc compiler). Instead, we attempt to install a wheel in a temporary
# directory and add it to the path as needed.
# See https://github.com/pypa/setuptools/issues/377
def _install_grpcio_tools_and_generate_proto_files(force=False):
py_sdk_root = os.path.dirname(os.path.abspath(__file__))
install_path = os.path.join(py_sdk_root, '.eggs', 'grpcio-wheels')
build_path = install_path + '-build'
if os.path.exists(build_path):
shutil.rmtree(build_path)
logging.warning('Installing grpcio-tools into %s', install_path)
try:
start = time.time()
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install',
'--target', install_path, '--build', build_path,
'--upgrade',
'-r', os.path.join(py_sdk_root, 'build-requirements.txt')])
logging.warning(
'Installing grpcio-tools took %0.2f seconds.', time.time() - start)
finally:
sys.stderr.flush()
shutil.rmtree(build_path, ignore_errors=True)
sys.path.append(install_path)
try:
generate_proto_files(force=force)
finally:
sys.stderr.flush()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
generate_proto_files(force=True)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
          len(ordered_dict_of_arrays) + 1, len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
          len(dataframe.columns) + 1, len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
if self._epoch == self._num_epochs:
# trim this batch, so as not to overshoot the last epoch.
batch_end_inclusive = integer_indexes.index(self._epoch_end)
integer_indexes = integer_indexes[:(batch_end_inclusive + 1)]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or pandas
`DataFrame` that will be read into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays or a numpy `ndarray`.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr.FeedingQueueRunner(
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
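# Minimal usage sketch for enqueue_data (an illustration, not part of the
# original module; assumes TF 1.x graph mode with queue runners, and the array
# shape, capacity and batch size below are arbitrary example values):
def _example_enqueue_numpy_array():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.training import coordinator as coordinator_lib
  data = np.arange(40).reshape(10, 4).astype(np.float32)
  queue = enqueue_data(data, capacity=32, shuffle=True, seed=42, enqueue_size=4)
  # For a numpy array the first dequeued tensor is the row number (see the
  # docstring above); the second is the corresponding batch of rows.
  index_batch, row_batch = queue.dequeue_many(8)
  with session_lib.Session() as sess:
    coord = coordinator_lib.Coordinator()
    threads = queue_runner.start_queue_runners(sess, coord=coord)
    indexes, rows = sess.run([index_batch, row_batch])
    coord.request_stop()
    coord.join(threads)
  return indexes, rows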
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import errno
import struct
import threading
import ctypes
from functools import reduce
from watchdog.utils import unicode_paths
from pathtools.path import absolute_path
from ctypes import (
c_int,
c_char_p,
c_uint32
)
from watchdog.utils import (
has_attribute,
ctypes_find_library
)
libc_string = ctypes_find_library('c', 'libc.so.6')
libc = ctypes.CDLL(libc_string, use_errno=True)
if (not has_attribute(libc, 'inotify_init') or
not has_attribute(libc, 'inotify_add_watch') or
not has_attribute(libc, 'inotify_rm_watch')):
raise ImportError("Unsupported libc version found: %s" % libc_string)
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
("inotify_add_watch", libc))
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
("inotify_rm_watch", libc))
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(
("inotify_init", libc))
try:
inotify_init1 = ctypes.CFUNCTYPE(c_int, c_int, use_errno=True)(
("inotify_init1", libc))
except AttributeError:
def inotify_init1(flags):
raise AttributeError(
"No such symbol inotify_init1 in libc. Non-blocking inotify is only provided by Linux 2.6.27 and newer.")
class InotifyConstants(object):
"""
Constants related to inotify.
"""
# User-space events
IN_ACCESS = 0x00000001 # File was accessed.
IN_MODIFY = 0x00000002 # File was modified.
IN_ATTRIB = 0x00000004 # Meta-data changed.
IN_CLOSE_WRITE = 0x00000008 # Writable file was closed.
IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed.
IN_OPEN = 0x00000020 # File was opened.
IN_MOVED_FROM = 0x00000040 # File was moved from X.
IN_MOVED_TO = 0x00000080 # File was moved to Y.
IN_CREATE = 0x00000100 # Subfile was created.
IN_DELETE = 0x00000200 # Subfile was deleted.
IN_DELETE_SELF = 0x00000400 # Self was deleted.
IN_MOVE_SELF = 0x00000800 # Self was moved.
# Helper user-space events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # Close.
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves.
# Events sent by the kernel to a watch.
IN_UNMOUNT = 0x00002000 # Backing file system was unmounted.
IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed.
IN_IGNORED = 0x00008000 # File was ignored.
# Special flags.
IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory.
IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link.
IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects
IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory.
IN_ONESHOT = 0x80000000 # Only send event once.
# All user-space events.
IN_ALL_EVENTS = reduce(
lambda x, y: x | y, [
IN_ACCESS,
IN_MODIFY,
IN_ATTRIB,
IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE,
IN_OPEN,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_DELETE,
IN_CREATE,
IN_DELETE_SELF,
IN_MOVE_SELF,
])
# Flags for ``inotify_init1``
IN_CLOEXEC = 0x02000000
IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = reduce(
lambda x, y: x | y, [
# We don't actually need IN_CLOSE_NOWRITE, but if it is omitted,
# DELETE_SELF is never emitted.
InotifyConstants.IN_MODIFY,
InotifyConstants.IN_CLOSE_NOWRITE,
InotifyConstants.IN_CLOSE_WRITE,
InotifyConstants.IN_ATTRIB,
InotifyConstants.IN_MOVED_FROM,
InotifyConstants.IN_MOVED_TO,
InotifyConstants.IN_CREATE,
InotifyConstants.IN_DELETE,
InotifyConstants.IN_DELETE_SELF,
InotifyConstants.IN_DONT_FOLLOW,
])
class inotify_event_struct(ctypes.Structure):
"""
Structure representation of the inotify_event structure
(used in buffer size calculations)::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
"""
_fields_ = [('wd', c_int),
('mask', c_uint32),
('cookie', c_uint32),
('len', c_uint32),
('name', c_char_p)]
EVENT_SIZE = ctypes.sizeof(inotify_event_struct)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
class Inotify(object):
"""
Linux inotify(7) API wrapper class.
:param path:
The directory path for which we want an inotify object.
:param recursive:
``True`` if subdirectories should be monitored; ``False`` otherwise.
"""
def __init__(self,
path,
recursive=False,
event_mask=WATCHDOG_ALL_EVENTS):
# The file descriptor associated with the inotify instance.
inotify_fd = inotify_init()
if inotify_fd == -1:
Inotify._raise_error()
self._inotify_fd = inotify_fd
self._lock = threading.Lock()
# Stores the watch descriptor for a given path.
self._wd_for_path = dict()
self._path_for_wd = dict()
path = absolute_path(path)
self._path = path
self._event_mask = event_mask
self._is_recursive = recursive
self._add_dir_watch(path, recursive, event_mask)
self._moved_from_events = dict()
@property
def event_mask(self):
"""The event mask for this inotify instance."""
return self._event_mask
@property
def path(self):
"""The path associated with the inotify instance."""
return self._path
@property
def is_recursive(self):
"""Whether we are watching directories recursively."""
return self._is_recursive
@property
def fd(self):
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def clear_move_records(self):
"""Clear cached records of MOVED_FROM events"""
self._moved_from_events = dict()
def source_for_move(self, destination_event):
"""
The source path corresponding to the given MOVED_TO event.
If the source path is outside the monitored directories, None
is returned instead.
"""
if destination_event.cookie in self._moved_from_events:
return self._moved_from_events[destination_event.cookie].src_path
else:
return None
def remember_move_from_event(self, event):
"""Save this event as the source event for future MOVED_TO events to reference"""
self._moved_from_events[event.cookie] = event
def add_watch(self, path):
"""
Adds a watch for the given path.
:param path:
Path to begin monitoring.
"""
with self._lock:
path = absolute_path(path)
self._add_watch(path, self._event_mask)
def remove_watch(self, path):
"""
Removes a watch for the given path.
:param path:
Path string for which the watch will be removed.
"""
with self._lock:
path = absolute_path(path)
self._remove_watch(path)
def close(self):
"""
Closes the inotify instance and removes all associated watches.
"""
with self._lock:
path = unicode_paths.encode(absolute_path(self._path))
wd = self._wd_for_path[path]
inotify_rm_watch(self._inotify_fd, wd)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
"""
Reads events from inotify and yields them.
"""
# HACK: We need to traverse the directory path
# recursively and simulate events for newly
# created subdirectories/files. This will handle
# mkdir -p foobar/blah/bar; touch foobar/afile
def _recursive_simulate(src_path):
events = []
for root, dirnames, filenames in os.walk(src_path):
for dirname in dirnames:
try:
full_path = absolute_path(os.path.join(root, dirname))
wd_dir = self._add_watch(full_path, self._event_mask)
e = InotifyEvent(
wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
events.append(e)
except OSError:
pass
for filename in filenames:
full_path = absolute_path(os.path.join(root, filename))
wd_parent_dir = self._wd_for_path[absolute_path(os.path.dirname(full_path))]
e = InotifyEvent(
wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
events.append(e)
return events
while True:
try:
event_buffer = os.read(self._inotify_fd, event_buffer_size)
except OSError as e:
if e.errno == errno.EINTR:
continue
break
with self._lock:
event_list = []
for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
if wd == -1:
continue
wd_path = unicode_paths.encode(self._path_for_wd[wd])
src_path = absolute_path(os.path.join(wd_path, name))
inotify_event = InotifyEvent(
wd, mask, cookie, name, src_path)
if inotify_event.is_moved_from:
self.remember_move_from_event(inotify_event)
elif inotify_event.is_moved_to:
move_src_path = self.source_for_move(inotify_event)
if move_src_path in self._wd_for_path:
moved_wd = self._wd_for_path[move_src_path]
del self._wd_for_path[move_src_path]
self._wd_for_path[inotify_event.src_path] = moved_wd
self._path_for_wd[moved_wd] = inotify_event.src_path
src_path = absolute_path(os.path.join(wd_path, name))
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_ignored:
# Clean up book-keeping for deleted watches.
self._remove_watch_bookkeeping(src_path)
continue
event_list.append(inotify_event)
if (self.is_recursive and
inotify_event.is_directory and
inotify_event.is_create):
# TODO: When a directory from another part of the
# filesystem is moved into a watched directory, this
# will not generate events for the directory tree.
# We need to coalesce IN_MOVED_TO events and those
# IN_MOVED_TO events which don't pair up with
# IN_MOVED_FROM events should be marked IN_CREATE
# instead relative to this directory.
try:
self._add_watch(src_path, self._event_mask)
except OSError:
continue
event_list.extend(_recursive_simulate(src_path))
return event_list
# Non-synchronized methods.
def _add_dir_watch(self, path, recursive, mask):
"""
Adds a watch (optionally recursively) for the given directory path
to monitor events specified by the mask.
:param path:
Path to monitor
:param recursive:
``True`` to monitor recursively.
:param mask:
Event bit mask.
"""
path = unicode_paths.encode(path)
if not os.path.isdir(path):
raise OSError('Path is not a directory')
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = absolute_path(os.path.join(root, dirname))
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
def _add_watch(self, path, mask):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:param mask:
Event bit mask.
"""
wd = inotify_add_watch(self._inotify_fd, unicode_paths.encode(path), mask)
if wd == -1:
Inotify._raise_error()
self._wd_for_path[path] = wd
self._path_for_wd[wd] = path
return wd
def _remove_watch_bookkeeping(self, path):
wd = self._wd_for_path.pop(path)
del self._path_for_wd[wd]
return wd
def _remove_watch(self, path):
"""
Removes a watch for the given path.
:param path:
Path to remove the watch for.
"""
wd = self._remove_watch_bookkeeping(path)
if inotify_rm_watch(self._inotify_fd, wd) == -1:
Inotify._raise_error()
@staticmethod
def _raise_error():
"""
Raises errors for inotify failures.
"""
_errnum = ctypes.get_errno()
raise OSError(os.strerror(_errnum))
@staticmethod
def _parse_event_buffer(event_buffer):
"""
Parses an event buffer of ``inotify_event`` structs returned by
inotify::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
i = 0
while i + 16 < len(event_buffer):
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
i += 16 + length
yield wd, mask, cookie, name
class InotifyEvent(object):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param name:
Event name.
:param src_path:
Event source path
"""
def __init__(self, wd, mask, cookie, name, src_path):
self._wd = wd
self._mask = mask
self._cookie = cookie
self._name = name
self._src_path = src_path
@property
def src_path(self):
return self._src_path
@property
def wd(self):
return self._wd
@property
def mask(self):
return self._mask
@property
def cookie(self):
return self._cookie
@property
def name(self):
return self._name
@property
def is_modify(self):
return self._mask & InotifyConstants.IN_MODIFY > 0
@property
def is_close_write(self):
return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0
@property
def is_close_nowrite(self):
return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0
@property
def is_access(self):
return self._mask & InotifyConstants.IN_ACCESS > 0
@property
def is_delete(self):
return self._mask & InotifyConstants.IN_DELETE > 0
@property
def is_delete_self(self):
return self._mask & InotifyConstants.IN_DELETE_SELF > 0
@property
def is_create(self):
return self._mask & InotifyConstants.IN_CREATE > 0
@property
def is_moved_from(self):
return self._mask & InotifyConstants.IN_MOVED_FROM > 0
@property
def is_moved_to(self):
return self._mask & InotifyConstants.IN_MOVED_TO > 0
@property
def is_move(self):
return self._mask & InotifyConstants.IN_MOVE > 0
@property
def is_move_self(self):
return self._mask & InotifyConstants.IN_MOVE_SELF > 0
@property
def is_attrib(self):
return self._mask & InotifyConstants.IN_ATTRIB > 0
# Additional bit masks
@property
def is_ignored(self):
return self._mask & InotifyConstants.IN_IGNORED > 0
@property
def is_directory(self):
# It looks like the kernel does not provide this information for
# IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
# See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
if self.is_delete_self or self.is_move_self:
return True
return self._mask & InotifyConstants.IN_ISDIR > 0
# Python-specific functionality.
@property
def key(self):
return (self._src_path,
self._wd,
self._mask,
self._cookie,
self._name)
def __eq__(self, inotify_event):
return self.key == inotify_event.key
def __ne__(self, inotify_event):
        return self.key != inotify_event.key
def __hash__(self):
return hash(self.key)
def _get_mask_string(self, mask):
masks = []
for c in dir(InotifyConstants):
if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']:
c_val = getattr(InotifyConstants, c)
if mask & c_val:
masks.append(c)
mask_string = '|'.join(masks)
return mask_string
_R = "<InotifyEvent: src_path=%s, wd=%d, mask=%s, cookie=%d, name=%s>"
def __repr__(self):
mask_string = self._get_mask_string(self.mask)
return InotifyEvent._R % (self.src_path,
self.wd,
mask_string,
self.cookie,
self.name)
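
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): shows how the raw
# (wd, mask, cookie, name) tuples yielded by Inotify._parse_event_buffer map
# onto InotifyEvent objects. The buffer is hand-built with struct.pack using
# the 16-byte header layout documented above; the watch descriptor, cookie and
# paths are made-up values for illustration only.
def _demo_parse_event_buffer():
    raw_name = b'example.txt' + b'\0' * 5      # the kernel NUL-pads names
    header = struct.pack('iIII', 1, InotifyConstants.IN_CREATE, 0, len(raw_name))
    for wd, mask, cookie, name in Inotify._parse_event_buffer(header + raw_name):
        event = InotifyEvent(wd, mask, cookie, name, b'/tmp/watched/' + name)
        print(event)  # repr() decodes the mask into flag names, e.g. IN_CREATE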
|
|
# -*- coding: utf-8 -*-
"""
A minimalist Klout API interface. Use of this API
requires a Klout *developer key*. You can register and
get a key at
<http://klout.com/s/developers/v2>
Supports Python >= 2.5 and Python 3
====================
Quickstart
====================
Install the PyPi package::
pip install Klout
This short example shows how to get a kloutId first and then fetch the user's score
using that kloutId::
from klout import *
# Make the Klout object
k = Klout('YOUR_KEY_HERE')
# Get kloutId of the user by inputting a twitter screenName
kloutId = k.identity.klout(screenName="erfaan").get('id')
# Get klout score of the user
score = k.user.score(kloutId=kloutId).get('score')
print("User's klout score is: %s" % score)
# Optionally a timeout parameter (seconds) can also be sent with all calls
score = k.user.score(kloutId=kloutId, timeout=5).get('score')
"""
try:
import urllib.request as urllib_request
import urllib.error as urllib_error
import urllib.parse as urllib_parse
except ImportError:
import urllib as urllib_parse
import urllib2 as urllib_request
import urllib2 as urllib_error
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import gzip
try:
import json
except ImportError:
import simplejson as json
import socket
class _DEFAULT(object): # pylint: disable=too-few-public-methods
pass
class KloutError(Exception):
"""
Base Exception thrown by Klout object when there is a
general error interacting with the API.
"""
def __init__(self, errors):
self.errors = errors
super(KloutError, self).__init__(errors)
def __str__(self):
return repr(self)
def __repr__(self):
return "ERROR: %s" % self.errors
class KloutHTTPError(KloutError):
"""
Exception thrown by Klout object when there is an
HTTP error interacting with api.klout.com.
"""
def __init__(self, errors, uri):
self.uri = uri
super(KloutHTTPError, self).__init__(errors)
# pylint: disable=too-few-public-methods
class KloutCall(object):
"""
Klout interface base class.
"""
# pylint: disable=too-many-arguments
def __init__(self, key, domain,
callable_cls, api_version="",
uri="", uriparts=None, secure=False):
self.key = key
self.domain = domain
self.api_version = api_version
self.callable_cls = callable_cls
self.uri = uri
self.secure = secure
self.uriparts = uriparts
def __getattr__(self, k):
"""
Generic Attribute Handler
"""
try:
return object.__getattr__(self, k)
except AttributeError:
def extend_call(arg):
"""
Extend the method call
"""
return self.callable_cls(
key=self.key, domain=self.domain,
api_version=self.api_version,
callable_cls=self.callable_cls, secure=self.secure,
uriparts=self.uriparts + (arg,))
if k == "_":
return extend_call
return extend_call(k)
def __call__(self, **kwargs):
# Build the uri.
uriparts = []
api_version = self.api_version
resource = "%s.json" % self.uriparts[0]
uriparts.append(api_version)
uriparts.append(resource)
params = {}
if self.key:
params['key'] = self.key
timeout = kwargs.pop('timeout', None)
# append input variables
for key, value in kwargs.items():
if key == 'screenName':
uriparts.append('twitter')
params[key] = value
elif key == 'kloutId':
uriparts.append(str(value))
else:
uriparts.append(key)
uriparts.append(str(value))
for uripart in self.uriparts[1:]:
if not uripart == 'klout':
uriparts.append(str(uripart))
uri = '/'.join(uriparts)
if params:
uri += '?' + urllib_parse.urlencode(params)
secure_str = ''
if self.secure:
secure_str = 's'
uri_base = "http%s://%s/%s" % (
secure_str, self.domain, uri)
headers = {'Accept-Encoding': 'gzip'}
req = urllib_request.Request(uri_base, headers=headers)
return self._handle_response(req, uri, timeout)
# pylint: disable=no-self-use
def _handle_response(self, req, uri, timeout=None):
if timeout:
socket.setdefaulttimeout(timeout)
try:
handle = urllib_request.urlopen(req)
if handle.info().get('Content-Encoding') == 'gzip':
# Handle gzip decompression
buf = StringIO(handle.read())
zip_file = gzip.GzipFile(fileobj=buf)
data = zip_file.read()
else:
data = handle.read()
res = json.loads(data.decode('utf8'))
return res
except (urllib_error.HTTPError, urllib_error.URLError):
import sys
_, errors, _ = sys.exc_info()
raise KloutHTTPError(errors, uri)
# pylint: disable=too-few-public-methods
class Klout(KloutCall):
"""
A minimalist yet fully featured klout API interface.
Get RESTful data by accessing members of this class. The result
is decoded python objects (dicts and lists).
The klout API is documented at:
http://klout.com/s/developers/v2
Examples:
We need a *developer key* to call any Klout API function
>>> f = open('key')
>>> key= f.readline().strip()
>>> f.close()
By default all communication with the Klout API is not secure (plain HTTP).
It can be made secure (HTTPS) by passing an optional `secure=True`
to the `Klout` constructor like this:
>>> k = Klout(key, secure=True)
**Identity Resource**
All calls to the Klout API now require a unique kloutId.
To facilitate this, you must first translate a {network}/{networkId}
into a kloutId.
* Get kloutId by twitter id
>>> k.identity.klout(tw="11158872")
{u'id': u'11747', u'network': u'ks'}
* Get kloutId by twitter screenName
>>> k.identity.klout(screenName="erfaan")
{u'id': u'11747', u'network': u'ks'}
* Get kloutId by google plus id
>>> k.identity.klout(gp="112975106809988327760")
{u'id': u'11747', u'network': u'ks'}
**User Resource**
Once we have a kloutId, we can use this resource to look up the user's
score, influence, or topics.
* Get user score
>>> k.user.score(kloutId='11747') # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{u'score': ..., u'scoreDelta': {u'dayChange': ..., u'monthChange': ...}}
* Get user influences
>>> k.user.influence(kloutId='11747') # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{u'myInfluencersCount': ..., u'myInfluenceesCount': ..., \
u'myInfluencers': [...], u'myInfluencees': [...]}
* Get user topics
>>> k.user.topics(kloutId='11747') # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
[{u'displayName': ..., u'name': ..., u'imageUrl': ..., u'id': ..., \
u'displayType': ..., u'slug': ...}, ...]
"""
def __init__(self, key, domain="api.klout.com", secure=False,
api_version=_DEFAULT):
"""
Create a new klout API connector.
Pass a `key` parameter to use::
k = Klout(key='YOUR_KEY_HERE')
`domain` lets you change the domain you are connecting to. By
default it is `api.klout.com`.
If `secure` is True you will connect with HTTPS instead of
HTTP.
`api_version` is used to set the base uri. By default it's
'v2'.
"""
if api_version is _DEFAULT:
api_version = "v2"
KloutCall.__init__(
self, key=key, domain=domain,
api_version=api_version,
callable_cls=KloutCall, secure=secure,
uriparts=())
__all__ = ["Klout", "KloutError", "KloutHTTPError"]
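
# ---------------------------------------------------------------------------
# Hedged sketch (illustrative only): how the attribute chaining in KloutCall
# turns Python calls into REST URIs, and how KloutHTTPError surfaces failures.
# 'YOUR_KEY_HERE' and the screen name are placeholders; a real call needs a
# valid developer key and network access.
def _demo_klout_usage():
    k = Klout('YOUR_KEY_HERE', secure=True)
    try:
        # k.identity.klout(screenName="erfaan") resolves to roughly
        # https://api.klout.com/v2/identity.json/twitter?screenName=erfaan&key=...
        klout_id = k.identity.klout(screenName="erfaan").get('id')
        return k.user.score(kloutId=klout_id, timeout=5).get('score')
    except KloutHTTPError:
        # The exception's .uri attribute carries the request that failed and
        # .errors the underlying urllib error.
        return None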
|
|
"""
990.ro XBMC Addon
Copyright (C) 2012-2014 krysty
https://code.google.com/p/krysty-xbmc/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os, re
import urllib, urllib2
import xbmc, xbmcplugin, xbmcgui
import plugin, db
from resources.lib.BeautifulSoup import BeautifulSoup
from resources.lib.ga import track
siteUrl = 'http://www.990.ro/'
searchUrl = 'http://www.990.ro/functions/search3/live_search_using_jquery_ajax/search.php'
tvShowsUrl = 'http://www.990.ro/seriale-lista.html'
moviesUrl = 'http://www.990.ro/toate-filmele.php'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36'
ACCEPT = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
TVshowsIcon = os.path.join(plugin.getPluginPath(), 'resources', 'media', 'tvshowsicon.png')
MoviesIcon = os.path.join(plugin.getPluginPath(), 'resources', 'media', 'moviesicon.png')
SearchIcon = os.path.join(plugin.getPluginPath(), 'resources', 'media', 'searchicon.png')
InfoIcon = os.path.join(plugin.getPluginPath(), 'resources', 'media', 'inficon.png')
SettingsIcon = os.path.join(plugin.getPluginPath(), 'resources', 'media', 'settingsicon.png')
print plugin.getPluginVersion()
DB = db.DB()
track(plugin.getPluginVersion())
def MAIN():
addDir('TV Shows',tvShowsUrl,4,TVshowsIcon)
addDir('Movies',moviesUrl,10,MoviesIcon)
addDir('Search',siteUrl,16,SearchIcon)
addDir('Settings',siteUrl,99,SettingsIcon)
addDir('Clear Cache',siteUrl,18)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def TVSHOWS(url):
import string
AZ = (ltr for ltr in string.ascii_uppercase)
addDir('All',url,1,TVshowsIcon)
addDir('Last Added',url,5,TVshowsIcon)
addDir('Search',url,15,TVshowsIcon)
addDir('[1-9]',url,17,TVshowsIcon)
for character in AZ:
addDir(character,url,17,TVshowsIcon)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getTVshows(url,order=None):
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
progress.update(1, "", "Loading list - 1%", "")
div = BeautifulSoup(http_req(url)).find("div", {"id": "tab1"})
if not order:
tvs = div.findAll("a")
else:
tvs = [s.parent for s in div.findAll("a", text = re.compile(r"^" + order + ".+?$"))]
current = 0
total = len(tvs)
while current <= total - 1:
title = htmlFilter(tvs[current].text)
link = urlFilter(tvs[current]['href'])
addDir(title, link, 2)
if progress.iscanceled(): sys.exit()
percent = int(((current + 1) * 100) / total)
message = "Loading list - " + str(percent) + "%"
progress.update(percent, "", message, "")
current += 1
progress.close()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getSeasons(tvshow, url):
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
progress.update(1, "", "Loading list - 1%", "")
seasons = re.findall(r"<img src='.+?' alt='Sezonul (.+?)'>", http_req(url))
thumb = re.findall(r"<img src='../(.+?)'", http_req(url))
if thumb: thumbnail = siteUrl + thumb[0]
else: thumbnail = ''
total = len(seasons)
current = 0
while current <= total - 1:
season_nr = str(seasons[current]).zfill(2)
name = 'Season %s' % season_nr
addDir(name,url,3,thumbnail,tvshow,season_nr)
if progress.iscanceled(): sys.exit()
percent = int(((current + 1) * 100) / total)
message = "Loading list - " + str(percent) + "%"
progress.update( percent, "", message, "" )
current += 1
progress.close()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getEpisodes(url,season,title,thumbnail):
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
progress.update(1, "", "Loading list - 1%", "")
title = re.sub('\(.+?\)', '', title).strip()
list = []
cache = False
if plugin.getSetting("enableCache") == 'true':
cacheFilename = '%s-s%s-episodes' % (re.sub(' ', '-', title), season)
cache = plugin.cacheLoad(cacheFilename, int(plugin.getSetting("cacheExpire")))
if cache:
list = cache
if not cache:
div = htmlFilter(str(BeautifulSoup(http_req(url)).find("div", {"id": "content"})), True)
episodes = re.findall(r'Sezonul '+season+', Episodul (.+?)</div>.+?<a href="seriale2-([\d]+-[\d]+)-.+?.html" class="link">(.+?)</a>', div)
if episodes:
total = len(episodes)
else:
episodes = re.findall(r'ma;">([\d]+)</div>.+?<a href="seriale2-([0-9]+-[0-9]+)-.+?.html" class="link">(.+?)</a>', div)
total = len(episodes)
current = 0
while current <= total - 1:
ep_num = episodes[current][0]
ep_name = episodes[current][2]
if ep_name == str(re.findall('(Episodul [-0-9]*)',ep_name)).strip('[]').strip('"\''): ep_name = ''
tvshow = {}
tvshow['url'] = siteUrl + 'player-serial-' + episodes[current][1] + '-sfast.html'
tvshow['thumbnail'] = thumbnail
tvshow['title'] = title
tvshow['season'] = season
tvshow['ep_num'] = ep_num
tvshow['ep_name'] = ep_name
list.append(tvshow)
if progress.iscanceled(): sys.exit()
percent = int(((current + 1) * 100) / total)
message = "Loading list - " + str(percent) + "%"
progress.update(percent, "Enabling cache storage will speed up future loads.", message, "")
current += 1
if plugin.getSetting("enableCache") == 'true':
plugin.cacheList(list, cacheFilename)
for tvshow in list:
name = 'Episode %s %s' % (tvshow['ep_num'], tvshow['ep_name'])
addDir(name,tvshow['url'],8,tvshow['thumbnail'],tvshow['title'],tvshow['season'],tvshow['ep_num'],tvshow['ep_name'],folder=False)
progress.close()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def lastAdded(cat):
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
progress.update(1, "", "Loading list - 1%", "")
div = htmlFilter(str(BeautifulSoup(http_req(siteUrl)).findAll("div", {"id": "tab1"})), True)
if cat == 'tvshows':
results = re.findall(r'<a class="link" href="(seriale2)-([0-9]+-[0-9]+)-.+?.html">(.+?)</a>.+?">(.+?)</div></div>', div)
elif cat == 'movies':
results = re.findall(r'<a class="link" href="(filme)-(.+?).html">(.+?)</a>.+?">(.+?)</div>', div)
total = len(results)
current = 0
while current <= total-1:
type = results[current][0]
link = results[current][1]
title = results[current][2]
ep_year = results[current][3]
if type == 'seriale2':
eps = re.findall(r'S(\d+)E(\d+)', ep_year)
if eps:
season = eps[0][0]
episode = eps[0][1]
else:
season = ''
episode = ''
name = '%s %sx%s' % (title, season, episode)
url = siteUrl + 'player-serial-' + link + '-sfast.html'
addDir(name,url,8,"",title,season,episode,folder=False)
elif type == 'filme':
year = re.findall('(\d{4,4})', ep_year)
name = '%s (%s)' % (title, year[0])
url = siteUrl + 'filme-' + link + '.html'
addDir(name,url,8,"",name,folder=False)
if progress.iscanceled(): sys.exit()
percent = int(((current + 1) * 100) / total)
message = "Loading list - " + str(percent) + "%"
progress.update(percent, "", message, "")
current += 1
progress.close()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def MOVIES(url,order=None):
if order == 'year':
div = BeautifulSoup(http_req(url)).findAll("div", {"id": "filtre"})[1].findAll("a", attrs = {"class": None})
for a in div:
addDir(a.text, moviesUrl + a['href'], 9, MoviesIcon)
elif order == 'genre':
div = BeautifulSoup(http_req(url)).find("div", {"id": "filtre"}).findAll("a", attrs = {"class": None})
for a in div:
addDir(plugin.ro2en(a.text), moviesUrl + a['href'], 9, MoviesIcon)
else:
addDir('Search',url,14,MoviesIcon)
addDir('Last Added',url,6,MoviesIcon)
addDir('By Year',url,11,MoviesIcon)
addDir('By Genre',url,12,MoviesIcon)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getMovies(url):
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
progress.update(1, "", "Loading list - 1%", "")
soup = BeautifulSoup(http_req(url))
pages = str(soup.find("div", {"id": "numarpagini"}))
pages = max(int(x) for x in re.findall(r'([\d]+)</a>', pages))
page = int(re.search('pagina=(\d+)', url).group(1))
div = soup.find("div", {"id": "content"})
links = div.findAll("a", {"class": "link"})
thumbs = re.findall(r'<img src="../(.+?)"', str(div))
years = re.findall(r'Aparitie: ?(\d+)', str(div))
total = len(links)
current = 0
while current <= total - 1:
name = "%s (%s)" % (htmlFilter(links[current].text), years[current])
link = urlFilter(links[current]['href'])
thumbnail = siteUrl + thumbs[current]
addDir(name, link, 8, thumbnail, name, folder=False)
if progress.iscanceled(): sys.exit()
percent = int(((current + 1) * 100) / total)
message = "Loading list - " + str(percent) + "%"
progress.update(percent, "", message, "")
current += 1
if not page == pages:
url = re.sub('pagina=\d+', 'pagina=' + str(page + 1), url)
addDir("Next Page >>", url, 9)
progress.close()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def SEARCH(cat):
kb = xbmc.Keyboard('', 'Search', False)
lastSearch = None
try:
lastSearch = plugin.loadData('search')
if lastSearch: kb.setDefault(lastSearch)
except: pass
kb.doModal()
if (kb.isConfirmed()):
inputText = kb.getText()
try: plugin.saveData('search', inputText)
except: pass
if inputText == '':
dialog = xbmcgui.Dialog().ok('Search', 'There is nothing to search.')
sys.exit()
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
progress.update(1, "", "Loading list - 1%", "")
searchText = {'kw': inputText}
req = urllib2.Request(searchUrl, urllib.urlencode(searchText))
req.add_header('User-Agent', USER_AGENT)
req.add_header('Cache-Control', 'no-transform')
response = htmlFilter(urllib2.urlopen(req).read())
if cat == 'all':
results = re.findall(r'<a href="(.+?)-(.+?)-online-download.html">.+?<div id="rest">(.+?)<div id="auth_dat">', response)
thumb = re.findall(r'<img class="search" .+? src="../(.+?)"', response)
else:
results = re.findall(r'<a href="('+cat+')-(.+?)-online-download.html">.+?<div id="rest">(.+?)<div id="auth_dat">', response)
thumb = re.findall(r'<a href="'+cat+'-.+?<img class="search" .+? src="../(.+?)"', response)
total = len(results)
current = 0
while current <= total - 1:
if results[current][0] == 'seriale':
name = re.sub('\(', ' (', results[current][2])
url = '%sseriale-%s-online-download.html' % (siteUrl, results[current][1])
thumbnail = siteUrl + thumb[current]
title = re.sub('\(.+?\)', '', name).strip()
addDir(name,url,2,thumbnail,title)
elif results[current][0] == 'filme':
title = re.sub('\(', ' (', results[current][2])
url = '%sfilme-%s-online-download.html' % (siteUrl, results[current][1])
thumbnail = siteUrl + thumb[current]
addDir(title,url,8,thumbnail,title,folder=False)
if progress.iscanceled(): sys.exit()
percent = int(((current + 1) * 100) / total)
message = "Loading list - " + str(percent) + "%"
progress.update( percent, "", message, "" )
current += 1
progress.close()
else: sys.exit()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def http_req(url):
req = urllib2.Request(url)
req.add_header('User-Agent', USER_AGENT)
req.add_header('Accept', ACCEPT)
req.add_header('Cache-Control', 'no-transform')
response = urllib2.urlopen(req)
source = response.read()
response.close()
return source
def getTrailer(url):
trailerframe = re.findall(r"<iframe width='595' height='335' src='.+?/embed/(.+?)'", http_req(url))
if trailerframe:
yt = youtube_video('http://www.youtube.com/watch?v=' + trailerframe[0])
if yt:
return yt + '?.mp4'
else: return False
else: return False
def youtube_video(url):
try:
conn = urllib2.urlopen(url)
encoding = conn.headers.getparam('charset')
content = conn.read().decode(encoding)
s = re.findall(r'"url_encoded_fmt_stream_map": "([^"]+)"', content)
if s:
import HTMLParser
s = s[0].split(',')
s = [a.replace('\\u0026', '&') for a in s]
s = [urllib2.parse_keqv_list(a.split('&')) for a in s]
n = re.findall(r'<title>(.+) - YouTube</title>', content)
s, n = (s or [], HTMLParser.HTMLParser().unescape(n[0]))
for z in s:
if z['itag'] == '18':
if 'mp4' in z['type']:
ext = '.mp4'
elif 'flv' in z['type']:
ext = '.flv'
try: link = urllib.unquote(z['url'] + '&signature=%s' % z['sig'])
except: link = urllib.unquote(z['url'])
return link
except: return False
def playTrailer(url,title='',thumbnail=''):
progress = xbmcgui.DialogProgress()
progress.create('Progress', 'Please wait...')
trailerUrl = getTrailer(url)
if trailerUrl:
title = '%s Trailer' % title
liz = xbmcgui.ListItem(title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
liz.setInfo(type = "Video", infoLabels = {"title": title})
xbmc.Player().play(item=trailerUrl, listitem=liz)
else:
xbmcgui.Dialog().ok("", "Error: trailer link not available!")
progress.close()
def playStream(url,title,thumbnail,season='',episode='',ep_name='',subtitle=''):
win = xbmcgui.Window(10000)
win.setProperty('990.playing.title', title.lower())
win.setProperty('990.playing.season', str(season))
win.setProperty('990.playing.episode', str(episode))
win.setProperty('990.playing.subtitle', subtitle)
if season and episode:
title = ('%s %sx%s %s' % (title, season, episode, ep_name)).strip()
item = xbmcgui.ListItem(title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
item.setInfo(type = "Video", infoLabels = {"title": title})
item.setPath(url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
return True
def selectSource(url,title='',thumbnail='',season='',episode='',ep_name=''):
sources = getSources(url)
if not sources:
return xbmcgui.Dialog().ok("", "Error: video link(s) not available!")
else:
labels = []
for item in sources:
labels.append(item['name'])
dialog = xbmcgui.Dialog()
index = dialog.select('Choose your stream', labels)
if index > -1:
playStream(sources[index]['url'], title, thumbnail, season, episode, ep_name, sources[index]['subtitle'])
else:
return
def getSources(url):
sources = []
try:
quality = ''
if(re.search('filme', url)):
quality = re.search(r'Calitate film: nota <b>(.+?)</b>', http_req(url))
movieId = re.search('-([\d]+)-', url)
url = siteUrl + 'player-film-' + movieId.group(1) + '-sfast.html'
match = re.search(r'http://(?:www.)?(?:fastupload|superweb)(?:.rol)?.ro/?(?:video)?/(?:.+?).html?', http_req(url))
url = match.group(0)
match = re.search(r"'file': '(.+?)',", http_req(url))
videoLink = match.group(1) + '|referer=' + url
if(quality == ''):
item = {'name': 'Play Video', 'url': videoLink, 'subtitle': getSubtitle(url)}
else:
item = {'name': 'Play Video (Quality:'+quality.group(1)+')', 'url': videoLink, 'subtitle': getSubtitle(url)}
sources.append(item)
return sources
except:
return False
def getSubtitle(url):
subtitle = ''
try:
if plugin.getSetting("enableSub") == 'true':
page = str(BeautifulSoup(http_req(url)).findAll("script"))
page = ''.join(page.split())
match = re.findall('\'tracks\':\[{\'file\':"http:\/\/superweb\.rol\.ro\/video\/jw6\/(.+?)",', page)
if match:
sub_url = 'http://superweb.rol.ro/video/jw6/' + match[0]
sub_tmp = os.path.join(xbmc.translatePath("special://temp"), match[0])
with open(sub_tmp, 'w') as f:
f.write(http_req(sub_url))
subtitle = match[0]
except: pass
return subtitle
def addDir(name,url,mode,thumbnail='',title='',season='',episode='',episode_name='',folder=True):
ok = True
params = {'name': name, 'mode': mode, 'url': url, 'thumbnail': thumbnail}
params['title'] = title
params['season'] = season
params['episode'] = episode
params['ep_name'] = episode_name
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=thumbnail)
if not folder:
liz.setProperty('isPlayable', 'true')
liz.setProperty('resumetime', str(0))
liz.setProperty('totaltime', str(1))
if not season:
contextMenuItems = []
trailer = {'url': url, 'title': title, 'mode': 19, 'thumbnail': thumbnail}
contextMenuItems.append(('Play Trailer', 'XBMC.RunPlugin(%s)' % set_params(trailer)))
liz.addContextMenuItems(contextMenuItems, replaceItems=True)
liz.setInfo(type="Video", infoLabels = {"title": name})
ok = xbmcplugin.addDirectoryItem(handle = int(sys.argv[1]), url = set_params(params), listitem = liz, isFolder = folder)
return ok
def clearCache():
if plugin.clearCache():
xbmcgui.Dialog().ok('', 'Cache storage successfully cleared.')
else:
xbmcgui.Dialog().ok('', 'Something went wrong.')
def htmlFilter(htmlstring, trimspaces = False):
hex_entity_pat = re.compile('&#x([^;]+);')
hex_entity_fix = lambda x: hex_entity_pat.sub(lambda m: '&#%d;' % int(m.group(1), 16), x)
htmlstring = str(BeautifulSoup(hex_entity_fix(htmlstring), convertEntities=BeautifulSoup.ALL_ENTITIES))
if trimspaces:
htmlstring = "".join(line.strip() for line in htmlstring.split("\n"))
return htmlstring
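# Worked example (illustrative): htmlFilter("Fish &#x26; Chips") first rewrites
# the hex entity to its decimal form ("Fish &#38; Chips"), and BeautifulSoup's
# entity conversion then yields the literal text "Fish & Chips".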
def urlFilter(url):
if not re.search(siteUrl, url):
url = siteUrl + url
return url
def set_params(dict):
out = {}
for key, value in dict.iteritems():
if isinstance(value, unicode):
value = value.encode('utf8')
elif isinstance(value, str):
value.decode('utf8')
out[key] = value
return sys.argv[0] + '?' + urllib.urlencode(out)
def get_params():
param = {'default': 'none'}
paramstring = sys.argv[2]
if len(paramstring) >= 2:
params = sys.argv[2]
cleanedparams = params.replace('?','')
if (params[len(params)-1] == '/'):
params = params[0:len(params)-2]
pairsofparams = cleanedparams.split('&')
param = {}
for i in range(len(pairsofparams)):
splitparams = {}
splitparams = pairsofparams[i].split('=')
if (len(splitparams)) == 2:
param[splitparams[0]] = splitparams[1]
return param
params = get_params()
mode = int(params.get('mode', 0))
url = urllib.unquote_plus(params.get('url', ''))
name = urllib.unquote_plus(params.get('name', ''))
title = urllib.unquote_plus(params.get('title', ''))
thumbnail = urllib.unquote_plus(params.get('thumbnail', ''))
season = urllib.unquote_plus(params.get('season', ''))
episode = urllib.unquote_plus(params.get('episode', ''))
ep_name = urllib.unquote_plus(params.get('ep_name', ''))
if mode: print 'Mode: ' + str(mode)
if url: print 'URL: ' + str(url)
if mode == 0 or not url or len(url) < 1: MAIN()
elif mode == 1: getTVshows(url)
elif mode == 2: getSeasons(name,url)
elif mode == 3: getEpisodes(url,season,title,thumbnail)
elif mode == 4: TVSHOWS(url)
elif mode == 5: lastAdded('tvshows')
elif mode == 6: lastAdded('movies')
elif mode == 8: selectSource(url,title,thumbnail,season,episode,ep_name)
elif mode == 9: getMovies(url)
elif mode == 10: MOVIES(url)
elif mode == 11: MOVIES(url,order='year')
elif mode == 12: MOVIES(url,order='genre')
elif mode == 14: SEARCH('filme')
elif mode == 15: SEARCH('seriale')
elif mode == 16: SEARCH('all')
elif mode == 17: getTVshows(url,order=name)
elif mode == 18: clearCache()
elif mode == 19: playTrailer(url,title,thumbnail)
elif mode == 99: plugin.openSettings()
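
# ---------------------------------------------------------------------------
# Hedged sketch (illustrative only, never called by the add-on): the round trip
# between set_params() and get_params() that drives the mode dispatcher above.
# Inside XBMC, sys.argv[0] is the plugin:// base URL and sys.argv[2] the query
# string of the clicked item; the values below are placeholders.
def _demo_param_round_trip():
    item = {'name': 'Some Show', 'mode': 2, 'url': siteUrl, 'thumbnail': ''}
    plugin_url = set_params(item)
    # e.g. plugin://.../?name=Some+Show&mode=2&url=http%3A%2F%2Fwww.990.ro%2F&thumbnail=
    # On the next invocation get_params() splits that query string back into a
    # dict of still-encoded strings, which is why the dispatcher above wraps
    # every value in urllib.unquote_plus().
    return plugin_url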
|
|
"""All constants related to the ZHA component."""
import enum
import logging
from typing import List
import bellows.zigbee.application
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from .typing import CALLABLE_T
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
COMPONENTS = (
BINARY_SENSOR,
CLIMATE,
COVER,
DEVICE_TRACKER,
FAN,
LIGHT,
LOCK,
SENSOR,
SWITCH,
)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
"""Possible options for radio type."""
ezsp = (
"EZSP = Silicon Labs EmberZNet protocol: Elelabs, HUSBZB-1, Telegesis",
bellows.zigbee.application.ControllerApplication,
)
deconz = (
"deCONZ = dresden elektronik deCONZ protocol: ConBee I/II, RaspBee I/II",
zigpy_deconz.zigbee.application.ControllerApplication,
)
ti_cc = (
"TI_CC = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_cc.zigbee.application.ControllerApplication,
)
zigate = (
"ZiGate = ZiGate Zigbee radios: PiZiGate, ZiGate USB-TTL, ZiGate WiFi",
zigpy_zigate.zigbee.application.ControllerApplication,
)
xbee = (
"XBee = Digi XBee Zigbee radios: Digi XBee Series 2, 2C, 3",
zigpy_xbee.zigbee.application.ControllerApplication,
)
@classmethod
def list(cls) -> List[str]:
"""Return a list of descriptions."""
return [e.description for e in RadioType]
@classmethod
def get_by_description(cls, description: str) -> str:
"""Get radio by description."""
for radio in cls:
if radio.description == description:
return radio.name
raise ValueError
def __init__(self, description: str, controller_cls: CALLABLE_T):
"""Init instance."""
self._desc = description
self._ctrl_cls = controller_cls
@property
def controller(self) -> CALLABLE_T:
"""Return controller class."""
return self._ctrl_cls
@property
def description(self) -> str:
"""Return radio type description."""
return self._desc
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
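# Hedged note (assumed ZCL attribute-reporting semantics): each REPORT_CONFIG_*
# tuple above reads as (minimum report interval, maximum report interval,
# reportable change). For example REPORT_CONFIG_DEFAULT = (30, 900, 1) asks a
# device to report no more often than every 30 s, at least once every 900 s,
# and otherwise only when the attribute has changed by at least 1 unit.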
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
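
# ---------------------------------------------------------------------------
# Hedged sketch (illustrative only): how RadioType ties a user-facing
# description to its zigpy controller class. Enum members iterate in
# definition order, so descriptions[0] below is the EZSP entry defined above.
def _demo_radio_type_lookup():
    descriptions = RadioType.list()                       # strings shown to the user
    name = RadioType.get_by_description(descriptions[0])  # "ezsp"
    controller_cls = RadioType[name].controller           # bellows ControllerApplication
    return controller_cls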
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
# The default learning rates are a historical artifact of the initial
# implementation.
_DNN_LEARNING_RATE = 0.001
_LINEAR_LEARNING_RATE = 0.005
def _check_no_sync_replicas_optimizer(optimizer):
if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'SyncReplicasOptimizer does not support multi optimizers case. '
'Therefore, it is not supported in DNNLinearCombined model. '
'If you want to use this optimizer, please use either DNN or Linear '
'model.')
def _linear_learning_rate(num_linear_feature_columns):
"""Returns the default learning rate of the linear model.
The calculation is a historical artifact of this initial implementation, but
has proven a reasonable choice.
Args:
num_linear_feature_columns: The number of feature columns of the linear
model.
Returns:
A float.
"""
default_learning_rate = 1. / math.sqrt(num_linear_feature_columns)
return min(_LINEAR_LEARNING_RATE, default_learning_rate)
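# Worked example (illustrative): with 100 linear feature columns the default is
# min(0.005, 1 / sqrt(100)) = min(0.005, 0.1) = 0.005, so the fixed cap applies;
# with 1,000,000 columns it is min(0.005, 0.001) = 0.001, i.e. very wide linear
# models fall back to the 1/sqrt(n) rule of thumb.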
def _add_layer_summary(value, tag):
summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s/activation' % tag, value)
def _dnn_linear_combined_model_fn(
features, labels, mode, head,
linear_feature_columns=None, linear_optimizer='Ftrl',
dnn_feature_columns=None, dnn_optimizer='Adagrad', dnn_hidden_units=None,
dnn_activation_fn=nn.relu, dnn_dropout=None,
input_layer_partitioner=None, config=None):
"""Deep Neural Net and Linear combined model_fn.
Args:
features: dict of `Tensor`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
`int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `Head` instance.
linear_feature_columns: An iterable containing all the feature columns used
by the Linear model.
linear_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the Linear model. Defaults to the Ftrl
optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
the DNN model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN model. Defaults to the Adagrad
optimizer.
dnn_hidden_units: List of hidden units per DNN layer.
dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability we will drop out a given DNN
coordinate.
input_layer_partitioner: Partitioner for input layer.
config: `RunConfig` object to configure the runtime settings.
Returns:
`ModelFnOps`
Raises:
ValueError: If both `linear_feature_columns` and `dnn_features_columns`
are empty at the same time, or `input_layer_partitioner` is missing,
or features has the wrong type.
"""
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
if not linear_feature_columns and not dnn_feature_columns:
raise ValueError(
'Either linear_feature_columns or dnn_feature_columns must be defined.')
num_ps_replicas = config.num_ps_replicas if config else 0
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
# Build DNN Logits.
dnn_parent_scope = 'dnn'
if not dnn_feature_columns:
dnn_logits = None
else:
dnn_optimizer = optimizers.get_optimizer_instance(
dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
_check_no_sync_replicas_optimizer(dnn_optimizer)
if not dnn_hidden_units:
raise ValueError(
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified.')
dnn_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
with variable_scope.variable_scope(
dnn_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner):
dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
hidden_units=dnn_hidden_units,
feature_columns=dnn_feature_columns,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner)
dnn_logits = dnn_logit_fn(features=features, mode=mode)
linear_parent_scope = 'linear'
if not linear_feature_columns:
linear_logits = None
else:
linear_optimizer = optimizers.get_optimizer_instance(
linear_optimizer,
learning_rate=_linear_learning_rate(len(linear_feature_columns)))
_check_no_sync_replicas_optimizer(linear_optimizer)
with variable_scope.variable_scope(
linear_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
feature_columns=linear_feature_columns)
linear_logits = logit_fn(features=features)
_add_layer_summary(linear_logits, scope.name)
# Combine logits and build full model.
if dnn_logits is not None and linear_logits is not None:
logits = dnn_logits + linear_logits
elif dnn_logits is not None:
logits = dnn_logits
else:
logits = linear_logits
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
train_ops = []
global_step = training_util.get_global_step()
if dnn_logits is not None:
train_ops.append(
dnn_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=dnn_parent_scope)))
if linear_logits is not None:
train_ops.append(
linear_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=linear_parent_scope)))
train_op = control_flow_ops.group(*train_ops)
with ops.control_dependencies([train_op]):
with ops.colocate_with(global_step):
return state_ops.assign_add(global_step, 1)
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
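# Hedged summary (shapes assumed for illustration): the wide and deep towers are
# combined by a plain element-wise sum of their logits. With batch_size=32 and a
# binary head, dnn_logits and linear_logits are both [32, 1] and the summed
# logits keep that shape. _train_op_fn then builds one minimize op per tower,
# each restricted to its own variable scope, and groups them so the two
# optimizers never update each other's variables.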
class DNNLinearCombinedClassifier(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined classification models.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
sparse_column_a = categorical_column_with_hash_bucket(...)
sparse_column_b = categorical_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNLinearCombinedClassifier(
# wide settings
linear_feature_columns=[sparse_feature_a_x_sparse_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
sparse_feature_a_emb, sparse_feature_b_emb, numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# The same applies to FtrlOptimizer.
# Input builders
def input_fn_train():  # returns x, y
  pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval():  # returns x, y
  pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict():  # returns x, None
  pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
input_layer_partitioner=None,
config=None):
"""Initializes a DNNLinearCombinedClassifier instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
label_vocabulary: A list of strings represents possible label values. If
given, labels must be string type and have any value in
`label_vocabulary`. If it is not given, that means labels are
already encoded as integer or float within [0, 1] for `n_classes=2` and
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
Also there will be errors if vocabulary is not provided and labels are
string.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary)
def _model_fn(features, labels, mode, config):
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNLinearCombinedClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class DNNLinearCombinedRegressor(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined models for regression.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
sparse_column_a = categorical_column_with_hash_bucket(...)
sparse_column_b = categorical_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNLinearCombinedRegressor(
# wide settings
linear_feature_columns=[sparse_feature_a_x_sparse_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
sparse_feature_a_emb, sparse_feature_b_emb, numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# The same applies to FtrlOptimizer.
# Input builders
def input_fn_train():  # returns x, y
  pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval():  # returns x, y
  pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict():  # returns x, None
  pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using mean squared error.
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
label_dimension=1,
weight_column=None,
input_layer_partitioner=None,
config=None):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
def _model_fn(features, labels, mode, config):
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head_lib. # pylint: disable=protected-access
_regression_head_with_mean_squared_error_loss(
label_dimension=label_dimension, weight_column=weight_column),
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNLinearCombinedRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import gettext
import socket
from babel import localedata
import eventlet.patcher
import fixtures
import mock
from oslo_serialization import jsonutils
import routes
import six
import webob
from glance.api.v1 import router as router_v1
from glance.api.v2 import router as router_v2
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance import i18n
from glance.tests import utils as test_utils
class RequestTest(test_utils.BaseTestCase):
def _set_expected_languages(self, all_locales=None, avail_locales=None):
if all_locales is None:
all_locales = []
# Override localedata.locale_identifiers to return some locales.
def returns_some_locales(*args, **kwargs):
return all_locales
self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales)
# Override gettext.find to return other than None for some languages.
def fake_gettext_find(lang_id, *args, **kwargs):
found_ret = '/glance/%s/LC_MESSAGES/glance.mo' % lang_id
if avail_locales is None:
# All locales are available.
return found_ret
languages = kwargs['languages']
if languages[0] in avail_locales:
return found_ret
return None
self.stubs.Set(gettext, 'find', fake_gettext_find)
def test_content_range(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Range"] = 'bytes 10-99/*'
range_ = request.get_content_range()
self.assertEqual(10, range_.start)
self.assertEqual(100, range_.stop) # non-inclusive
self.assertIsNone(range_.length)
def test_content_range_invalid(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Range"] = 'bytes=0-99'
self.assertRaises(webob.exc.HTTPBadRequest,
request.get_content_range)
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123')
self.assertRaises(exception.InvalidContentType,
request.get_content_type, ('application/xml',))
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "text/html"
self.assertRaises(exception.InvalidContentType,
request.get_content_type, ('application/xml',))
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type(('application/json',))
self.assertEqual("application/json", result)
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept_xml_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept_json_xml_quality(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_language_accept_default(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8"
result = request.best_match_language()
self.assertIsNone(result)
def test_language_accept_none(self):
request = wsgi.Request.blank('/tests/123')
result = request.best_match_language()
self.assertIsNone(result)
def test_best_match_language_expected(self):
# If Accept-Language is a supported language, best_match_language()
# returns it.
self._set_expected_languages(all_locales=['it'])
req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'})
self.assertEqual('it', req.best_match_language())
def test_request_match_language_unexpected(self):
# If Accept-Language is a language we do not support,
# best_match_language() returns None.
self._set_expected_languages(all_locales=['it'])
req = wsgi.Request.blank('/', headers={'Accept-Language': 'zh'})
self.assertIsNone(req.best_match_language())
@mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match')
def test_best_match_language_unknown(self, mock_best_match):
# Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
accepted = 'unknown-lang'
request.headers = {'Accept-Language': accepted}
mock_best_match.return_value = None
self.assertIsNone(request.best_match_language())
# If Accept-Language is missing or empty, match should be None
request.headers = {'Accept-Language': ''}
self.assertIsNone(request.best_match_language())
request.headers.pop('Accept-Language')
self.assertIsNone(request.best_match_language())
def test_http_error_response_codes(self):
sample_id, member_id, tag_val, task_id = 'abc', '123', '1', '2'
"""Makes sure v1 unallowed methods return 405"""
unallowed_methods = [
('/images', ['PUT', 'DELETE', 'HEAD', 'PATCH']),
('/images/detail', ['POST', 'PUT', 'DELETE', 'PATCH']),
('/images/%s' % sample_id, ['POST', 'PATCH']),
('/images/%s/members' % sample_id,
['POST', 'DELETE', 'HEAD', 'PATCH']),
('/images/%s/members/%s' % (sample_id, member_id),
['POST', 'HEAD', 'PATCH']),
]
api = test_utils.FakeAuthMiddleware(router_v1.API(routes.Mapper()))
for uri, methods in unallowed_methods:
for method in methods:
req = webob.Request.blank(uri)
req.method = method
res = req.get_response(api)
self.assertEqual(405, res.status_int)
"""Makes sure v2 unallowed methods return 405"""
unallowed_methods = [
('/schemas/image', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/images', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/member', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/members', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/task', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/tasks', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/images', ['PUT', 'DELETE', 'PATCH', 'HEAD']),
('/images/%s' % sample_id, ['POST', 'PUT', 'HEAD']),
('/images/%s/file' % sample_id,
['POST', 'DELETE', 'PATCH', 'HEAD']),
('/images/%s/tags/%s' % (sample_id, tag_val),
['GET', 'POST', 'PATCH', 'HEAD']),
('/images/%s/members' % sample_id,
['PUT', 'DELETE', 'PATCH', 'HEAD']),
('/images/%s/members/%s' % (sample_id, member_id),
['POST', 'PATCH', 'HEAD']),
('/tasks', ['PUT', 'DELETE', 'PATCH', 'HEAD']),
('/tasks/%s' % task_id, ['POST', 'PUT', 'PATCH', 'HEAD']),
]
api = test_utils.FakeAuthMiddleware(router_v2.API(routes.Mapper()))
for uri, methods in unallowed_methods:
for method in methods:
req = webob.Request.blank(uri)
req.method = method
res = req.get_response(api)
self.assertEqual(405, res.status_int)
# Make sure requests using nonexistent methods return 405
req = webob.Request.blank('/schemas/image')
req.method = 'NonexistentMethod'
res = req.get_response(api)
self.assertEqual(405, res.status_int)
class ResourceTest(test_utils.BaseTestCase):
def test_get_action_args(self):
env = {
'wsgiorg.routing_args': [
None,
{
'controller': None,
'format': None,
'action': 'update',
'id': 12,
},
],
}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_get_action_args_invalid_index(self):
env = {'wsgiorg.routing_args': []}
expected = {}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_get_action_args_del_controller_error(self):
actions = {'format': None,
'action': 'update',
'id': 12}
env = {'wsgiorg.routing_args': [None, actions]}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_get_action_args_del_format_error(self):
actions = {'action': 'update', 'id': 12}
env = {'wsgiorg.routing_args': [None, actions]}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_dispatch(self):
class Controller(object):
def index(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
expected = ('on', 'off')
self.assertEqual(expected, actual)
def test_dispatch_default(self):
class Controller(object):
def default(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
expected = ('on', 'off')
self.assertEqual(expected, actual)
def test_dispatch_no_default(self):
class Controller(object):
def show(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
self.assertRaises(AttributeError, resource.dispatch, Controller(),
'index', 'on', pants='off')
def test_call(self):
class FakeController(object):
def index(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(FakeController(), None, None)
def dispatch(self, obj, action, *args, **kwargs):
if isinstance(obj, wsgi.JSONRequestDeserializer):
return []
if isinstance(obj, wsgi.JSONResponseSerializer):
raise webob.exc.HTTPForbidden()
self.stubs.Set(wsgi.Resource, 'dispatch', dispatch)
request = wsgi.Request.blank('/')
response = resource.__call__(request)
self.assertIsInstance(response, webob.exc.HTTPForbidden)
self.assertEqual(403, response.status_code)
def test_call_raises_exception(self):
class FakeController(object):
def index(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(FakeController(), None, None)
def dispatch(self, obj, action, *args, **kwargs):
raise Exception("test exception")
self.stubs.Set(wsgi.Resource, 'dispatch', dispatch)
request = wsgi.Request.blank('/')
response = resource.__call__(request)
self.assertIsInstance(response, webob.exc.HTTPInternalServerError)
self.assertEqual(500, response.status_code)
@mock.patch.object(wsgi, 'translate_exception')
def test_resource_call_error_handle_localized(self,
mock_translate_exception):
class Controller(object):
def delete(self, req, identity):
raise webob.exc.HTTPBadRequest(explanation='Not Found')
actions = {'action': 'delete', 'identity': 12}
env = {'wsgiorg.routing_args': [None, actions]}
request = wsgi.Request.blank('/tests/123', environ=env)
message_es = 'No Encontrado'
resource = wsgi.Resource(Controller(),
wsgi.JSONRequestDeserializer(),
None)
translated_exc = webob.exc.HTTPBadRequest(message_es)
mock_translate_exception.return_value = translated_exc
e = self.assertRaises(webob.exc.HTTPBadRequest,
resource, request)
self.assertEqual(message_es, str(e))
@mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match')
@mock.patch.object(i18n, 'translate')
def test_translate_exception(self, mock_translate, mock_best_match):
mock_translate.return_value = 'No Encontrado'
mock_best_match.return_value = 'de'
req = wsgi.Request.blank('/tests/123')
req.headers["Accept-Language"] = "de"
e = webob.exc.HTTPNotFound(explanation='Not Found')
e = wsgi.translate_exception(req, e)
self.assertEqual('No Encontrado', e.explanation)
class JSONResponseSerializerTest(test_utils.BaseTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = b'{"key": "value"}'
actual = wsgi.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1901, 3, 8, 2)}
expected = b'{"date": "1901-03-08T02:00:00.000000"}'
actual = wsgi.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = {"is_public": True, "name": [{"name1": "test"}]}
actual = wsgi.JSONResponseSerializer().to_json(fixture)
actual = jsonutils.loads(actual)
for k in expected:
self.assertEqual(expected[k], actual[k])
def test_to_json_with_set(self):
fixture = set(["foo"])
expected = b'["foo"]'
actual = wsgi.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
wsgi.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = [h for h in response.headerlist
if h[0] == 'Content-Type']
self.assertEqual(1, len(content_types))
self.assertEqual('application/json', response.content_type)
self.assertEqual(b'{"key": "value"}', response.body)
class JSONRequestDeserializerTest(test_utils.BaseTestCase):
def test_has_body_no_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = b'asdf'
request.headers.pop('Content-Length')
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_zero_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = b'asdf'
request.headers['Content-Length'] = 0
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = b'asdf'
self.assertIn('Content-Length', request.headers)
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_no_body_no_content_length(self):
request = wsgi.Request.blank('/')
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_from_json(self):
fixture = '{"key": "value"}'
expected = {"key": "value"}
actual = wsgi.JSONRequestDeserializer().from_json(fixture)
self.assertEqual(expected, actual)
def test_from_json_malformed(self):
fixture = 'kjasdklfjsklajf'
self.assertRaises(webob.exc.HTTPBadRequest,
wsgi.JSONRequestDeserializer().from_json, fixture)
def test_default_no_body(self):
request = wsgi.Request.blank('/')
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {}
self.assertEqual(expected, actual)
def test_default_with_body(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = b'{"key": "value"}'
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_has_body_has_transfer_encoding(self):
self.assertTrue(self._check_transfer_encoding(
transfer_encoding='chunked'))
def test_has_body_multiple_transfer_encoding(self):
self.assertTrue(self._check_transfer_encoding(
transfer_encoding='chunked, gzip'))
def test_has_body_invalid_transfer_encoding(self):
self.assertFalse(self._check_transfer_encoding(
transfer_encoding='invalid', content_length=0))
def test_has_body_invalid_transfer_encoding_with_content_length(self):
self.assertTrue(self._check_transfer_encoding(
transfer_encoding='invalid', content_length=5))
def test_has_body_valid_transfer_encoding_with_content_length(self):
self.assertTrue(self._check_transfer_encoding(
transfer_encoding='chunked', content_length=0))
def _check_transfer_encoding(self, transfer_encoding=None,
content_length=None):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = b'fake_body'
request.headers['transfer-encoding'] = transfer_encoding
if content_length is not None:
request.headers['content-length'] = content_length
return wsgi.JSONRequestDeserializer().has_body(request)
def test_get_bind_addr_default_value(self):
expected = ('0.0.0.0', '123456')
actual = wsgi.get_bind_addr(default_port="123456")
self.assertEqual(expected, actual)
class ServerTest(test_utils.BaseTestCase):
def test_create_pool(self):
"""Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool."""
actual = wsgi.Server(threads=1).create_pool()
self.assertIsInstance(actual, eventlet.greenpool.GreenPool)
@mock.patch.object(wsgi.Server, 'configure_socket')
def test_http_keepalive(self, mock_configure_socket):
self.config(http_keepalive=False)
self.config(workers=0)
server = wsgi.Server(threads=1)
server.sock = 'fake_socket'
# mocking eventlet.wsgi server method to check it is called with
# configured 'http_keepalive' value.
with mock.patch.object(eventlet.wsgi,
'server') as mock_server:
fake_application = "fake-application"
server.start(fake_application, 0)
server.wait()
mock_server.assert_called_once_with('fake_socket',
fake_application,
log=server._logger,
debug=False,
custom_pool=server.pool,
keepalive=False,
socket_timeout=900)
class TestHelpers(test_utils.BaseTestCase):
def test_headers_are_unicode(self):
"""
Verifies that the headers returned by conversion code are unicode.
Headers are passed via http in non-testing mode, which automatically
converts them to unicode. Verifying that the method does the
conversion proves that we aren't passing data that works in tests
but will fail in production.
"""
fixture = {'name': 'fake public image',
'is_public': True,
'size': 19,
'location': "file:///tmp/glance-tests/2",
'properties': {'distro': 'Ubuntu 10.04 LTS'}}
headers = utils.image_meta_to_http_headers(fixture)
for k, v in six.iteritems(headers):
self.assertIsInstance(v, six.text_type)
def test_data_passed_properly_through_headers(self):
"""
Verifies that data is the same after being passed through headers
"""
fixture = {'name': 'fake public image',
'is_public': True,
'deleted': False,
'name': None,
'size': 19,
'location': "file:///tmp/glance-tests/2",
'properties': {'distro': 'Ubuntu 10.04 LTS'}}
headers = utils.image_meta_to_http_headers(fixture)
class FakeResponse(object):
pass
response = FakeResponse()
response.headers = headers
result = utils.get_image_meta_from_headers(response)
for k, v in six.iteritems(fixture):
if v is not None:
self.assertEqual(v, result[k])
else:
self.assertNotIn(k, result)
class GetSocketTestCase(test_utils.BaseTestCase):
def setUp(self):
super(GetSocketTestCase, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.get_bind_addr",
lambda x: ('192.168.0.13', 1234)))
addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)),
(2, 2, 17, '', ('192.168.0.13', 80)),
(2, 3, 0, '', ('192.168.0.13', 80))]
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.socket.getaddrinfo",
lambda *x: addr_info_list))
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.time.time",
mock.Mock(side_effect=[0, 1, 5, 10, 20, 35])))
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.utils.validate_key_cert",
lambda *x: None))
wsgi.CONF.cert_file = '/etc/ssl/cert'
wsgi.CONF.key_file = '/etc/ssl/key'
wsgi.CONF.ca_file = '/etc/ssl/ca_cert'
wsgi.CONF.tcp_keepidle = 600
def test_correct_configure_socket(self):
mock_socket = mock.Mock()
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.ssl.wrap_socket',
mock_socket))
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.eventlet.listen',
lambda *x, **y: mock_socket))
server = wsgi.Server()
server.default_port = 1234
server.configure_socket()
self.assertIn(mock.call.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1), mock_socket.mock_calls)
self.assertIn(mock.call.setsockopt(
socket.SOL_SOCKET,
socket.SO_KEEPALIVE,
1), mock_socket.mock_calls)
if hasattr(socket, 'TCP_KEEPIDLE'):
self.assertIn(mock.call().setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
wsgi.CONF.tcp_keepidle), mock_socket.mock_calls)
def test_get_socket_without_all_ssl_reqs(self):
wsgi.CONF.key_file = None
self.assertRaises(RuntimeError, wsgi.get_socket, 1234)
def test_get_socket_with_bind_problems(self):
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.eventlet.listen',
mock.Mock(side_effect=(
[wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None]))))
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.ssl.wrap_socket',
lambda *x, **y: None))
self.assertRaises(RuntimeError, wsgi.get_socket, 1234)
def test_get_socket_with_unexpected_socket_errno(self):
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.eventlet.listen',
mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM))))
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.ssl.wrap_socket',
lambda *x, **y: None))
self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234)
|
|
import MySQLdb, MySQLdb.cursors
import config
from bottle import route, run, template, debug, view, static_file, request
import time
import urllib
import urllib2
from bs4 import BeautifulSoup
import re
@route('/js/<filename>')
def js_static(filename):
return static_file(filename, root='./static/js')
@route('/img/<filename>')
def img_static(filename):
return static_file(filename, root='./static/img')
@route('/css/<filename>')
def css_static(filename):
return static_file(filename, root='./static/css')
@route('/fonts/<filename>')
def font_static(filename):
return static_file(filename, root='./static/fonts')
@route('/racer/<id:int>')
@route('/racer/<id:int>/kart/<kart_id:int>')
@route('/racer/<id:int>/heat/<heat_id:int>')
def racer_profile(id, kart_id=-1, heat_id=-1):
con = mysql_connect()
c = con.cursor()
c.execute('SELECT * \
FROM racers \
WHERE id = %s', (id,))
racer = c.fetchone()
if not racer:
return template('templates/404')
# racer['racer_name'] = re.sub('[(){}<>:.]', '', racer['racer_name'])
# racer['racer_name'] = racer['racer_name'].strip(u'\xa0')
c.execute('SELECT DISTINCT(kart_id)\
FROM laptimes\
WHERE racer_id = %s\
ORDER BY kart_id ASC', (id,))
karts = c.fetchall()
c.execute('SELECT DISTINCT(l.race_id), r.datetime\
FROM laptimes l\
LEFT JOIN races r ON r.id = l.race_id\
WHERE l.racer_id = %s\
ORDER BY race_id ASC', (id,))
heats = c.fetchall()
param_sql = ''
sql_params = (id,)
if kart_id > -1:
param_sql = 'AND kart_id = %s'
sql_params = (id, kart_id)
if heat_id > -1:
param_sql = 'AND race_id = %s'
sql_params = (id, heat_id)
c.execute('SELECT id, kart_id, race_id, lap_number, laptime, datetime, created \
FROM laptimes \
WHERE racer_id = %s \
{0} \
ORDER BY datetime ASC'.format(param_sql), sql_params)
laps = c.fetchall()
average = 0.0
for lap in laps:
average += lap['laptime']
average = round(average / len(laps), 3) if laps else 0.0
weather_data = {}
for row in laps:
weather_data[row['id']] = get_weather(row['datetime'])
c.close()
con.close()
return template('templates/racer_profile', racer=racer, laps=laps, karts=karts, kart_id=kart_id, heats=heats, heat_id=heat_id, weather_data=weather_data, average=average)
@route('/search_racers')
def search():
return template('templates/search', racers=None)
@route('/search_racers', method='POST')
def search_racers():
racer_name = request.forms.get('racer_name')
con = mysql_connect()
c = con.cursor()
c.execute('SELECT * \
FROM racers \
WHERE racer_name LIKE %s', ('%' + racer_name + '%',))
racers = c.fetchall()
c.close()
con.close()
return template('templates/search', racers=racers)
@route('/')
@route('/laptimes')
@route('/laptimes/top/<top_num:int>')
@route('/laptimes/date/<year:int>')
@route('/laptimes/date/<year:int>/top/<top_num:int>')
@route('/laptimes/date/<year:int>/<month:int>')
@route('/laptimes/date/<year:int>/<month:int>/top/<top_num:int>')
@route('/laptimes/date/<year:int>/<month:int>/<day:int>')
@route('/laptimes/date/<year:int>/<month:int>/<day:int>/top/<top_num:int>')
def show_laptimes(top_num=10, year=0, month=0, day=0):
con = mysql_connect()
c = con.cursor()
date = (year, month, day)
sql_params = (top_num,)
date_sql = ''
if year > 0:
date_sql = 'AND l.datetime >= "%s-01-01 00:00:00" \
AND l.datetime < "%s-01-01 00:00:00"'
sql_params = (year, year + 1, top_num)
if month > 0:
date_sql = 'AND l.datetime >= "%s-%s-01 00:00:00" \
AND l.datetime < "%s-%s-01 00:00:00"'
sql_params = (year, month, year, month + 1, top_num)
if day > 0:
date_sql = 'AND l.datetime >= "%s-%s-%s 00:00:00" \
AND l.datetime < "%s-%s-%s 00:00:00"'
sql_params = (year, month, day, year, month, day + 1, top_num)
query = 'SELECT l.id, l.racer_id, r.racer_name, l.kart_id, l.race_id, l.lap_number, l.laptime, l.datetime \
FROM laptimes l \
LEFT JOIN racers r ON r.id = l.racer_id \
WHERE l.laptime > 0.000 \
{0} \
ORDER BY laptime ASC \
LIMIT %s'.format(date_sql)
# query = 'SELECT racers.id, racers.name, laptimes.laptime, laptimes.datetime \
# FROM laptimes \
# INNER JOIN racers ON laptimes.racer_id = racers.id \
# WHERE 1=1 \
# {0}\
# ORDER BY laptime ASC \
# LIMIT %s'.format(date_sql)
c.execute(query, sql_params)
data = c.fetchall()
c.close()
con.close()
top_num = len(data)
average = 0.0
weather_data = {}
laptimes_minutes = {}
for row in data:
average += row['laptime']
weather = get_weather(row['datetime'])
weather_data[row['id']] = weather
laptimes_minutes[row['id']] = convert_seconds(row['laptime'])
if top_num > 0:
average = round((average / top_num), 3)
current_date = time.strftime('%Y-%m-%d')
return template('templates/laptimes', rows=data, top_num=top_num, average=average, weather_data=weather_data, date=date, current_date=current_date, laptimes_minutes=laptimes_minutes)
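# Example URLs handled above (illustrative): '/laptimes/top/25' lists the 25
# fastest laps overall, while '/laptimes/date/2014/6/top/25' applies the same
# query restricted to June 2014.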
@route('/live')
def get_live_scores():
url = 'http://dkcdallas.clubspeedtiming.com/sp_center/cslivescore.aspx'
post_data_values = {
'__EVENTTARGET': 'ddlTrack',
'__VIEWSTATE': '/wEPDwUJNjAyMjUyNzk0D2QWAgIDD2QWDAIDD2QWAmYPZBYCZg9kFgQCAQ8QDxYGHg5EYXRhVmFsdWVGaWVsZAUCSUQeDURhdGFUZXh0RmllbGQFB1RyYWNrTm8eC18hRGF0YUJvdW5kZ2QQFQMLTm9ydGggVHJhY2sLU291dGggVHJhY2sLU3VwZXIgVHJhY2sVAwExATIBMxQrAwNnZ2cWAWZkAgMPDxYCHgdWaXNpYmxlaGRkAgUPZBYCZg9kFgICAQ8PFgIeBFRleHRkZGQCBw9kFgJmD2QWBmYPDxYCHwQFFzEwIG1pbiBBZHVsdCBTdXBlciBIZWF0ZGQCAQ8PFgIfBAUQQnkgQmVzdCBMYXAgVGltZWRkAgIPDxYCHwQFDDAwIExhcHMgTGVmdGRkAgkPPCsACwEADxYIHghEYXRhS2V5cxYAHgtfIUl0ZW1Db3VudGYeCVBhZ2VDb3VudAIBHhVfIURhdGFTb3VyY2VJdGVtQ291bnRmZGQCCw9kFgJmD2QWAmYPDxYCHwRlZGQCDQ8PFgIfA2hkZGQw/Qa8Y6HSGXM9gF7Kpqj6rq2RhNh0CuaYkL/odKCUTg==',
'__VIEWSTATEGENERATOR': 'BAC7619F',
'__EVENTVALIDATION': '/wEWBQLDsPeKCAL10pSYAwL6vb72DwL7vb72DwL4vb72DwI8+lY+QUxYwEioop2rurZh1aN4K/KyOLLgYN0te/sC',
'ddlTrack': 3
}
data = urllib.urlencode(post_data_values)
req = urllib2.Request(url, data)
racers = []
try:
response = urllib2.urlopen(req, timeout=3)
page = response.read()
racers = parse_live_board_page(page)
except Exception:
pass
return template('templates/live', racers=racers)
@route('/about')
def about():
return template('templates/about')
@route('/contact')
def contact():
return template('templates/contact')
def convert_seconds(laptime):
minutes = laptime / 60.0
seconds = laptime % 60
return '{0}:{1}'.format(int(minutes), seconds)
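# Example of the formatting above: convert_seconds(75.5) returns '1:15.5' and
# convert_seconds(65.5) returns '1:5.5' (seconds are not zero-padded).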
# Get nearest observed weather based on a provided datetime
def get_weather(datetime):
con = mysql_connect()
c = con.cursor()
c.execute('SELECT weather AS Weather, temp_f AS Temperature, relative_humidity AS Humidity, wind_dir, wind_mph \
FROM weather \
WHERE observation_time <= %s \
AND observation_time > %s - INTERVAL 30 MINUTE \
ORDER BY observation_time DESC \
LIMIT 1', (datetime, datetime))
weather = c.fetchone()
c.close()
con.close()
if weather:
return weather
return {}
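# For example (illustrative timestamp), get_weather('2014-06-01 14:05:00')
# returns the most recent observation taken at or before that time within the
# preceding 30 minutes, or an empty dict when no observation is that recent.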
# Parse the live timing board and return a list of lists of racer details
def parse_live_board_page(page):
soup = BeautifulSoup(page)
table = soup.find('table', {'id': 'dg'})
rows = table.find_all('tr')
racers = []
for row in rows[1:]:
tds = row.find_all('td')
racer_info = []
for td in tds:
racer_info.append(td.get_text().strip())
racers.append(racer_info)
return racers
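# For example (illustrative markup), a scoreboard row such as
# <tr><td>1</td><td>12</td><td>Some Racer</td><td>24.123</td></tr> comes back
# as ['1', '12', 'Some Racer', '24.123'], so the return value is a list of
# these per-racer lists in board order, with the header row skipped.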
# Set up the MySQL connection (host, user, password, database), using DictCursor
# so that rows are returned as dictionaries rather than tuples.
def mysql_connect(host=config.opts['mysql']['host'], username=config.opts['mysql']['username'], password=config.opts['mysql']['password'], database=config.opts['mysql']['database']):
return MySQLdb.connect(host, username, password, database, cursorclass=MySQLdb.cursors.DictCursor)
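# Typical use (a sketch; the query is illustrative): rows come back as dicts
# because of the DictCursor, e.g.
#   con = mysql_connect()
#   c = con.cursor()
#   c.execute('SELECT id, racer_name FROM racers LIMIT 1')
#   row = c.fetchone()  # {'id': 1, 'racer_name': '...'} rather than a tuple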
debug(True)
run(reloader=True)
|
|
#! /usr/bin/env python
# $Header$
'''Simple CGI dispatching.
'''
import os, sys
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from pysphere.ZSI import TC, EvaluateException, FaultFromZSIException, \
SoapWriter, Fault, FaultFromException, UNICODE_ENCODING, ParsedSoap, \
ParseException
from pysphere.ZSI import _child_elements, _seqtypes, _find_arraytype, _find_type, resolvers
from pysphere.ZSI.auth import ClientBinding
# Client binding information is stored in a global. We provide an accessor
# in case later on it's not.
_client_binding = None
def GetClientBinding():
'''Return the client binding object.
'''
return _client_binding
gettypecode = lambda mod,e: getattr(mod, str(e.localName)).typecode
def _Dispatch(ps, modules, SendResponse, SendFault, nsdict={}, typesmodule=None,
gettypecode=gettypecode, rpc=False, docstyle=False, **kw):
'''Find a handler for the SOAP request in ps; search modules.
Call SendResponse or SendFault to send the reply back, appropriately.
Behaviors:
default -- Call "handler" method with pyobj representation of body root, and return
a self-describing request (w/typecode). Parsing done via a typecode from
typesmodule, or Any.
docstyle -- Call the "handler" method with the parsed body root and serialize the
result with an XML typecode (DOM), wrapped in a body_root "Response" message.
rpc -- Use an RPC wrapper for the result. The body root (the RPC wrapper) of the
request is ignored and each "part" of the message is parsed via an individual
typecode. The handler may return the parts as a dict, a single instance, or a
list; the result is serialized as a Struct when possible, otherwise as an Array.
Parsing is done via a typecode from typesmodule, or Any.
A short usage sketch follows this function.
'''
global _client_binding
try:
what = str(ps.body_root.localName)
# See what modules have the element name.
if modules is None:
modules = ( sys.modules['__main__'], )
handlers = [ getattr(m, what) for m in modules if hasattr(m, what) ]
if len(handlers) == 0:
raise TypeError("Unknown method " + what)
# Of those modules, see who's callable.
handlers = [ h for h in handlers if callable(h) ]
if len(handlers) == 0:
raise TypeError("Unimplemented method " + what)
if len(handlers) > 1:
raise TypeError("Multiple implementations found: %s" % handlers)
handler = handlers[0]
_client_binding = ClientBinding(ps)
if docstyle:
result = handler(ps.body_root)
tc = TC.XML(aslist=1, pname=what+'Response')
elif not rpc:
try:
tc = gettypecode(typesmodule, ps.body_root)
except Exception:
tc = TC.Any()
try:
arg = tc.parse(ps.body_root, ps)
except EvaluateException, ex:
SendFault(FaultFromZSIException(ex), **kw)
return
try:
result = handler(arg)
except Exception,ex:
SendFault(FaultFromZSIException(ex), **kw)
return
try:
tc = result.typecode
except AttributeError,ex:
SendFault(FaultFromZSIException(ex), **kw)
return
elif typesmodule is not None:
kwargs = {}
for e in _child_elements(ps.body_root):
try:
tc = gettypecode(typesmodule, e)
except Exception:
tc = TC.Any()
try:
kwargs[str(e.localName)] = tc.parse(e, ps)
except EvaluateException, ex:
SendFault(FaultFromZSIException(ex), **kw)
return
result = handler(**kwargs)
aslist = False
# make sure data is wrapped, try to make this a Struct
if isinstance(result,_seqtypes):
for _ in result:
aslist = hasattr(result, 'typecode')
if aslist: break
elif not isinstance(result, dict):
aslist = not hasattr(result, 'typecode')
result = (result,)
tc = TC.Any(pname=what+'Response', aslist=aslist)
else:
# if this is an Array, call handler with list
# if this is a Struct, call handler with dict
tp = _find_type(ps.body_root)
isarray = ((isinstance(tp, (tuple,list)) and tp[1] == 'Array') or _find_arraytype(ps.body_root))
data = _child_elements(ps.body_root)
tc = TC.Any()
if isarray and len(data) == 0:
result = handler()
elif isarray:
try: arg = [ tc.parse(e, ps) for e in data ]
except EvaluateException, e:
SendFault(FaultFromZSIException(e), **kw)
return
result = handler(*arg)
else:
try: kwarg = dict([ (str(e.localName),tc.parse(e, ps)) for e in data ])
except EvaluateException, e:
SendFault(FaultFromZSIException(e), **kw)
return
result = handler(**kwarg)
# response typecode
#tc = getattr(result, 'typecode', TC.Any(pname=what+'Response'))
tc = TC.Any(pname=what+'Response')
sw = SoapWriter(nsdict=nsdict)
sw.serialize(result, tc)
return SendResponse(str(sw), **kw)
except Fault, e:
return SendFault(e, **kw)
except Exception, e:
# Something went wrong, send a fault.
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
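# Usage sketch for the behaviors described in _Dispatch's docstring (the handler
# module, element names, and port are illustrative assumptions; this file is
# assumed to be importable as pysphere.ZSI.dispatch):
#
#   # handlers.py
#   def Echo(text):
#       return text
#
#   # server script (rpc-style dispatch)
#   import handlers
#   from pysphere.ZSI.dispatch import AsServer
#   AsServer(port=8080, modules=(handlers,), rpc=True)
#
# An incoming request whose body root is <Echo> with a child <text> element is
# routed to handlers.Echo(text=...) and the return value is serialized back in
# an "EchoResponse" wrapper.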
def _ModPythonSendXML(text, code=200, **kw):
req = kw['request']
req.content_type = 'text/xml'
req.content_length = len(text)
req.send_http_header()
req.write(text)
def _ModPythonSendFault(f, **kw):
_ModPythonSendXML(f.AsSOAP(), 500, **kw)
def _JonPySendFault(f, **kw):
_JonPySendXML(f.AsSOAP(), 500, **kw)
def _JonPySendXML(text, code=200, **kw):
req = kw['request']
req.set_header("Content-Type", 'text/xml; charset="%s"' %UNICODE_ENCODING)
req.set_header("Content-Length", str(len(text)))
req.write(text)
def _CGISendXML(text, code=200, **kw):
print 'Status: %d' % code
print 'Content-Type: text/xml; charset="%s"' %UNICODE_ENCODING
print 'Content-Length: %d' % len(text)
print ''
print text
def _CGISendFault(f, **kw):
_CGISendXML(f.AsSOAP(), 500, **kw)
class SOAPRequestHandler(BaseHTTPRequestHandler):
'''SOAP handler.
'''
server_version = 'ZSI/1.1 ' + BaseHTTPRequestHandler.server_version
def send_xml(self, text, code=200):
'''Send some XML.
'''
self.send_response(code)
if text:
self.send_header('Content-type', 'text/xml; charset="%s"' %UNICODE_ENCODING)
self.send_header('Content-Length', str(len(text)))
self.end_headers()
if text:
self.wfile.write(text)
self.wfile.flush()
def send_fault(self, f, code=500):
'''Send a fault.
'''
self.send_xml(f.AsSOAP(), code)
def do_POST(self):
'''The POST command.
'''
try:
ct = self.headers['content-type']
if ct.startswith('multipart/'):
cid = resolvers.MIMEResolver(ct, self.rfile)
xml = cid.GetSOAPPart()
ps = ParsedSoap(xml, resolver=cid.Resolve)
else:
length = int(self.headers['content-length'])
ps = ParsedSoap(self.rfile.read(length))
except ParseException, e:
self.send_fault(FaultFromZSIException(e))
return
except Exception, e:
# Faulted while processing; assume it's in the header.
self.send_fault(FaultFromException(e, 1, sys.exc_info()[2]))
return
_Dispatch(ps, self.server.modules, self.send_xml, self.send_fault,
docstyle=self.server.docstyle, nsdict=self.server.nsdict,
typesmodule=self.server.typesmodule, rpc=self.server.rpc)
def AsServer(port=80, modules=None, docstyle=False, nsdict={}, typesmodule=None,
rpc=False, addr=''):
address = (addr, port)
httpd = HTTPServer(address, SOAPRequestHandler)
httpd.modules = modules
httpd.docstyle = docstyle
httpd.nsdict = nsdict
httpd.typesmodule = typesmodule
httpd.rpc = rpc
httpd.serve_forever()
def AsCGI(nsdict={}, typesmodule=None, rpc=False, modules=None):
'''Dispatch within a CGI script.
'''
if os.environ.get('REQUEST_METHOD') != 'POST':
_CGISendFault(Fault(Fault.Client, 'Must use POST'))
return
ct = os.environ['CONTENT_TYPE']
try:
if ct.startswith('multipart/'):
cid = resolvers.MIMEResolver(ct, sys.stdin)
xml = cid.GetSOAPPart()
ps = ParsedSoap(xml, resolver=cid.Resolve)
else:
length = int(os.environ['CONTENT_LENGTH'])
ps = ParsedSoap(sys.stdin.read(length))
except ParseException, e:
_CGISendFault(FaultFromZSIException(e))
return
_Dispatch(ps, modules, _CGISendXML, _CGISendFault, nsdict=nsdict,
typesmodule=typesmodule, rpc=rpc)
def AsHandler(request=None, modules=None, **kw):
'''Dispatch from within ModPython.'''
ps = ParsedSoap(request)
kw['request'] = request
_Dispatch(ps, modules, _ModPythonSendXML, _ModPythonSendFault, **kw)
def AsJonPy(request=None, modules=None, **kw):
'''Dispatch within a jonpy CGI/FastCGI script.
'''
kw['request'] = request
if request.environ.get('REQUEST_METHOD') != 'POST':
_JonPySendFault(Fault(Fault.Client, 'Must use POST'), **kw)
return
ct = request.environ['CONTENT_TYPE']
try:
if ct.startswith('multipart/'):
cid = resolvers.MIMEResolver(ct, request.stdin)
xml = cid.GetSOAPPart()
ps = ParsedSoap(xml, resolver=cid.Resolve)
else:
length = int(request.environ['CONTENT_LENGTH'])
ps = ParsedSoap(request.stdin.read(length))
except ParseException, e:
_JonPySendFault(FaultFromZSIException(e), **kw)
return
_Dispatch(ps, modules, _JonPySendXML, _JonPySendFault, **kw)
|
|
#
# cocos2d
# http://cocos2d.org
#
# Particle Engine done by Phil Hassey
# http://www.imitationpickles.org
#
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pyglet
from pyglet.gl import *
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from cocos.actions import *
from cocos.sprite import Sprite
import random; rr = random.randrange
class Fire:
def __init__(self,x,y,vy,frame,size):
self.x,self.y,self.vy,self.frame,self.size = x,y,vy,frame,size
class FireManager( Layer ):
def __init__(self, view_width, num):
super( FireManager, self ).__init__()
self.view_width = view_width
self.goodies = []
self.batch = pyglet.graphics.Batch()
self.fimg = pyglet.resource.image('fire.jpg')
self.group = pyglet.sprite.SpriteGroup(self.fimg.texture,
blend_src=GL_SRC_ALPHA, blend_dest=GL_ONE)
self.vertex_list = self.batch.add(4*num, GL_QUADS, self.group,
'v2i', 'c4B', ('t3f', self.fimg.texture.tex_coords*num))
for n in xrange(0, num):
f = Fire(0,0,0,0,0)
self.goodies.append(f)
self.vertex_list.vertices[n*8:(n+1)*8] = [0, 0, 0, 0, 0, 0, 0, 0]
self.vertex_list.colors[n*16:(n+1)*16] = [0,0,0,0,] * 4
self.schedule( self.step )
def step(self,dt):
w,h = self.fimg.width,self.fimg.height
fires = self.goodies
verts, clrs = self.vertex_list.vertices, self.vertex_list.colors
for n,f in enumerate(fires):
if not f.frame:
f.x = rr(0,self.view_width)
f.y = rr(-120,-80)
f.vy = rr(40,70)/100.0
f.frame = rr(50,250)
f.size = 8 + pow(rr(0, 100) / 100.0, 2.0) * 32
f.scale = f.size / 32.0
x = f.x = f.x+ rr(-50,50)/100.0
y = f.y = f.y+f.vy*4
c = 3 * f.frame / 255.0
r,g,b = (min(255,int(c*0xc2)),min(255,int(c*0x41)),min(255,int(c*0x21)))
f.frame -= 1
ww,hh = w*f.scale,h*f.scale
x-=ww/2
verts[n*8:(n+1)*8] = map(int,[x,y,x+ww,y,x+ww,y+hh,x,y+hh])
clrs[n*16:(n+1)*16] = [r,g,b,255] * 4
def draw( self ):
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
class SpriteLayer ( Layer ):
def __init__( self ):
super( SpriteLayer, self ).__init__()
sprite1 = Sprite('grossini.png' )
sprite2 = Sprite('grossinis_sister1.png')
sprite3 = Sprite('grossinis_sister2.png')
sprite1.position = (320,240)
sprite2.position = (620,100)
sprite3.position = (20,100)
self.add( sprite1 )
self.add( sprite2 )
self.add( sprite3 )
ju_right = JumpBy( (600,0), height=100, jumps=4, duration=5 )
ju_left = JumpBy( (-600,0), height=100, jumps=4, duration=5 )
rot1 = Rotate( 180 * 4, duration=5)
sprite1.opacity = 128
sc = ScaleBy( 9, 5 )
rot = Rotate( 180, 5 )
sprite1.do( Repeat( sc + Reverse(sc) ) )
sprite1.do( Repeat( rot + Reverse(rot) ) )
sprite2.do( Repeat( ju_left + Reverse(ju_left) ) )
sprite2.do( Repeat( Reverse(rot1) + rot1 ) )
sprite3.do( Repeat( ju_right + Reverse(ju_right) ) )
sprite3.do( Repeat( rot1 + Reverse(rot1) ) )
class MainMenu(Menu):
def __init__( self ):
# call superclass with the title
super( MainMenu, self ).__init__("GROSSINI'S SISTERS" )
pyglet.font.add_directory('.')
# you can override the font that will be used for the title and the items
self.font_title['font_name'] = 'You Are Loved'
self.font_title['font_size'] = 72
self.font_item['font_name'] = 'You Are Loved'
self.font_item_selected['font_name'] = 'You Are Loved'
# you can also override the font size and the colors. see menu.py for
# more info
# example: menus can be vertical aligned and horizontal aligned
self.menu_valign = CENTER
self.menu_halign = CENTER
items = []
items.append( MenuItem('New Game', self.on_new_game ) )
items.append( MenuItem('Options', self.on_options ) )
items.append( MenuItem('Scores', self.on_scores ) )
items.append( MenuItem('Quit', self.on_quit ) )
self.create_menu( items, zoom_in(), zoom_out() )
# Callbacks
def on_new_game( self ):
# director.set_scene( StartGame() )
print "on_new_game()"
def on_scores( self ):
self.parent.switch_to( 2 )
def on_options( self ):
self.parent.switch_to( 1 )
def on_quit( self ):
director.pop()
class OptionMenu(Menu):
def __init__( self ):
super( OptionMenu, self ).__init__("GROSSINI'S SISTERS" )
self.font_title['font_name'] = 'You Are Loved'
self.font_title['font_size'] = 72
self.font_item['font_name'] = 'You Are Loved'
self.font_item_selected['font_name'] = 'You Are Loved'
self.menu_valign = BOTTOM
self.menu_halign = RIGHT
items = []
items.append( MenuItem('Fullscreen', self.on_fullscreen) )
items.append( ToggleMenuItem('Show FPS: ', self.on_show_fps, True) )
items.append( MenuItem('OK', self.on_quit) )
self.create_menu( items, shake(), shake_back() )
# Callbacks
def on_fullscreen( self ):
director.window.set_fullscreen( not director.window.fullscreen )
def on_quit( self ):
self.parent.switch_to( 0 )
def on_show_fps( self, value ):
director.show_FPS = value
class ScoreMenu(Menu):
def __init__( self ):
super( ScoreMenu, self ).__init__("GROSSINI'S SISTERS" )
self.font_title['font_name'] = 'You Are Loved'
self.font_title['font_size'] = 72
self.font_item['font_name'] = 'You Are Loved'
self.font_item_selected['font_name'] = 'You Are Loved'
self.menu_valign = BOTTOM
self.menu_halign = LEFT
self.create_menu( [MenuItem('Go Back', self.on_quit)] )
def on_quit( self ):
self.parent.switch_to( 0 )
def init():
director.init( resizable=True, width=640, height=480)
def start():
director.set_depth_test()
firelayer = FireManager(director.get_window_size()[0], 250)
spritelayer = SpriteLayer()
menulayer = MultiplexLayer( MainMenu(), OptionMenu(), ScoreMenu() )
scene = Scene( firelayer, spritelayer, menulayer )
twirl_normal = Twirl( center=(320,240), grid=(16,12), duration=15, twirls=6, amplitude=6 )
twirl = AccelDeccelAmplitude( twirl_normal, rate=4.0 )
lens = Lens3D( radius=240, center=(320,240), grid=(32,24), duration=5)
waves3d = AccelDeccelAmplitude( Waves3D( waves=18, amplitude=80, grid=(32,24), duration=15), rate=4.0 )
flipx = FlipX3D(duration=1)
flipy = FlipY3D(duration=1)
flip = Flip(duration=1)
liquid = Liquid( grid=(16,12), duration=4)
ripple = Ripple3D( grid=(32,24), waves=7, duration=10, amplitude=100, radius=320)
shakyt = ShakyTiles3D( grid=(16,12), duration=3)
corners = CornerSwap( duration=1)
waves = AccelAmplitude(Waves( waves=8, amplitude=50, grid=(32,24), duration=5), rate=2.0)
shaky = Shaky3D( randrange=10, grid=(32,24), duration=5)
quadmove = QuadMoveBy( delta0=(320,240), delta1=(-630,0), delta2=(-320,-240), delta3=(630,0), duration=2 )
fadeout = FadeOutTRTiles( grid=(16,12), duration=2)
cornerup = MoveCornerUp( duration=1)
cornerdown = MoveCornerDown( duration=1)
shatter = ShatteredTiles3D( randrange=16, grid=(16,12), duration=4 )
shuffle = ShuffleTiles( grid=(16,12), duration=1 )
orbit = OrbitCamera( radius=1, delta_radius=2, angle_x=0, delta_x=-90, angle_z=0, delta_z=180, duration=4 )
jumptiles = JumpTiles3D( jumps=2, duration=4, amplitude=80, grid=(16,12) )
wavestiles = WavesTiles3D( waves=3, amplitude=60, duration=8, grid=(16,12) )
turnoff = TurnOffTiles( grid=(16,12), duration=2 )
# firelayer.do(
# spritelayer.do(
# menulayer.do(
scene.do(
Delay(3) +
ripple + Delay(2) +
wavestiles + Delay(1) +
twirl +
liquid + Delay(2) +
shakyt + Delay(2) +
ReuseGrid() +
shuffle + Delay(4) + ReuseGrid() + turnoff + Reverse(turnoff) + Delay(1) +
shatter +
flip+ Delay(2) +
Reverse(flip) +
flipx + Delay(2) + ReuseGrid() +
flipy + Delay(2) + ReuseGrid() +
flipx + Delay(2) + ReuseGrid() +
flipy + Delay(2) +
lens + ReuseGrid() + ( (orbit+Reverse(orbit)) | waves3d) + Delay(1) +
corners + Delay(2) + Reverse(corners) +
waves + Delay(2) + ReuseGrid() + shaky +
jumptiles + Delay(1) +
cornerup + Delay(1) +
Reverse(cornerdown) + Delay(1) +
fadeout + Reverse(fadeout) + Delay(2) +
quadmove + Delay(1) +
Reverse(quadmove) +
StopGrid()
)
scene.do( Delay(10) + OrbitCamera( delta_z=-360*3, duration=10*4 ) )
firelayer.do( Delay(4) + DoAction( Repeat( RotateBy(360, 10) ) ) )
return scene
def run(scene):
director.run( scene )
if __name__ == "__main__":
init()
s = start()
run(s)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os.path
import re
import string
# Header for an assembly file.
_ASM_HEADER = """\
; Copyright {year} Google Inc. All Rights Reserved.
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
; This file is generated by {basename}, DO NOT MODIFY.
; Regenerate this file by running syzygy/agent/asan/generate_files.bat.
{preamble}
.CODE
; Allow section and label names to begin with a leading period.
OPTION DOTNAME
"""
# Preamble for the x86 ASM files.
_ASM_X86_HEADER_PREAMBLE = """\n\
.386
.MODEL FLAT, C
"""
# Trailer for an assembly file.
_ASM_TRAILER = """\
END
"""
_REDIRECTORS_EXTERN_TEMPLATE = """\
; Declare the tail function all the stubs direct to.
EXTERN asan_redirect_tail_clang:PROC
{extra_externs}"""
"""\
; Declare the tail function all the stubs direct to.
EXTERN asan_redirect_tail:PROC
"""
_REDIRECTORS_EXTERN = {
'x86': _REDIRECTORS_EXTERN_TEMPLATE.format(
extra_externs='EXTERN C asan_redirect_tail:PROC\n'),
'x64': _REDIRECTORS_EXTERN_TEMPLATE.format(extra_externs='')
}
_REDIRECTORS_PROC_HEADER = """\
; Declare a single top-level function to prevent identical code folding from
; folding the redirectors into one. Each redirector simply calls through to
; the tail function. This allows the tail function to trivially compute the
; redirector's address, which is used to identify the invoked redirector.
asan_redirectors PROC
; Adds a NOP at the beginning of this function to make this work when using
; incremental linking. Every reference to the first probe will otherwise be
; replaced by a jump to a thunk.
nop
"""
_REDIRECTORS_PROC_TRAILER = """\
asan_redirectors ENDP
"""
# Declares external functions and data required by the probe implementations.
# Args:
# shadow: The name of the variable housing the shadow memory.
_INTERCEPTORS_PREAMBLE = """\
; Declare the global shadow memory array that probes refer to.
EXTERN C {shadow}:FAR
; Declare the string checking helper function.
EXTERN C asan_check_strings_memory_accesses:PROC
; Declare the redirect function.
EXTERN C asan_redirect_stub_entry:PROC
EXTERN C asan_redirect_clang_stub_entry:PROC
; Declare the error handling function.
EXTERN C asan_report_bad_memory_access:PROC
; Declares the symbols that this compiland exports.
PUBLIC asan_no_check
PUBLIC asan_string_no_check
PUBLIC asan_redirect_tail
PUBLIC asan_redirect_tail_clang
PUBLIC asan_shadow_references"""
_INTERCEPTORS_SEGMENT_HEADER = """\
; Create a new text segment to house the memory interceptors.
.probes SEGMENT PAGE PUBLIC READ EXECUTE 'CODE'
"""
_INTERCEPTORS_SEGMENT_FOOTER = """\
.probes ENDS
"""
_RDATA_SEGMENT_HEADER = """\
; Start writing to the read-only .rdata segment.
.rdata SEGMENT PAGE PUBLIC READ 'DATA'
"""
_RDATA_SEGMENT_FOOTER = """\
.rdata ENDS
"""
# Snippets relating to shadow memory.
_SHADOW = "asan_memory_interceptors_shadow_memory"
_SHADOW_REFERENCE_TABLE_HEADER = """\
; This is a null-terminated table of pointers to all shadow memory references.
; This is emitted so that the shadow memory pointer may be rewritten at
; runtime by the dynamic RTL.
ALIGN 4
asan_shadow_references LABEL FAR"""
_SHADOW_REFERENCE_TABLE_ENTRY = """\
DWORD shadow_reference_{shadow_index!s} - 4"""
_SHADOW_REFERENCE_TABLE_FOOTER = """\
DWORD 0
"""
# Generates the single-instance assembly stubs.
_INTERCEPTORS_GLOBAL_FUNCTIONS = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function modifies no other registers,
; in particular it saves and restores EFLAGS.
ALIGN 16
asan_no_check PROC
; Restore EDX.
mov edx, DWORD PTR[esp + 4]
; And return.
ret 4
asan_no_check ENDP
; No state is saved for string instructions.
ALIGN 16
asan_string_no_check PROC
; Just return.
ret
asan_string_no_check ENDP
; On entry, the address to check is in EDX and the stack has:
; - previous contents of EDX.
; - return address to original caller.
; - return address to redirection stub.
ALIGN 16
asan_redirect_tail PROC
; Prologue, save context.
pushfd
pushad
; Normalize the string operation direction.
cld
; Compute the address of the calling function and push it.
mov eax, DWORD PTR[esp + 9 * 4]
sub eax, 5 ; Length of call instruction.
push eax
; Push the original caller's address.
push DWORD PTR[esp + 11 * 4]
call asan_redirect_stub_entry
; Clean arguments off the stack.
add esp, 8
; Overwrite access_size with the stub to return to.
mov DWORD PTR[esp + 9 * 4], eax
; Restore context.
popad
popfd
; return to the stashed stub.
ret
asan_redirect_tail ENDP
; On entry the stack has:
; - the address to check.
; - return address to original caller.
; - return address to redirection stub.
ALIGN 16
asan_redirect_tail_clang PROC
; Prologue, save context.
pushfd
pushad
; Normalize the string operation direction.
cld
; Compute the address of the calling function and push it.
mov eax, DWORD PTR[esp + 9 * 4]
sub eax, 5 ; Length of call instruction.
push eax
; Push the original caller's address.
push DWORD PTR[esp + 11 * 4]
call asan_redirect_clang_stub_entry
; Clean arguments off the stack.
add esp, 8
; Overwrite access_size with the stub to return to.
mov DWORD PTR[esp + 9 * 4], eax
; Restore context.
popad
popfd
; return to the stashed stub.
ret
asan_redirect_tail_clang ENDP
"""
# Starts by saving EAX onto the stack and then loads the value of
# the flags into it.
#
# This is a trick for efficient saving/restoring part of the flags register.
# See http://blog.freearrow.com/archives/396.
# Flags (bits 16-31) probably need a pipeline flush on update (POPFD). Thus,
# using LAHF/SAHF instead gives better performance.
# PUSHFD/POPFD: 23.314684 ticks
# LAHF/SAHF: 8.838665 ticks
_SAVE_EFLAGS = """\
; Save the EFLAGS.
push eax
lahf
seto al"""
# Restores the flags.
#
# The previous flags value is assumed to be in EAX and we expect to have the
# previous value of EAX on the top of the stack.
# AL is set to 1 if the overflow flag was set before the call to our hook, 0
# otherwise. We add 0x7F to it so it'll restore the flag. Then we restore the
# low bytes of the flags and EAX.
_RESTORE_EFLAGS = """\
; Restore the EFLAGS.
add al, 7Fh
sahf
pop eax"""
_2GB_CHECK = """\
; Divide by 8 to convert the address to a shadow index. This is a signed
; operation, so the sign bit stays set if the address is above the 2GB
; threshold and the check fails.
sar edx, 3
js report_failure_{probe_index}"""
_4GB_CHECK = """\
; Divide by 8 to convert the address to a shadow index. No range check is
; needed as the address space is 4GB.
shr edx, 3"""
# The common part of the fast path shared between the different
# implementations of the hooks.
#
# This does the following:
# - Saves the memory location in EDX for the slow path.
# - Does an address check if necessary.
# - Checks for zero shadow for this memory location. We use the cmp
# instruction so it'll set the sign flag if the upper bit of the shadow
# value of this memory location is set to 1.
# - If the shadow byte is not equal to zero then it jumps to the slow path.
# - Otherwise it removes the memory location from the top of the stack.
_FAST_PATH = """\
push edx
{range_check}
movzx edx, BYTE PTR[edx + {shadow}]
; This is a label to the previous shadow memory reference. It will be
; referenced by the table at the end of the 'asan_probes' procedure.
shadow_reference_{shadow_index!s} LABEL NEAR
cmp dl, 0
jnz check_access_slow_{probe_index}
add esp, 4"""
# This is the common part of the slow path shared between the different
# implementations of the hooks.
#
# The memory location is expected to be on top of the stack and the shadow
# value for it is assumed to be in DL at this point.
# This also relies on the fact that the shadow non accessible byte mask has
# its upper bit set to 1 and that we jump to this macro after doing a
# "cmp shadow_byte, 0", so the sign flag would be set to 1 if the value isn't
# accessible.
# We inline the Shadow::IsAccessible function for performance reasons.
# This function does the following:
# - Checks if this byte is accessible and jumps to the error path if it's
# not.
# - Removes the memory location from the top of the stack.
_SLOW_PATH = """\
js report_failure_{probe_index}
mov dh, BYTE PTR[esp]
and dh, 7
cmp dh, dl
jae report_failure_{probe_index}
add esp, 4"""
# The error path.
#
# It expects to have the previous value of EDX at [ESP + 4] and the address
# of the faulty instruction at [ESP].
# This macro takes care of saving and restoring the flags.
_ERROR_PATH ="""\
; Restore original value of EDX, and put memory location on stack.
xchg edx, DWORD PTR[esp + 4]
; Create an Asan registers context on the stack.
pushfd
pushad
; Fix the original value of ESP in the Asan registers context.
; Removing 12 bytes (e.g. EFLAGS / EIP / Original EDX).
add DWORD PTR[esp + 12], 12
; Push ARG4: the address of Asan context on stack.
push esp
; Push ARG3: the access size.
push {access_size}
; Push ARG2: the access type.
push {access_mode_value}
; Push ARG1: the memory location.
push DWORD PTR[esp + 52]
call asan_report_bad_memory_access
; Remove 4 x ARG on stack.
add esp, 16
; Restore original registers.
popad
popfd
; Return and remove memory location on stack.
ret 4"""
# Collects the above macros and bundles them up in a dictionary so they can be
# easily expanded by the string format functions.
_MACROS = {
"AsanSaveEflags": _SAVE_EFLAGS,
"AsanRestoreEflags": _RESTORE_EFLAGS,
"AsanFastPath": _FAST_PATH,
"AsanSlowPath": _SLOW_PATH,
"AsanErrorPath": _ERROR_PATH,
}
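# For illustration: a single macro reference such as
# "{AsanSaveEflags}".format(**_MACROS) expands to the _SAVE_EFLAGS snippet above.
# The probe templates below use the same field names, and the MacroAssembler
# class at the end of this file performs the full expansion when the probes are
# generated.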
# Generates the Asan check access functions.
#
# The name of the generated method will be
# asan_check_(@p access_size)_byte_(@p access_mode_str)().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of
# access.
# probe_index: The index of the probe function. Used to mangle internal labels
# so that they are unique to this probe's implementation.
_CHECK_FUNCTION = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function modifies no other registers,
; in particular it saves and restores EFLAGS.
ALIGN 16
asan_check_{access_size}_byte_{access_mode_str}_{mem_model} PROC \
; Probe #{probe_index}.
{AsanSaveEflags}
{AsanFastPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 8]
{AsanRestoreEflags}
ret 4
check_access_slow_{probe_index} LABEL NEAR
{AsanSlowPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 8]
{AsanRestoreEflags}
ret 4
report_failure_{probe_index} LABEL NEAR
; Restore memory location in EDX.
pop edx
{AsanRestoreEflags}
{AsanErrorPath}
asan_check_{access_size}_byte_{access_mode_str}_{mem_model} ENDP
"""
# Declare the check access function public label.
_CHECK_FUNCTION_DECL = """\
PUBLIC asan_check_{access_size}_byte_{access_mode_str}_{mem_model} ; Probe \
#{probe_index}."""
# Generates a variant of the Asan check access functions that don't save
# the flags.
#
# The name of the generated method will be
# asan_check_(@p access_size)_byte_(@p access_mode_str)_no_flags().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of access.
# probe_index: The index of the probe function. Used to mangle internal labels
# so that they are unique to this probe's implementation.
# Note: Calling this function may alter the EFLAGS register only.
_CHECK_FUNCTION_NO_FLAGS = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function may modify EFLAGS, but preserves
; all other registers.
ALIGN 16
asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} PROC \
; Probe #{probe_index}.
{AsanFastPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 4]
ret 4
check_access_slow_{probe_index} LABEL NEAR
{AsanSlowPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 4]
ret 4
report_failure_{probe_index} LABEL NEAR
; Restore memory location in EDX.
pop edx
{AsanErrorPath}
asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} ENDP
"""
# Declare the check access function public label.
_CHECK_FUNCTION_NO_FLAGS_DECL = """\
PUBLIC asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} \
; Probe #{probe_index}."""
# Generates the Asan memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p access_size)_byte_(@p access_mode_str)(@p suffix)().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# suffix: The suffix - if any - for this function name
_REDIRECT_FUNCTION = """\
asan_redirect_{access_size}_byte_{access_mode_str}{suffix} LABEL PROC
call asan_redirect_tail"""
# Declare the public label.
_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect_{access_size}_byte_{access_mode_str}{suffix}"""
# Generates the Clang-Asan memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p access_mode_str)(@p access_size)().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (load or store).
_CLANG_REDIRECT_FUNCTION = """\
asan_redirect_{access_mode_str}{access_size} LABEL PROC
call asan_redirect_tail_clang"""
# Declare the public label.
_CLANG_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect_{access_mode_str}{access_size}"""
# Generates the Asan check access functions for a string instruction.
#
# The name of the generated method will be
# asan_check_(@p prefix)(@p access_size)_byte_(@p inst)_access().
#
# Args:
# inst: The instruction mnemonic.
# prefix: The prefix of the instruction (repz or nothing).
# counter: The number of times the instruction must be executed (ECX).
# It may be a register or a constant.
# dst_mode: The memory access mode for the destination (EDI).
# src_mode: The memory access mode for the source (ESI).
# access_size: The size of the access (in bytes).
# compare: A flag to enable shortcut execution by comparing memory
# contents.
_CHECK_STRINGS = """\
ALIGN 16
asan_check{prefix}{access_size}_byte_{func}_access PROC ; Probe #{probe_index}.
; Prologue, save context.
pushfd
pushad
; Fix the original value of ESP in the Asan registers context.
; Removing 8 bytes (EFLAGS and the return EIP are on the stack).
add DWORD PTR[esp + 12], 8
; Setup increment in EBX (depends on direction flag in EFLAGS).
mov ebx, {access_size}
pushfd
pop eax
test eax, 400h
jz skip_neg_direction_{probe_index}
neg ebx
skip_neg_direction_{probe_index} LABEL NEAR
; By standard calling convention, direction flag must be forward.
cld
; Push ARG(context), the Asan registers context.
push esp
; Push ARG(compare), shortcut when memory contents differ.
push {compare}
; Push ARG(increment), increment for EDI/ESI.
push ebx
; Push ARG(access_size), the access size.
push {access_size}
; Push ARG(length), the number of memory accesses.
push {counter}
; Push ARG(src_access_mode), source access type.
push {src_mode}
; Push ARG(src), the source pointer.
push esi
; Push ARG(dst_access_mode), destination access type.
push {dst_mode}
; Push ARG(dst), the destination pointer.
push edi
; Call the generic check strings function.
call asan_check_strings_memory_accesses
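; Pop the 9 DWORD arguments pushed above (9 * 4 = 36 bytes).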
add esp, 36
; Epilogue, restore context.
popad
popfd
ret
asan_check{prefix}{access_size}_byte_{func}_access ENDP
"""
# Declare the string checking probe public label.
_CHECK_STRINGS_DECL = """\
PUBLIC asan_check{prefix}{access_size}_byte_{func}_access ; Probe \
#{probe_index}."""
# Generates the Asan string memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p prefix)(@p access_size)_byte_(@p inst)_access().
#
# Args:
# inst: The instruction mnemonic.
# prefix: The prefix of the instruction (repz or nothing).
# counter: The number of times the instruction must be executed (ECX).
# It may be a register or a constant.
# dst_mode: The memory access mode for the destination (EDI).
# src_mode: The memory access mode for the source (ESI).
# access_size: The size of the access (in bytes).
# compare: A flag to enable shortcut execution by comparing memory
# contents.
_STRING_REDIRECT_FUNCTION = """\
asan_redirect{prefix}{access_size}_byte_{func}_access LABEL PROC
call asan_redirect_tail"""
# Declare the public label.
_STRING_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect{prefix}{access_size}_byte_{func}_access"""
class MacroAssembler(string.Formatter):
"""A formatter specialization to inject the AsanXXX macros and make
them easier to use."""
def parse(self, str):
"""Override to trim whitespace on empty trailing line."""
for (lit, fld, fmt, conv) in super(MacroAssembler, self).parse(str):
# Strip trailing whitespace from the previous literal to allow natural
# use of AsanXXX macros.
m = re.match('^(.*\n)( +)$', lit)
if m:
lit = m.group(1)
yield((lit, fld, fmt, conv))
def get_value(self, key, args, kwargs):
"""Override to inject macro definitions."""
if key in _MACROS:
macro = _MACROS[key].format(*args, **kwargs)
# Trim leading whitespace to allow natural use of AsanXXX macros.
macro = macro.lstrip()
return macro
return super(MacroAssembler, self).get_value(key, args, kwargs)
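# Minimal illustration (standalone, not from the original file; the macro body
# below is hypothetical) of how the two overrides cooperate. With
# _MACROS = {'AsanSaveEflags': '  pushfd'}:
#
#   MacroAssembler().format('start:\n    {AsanSaveEflags}\n    ret')
#
# parse() drops the four spaces of indentation before the field and
# get_value() expands the macro and lstrip()s it, so the result is
# 'start:\npushfd\n    ret'.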
# Access sizes for the memory accessors generated.
_ACCESS_SIZES = (1, 2, 4, 8, 10, 16, 32)
# These values must correspond to those defined in the agent::asan::AccessMode
# enum. See syzygy/agent/asan/error_info.h.
_ASAN_READ_ACCESS = 0
_ASAN_WRITE_ACCESS = 1
_ASAN_UNKNOWN_ACCESS = 2
# Access modes for the memory accessors generated.
_ACCESS_MODES = [
('read_access', _ASAN_READ_ACCESS),
('write_access', _ASAN_WRITE_ACCESS),
]
_CLANG_ACCESS_MODES = [
('load', _ASAN_READ_ACCESS),
('store', _ASAN_WRITE_ACCESS),
]
# Memory models for the generated accessors, and the associated address range
# checks to insert.
_MEMORY_MODELS = [
('2gb', _2GB_CHECK.lstrip()),
('4gb', _4GB_CHECK.lstrip()),
]
# The string accessors generated.
_STRING_ACCESSORS = [
("cmps", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 4, 1),
("cmps", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 2, 1),
("cmps", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 1, 1),
("cmps", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 4, 1),
("cmps", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 2, 1),
("cmps", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 1, 1),
("lods", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 4, 1),
("lods", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 2, 1),
("lods", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 1, 1),
("lods", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 4, 1),
("lods", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 2, 1),
("lods", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 1, 1),
("movs", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 4, 0),
("movs", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 2, 0),
("movs", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 1, 0),
("movs", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 4, 0),
("movs", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 2, 0),
("movs", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 1, 0),
("stos", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 4, 0),
("stos", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 2, 0),
("stos", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 1, 0),
("stos", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 4, 0),
("stos", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 2, 0),
("stos", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 1, 0),
]
class ToStringCounter(object):
"""A helper class that counts how often it is converted to a string."""
def __init__(self, count=0):
self._count = count
def __str__(self):
self._count += 1
return str(self._count - 1)
def count(self):
return self._count
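# A minimal, standalone demonstration (not part of the original generator) of
# the counting behaviour: each '{c!s}' reference invokes __str__ on the
# counter, so the count advances once per reference in the format string.
def _DemoToStringCounter():
  counter = ToStringCounter()
  text = '{c!s} {c!s} {c!s}'.format(c=counter)
  assert text == '0 1 2'
  assert counter.count() == 3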
def _IterateOverInterceptors(parts,
formatter,
format,
format_no_flags,
probe_index=0,
shadow_index=0):
"""Helper for _GenerateInterceptorsAsmFile."""
f = formatter
# This variable hides a counter which automatically increments for every
# reference made to it. This allows the probes to use arbitrarily many
# references to the shadow memory and the generator will implicitly track
# these and emit a table entry per reference.
#
# For this mechanism to work reliably all references to 'shadow_index' in the
# formatting strings must be specified using '{shadow_index!s}'. This
# guarantees that the __str__ method of the ToStringCounter instance will be
# called.
shadow_index = ToStringCounter(shadow_index)
for mem_model, range_check in _MEMORY_MODELS:
# Iterate over the probes that have flags.
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
formatted_range_check = f.format(range_check, probe_index=probe_index)
parts.append(f.format(format,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
mem_model=mem_model,
probe_index=probe_index,
range_check=formatted_range_check,
shadow=_SHADOW,
shadow_index=shadow_index))
probe_index += 1
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
formatted_range_check = f.format(range_check, probe_index=probe_index)
parts.append(f.format(format_no_flags,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
mem_model=mem_model,
probe_index=probe_index,
range_check=formatted_range_check,
shadow=_SHADOW,
shadow_index=shadow_index))
probe_index += 1
# Return the probe and shadow memory reference counts.
return (probe_index, shadow_index.count())
def _IterateOverStringInterceptors(parts, formatter, format, probe_index=0):
"""Helper for _GenerateInterceptorsAsmFile."""
for (fn, p, c, dst_mode, src_mode, size, compare) in _STRING_ACCESSORS:
parts.append(formatter.format(format,
access_size=size,
compare=compare,
counter=c,
dst_mode=dst_mode,
func=fn,
prefix=p,
probe_index=probe_index,
src_mode=src_mode))
probe_index += 1
return probe_index
def _GenerateInterceptorsAsmFile():
f = MacroAssembler()
parts = [f.format(_ASM_HEADER,
basename=os.path.basename(__file__),
year=datetime.datetime.now().year,
preamble=_ASM_X86_HEADER_PREAMBLE)]
parts.append(f.format(_INTERCEPTORS_PREAMBLE, shadow=_SHADOW))
probe_index = 0
shadow_index = 0
# Generate the block of public label declarations.
(probe_index, shadow_index) = _IterateOverInterceptors(parts, f,
_CHECK_FUNCTION_DECL, _CHECK_FUNCTION_NO_FLAGS_DECL,
probe_index=probe_index, shadow_index=shadow_index)
probe_index = _IterateOverStringInterceptors(parts, f, _CHECK_STRINGS_DECL,
probe_index=probe_index)
parts.append('')
# Place all of the probe functions in a custom segment.
parts.append(f.format(_INTERCEPTORS_SEGMENT_HEADER))
# Generate the single-instance functions.
parts.append(f.format(_INTERCEPTORS_GLOBAL_FUNCTIONS))
# TODO(siggi): Think about the best way to allow the stubs to communicate
# their own and their alternative identities to the bottleneck function.
# A particularly nice way is to generate an array of N-tuples that can
# be used when patching up IATs, where the redirector and the
# alternatives consume a row each. Passing in the array entry to the
# bottleneck is then the nicest, but the easiest is probably to pass in
# the redirector function itself...
# Reset the probe and shadow indices.
probe_index = 0
shadow_index = 0
# Output the actual interceptors themselves
(probe_index, shadow_index) = _IterateOverInterceptors(parts, f,
_CHECK_FUNCTION, _CHECK_FUNCTION_NO_FLAGS, probe_index=probe_index,
shadow_index=shadow_index)
# Generate string operation accessors.
probe_index = _IterateOverStringInterceptors(parts, f, _CHECK_STRINGS,
probe_index=probe_index)
# Close the custom segment housing the probes.
parts.append(f.format(_INTERCEPTORS_SEGMENT_FOOTER))
# Output the table of shadow references to .rdata.
parts.append(f.format(_RDATA_SEGMENT_HEADER))
parts.append(f.format(_SHADOW_REFERENCE_TABLE_HEADER))
for i in range(0, shadow_index):
parts.append(f.format(_SHADOW_REFERENCE_TABLE_ENTRY, shadow_index=i))
parts.append(_SHADOW_REFERENCE_TABLE_FOOTER)
parts.append(f.format(_RDATA_SEGMENT_FOOTER))
parts.append(f.format(_ASM_TRAILER))
return parts
def _GenerateRedirectorsAsmFile(target_cpu):
assert(target_cpu in ('x86', 'x64'))
f = MacroAssembler()
header_preamble = ''
if target_cpu == 'x86':
header_preamble = _ASM_X86_HEADER_PREAMBLE
parts = [f.format(_ASM_HEADER,
basename=os.path.basename(__file__),
year=datetime.datetime.now().year,
preamble=header_preamble)]
parts.append(f.format(_REDIRECTORS_EXTERN[target_cpu]))
if target_cpu == 'x86':
# Declare the memory accessor redirectors.
for suffix in ("", "_no_flags"):
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
parts.append(f.format(_REDIRECT_FUNCTION_DECL,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
suffix=suffix))
# Declare string operation redirectors.
for (fn, p, c, dst_mode, src_mode, size, compare) in _STRING_ACCESSORS:
parts.append(f.format(_STRING_REDIRECT_FUNCTION_DECL,
func=fn,
prefix=p,
counter=c,
dst_mode=dst_mode,
src_mode=src_mode,
access_size=size,
compare=compare))
# Generate the Clang-Asan probes
for access_size in _ACCESS_SIZES:
for access, access_name in _CLANG_ACCESS_MODES:
parts.append(f.format(_CLANG_REDIRECT_FUNCTION_DECL,
access_mode_str=access,
access_size=access_size))
parts.append(f.format(_REDIRECTORS_PROC_HEADER))
if target_cpu == 'x86':
# Generate the memory accessor redirectors.
for suffix in ("", "_no_flags"):
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
parts.append(f.format(_REDIRECT_FUNCTION,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
suffix=suffix))
# Generate string operation redirectors.
for (fn, p, c, dst_mode, src_mode, size, compare) in _STRING_ACCESSORS:
parts.append(f.format(_STRING_REDIRECT_FUNCTION,
func=fn,
prefix=p,
counter=c,
dst_mode=dst_mode,
src_mode=src_mode,
access_size=size,
compare=compare))
# Generate the Clang-Asan accessor redirectors
for access_size in _ACCESS_SIZES:
for access, access_name in _CLANG_ACCESS_MODES:
parts.append(f.format(_CLANG_REDIRECT_FUNCTION,
access_mode_str=access,
access_size=access_size))
parts.append(f.format(_REDIRECTORS_PROC_TRAILER))
parts.append(f.format(_ASM_TRAILER))
return parts
def _WriteFile(file_name, parts):
contents = '\n'.join(parts)
dir = os.path.dirname(__file__)
with open(os.path.join(dir, file_name), "wb") as f:
f.write(contents)
def main():
interceptors_asm = _GenerateInterceptorsAsmFile()
redirectors_asm = _GenerateRedirectorsAsmFile('x86')
redirectors_asm_x64 = _GenerateRedirectorsAsmFile('x64')
_WriteFile('gen/memory_interceptors_impl.asm', interceptors_asm)
_WriteFile('gen/memory_redirectors.asm', redirectors_asm)
_WriteFile('gen/memory_redirectors_x64.asm', redirectors_asm_x64)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#Usage: python DTMF.py SomeSound.wav
#currently only tested with 16bit wav files that were sampled at 44.1kHz
#You might want to convert this file http://upload.wikimedia.org/wikipedia/commons/b/b4/DTMF_all_16.ogg to a 16bit wav file with a sample rate of 44.1kHz to test decoding
#Yeah, I know, there are better alternatives than FFT to decode
#DTMF signals. Goertzel does a pretty good job here for example.
#But when I needed to decode DTMF, I thought it was a good point to
#finally learn using FFT since I wanted to try it for a long time.
#And had some ideas for projects that could use it.
#Most parts of this code are taken from tutorials or are based on talking to other people
#that already used FFT and some might only work by accident as I
#coded them without understanding everything I needed.
#The MIT License (MIT)
#Copyright (c) 2015 Martin Zell
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pylab import*
from scipy.io import wavfile
import sys
debug = False
#Method to wrap the actual FFT
def doFFT(samples,sfreq):
#Samples are already normalized by main method, so just do an fft
frequencies = fft(samples)
#Sample rate/2 = max. frequency that can be reconstructed, so discard the rest
#fft is symmetrical
uniquePoints = int(ceil((len(samples)+1)/2.))
frequencies = frequencies[0:uniquePoints]
#frequency data is in complex number format, we only need the magnitude:
frequencies = abs(frequencies)
# scale by number of contained points
#nobody could explain to me why, but the common reason given for it seems to be:
#"Everyone does it (that way)!"
frequencies = frequencies/float(len(samples))
# Power of frequency
frequencies = frequencies ** 2
#since we discarded half of the data, we need to adjust the power now (*2)
#if the number of samples is even, the Nyquist point is included, which should not be doubled
if (len(samples) %2 > 0):
frequencies[1:len(samples)] = frequencies[1:len(samples)] * 2
else:
frequencies[1:len(samples)-1] = frequencies[1:len(samples)-1] * 2
if debug:
freqArray = arange(0, uniquePoints, 1.0) * (sfreq / len(samples));
plot(freqArray/1000, 10*log10(frequencies[:uniquePoints]), color='k')
xlabel('Frequency (kHz)')
ylabel('Power (dB)')
show()
#power of frequency x can be found around position x/(samplefreq / numSamplesUsedForFFT)
return frequencies
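#Worked example (assuming a 44.1kHz file and the 630-sample slices used in the
#main loop below): the bin spacing is sfreq/numSamples = 44100/630 = 70Hz, so
#the 697Hz row tone lands at index 697/70 ~= 9.96, i.e. between bins 9 and 10.
#That is why getPower() below interpolates between two neighbouring bins.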
#Method to get the power of a given frequency - usually from more than one bin
#so we need to interpolate the value
def getPower(f,frequencies, sfreq, numSamples):
#get bin as float (usually between two discrete values)
index = (float(f)/(sfreq / numSamples))
#the first bin
indexf = int(floor(index))
#the second bin
indexc = int(ceil(index))
#where in between
interpolate = index-indexf
#base value from first bin
powerBase = frequencies[indexf]
#difference between the two bins
difference = frequencies[indexc]-frequencies[indexf]
#linear interpolation seems to be sufficient in this case
power = powerBase+(difference*interpolate)
if debug:
print str(indexf)+ " - "+str(index)+" - "+str(indexc)+" - "+str(interpolate)
print "powerBase:"+str(powerBase)
print "powerNext:"+str(frequencies[indexc])
print "power:"+str(power)
return power
# This will lead to garbage - don't even think about it
# return max(frequencies[indexc],frequencies[indexf])
#Actual decoding of the DTMF signals goes here
def doDecodeDTMF(frequencies, sfreq, numSamples):
#At first power -> decibel. Not necessary for the next step, but
#plot for debug is nicer and more readable
frequencies = 10*log10(frequencies)
#DTMF uses 8 tones, of which 2 are mixed per key press.
#4 columns, 4 rows
#by identifying the tones, we can locate the buttons
#buttons are arranged like this:
codeTable = [
['1','2','3','A'],
['4','5','6','B'],
['7','8','9','C'],
['*','0','#','D'],
]
#initialize list for the power of the tones
col = [0,0,0,0]
row = [0,0,0,0]
#get the power of the specified frequency (in Hz)
row[0] = getPower(697,frequencies, sfreq, numSamples)
row[1] = getPower(770,frequencies, sfreq, numSamples)
row[2] = getPower(852,frequencies, sfreq, numSamples)
row[3] = getPower(941,frequencies, sfreq, numSamples)
col[0] = getPower(1209,frequencies, sfreq, numSamples)
col[1] = getPower(1336,frequencies, sfreq, numSamples)
col[2] = getPower(1477,frequencies, sfreq, numSamples)
col[3] = getPower(1633,frequencies, sfreq, numSamples)
if debug:
print "col: " + str(col)
print "row: " + str(row)
maxCol = 0
maxRow = 0
maxVal = None
#search for the strongest signal in column tones
for i in range(len(col)):
if maxVal < col[i]:
maxVal = col[i]
maxCol = i
maxVal = None
#search for the strongest signal in row tones
for i in range(len(row)):
if maxVal < row[i]:
maxVal = row[i]
maxRow = i
#...and return the char from the code table at said position
return codeTable[maxRow][maxCol]
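#Worked example: if 770Hz is the strongest row tone (index 1) and 1336Hz the
#strongest column tone (index 1), then codeTable[1][1] == '5' is returned.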
#naive method to find start and end of key press
#Assumes silence between keys.
#Optionally increase threshold when audio is noisy
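#For reference (assuming 44.1kHz audio as elsewhere in this script): 200
#samples of silence are roughly 4.5ms and the 630-sample minimum length is
#roughly 14.3ms, well below the ~50ms minimum duration of a DTMF tone.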
def findSoundToAnalyze(wavdata, threshold=0.2):
#Start in Mode "silence"
mode = 'silence'
start = None
end = None
samplesBelowThreshold = 0
count = 0
boundaries = []
for i in range(len(wavdata)):
if mode == 'silence':
#When mode is silence
#and value above threshold
#save start index and switch to mode sound
if abs(wavdata[i]) > threshold:
start= i
mode = 'sound'
samplesBelowThreshold=0
if mode == 'sound':
#when there is sound below threshold, count samples
if abs(wavdata[i]) < threshold:
samplesBelowThreshold= samplesBelowThreshold+1
#reset counter to zero at sound above threshold
if abs(wavdata[i]) >= threshold:
samplesBelowThreshold=0
#if more than 200 consecutive samples are below the threshold AND we gathered at least 630 samples
#FIXME: Amount of samples depends on the sample rate
if samplesBelowThreshold > 200 and (i-start) >= 630:
end = i
if debug:
print "Start at "+str(start/float(sampFreq))+" ("+str(start)+"), End at "+str((end)/float(sampFreq))+" ("+str(end)+") - Length: "+str((end-start)/sampFreq)+"("+str(end-start)+")"
mode = 'silence'
count = count+1
boundaries.append([start,end])
if debug:
print "Count Keys: "+str(count)
print "Boundaries:"+str(boundaries)
return boundaries
#FIXME: Threshold for separation of key presses should optionally be given on the command line
if __name__ == "__main__":
#Wavfile is given as argument
sampFreq, snd = wavfile.read(sys.argv[1])
numSamples = channels = 1
samples = None
datatype = snd.dtype
#FFT wants all values in the range [-1;1), but here we have 16- or 32-bit values
#Normalizing is needed
if (snd.dtype == dtype(int16)):
snd = snd / (2.**15)
elif (snd.dtype == dtype(int32)):
snd = snd / (2.**31)
#Check if mono or more than one channel (only first one needed)
if (len(snd.shape) > 1):
numSamples = snd.shape[0] #number of samples
channels = snd.shape[1] #number of channels (should be 2 in this case)
wavdata = snd[:,0] #and the sound data of the first channel (second one ignored)
else:
numSamples = snd.shape[0]#number of samples
wavdata = snd[:]#sound data
length = float(numSamples)/sampFreq
#find start and end of keypress
boundaries = findSoundToAnalyze(wavdata)
# Print some values
print "Type: "+str(datatype)
print "SampleFreq: "+str(sampFreq)
print "Number of Samples: "+str(numSamples)
print "Number of Audio Channels: "+str(channels)
print "Audio length: "+str(length)+" seconds"
print "Number of keys pressed: "+str(len(boundaries))
if debug:
bounds = []
for i in range(numSamples):
bounds.append(0)
for area in boundaries:
for i in range(area[1]-area[0]):
bounds[i+area[0]] = .8
#Show waveform
timeArray = arange(0, float(numSamples), 1)
timeArray = timeArray / sampFreq
timeArray = timeArray * 1000 #scale to milliseconds
#plot sound
plot(timeArray, abs(wavdata), color='k')
#plot bounds
plot(timeArray, bounds, color='g')
ylabel('Amplitude')
xlabel('Time (ms)')
show()
numKey = 0
keys = []
#For every keypress
for area in boundaries:
numKey = numKey+1
#FIXME: True for a 44.1kHz sample rate, needs to be calculated on the fly
#DTMF says that tones should last at least about 50ms to ensure recognition,
#70ms is optimal
#DTMF tones are spaced as closely as about 70Hz apart, so the frequency bins
#need to be fine enough to tell them apart.
#the bin spacing of the FFT is sample rate / number of samples:
#44.1kHz / 630 samples = 70Hz per bin, which together with the interpolation
#in getPower() is enough to separate the tones
#Proven by tests: this resolution separates DTMF well;
#higher resolution is not needed and would be a waste of processing power
#slice 630 samples from audio data
sam = wavdata[area[0]:area[0]+631]
numSam = len(sam)
#get Key
keys.append(doDecodeDTMF(doFFT(sam,sampFreq),sampFreq,numSam))
#print keys
print "Keys: "+"".join(keys)
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import log
from nova import rpc
from nova import test
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.Field(fields.Integer())}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.Field(fields.Integer(), default=1),
'bar': fields.Field(fields.String()),
'missing': fields.Field(fields.String()),
'readonly': fields.Field(fields.Integer(), read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self, context):
return 'polo'
@base.remotable
def _update_test(self, context):
if context.project_id == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@base.remotable
def save(self, context):
self.obj_reset_changes()
@base.remotable
def refresh(self, context):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self, context):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.TestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.NovaObjectMetaclass)
class NewBaseClass(object):
VERSION = '1.0'
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Fake1TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake1'
class Fake1TestObj2(Fake1TestObj1):
pass
class Fake1TestObj3(Fake1TestObj1):
VERSION = '1.1'
class Fake2TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake2'
class Fake1TestObj4(Fake1TestObj3):
VERSION = '1.2'
class Fake2TestObj2(Fake2TestObj1):
VERSION = '1.1'
class Fake1TestObj5(Fake1TestObj1):
VERSION = '1.1'
# Newest versions first in the list. Duplicate versions take the
# newest object.
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
'fake2': [Fake2TestObj2, Fake2TestObj1]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Fake1TestObj1._obj_classes)
self.assertEqual(expected, Fake1TestObj2._obj_classes)
self.assertEqual(expected, Fake1TestObj3._obj_classes)
self.assertEqual(expected, Fake1TestObj4._obj_classes)
self.assertEqual(expected, Fake1TestObj5._obj_classes)
self.assertEqual(expected, Fake2TestObj1._obj_classes)
self.assertEqual(expected, Fake2TestObj2._obj_classes)
def test_field_checking(self):
def create_class(field):
class TestField(base.NovaObject):
VERSION = '1.5'
fields = {'foo': field()}
return TestField
create_class(fields.IPV4AndV6AddressField)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, fields.IPV4AndV6Address)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, int)
class TestObjToPrimitive(test.TestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.TestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
pass
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
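# Example usage (hypothetical field names, for illustration only):
#   compare_obj(self, inst, db_inst,
#               subs={'uuid': 'instance_uuid'},
#               allow_missing=['fault'],
#               comparators={'extra': self.json_comparator})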
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def json_comparator(self, expected, obj_val):
# json-ify an object field for comparison with its db str
# equivalent
self.assertEqual(expected, jsonutils.dumps(obj_val))
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
base.NovaObject.indirection_api = None
def assertRemotes(self):
self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return (base.NovaObject.obj_from_primitive(result, context=args[0])
if isinstance(result, base.NovaObject) else result)
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
# To make sure local and remote contexts match
self.stubs.Set(rpc.RequestContextSerializer,
'serialize_context',
lambda s, c: c)
self.stubs.Set(rpc.RequestContextSerializer,
'deserialize_context',
lambda s, c: c)
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
def assertRemotes(self):
self.assertNotEqual(self.remote_object_calls, [])
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_object_dict_syntax(self):
obj = MyObj(foo=123, bar='bar')
self.assertEqual(obj['foo'], 123)
self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
[('bar', 'bar'), ('foo', 123)])
self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
[('bar', 'bar'), ('foo', 123)])
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': fields.Field(fields.Integer())}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
except exception.IncompatibleObjectVersion as error:
pass
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_with_alternate_context(self):
ctxt1 = context.RequestContext('foo', 'foo')
ctxt2 = context.RequestContext('bar', 'alternate')
obj = MyObj.query(ctxt1)
obj._update_test(ctxt2)
self.assertEqual(obj.bar, 'alternate-context')
self.assertRemotes()
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
self.assertRemotes()
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
self.assertRemotes()
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
self.assertRemotes()
def test_changed_with_sub_object(self):
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
self.assertRemotes()
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
self.assertRemotes()
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object', 'rel_objects'] +
base_fields)
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': fields.Field(fields.Integer())}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(exception.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,readonly=<?>,'
'rel_object=<?>,rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive), '1.8',
'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive),
'1.7', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.2')
self.assertEqual('1.2',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive),
'1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive), '1.5',
'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
_prim = copy.copy(primitive)
obj._obj_make_obj_compatible(_prim, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', _prim)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_complains_about_missing_rules(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {}
self.assertRaises(exception.ObjectActionError,
obj.obj_make_compatible, {}, '1.0')
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(exception.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo']), obj.obj_what_changed())
self.assertEqual(1, obj.foo)
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.VERSION = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.VERSION = '1.7'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.VERSION = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
self.assertRemotes()
def test_compat(self):
MyObj2.VERSION = '1.1'
obj = MyObj2.query(self.context)
self.assertEqual('oldbar', obj.bar)
def test_revision_ignored(self):
MyObj2.VERSION = '1.1.456'
obj = MyObj2.query(self.context)
self.assertEqual('bar', obj.bar)
class TestObjectListBase(test.TestCase):
def test_list_like_operations(self):
class MyElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyElement, self).__init__()
self.foo = foo
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyElement')}
objlist = Foo(context='foo',
objects=[MyElement(1), MyElement(2), MyElement(3)])
self.assertEqual(list(objlist), objlist.objects)
self.assertEqual(len(objlist), 3)
self.assertIn(objlist.objects[0], objlist)
self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
self.assertEqual(objlist[:1]._context, 'foo')
self.assertEqual(objlist[2], objlist.objects[2])
self.assertEqual(objlist.count(objlist.objects[0]), 1)
self.assertEqual(objlist.index(objlist.objects[1]), 1)
objlist.sort(key=lambda x: x.foo, reverse=True)
self.assertEqual([3, 2, 1],
[x.foo for x in objlist])
def test_serialization(self):
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('Bar')}
class Bar(base.NovaObject):
fields = {'foo': fields.Field(fields.String())}
obj = Foo(objects=[])
for i in 'abc':
bar = Bar(foo=i)
obj.objects.append(bar)
obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
self.assertFalse(obj is obj2)
self.assertEqual([x.foo for x in obj],
[y.foo for y in obj2])
def _test_object_list_version_mappings(self, list_obj_class):
# Figure out what sort of object this list is for
list_field = list_obj_class.fields['objects']
item_obj_field = list_field._type._element_type
item_obj_name = item_obj_field._type._obj_name
# Look through all object classes of this type and make sure that
# the versions we find are covered by the parent list class
for item_class in base.NovaObject._obj_classes[item_obj_name]:
self.assertIn(
item_class.VERSION,
list_obj_class.child_versions.values(),
'Version mapping is incomplete for %s' % (
list_obj_class.__name__))
def test_object_version_mappings(self):
# Find all object list classes and make sure that they at least handle
# all the current object versions
for obj_classes in base.NovaObject._obj_classes.values():
for obj_class in obj_classes:
if issubclass(obj_class, base.ObjectListBase):
self._test_object_list_version_mappings(obj_class)
def test_list_changes(self):
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('Bar')}
class Bar(base.NovaObject):
fields = {'foo': fields.StringField()}
obj = Foo(objects=[])
self.assertEqual(set(['objects']), obj.obj_what_changed())
obj.objects.append(Bar(foo='test'))
self.assertEqual(set(['objects']), obj.obj_what_changed())
obj.obj_reset_changes()
# This should still look dirty because the child is dirty
self.assertEqual(set(['objects']), obj.obj_what_changed())
obj.objects[0].obj_reset_changes()
# This should now look clean because the child is clean
self.assertEqual(set(), obj.obj_what_changed())
def test_initialize_objects(self):
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('Bar')}
class Bar(base.NovaObject):
fields = {'foo': fields.StringField()}
obj = Foo()
self.assertEqual([], obj.objects)
self.assertEqual(set(), obj.obj_what_changed())
def test_obj_repr(self):
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('Bar')}
class Bar(base.NovaObject):
fields = {'uuid': fields.StringField()}
obj = Foo(objects=[Bar(uuid='fake-uuid')])
self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj))
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport.called)
else:
self.assertEqual('backported', result)
ser._conductor.object_backport.assert_called_with(self.context,
primitive,
backported_to)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive.itervalues():
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2.itervalues():
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d',
'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25',
'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5',
'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a',
'BandwidthUsage': '1.2-a9d7c2ba54995e48ce38688c51c9416d',
'BandwidthUsageList': '1.2-5b564cbfd5ae6e106443c086938e7602',
'BlockDeviceMapping': '1.7-c53f09c7f969e0222d9f6d67a950a08e',
'BlockDeviceMappingList': '1.8-15ab98892f8fd26faa49f45f3cffaef0',
'ComputeNode': '1.10-70202a38b858977837b313d94475a26b',
'ComputeNodeList': '1.10-4ae1f844c247029fbcdb5fdccbe9e619',
'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4',
'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99',
'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836',
'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143',
'FixedIP': '1.8-2472964d39e50da67202109eb85cd173',
'FixedIPList': '1.8-6cfaa5b6dd27e9eb8fcf8462dea06077',
'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4',
'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721',
'FloatingIP': '1.6-27eb68b7c9c620dd5f0561b5a3be0e82',
'FloatingIPList': '1.7-f376f63ed99243f9d90841b7f6732bbf',
'HVSpec': '1.0-c4d8377cc4fe519930e60c1d8265a142',
'Instance': '1.18-7827a9e9846a75f3038bd556e6f530d3',
'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663',
'InstanceActionEvent': '1.1-42dbdba74bd06e0619ca75cd3397cd1b',
'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e',
'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266',
'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7',
'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e',
'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d',
'InstanceGroup': '1.9-95ece99f092e8f4f88327cdbb44162c9',
'InstanceGroupList': '1.6-c6b78f3c9d9080d33c08667e80589817',
'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f',
'InstanceList': '1.14-fe7f3266de1475454b939dee36a2ebcc',
'InstanceNUMACell': '1.2-5d2dfa36e9ecca9b63f24bf3bc958ea4',
'InstanceNUMATopology': '1.1-86b95d263c4c68411d44c6741b8d2bb0',
'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f',
'InstancePCIRequests': '1.1-bc7c6684d8579ee49d6a3b8aef756918',
'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a',
'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8',
'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed',
'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353',
'MyObj': '1.6-02b1e712b7ee334fa3fefe024c340977',
'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e',
'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e',
'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc',
'NetworkRequestList': '1.1-beeab521ac9450f1f5ef4eaa945a783c',
'NUMACell': '1.2-cb9c3b08cc1c418d021492f788d04173',
'NUMAPagesTopology': '1.0-97d93f70a68625b5f29ff63a40a4f612',
'NUMATopology': '1.2-790f6bdff85bf6e5677f409f3a4f1c6a',
'PciDevice': '1.3-e059641df10e85d464672c5183a9473b',
'PciDeviceList': '1.1-38cbe2d3c23b9e46f7a74b486abcad85',
'PciDevicePool': '1.0-d6ed1abe611c9947345a44155abe6f11',
'PciDevicePoolList': '1.0-d31e08e0ff620a4df7cc2014b6c50da8',
'Quotas': '1.2-36098cf2143e6535873c3fa3d6fe56f7',
'QuotasNoOp': '1.2-164c628906b170fd946a7672e85e4935',
'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2',
'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b',
'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f',
'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576',
'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c',
'Service': '1.9-82bbfd46a744a9c89bc44b47a1b81683',
'ServiceList': '1.7-b856301eb7714839248e189bf4886168',
'Tag': '1.0-a11531f4e4e3166eef6243d6d58a18bd',
'TagList': '1.0-e89bf8c8055f1f1d654fb44f0abf1f53',
'TestSubclassedObject': '1.6-87177ccbefd7a740a9e261f958e15b00',
'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2',
'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
}
object_relationships = {
'BlockDeviceMapping': {'Instance': '1.18'},
'ComputeNode': {'PciDevicePoolList': '1.0'},
'FixedIP': {'Instance': '1.18', 'Network': '1.2',
'VirtualInterface': '1.0',
'FloatingIPList': '1.7'},
'FloatingIP': {'FixedIP': '1.8'},
'Instance': {'InstanceFault': '1.2',
'InstanceInfoCache': '1.5',
'InstanceNUMATopology': '1.1',
'PciDeviceList': '1.1',
'TagList': '1.0',
'SecurityGroupList': '1.0',
'Flavor': '1.1',
'InstancePCIRequests': '1.1'},
'InstanceNUMACell': {'VirtCPUTopology': '1.0'},
'MyObj': {'MyOwnedObject': '1.0'},
'SecurityGroupRule': {'SecurityGroup': '1.1'},
'Service': {'ComputeNode': '1.10'},
'TestSubclassedObject': {'MyOwnedObject': '1.0'}
}
class TestObjectVersions(test.TestCase):
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
"""Follow a chain of remotable things down to the original function."""
if isinstance(thing, classmethod):
return self._find_remotable_method(cls, thing.__get__(None, cls))
elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
return self._find_remotable_method(cls, thing.original_fn,
parent_was_remotable=True)
elif parent_was_remotable:
# We must be the first non-remotable thing underneath a stack of
# remotable things (i.e. the actual implementation method)
return thing
else:
# This means the top-level thing never hit a remotable layer
return None
def _get_fingerprint(self, obj_name):
obj_class = base.NovaObject._obj_classes[obj_name][0]
fields = obj_class.fields.items()
fields.sort()
methods = []
for name in dir(obj_class):
thing = getattr(obj_class, name)
if inspect.ismethod(thing) or isinstance(thing, classmethod):
method = self._find_remotable_method(obj_class, thing)
if method:
methods.append((name, inspect.getargspec(method)))
methods.sort()
# NOTE(danms): Things that need a version bump are any fields
# and their types, or the signatures of any remotable methods.
# Of course, these are just the mechanical changes we can detect,
# but many other things may require a version bump (method behavior
# and return value changes, for example).
if hasattr(obj_class, 'child_versions'):
relevant_data = (fields, methods, obj_class.child_versions)
else:
relevant_data = (fields, methods)
fingerprint = '%s-%s' % (obj_class.VERSION,
hashlib.md5(str(relevant_data)).hexdigest())
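        # Illustrative result (taken from the expected hashes above): MyObj at
        # VERSION '1.6' fingerprints as '1.6-02b1e712b7ee334fa3fefe024c340977',
        # i.e. '<VERSION>-<md5 of the fields, remotable method signatures and,
        # if present, child_versions>'.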
return fingerprint
def test_versions(self):
fingerprints = {}
for obj_name in base.NovaObject._obj_classes:
fingerprints[obj_name] = self._get_fingerprint(obj_name)
if os.getenv('GENERATE_HASHES'):
file('object_hashes.txt', 'w').write(
pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
stored = set(object_data.items())
computed = set(fingerprints.items())
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, hash in changed:
expected[name] = object_data.get(name)
actual[name] = fingerprints.get(name)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def _build_tree(self, tree, obj_class):
obj_name = obj_class.obj_name()
if obj_name in tree:
return
for name, field in obj_class.fields.items():
if isinstance(field._type, fields.Object):
sub_obj_name = field._type._obj_name
sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
self._build_tree(tree, sub_obj_class)
tree.setdefault(obj_name, {})
tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
def test_relationships(self):
tree = {}
for obj_name in base.NovaObject._obj_classes.keys():
self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
stored = set([(x, str(y)) for x, y in object_relationships.items()])
computed = set([(x, str(y)) for x, y in tree.items()])
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, deps in changed:
expected[name] = object_relationships.get(name)
actual[name] = tree.get(name)
self.assertEqual(expected, actual,
'Some objects have changed dependencies. '
'Please make sure to bump the versions of '
'parent objects and provide a rule in their '
'obj_make_compatible() routines to backlevel '
'the child object.')
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
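        # For example (illustrative): an object whose VERSION is '1.6' is
        # converted to target versions 1.0 through 1.5 by the loop below.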
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
obj_class().obj_to_primitive(target_version=test_version)
def test_obj_relationships_in_order(self):
        # Iterate all object classes and verify that the entries in each
        # obj_relationships mapping are listed in order: the parent version
        # must strictly increase and the child version must never decrease
        # from one entry to the next.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
for field, versions in obj_class.obj_relationships.items():
last_my_version = (0, 0)
last_child_version = (0, 0)
for my_version, child_version in versions:
_my_version = utils.convert_version_to_tuple(my_version)
_ch_version = utils.convert_version_to_tuple(child_version)
self.assertTrue((last_my_version < _my_version
and last_child_version <= _ch_version),
'Object %s relationship '
'%s->%s for field %s is out of order' % (
obj_name, my_version, child_version,
field))
last_my_version = _my_version
last_child_version = _ch_version
|
|
import glob
import sh
import subprocess
from multiprocessing import cpu_count
from os import environ, utime
from os.path import dirname, exists, join
from pathlib import Path
from shutil import copy2
from pythonforandroid.logger import info, warning, shprint
from pythonforandroid.patching import version_starts_with
from pythonforandroid.recipe import Recipe, TargetPythonRecipe
from pythonforandroid.util import (
current_directory,
ensure_dir,
walk_valid_filens,
BuildInterruptingException,
)
NDK_API_LOWER_THAN_SUPPORTED_MESSAGE = (
'Target ndk-api is {ndk_api}, '
'but the python3 recipe supports only {min_ndk_api}+'
)
class Python3Recipe(TargetPythonRecipe):
'''
The python3's recipe
^^^^^^^^^^^^^^^^^^^^
The python 3 recipe can be built with some extra python modules, but to do
so, we need some libraries. By default, we ship the python3 recipe with
some common libraries, defined in ``depends``. We also support some optional
    libraries, which are less common than the ones defined in ``depends``, so
we added them as optional dependencies (``opt_depends``).
Below you have a relationship between the python modules and the recipe
libraries::
- _ctypes: you must add the recipe for ``libffi``.
- _sqlite3: you must add the recipe for ``sqlite3``.
- _ssl: you must add the recipe for ``openssl``.
- _bz2: you must add the recipe for ``libbz2`` (optional).
- _lzma: you must add the recipe for ``liblzma`` (optional).
.. note:: This recipe can be built only against API 21+.
.. versionchanged:: 2019.10.06.post0
- Refactored from deleted class ``python.GuestPythonRecipe`` into here
- Added optional dependencies: :mod:`~pythonforandroid.recipes.libbz2`
and :mod:`~pythonforandroid.recipes.liblzma`
.. versionchanged:: 0.6.0
Refactored into class
:class:`~pythonforandroid.python.GuestPythonRecipe`
'''
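    # Usage sketch (not part of the original recipe): the optional compression
    # modules are only built when the corresponding recipes are included in the
    # build, e.g. with the usual requirements syntax (assumed here):
    #
    #   p4a apk --requirements=python3,libbz2,liblzma,... [other options]
    #
    # which makes _bz2.so and _lzma.so available in the resulting python bundle.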
version = '3.8.9'
url = 'https://www.python.org/ftp/python/{version}/Python-{version}.tgz'
name = 'python3'
patches = [
'patches/pyconfig_detection.patch',
'patches/reproducible-buildinfo.diff',
# Python 3.7.1
('patches/py3.7.1_fix-ctypes-util-find-library.patch', version_starts_with("3.7")),
('patches/py3.7.1_fix-zlib-version.patch', version_starts_with("3.7")),
# Python 3.8.1 & 3.9.X
('patches/py3.8.1.patch', version_starts_with("3.8")),
('patches/py3.8.1.patch', version_starts_with("3.9"))
]
if sh.which('lld') is not None:
patches = patches + [
("patches/py3.7.1_fix_cortex_a8.patch", version_starts_with("3.7")),
("patches/py3.8.1_fix_cortex_a8.patch", version_starts_with("3.8")),
("patches/py3.8.1_fix_cortex_a8.patch", version_starts_with("3.9"))
]
depends = ['hostpython3', 'sqlite3', 'openssl', 'libffi']
# those optional depends allow us to build python compression modules:
# - _bz2.so
# - _lzma.so
opt_depends = ['libbz2', 'liblzma']
    '''The optional libraries against which we would like our python to be linked.'''
configure_args = (
'--host={android_host}',
'--build={android_build}',
'--enable-shared',
'--enable-ipv6',
'ac_cv_file__dev_ptmx=yes',
'ac_cv_file__dev_ptc=no',
'--without-ensurepip',
'ac_cv_little_endian_double=yes',
'--prefix={prefix}',
'--exec-prefix={exec_prefix}',
'--enable-loadable-sqlite-extensions')
'''The configure arguments needed to build the python recipe. Those are
    used in method :meth:`build_arch` (unless overridden, as python3's
    recipe does).
'''
MIN_NDK_API = 21
    '''Sets the minimal ndk api number needed to use the recipe.
    .. warning:: This recipe can be built only against API 21+, so any class
        that inherits from :class:`GuestPythonRecipe` will have this
        limitation.
    '''
stdlib_dir_blacklist = {
'__pycache__',
'test',
'tests',
'lib2to3',
'ensurepip',
'idlelib',
'tkinter',
}
'''The directories that we want to omit for our python bundle'''
stdlib_filen_blacklist = [
'*.py',
'*.exe',
'*.whl',
]
'''The file extensions that we want to blacklist for our python bundle'''
site_packages_dir_blacklist = {
'__pycache__',
'tests'
}
'''The directories from site packages dir that we don't want to be included
in our python bundle.'''
site_packages_filen_blacklist = [
'*.py'
]
'''The file extensions from site packages dir that we don't want to be
included in our python bundle.'''
compiled_extension = '.pyc'
'''the default extension for compiled python files.
.. note:: the default extension for compiled python files has been .pyo for
python 2.x-3.4 but as of Python 3.5, the .pyo filename extension is no
longer used and has been removed in favour of extension .pyc
'''
def __init__(self, *args, **kwargs):
self._ctx = None
super().__init__(*args, **kwargs)
@property
def _libpython(self):
'''return the python's library name (with extension)'''
return 'libpython{link_version}.so'.format(
link_version=self.link_version
)
@property
def link_version(self):
'''return the python's library link version e.g. 3.7m, 3.8'''
major, minor = self.major_minor_version_string.split('.')
flags = ''
if major == '3' and int(minor) < 8:
flags += 'm'
return '{major}.{minor}{flags}'.format(
major=major,
minor=minor,
flags=flags
)
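    # For example (illustrative): python 3.7.x links as '3.7m' (so the library
    # is 'libpython3.7m.so'), while 3.8.x and newer drop the ABI flag and link
    # as '3.8', '3.9', ...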
def include_root(self, arch_name):
return join(self.get_build_dir(arch_name), 'Include')
def link_root(self, arch_name):
return join(self.get_build_dir(arch_name), 'android-build')
def should_build(self, arch):
return not Path(self.link_root(arch.arch), self._libpython).is_file()
def prebuild_arch(self, arch):
super().prebuild_arch(arch)
self.ctx.python_recipe = self
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = environ.copy()
env['HOSTARCH'] = arch.command_prefix
env['CC'] = arch.get_clang_exe(with_target=True)
env['PATH'] = (
'{hostpython_dir}:{old_path}').format(
hostpython_dir=self.get_recipe(
'host' + self.name, self.ctx).get_path_to_python(),
old_path=env['PATH'])
env['CFLAGS'] = ' '.join(
[
'-fPIC',
'-DANDROID',
'-D__ANDROID_API__={}'.format(self.ctx.ndk_api),
]
)
env['LDFLAGS'] = env.get('LDFLAGS', '')
if sh.which('lld') is not None:
# Note: The -L. is to fix a bug in python 3.7.
# https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=234409
env['LDFLAGS'] += ' -L. -fuse-ld=lld'
else:
warning('lld not found, linking without it. '
'Consider installing lld if linker errors occur.')
return env
def set_libs_flags(self, env, arch):
'''Takes care to properly link libraries with python depending on our
requirements and the attribute :attr:`opt_depends`.
'''
def add_flags(include_flags, link_dirs, link_libs):
env['CPPFLAGS'] = env.get('CPPFLAGS', '') + include_flags
env['LDFLAGS'] = env.get('LDFLAGS', '') + link_dirs
env['LIBS'] = env.get('LIBS', '') + link_libs
if 'sqlite3' in self.ctx.recipe_build_order:
info('Activating flags for sqlite3')
recipe = Recipe.get_recipe('sqlite3', self.ctx)
add_flags(' -I' + recipe.get_build_dir(arch.arch),
' -L' + recipe.get_lib_dir(arch), ' -lsqlite3')
if 'libffi' in self.ctx.recipe_build_order:
info('Activating flags for libffi')
recipe = Recipe.get_recipe('libffi', self.ctx)
            # In order to force the correct linkage for our libffi library, we
            # set the following variable to point to where our libffi.pc file
            # is, because the python build system uses pkg-config to configure it.
env['PKG_CONFIG_PATH'] = recipe.get_build_dir(arch.arch)
add_flags(' -I' + ' -I'.join(recipe.get_include_dirs(arch)),
' -L' + join(recipe.get_build_dir(arch.arch), '.libs'),
' -lffi')
if 'openssl' in self.ctx.recipe_build_order:
info('Activating flags for openssl')
recipe = Recipe.get_recipe('openssl', self.ctx)
self.configure_args += \
('--with-openssl=' + recipe.get_build_dir(arch.arch),)
add_flags(recipe.include_flags(arch),
recipe.link_dirs_flags(arch), recipe.link_libs_flags())
for library_name in {'libbz2', 'liblzma'}:
if library_name in self.ctx.recipe_build_order:
info(f'Activating flags for {library_name}')
recipe = Recipe.get_recipe(library_name, self.ctx)
add_flags(recipe.get_library_includes(arch),
recipe.get_library_ldflags(arch),
recipe.get_library_libs_flag())
        # The python build system contains a hardcoded zlib version, which
        # prevents the zlib module from building. Here we search for android's
        # zlib version and set the right flags, so python can be built with
        # android's zlib.
info("Activating flags for android's zlib")
zlib_lib_path = join(self.ctx.ndk_platform, 'usr', 'lib')
zlib_includes = join(self.ctx.ndk_dir, 'sysroot', 'usr', 'include')
zlib_h = join(zlib_includes, 'zlib.h')
try:
with open(zlib_h) as fileh:
zlib_data = fileh.read()
except IOError:
raise BuildInterruptingException(
"Could not determine android's zlib version, no zlib.h ({}) in"
" the NDK dir includes".format(zlib_h)
)
for line in zlib_data.split('\n'):
if line.startswith('#define ZLIB_VERSION '):
break
else:
raise BuildInterruptingException(
                'Could not parse zlib.h...so we cannot find the zlib version '
                'required by the python build.'
)
env['ZLIB_VERSION'] = line.replace('#define ZLIB_VERSION ', '')
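        # Illustrative value: for a zlib.h containing
        #   #define ZLIB_VERSION "1.2.11"
        # the resulting ZLIB_VERSION environment variable is '"1.2.11"'
        # (the surrounding quotes are kept, as they appear in the header).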
add_flags(' -I' + zlib_includes, ' -L' + zlib_lib_path, ' -lz')
return env
def build_arch(self, arch):
if self.ctx.ndk_api < self.MIN_NDK_API:
raise BuildInterruptingException(
NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format(
ndk_api=self.ctx.ndk_api, min_ndk_api=self.MIN_NDK_API
),
)
recipe_build_dir = self.get_build_dir(arch.arch)
# Create a subdirectory to actually perform the build
build_dir = join(recipe_build_dir, 'android-build')
ensure_dir(build_dir)
# TODO: Get these dynamically, like bpo-30386 does
sys_prefix = '/usr/local'
sys_exec_prefix = '/usr/local'
env = self.get_recipe_env(arch)
env = self.set_libs_flags(env, arch)
android_build = sh.Command(
join(recipe_build_dir,
'config.guess'))().stdout.strip().decode('utf-8')
with current_directory(build_dir):
if not exists('config.status'):
shprint(
sh.Command(join(recipe_build_dir, 'configure')),
*(' '.join(self.configure_args).format(
android_host=env['HOSTARCH'],
android_build=android_build,
prefix=sys_prefix,
exec_prefix=sys_exec_prefix)).split(' '),
_env=env)
shprint(
sh.make, 'all', '-j', str(cpu_count()),
'INSTSONAME={lib_name}'.format(lib_name=self._libpython),
_env=env
)
# TODO: Look into passing the path to pyconfig.h in a
# better way, although this is probably acceptable
sh.cp('pyconfig.h', join(recipe_build_dir, 'Include'))
def compile_python_files(self, dir):
'''
        Compile (recursively) the python files inside a given folder.
        .. note:: python2 compiled the files into the .pyo extension, but as
            of Python 3.5, the .pyo filename extension is no longer used;
            compiled files use .pyc (https://www.python.org/dev/peps/pep-0488)
'''
args = [self.ctx.hostpython]
args += ['-OO', '-m', 'compileall', '-b', '-f', dir]
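        # The resulting call is equivalent to running (illustrative path):
        #   <hostpython> -OO -m compileall -b -f <dir>
        # where '-b' writes the .pyc files next to the sources (legacy layout)
        # and '-f' forces recompilation even if timestamps look up to date.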
subprocess.call(args)
def create_python_bundle(self, dirn, arch):
"""
Create a packaged python bundle in the target directory, by
copying all the modules and standard library to the right
place.
"""
# Todo: find a better way to find the build libs folder
modules_build_dir = join(
self.get_build_dir(arch.arch),
'android-build',
'build',
'lib.linux{}-{}-{}'.format(
'2' if self.version[0] == '2' else '',
arch.command_prefix.split('-')[0],
self.major_minor_version_string
))
# Compile to *.pyc/*.pyo the python modules
self.compile_python_files(modules_build_dir)
# Compile to *.pyc/*.pyo the standard python library
self.compile_python_files(join(self.get_build_dir(arch.arch), 'Lib'))
# Compile to *.pyc/*.pyo the other python packages (site-packages)
self.compile_python_files(self.ctx.get_python_install_dir())
# Bundle compiled python modules to a folder
modules_dir = join(dirn, 'modules')
c_ext = self.compiled_extension
ensure_dir(modules_dir)
module_filens = (glob.glob(join(modules_build_dir, '*.so')) +
glob.glob(join(modules_build_dir, '*' + c_ext)))
info("Copy {} files into the bundle".format(len(module_filens)))
for filen in module_filens:
info(" - copy {}".format(filen))
copy2(filen, modules_dir)
# zip up the standard library
stdlib_zip = join(dirn, 'stdlib.zip')
with current_directory(join(self.get_build_dir(arch.arch), 'Lib')):
stdlib_filens = list(walk_valid_filens(
'.', self.stdlib_dir_blacklist, self.stdlib_filen_blacklist))
if 'SOURCE_DATE_EPOCH' in environ:
# for reproducible builds
stdlib_filens.sort()
timestamp = int(environ['SOURCE_DATE_EPOCH'])
for filen in stdlib_filens:
utime(filen, (timestamp, timestamp))
info("Zip {} files into the bundle".format(len(stdlib_filens)))
shprint(sh.zip, '-X', stdlib_zip, *stdlib_filens)
# copy the site-packages into place
ensure_dir(join(dirn, 'site-packages'))
ensure_dir(self.ctx.get_python_install_dir())
# TODO: Improve the API around walking and copying the files
with current_directory(self.ctx.get_python_install_dir()):
filens = list(walk_valid_filens(
'.', self.site_packages_dir_blacklist,
self.site_packages_filen_blacklist))
info("Copy {} files into the site-packages".format(len(filens)))
for filen in filens:
info(" - copy {}".format(filen))
ensure_dir(join(dirn, 'site-packages', dirname(filen)))
copy2(filen, join(dirn, 'site-packages', filen))
# copy the python .so files into place
python_build_dir = join(self.get_build_dir(arch.arch),
'android-build')
python_lib_name = 'libpython' + self.link_version
shprint(
sh.cp,
join(python_build_dir, python_lib_name + '.so'),
join(self.ctx.bootstrap.dist_dir, 'libs', arch.arch)
)
info('Renaming .so files to reflect cross-compile')
self.reduce_object_file_names(join(dirn, 'site-packages'))
return join(dirn, 'site-packages')
recipe = Python3Recipe()
|
|
"""
<Program Name>
ed25519_keys.py
<Author>
Vladimir Diaz <[email protected]>
<Started>
September 24, 2013.
<Copyright>
See LICENSE for licensing information.
<Purpose>
The goal of this module is to support ed25519 signatures. ed25519 is an
elliptic-curve public key signature scheme, its main strength being small
signatures (64 bytes) and small public keys (32 bytes).
http://ed25519.cr.yp.to/
'securesystemslib/ed25519_keys.py' calls 'ed25519.py', which is the pure Python
implementation of ed25519 optimized for a faster runtime. The Python
reference implementation is concise, but very slow (verifying signatures
takes ~9 seconds on an Intel core 2 duo @ 2.2 ghz x 2). The optimized
version can verify signatures in ~2 seconds.
http://ed25519.cr.yp.to/software.html
https://github.com/pyca/ed25519
Optionally, ed25519 cryptographic operations may be executed by PyNaCl, which
is a Python binding to the NaCl library and is faster than the pure python
implementation. Verifying signatures can take approximately 0.0009 seconds.
PyNaCl relies on the libsodium C library. PyNaCl is required for key and
signature generation. Verifying signatures may be done in pure Python.
https://github.com/pyca/pynacl
https://github.com/jedisct1/libsodium
http://nacl.cr.yp.to/
https://github.com/pyca/ed25519
  The ed25519-related functions included here are generate_public_and_private(),
  create_signature() and verify_signature(). The 'ed25519' and PyNaCl (i.e., 'nacl') modules used
by ed25519_keys.py perform the actual ed25519 computations and the functions
listed above can be viewed as an easy-to-use public interface.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# 'binascii' required for hexadecimal conversions. Signatures and
# public/private keys are hexlified.
import binascii
# TODO: The 'warnings' module is needed to temporarily suppress user warnings
# raised by 'pynacl' (as of version 0.2.3). Warnings temporarily suppressed
# here to avoid confusing users with an unexpected error message that gives
# no indication of its source. These warnings are printed when using
# the repository tools, including for clients that request an update.
# http://docs.python.org/2/library/warnings.html#temporarily-suppressing-warnings
import warnings
# 'os' required to generate OS-specific randomness (os.urandom) suitable for
# cryptographic use.
# http://docs.python.org/2/library/os.html#miscellaneous-functions
import os
# Import the python implementation of the ed25519 algorithm provided by pyca,
# which is an optimized version of the one provided by ed25519's authors.
# Note: The pure Python version does not include protection against side-channel
# attacks. Verifying signatures can take approximately 2 seconds on an intel
# core 2 duo @ 2.2 ghz x 2. Optionally, the PyNaCl module may be used to
# speed up ed25519 cryptographic operations.
# http://ed25519.cr.yp.to/software.html
# https://github.com/pyca/ed25519
# https://github.com/pyca/pynacl
#
# Import the PyNaCl library, if available. It is recommended this library be
# used over the pure python implementation of ed25519, due to its speedier
# routines and side-channel protections available in the libsodium library.
#
# TODO: Version 0.2.3 of 'pynacl' prints: "UserWarning: reimporting '...' might
# overwrite older definitions." when importing 'nacl.signing'. Suppress user
# warnings temporarily (at least until this issue is fixed by PyNaCl).
#
# Note: A 'pragma: no cover' comment is intended for test 'coverage'. Lines
# or code blocks with this comment should not be flagged as uncovered.
# pynacl will always be installed prior to running the unit tests.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
try:
import nacl.signing
import nacl.encoding
# PyNaCl's 'cffi' dependency may raise an 'IOError' exception when importing
# 'nacl.signing'.
except (ImportError, IOError): # pragma: no cover
pass
# The optimized pure Python implementation of ed25519 provided by TUF. If
# PyNaCl cannot be imported and an attempt to use is made in this module, a
# 'securesystemslib.exceptions.UnsupportedLibraryError' exception is raised.
import securesystemslib._vendor.ed25519.ed25519
import securesystemslib.formats
import securesystemslib.exceptions
# Supported ed25519 signing schemes: 'ed25519'. The pure Python implementation
# (i.e., ed25519') and PyNaCl (i.e., 'nacl', libsodium + Python bindings)
# modules are currently supported in the creation of 'ed25519' signatures.
# Previously, a distinction was made between signatures made by the pure Python
# implementation and PyNaCl.
_SUPPORTED_ED25519_SIGNING_SCHEMES = ['ed25519']
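# A minimal round-trip sketch of the public interface defined below.  This is
# an illustration (not part of the original module) mirroring the doctests in
# the individual functions:
#
#   >>> public, private = generate_public_and_private()
#   >>> sig, scheme = create_signature(public, private, b'hello', 'ed25519')
#   >>> verify_signature(public, scheme, sig, b'hello', use_pynacl=True)
#   True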
def generate_public_and_private():
"""
<Purpose>
Generate a pair of ed25519 public and private keys with PyNaCl. The public
    and private keys returned conform to 'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively, and have the form:
'\xa2F\x99\xe0\x86\x80%\xc8\xee\x11\xb95T\xd9\...'
An ed25519 seed key is a random 32-byte string. Public keys are also 32
bytes.
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.ED25519PUBLIC_SCHEMA.matches(public)
True
>>> securesystemslib.formats.ED25519SEED_SCHEMA.matches(private)
True
<Arguments>
None.
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError, if the PyNaCl ('nacl') module is unavailable.
NotImplementedError, if a randomness source is not found by 'os.urandom'.
<Side Effects>
The ed25519 keys are generated by first creating a random 32-byte seed
with os.urandom() and then calling PyNaCl's nacl.signing.SigningKey().
<Returns>
    A (public, private) tuple whose elements conform to 'securesystemslib.formats.ED25519PUBLIC_SCHEMA'
and 'securesystemslib.formats.ED25519SEED_SCHEMA', respectively.
"""
# Generate ed25519's seed key by calling os.urandom(). The random bytes
  # returned should be suitable for cryptographic use and are OS-specific.
# Raise 'NotImplementedError' if a randomness source is not found.
# ed25519 seed keys are fixed at 32 bytes (256-bit keys).
# http://blog.mozilla.org/warner/2011/11/29/ed25519-keys/
seed = os.urandom(32)
public = None
# Generate the public key. PyNaCl (i.e., 'nacl' module) performs the actual
# key generation.
try:
nacl_key = nacl.signing.SigningKey(seed)
public = nacl_key.verify_key.encode(encoder=nacl.encoding.RawEncoder())
except NameError: # pragma: no cover
    message = 'The PyNaCl library and/or its dependencies are unavailable.'
raise securesystemslib.exceptions.UnsupportedLibraryError(message)
return public, seed
def create_signature(public_key, private_key, data, scheme):
"""
<Purpose>
Return a (signature, scheme) tuple, where the signature scheme is 'ed25519'
and is always generated by PyNaCl (i.e., 'nacl'). The signature returned
conforms to 'securesystemslib.formats.ED25519SIGNATURE_SCHEMA', and has the
form:
'\xae\xd7\x9f\xaf\x95{bP\x9e\xa8YO Z\x86\x9d...'
A signature is a 64-byte string.
>>> public, private = generate_public_and_private()
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'ed25519'
>>> signature, scheme = \
create_signature(public, private, data, scheme)
>>> securesystemslib.formats.ED25519SIGNATURE_SCHEMA.matches(signature)
True
>>> scheme == 'ed25519'
True
>>> signature, scheme = \
create_signature(public, private, data, scheme)
>>> securesystemslib.formats.ED25519SIGNATURE_SCHEMA.matches(signature)
True
>>> scheme == 'ed25519'
True
<Arguments>
public:
The ed25519 public key, which is a 32-byte string.
private:
The ed25519 private key, which is a 32-byte string.
data:
Data object used by create_signature() to generate the signature.
scheme:
The signature scheme used to generate the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
<Side Effects>
nacl.signing.SigningKey.sign() called to generate the actual signature.
<Returns>
    A (signature, scheme) tuple, where the signature conforms to
    'securesystemslib.formats.ED25519SIGNATURE_SCHEMA'. ed25519 signatures
    are 64 bytes.
"""
# Does 'public_key' have the correct format?
# This check will ensure 'public_key' conforms to
# 'securesystemslib.formats.ED25519PUBLIC_SCHEMA', which must have length 32
# bytes. Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ED25519PUBLIC_SCHEMA.check_match(public_key)
# Is 'private_key' properly formatted?
securesystemslib.formats.ED25519SEED_SCHEMA.check_match(private_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ED25519_SIG_SCHEMA.check_match(scheme)
# Signing the 'data' object requires a seed and public key.
# nacl.signing.SigningKey.sign() generates the signature.
public = public_key
private = private_key
signature = None
# The private and public keys have been validated above by
# 'securesystemslib.formats' and should be 32-byte strings. This is a
# defensive check for a valid 'scheme', which should have already been
# validated in the check_match() above.
if scheme == 'ed25519': #pragma: no cover
try:
nacl_key = nacl.signing.SigningKey(private)
nacl_sig = nacl_key.sign(data)
signature = nacl_sig.signature
except NameError: # pragma: no cover
      message = 'The PyNaCl library and/or its dependencies are unavailable.'
raise securesystemslib.exceptions.UnsupportedLibraryError(message)
except (ValueError, TypeError, nacl.exceptions.CryptoError) as e:
message = 'An "ed25519" signature could not be created with PyNaCl.'
raise securesystemslib.exceptions.CryptoError(message + str(e))
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
return signature, scheme
def verify_signature(public_key, scheme, signature, data, use_pynacl=False):
"""
<Purpose>
Determine whether the private key corresponding to 'public_key' produced
    'signature'. verify_signature() will use the 'public_key', 'scheme',
    'signature', and 'data' arguments to complete the verification.
>>> public, private = generate_public_and_private()
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'ed25519'
>>> signature, scheme = \
create_signature(public, private, data, scheme)
>>> verify_signature(public, scheme, signature, data, use_pynacl=False)
True
>>> verify_signature(public, scheme, signature, data, use_pynacl=True)
True
>>> bad_data = b'The sly brown fox jumps over the lazy dog'
>>> bad_signature, scheme = \
create_signature(public, private, bad_data, scheme)
>>> verify_signature(public, scheme, bad_signature, data, use_pynacl=False)
False
<Arguments>
public_key:
The public key is a 32-byte string.
scheme:
'ed25519' signature scheme used by either the pure python
implementation (i.e., ed25519.py) or PyNacl (i.e., 'nacl').
signature:
The signature is a 64-byte string.
data:
Data object used by securesystemslib.ed25519_keys.create_signature() to
generate 'signature'. 'data' is needed here to verify the signature.
use_pynacl:
True, if the ed25519 signature should be verified by PyNaCl. False,
if the signature should be verified with the pure Python implementation
of ed25519 (slower).
<Exceptions>
securesystemslib.exceptions.UnsupportedAlgorithmError. Raised if the
signature scheme 'scheme' is not one supported by
securesystemslib.ed25519_keys.create_signature().
securesystemslib.exceptions.FormatError. Raised if the arguments are
improperly formatted.
<Side Effects>
securesystemslib._vendor.ed25519.ed25519.checkvalid() called to do the
actual verification. nacl.signing.VerifyKey.verify() called if
'use_pynacl' is True.
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
# Does 'public_key' have the correct format?
# This check will ensure 'public_key' conforms to
# 'securesystemslib.formats.ED25519PUBLIC_SCHEMA', which must have length 32
# bytes. Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ED25519PUBLIC_SCHEMA.check_match(public_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ED25519_SIG_SCHEMA.check_match(scheme)
# Is 'signature' properly formatted?
securesystemslib.formats.ED25519SIGNATURE_SCHEMA.check_match(signature)
# Is 'use_pynacl' properly formatted?
securesystemslib.formats.BOOLEAN_SCHEMA.check_match(use_pynacl)
# Verify 'signature'. Before returning the Boolean result, ensure 'ed25519'
# was used as the signature scheme. Raise
# 'securesystemslib.exceptions.UnsupportedLibraryError' if 'use_pynacl' is
# True but 'nacl' is unavailable.
public = public_key
valid_signature = False
# This is a defensive check for a valid 'scheme', which should have already
# been validated in the check_match() above.
if scheme in _SUPPORTED_ED25519_SIGNING_SCHEMES: #pragma: no cover
if use_pynacl:
try:
nacl_verify_key = nacl.signing.VerifyKey(public)
nacl_message = nacl_verify_key.verify(data, signature)
valid_signature = True
except NameError: # pragma: no cover
        message = 'The PyNaCl library and/or its dependencies are unavailable.'
raise securesystemslib.exceptions.UnsupportedLibraryError(message)
except nacl.exceptions.BadSignatureError:
pass
# Verify 'ed25519' signature with the pure Python implementation.
else:
try:
securesystemslib._vendor.ed25519.ed25519.checkvalid(signature, data, public)
valid_signature = True
# The pure Python implementation raises 'Exception' if 'signature' is
# invalid.
except Exception as e:
pass
else: #pragma: no cover
message = 'Unsupported ed25519 signature scheme: ' + repr(scheme) + '.\n' + \
'Supported schemes: ' + repr(_SUPPORTED_ED25519_SIGNING_SCHEMES) + '.'
raise securesystemslib.exceptions.UnsupportedAlgorithmError(message)
return valid_signature
if __name__ == '__main__':
# The interactive sessions of the documentation strings can
# be tested by running 'ed25519_keys.py' as a standalone module.
# python -B ed25519_keys.py
import doctest
doctest.testmod()
|
|
# Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MDSService module
"""
import time
import random
from ovs.lib.helpers.decorators import ensure_single
from celery.schedules import crontab
from ovs.celery_run import celery
from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk
from ovs.dal.hybrids.service import Service as DalService
from ovs.dal.hybrids.j_mdsservice import MDSService
from ovs.dal.lists.servicelist import ServiceList
from ovs.dal.lists.servicetypelist import ServiceTypeList
from ovs.dal.lists.vpoollist import VPoolList
from ovs.extensions.generic.configuration import Configuration
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration, MetadataServerClient
from ovs.extensions.generic.system import System
from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException
from ovs.log.logHandler import LogHandler
from volumedriver.storagerouter.storagerouterclient import MDSNodeConfig, MDSMetaDataBackendConfig
from volumedriver.storagerouter import storagerouterclient
logger = LogHandler.get('lib', name='mds')
storagerouterclient.Logger.setupLogging(LogHandler.load_path('storagerouterclient'))
storagerouterclient.Logger.enableLogging()
class MDSServiceController(object):
"""
Contains all BLL related to MDSServices
"""
@staticmethod
def prepare_mds_service(client, storagerouter, vpool, fresh_only=True, reload_config=False):
"""
Prepares an MDS service:
* Creates the required configuration
* Sets up the service files
Assumes the StorageRouter and VPool are already configured with a StorageDriver and that all model-wise
configuration regarding both is completed.
"""
mdsservice_type = ServiceTypeList.get_by_name('MetadataServer')
storagedriver = [sd for sd in vpool.storagedrivers if sd.storagerouter_guid == storagerouter.guid][0]
# Fetch service sequence number
service_number = -1
for mds_service in vpool.mds_services:
if mds_service.service.storagerouter_guid == storagerouter.guid:
service_number = max(mds_service.number, service_number)
if fresh_only is True and service_number >= 0:
return None # There are already one or more MDS services running, aborting
service_number += 1
# Find free port
occupied_ports = []
for service in mdsservice_type.services:
if service.storagerouter_guid == storagerouter.guid:
occupied_ports.append(service.ports[0])
port = System.get_free_ports(Configuration.get('ovs.ports.mds'),
exclude=occupied_ports, nr=1, client=client)[0]
# Add service to the model
service = DalService()
service.name = 'metadataserver_{0}_{1}'.format(vpool.name, service_number)
service.type = mdsservice_type
service.storagerouter = storagerouter
service.ports = [port]
service.save()
mds_service = MDSService()
mds_service.service = service
mds_service.vpool = vpool
mds_service.number = service_number
mds_service.save()
mds_nodes = []
for service in mdsservice_type.services:
if service.storagerouter_guid == storagerouter.guid:
mds_service = service.mds_service
if mds_service.vpool_guid == vpool.guid:
mds_nodes.append({'host': service.storagerouter.ip,
'port': service.ports[0],
'db_directory': '{0}/mds_{1}_{2}'.format(storagedriver.mountpoint_md,
vpool.name,
mds_service.number),
'scratch_directory': '{0}/mds_{1}_{2}'.format(storagedriver.mountpoint_temp,
vpool.name,
mds_service.number)})
# Generate the correct section in the Storage Driver's configuration
storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
storagedriver_config.load(client)
storagedriver_config.clean() # Clean out obsolete values
storagedriver_config.configure_metadata_server(mds_nodes=mds_nodes)
storagedriver_config.save(client, reload_config=reload_config)
return mds_service
@staticmethod
def remove_mds_service(mds_service, client, storagerouter, vpool, reload_config):
"""
Removes an MDS service
"""
if len(mds_service.vdisks_guids) > 0:
raise RuntimeError('Cannot remove MDSService that is still serving disks')
mdsservice_type = ServiceTypeList.get_by_name('MetadataServer')
storagedriver = [sd for sd in vpool.storagedrivers if sd.storagerouter_guid == storagerouter.guid][0]
# Clean up model
this_service_number = mds_service.number
service = mds_service.service
mds_service.delete()
service.delete()
# Generate new mds_nodes section
mds_nodes = []
for service in mdsservice_type.services:
if service.storagerouter_guid == storagerouter.guid:
mds_service = service.mds_service
if mds_service.vpool_guid == vpool.guid:
mds_nodes.append({'host': service.storagerouter.ip,
'port': service.ports[0],
'db_directory': '{0}/mds_{1}_{2}'.format(storagedriver.mountpoint_md,
vpool.name,
mds_service.number),
'scratch_directory': '{0}/mds_{1}_{2}'.format(storagedriver.mountpoint_temp,
vpool.name,
mds_service.number)})
# Generate the correct section in the Storage Driver's configuration
storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
storagedriver_config.load(client)
storagedriver_config.clean() # Clean out obsolete values
storagedriver_config.configure_metadata_server(mds_nodes=mds_nodes)
storagedriver_config.save(client, reload_config=reload_config)
tries = 5
cleaned = False
while tries > 0 and cleaned is False:
try:
client.dir_delete(['{0}/mds_{1}_{2}'.format(storagedriver.mountpoint_md,
vpool.name,
this_service_number),
'{0}/mds_{1}_{2}'.format(storagedriver.mountpoint_temp,
vpool.name,
this_service_number)])
logger.debug('MDS files cleaned up')
cleaned = True
except Exception:
time.sleep(5)
logger.debug('Waiting for the MDS service to go down...')
tries -= 1
@staticmethod
def sync_vdisk_to_reality(vdisk):
"""
Syncs a vdisk to reality (except hypervisor)
"""
vdisk.reload_client()
vdisk.invalidate_dynamics(['info'])
config = vdisk.info['metadata_backend_config']
config_dict = {}
for item in config:
if item['ip'] not in config_dict:
config_dict[item['ip']] = []
config_dict[item['ip']].append(item['port'])
mds_dict = {}
for junction in vdisk.mds_services:
service = junction.mds_service.service
storagerouter = service.storagerouter
if config[0]['ip'] == storagerouter.ip and config[0]['port'] == service.ports[0]:
junction.is_master = True
junction.save()
if storagerouter.ip not in mds_dict:
mds_dict[storagerouter.ip] = []
mds_dict[storagerouter.ip].append(service.ports[0])
elif storagerouter.ip in config_dict and service.ports[0] in config_dict[storagerouter.ip]:
junction.is_master = False
junction.save()
if storagerouter.ip not in mds_dict:
mds_dict[storagerouter.ip] = []
mds_dict[storagerouter.ip].append(service.ports[0])
else:
junction.delete()
for ip, ports in config_dict.iteritems():
for port in ports:
if ip not in mds_dict or port not in mds_dict[ip]:
service = ServiceList.get_by_ip_ports(ip, [port])
if service is not None:
mds_service_vdisk = MDSServiceVDisk()
mds_service_vdisk.vdisk = vdisk
mds_service_vdisk.mds_service = service.mds_service
mds_service_vdisk.is_master = config[0]['ip'] == service.storagerouter.ip and config[0]['port'] == service.ports[0]
mds_service_vdisk.save()
@staticmethod
def ensure_safety(vdisk, excluded_storagerouters=None):
"""
Ensures (or tries to ensure) the safety of a given vdisk (except hypervisor).
Assumptions:
* A local overloaded master is better than a non-local non-overloaded master
* Prefer master/services to be on different hosts, a subsequent slave on the same node doesn't add safety
* Don't actively overload services (e.g. configure an MDS as slave causing it to get overloaded)
        * Too much safety is not wanted (it adds load to the nodes while it is not required)
"""
logger.debug('Ensuring MDS safety for vdisk {0}'.format(vdisk.guid))
vdisk.reload_client()
if excluded_storagerouters is None:
excluded_storagerouters = []
maxload = Configuration.get('ovs.storagedriver.mds.maxload')
safety = Configuration.get('ovs.storagedriver.mds.safety')
tlogs = Configuration.get('ovs.storagedriver.mds.tlogs')
services = [mds_service.service for mds_service in vdisk.vpool.mds_services
if mds_service.service.storagerouter not in excluded_storagerouters]
nodes = set(service.storagerouter.ip for service in services)
services_load = {}
service_per_key = {}
for service in services:
load, load_plus = MDSServiceController.get_mds_load(service.mds_service)
services_load[service.guid] = load, load_plus
service_per_key['{0}:{1}'.format(service.storagerouter.ip, service.ports[0])] = service
# List current configuration and filter out excluded services
reconfigure_required = False
reconfigure_reasons = []
vdisk.invalidate_dynamics(['info', 'storagedriver_id', 'storagerouter_guid'])
configs = vdisk.info['metadata_backend_config']
for config in configs:
config['key'] = '{0}:{1}'.format(config['ip'], config['port'])
master_service = None
if len(configs) > 0:
config = configs[0]
if config['key'] in service_per_key:
master_service = service_per_key.get(config['key'])
configs.remove(config)
else:
reconfigure_required = True
reconfigure_reasons.append('Master ({0}:{1}) cannot be used anymore'.format(config['ip'], config['port']))
slave_services = []
for config in configs:
if config['key'] in service_per_key:
slave_services.append(service_per_key[config['key']])
else:
reconfigure_required = True
reconfigure_reasons.append('Slave ({0}:{1}) cannot be used anymore'.format(config['ip'], config['port']))
# Fix services_load
services_per_load = {}
for service in services:
if service == master_service or service in slave_services:
load = services_load[service.guid][0]
else:
load = services_load[service.guid][1]
services_load[service.guid] = load
if load not in services_per_load:
services_per_load[load] = []
services_per_load[load].append(service)
# Further checks if a reconfiguration is required.
service_nodes = []
if master_service is not None:
service_nodes.append(master_service.storagerouter.ip)
for service in slave_services:
ip = service.storagerouter.ip
if ip in service_nodes:
reconfigure_required = True
reconfigure_reasons.append('Multiple MDS services on the same node')
else:
service_nodes.append(ip)
if len(service_nodes) > safety:
# Too much safety
reconfigure_required = True
reconfigure_reasons.append('Too much safety')
if len(service_nodes) < safety and len(service_nodes) < len(nodes):
# Insufficient MDS services configured while there should be sufficient nodes available
reconfigure_required = True
reconfigure_reasons.append('Not enough safety')
if master_service is not None and services_load[master_service.guid] > maxload:
# The master service is overloaded
reconfigure_required = True
reconfigure_reasons.append('Master overloaded')
if master_service is not None and master_service.storagerouter_guid != vdisk.storagerouter_guid:
# The master is not local
reconfigure_required = True
reconfigure_reasons.append('Master is not local')
if any(service for service in slave_services if services_load[service.guid] > maxload):
# There's a slave service overloaded
reconfigure_required = True
reconfigure_reasons.append('One or more slaves overloaded')
if reconfigure_required is False:
logger.debug('No reconfiguration required for vdisk {0}'.format(vdisk.guid))
MDSServiceController.sync_vdisk_to_reality(vdisk)
return
logger.debug('Reconfiguration required for vdisk {0}:'.format(vdisk.guid))
for reason in reconfigure_reasons:
logger.debug('Reason: {0} - vdisk {1}'.format(reason, vdisk.guid))
# Prepare fresh configuration
new_services = []
# Check whether the master (if available) is non-local to the vdisk and/or is overloaded
master_ok = master_service is not None
if master_ok is True:
master_ok = master_service.storagerouter_guid == vdisk.storagerouter_guid and services_load[master_service.guid] <= maxload
if master_ok:
# Add this master to the fresh configuration
new_services.append(master_service)
else:
# Try to find the best non-overloaded local MDS (slave)
candidate_master = None
candidate_master_load = 0
local_mds = None
local_mds_load = 0
for service in services:
load = services_load[service.guid]
if load <= maxload and service.storagerouter_guid == vdisk.storagerouter_guid:
if local_mds is None or local_mds_load > load:
# This service is a non-overloaded local MDS
local_mds = service
local_mds_load = load
if service in slave_services:
if candidate_master is None or candidate_master_load > load:
# This service is a non-overloaded local slave
candidate_master = service
candidate_master_load = load
if candidate_master is not None:
# A non-overloaded local slave was found.
client = MetadataServerClient.load(candidate_master)
try:
amount_of_tlogs = client.catch_up(str(vdisk.volume_id), True)
except RuntimeError as ex:
if 'Namespace does not exist' in ex.message:
client.create_namespace(str(vdisk.volume_id))
amount_of_tlogs = client.catch_up(str(vdisk.volume_id), True)
else:
raise
if amount_of_tlogs < tlogs:
# Almost there. Catching up right now, and continue as soon as it's up-to-date
start = time.time()
client.catch_up(str(vdisk.volume_id), False)
logger.debug('MDS catch up for vdisk {0} took {1}s'.format(vdisk.guid, round(time.time() - start, 2)))
# It's up to date, so add it as a new master
new_services.append(candidate_master)
if master_service is not None:
                        # The current master (if available) is now a candidate to become one of the slaves
slave_services.append(master_service)
else:
# It's not up to date, keep the previous master (if available) and give the local slave
# some more time to catch up
if master_service is not None:
new_services.append(master_service)
new_services.append(candidate_master)
if candidate_master in slave_services:
slave_services.remove(candidate_master)
else:
# There's no non-overloaded local slave found. Keep the current master (if available) and add
# a local MDS (if available) as slave
if master_service is not None:
new_services.append(master_service)
if local_mds is not None:
new_services.append(local_mds)
if local_mds in slave_services:
slave_services.remove(local_mds)
# At this point, there might (or might not) be a (new) master, and a (catching up) slave. The rest of the non-local
# MDS nodes must now be added to the configuration until the safety is reached. There's always one extra
# slave recycled to make sure there's always an (almost) up-to-date slave ready for failover
loads = sorted(load for load in services_per_load.keys() if load <= maxload)
nodes = set(service.storagerouter.ip for service in new_services)
slave_added = False
if len(nodes) < safety:
for load in loads:
for service in services_per_load[load]:
if slave_added is False and service in slave_services and service.storagerouter.ip not in nodes:
try:
SSHClient(service.storagerouter)
new_services.append(service)
slave_services.remove(service)
nodes.add(service.storagerouter.ip)
slave_added = True
except UnableToConnectException:
logger.debug('Skip {0} as it is unreachable'.format(service.storagerouter.ip))
if len(nodes) < safety:
for load in loads:
for service in services_per_load[load]:
if len(nodes) < safety and service.storagerouter.ip not in nodes:
try:
SSHClient(service.storagerouter)
new_services.append(service)
nodes.add(service.storagerouter.ip)
except UnableToConnectException:
logger.debug('Skip {0} as it is unreachable'.format(service.storagerouter.ip))
# Build the new configuration and update the vdisk
configs = []
for service in new_services:
client = MetadataServerClient.load(service)
client.create_namespace(str(vdisk.volume_id))
configs.append(MDSNodeConfig(address=str(service.storagerouter.ip),
port=service.ports[0]))
vdisk.storagedriver_client.update_metadata_backend_config(
volume_id=str(vdisk.volume_id),
metadata_backend_config=MDSMetaDataBackendConfig(configs)
)
MDSServiceController.sync_vdisk_to_reality(vdisk)
logger.debug('Ensuring MDS safety for vdisk {0} completed'.format(vdisk.guid))
@staticmethod
def get_preferred_mds(storagerouter, vpool, include_load=False):
"""
Gets the MDS on this StorageRouter/VPool pair which is preferred to achieve optimal balancing
"""
mds_service = None
for current_mds_service in vpool.mds_services:
if current_mds_service.service.storagerouter_guid == storagerouter.guid:
load = MDSServiceController.get_mds_load(current_mds_service)
if mds_service is None or load < mds_service[1]:
mds_service = (current_mds_service, load)
if include_load is True:
return mds_service
return mds_service[0] if mds_service is not None else None
@staticmethod
def get_mds_load(mds_service):
"""
        Gets a 'load' for an MDS service based on its capacity and the amount of assigned VDisks
"""
service_capacity = float(mds_service.capacity)
if service_capacity < 0:
return 50, 50
if service_capacity == 0:
return float('inf'), float('inf')
usage = len(mds_service.vdisks_guids)
return round(usage / service_capacity * 100.0, 5), round((usage + 1) / service_capacity * 100.0, 5)
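    # Worked example (illustrative): an MDS service with capacity 10 that
    # serves 4 vdisks yields (40.0, 50.0), i.e. its current load and the load
    # it would have once one extra vdisk is assigned to it.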
@staticmethod
def get_mds_storagedriver_config_set(vpool):
"""
Builds a configuration for all StorageRouters from a given VPool with following goals:
* Primary MDS is the local one
* All slaves are on different hosts
* Maximum `mds.safety` nodes are returned
"""
mds_per_storagerouter = {}
mds_per_load = {}
for storagedriver in vpool.storagedrivers:
storagerouter = storagedriver.storagerouter
mds_service, load = MDSServiceController.get_preferred_mds(storagerouter, vpool, include_load=True)
mds_per_storagerouter[storagerouter.guid] = {'host': storagerouter.ip, 'port': mds_service.service.ports[0]}
if load not in mds_per_load:
mds_per_load[load] = []
mds_per_load[load].append(storagerouter.guid)
safety = Configuration.get('ovs.storagedriver.mds.safety')
config_set = {}
for storagerouter_guid in mds_per_storagerouter:
config_set[storagerouter_guid] = [mds_per_storagerouter[storagerouter_guid]]
for load in sorted(mds_per_load.keys()):
if len(config_set[storagerouter_guid]) >= safety:
break
sr_guids = mds_per_load[load]
random.shuffle(sr_guids)
for sr_guid in sr_guids:
if len(config_set[storagerouter_guid]) >= safety:
break
if sr_guid != storagerouter_guid:
config_set[storagerouter_guid].append(mds_per_storagerouter[sr_guid])
return config_set
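    # Illustrative shape of the returned value (hypothetical guids and IPs):
    #   {'<sr_guid_1>': [{'host': '10.0.0.1', 'port': 26300},   # local MDS first
    #                    {'host': '10.0.0.2', 'port': 26300}],  # then remote entries, up to mds.safety
    #    '<sr_guid_2>': [...]}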
@staticmethod
@celery.task(name='ovs.mds.mds_checkup', bind=True, schedule=crontab(minute='30', hour='0,6,12,18'))
@ensure_single(['ovs.mds.mds_checkup'])
def mds_checkup():
"""
Validates the current MDS setup/configuration and takes actions where required
"""
mds_dict = {}
for vpool in VPoolList.get_vpools():
for mds_service in vpool.mds_services:
storagerouter = mds_service.service.storagerouter
if vpool not in mds_dict:
mds_dict[vpool] = {}
if storagerouter not in mds_dict[vpool]:
mds_dict[vpool][storagerouter] = {'client': SSHClient(storagerouter, username='root'),
'services': []}
mds_dict[vpool][storagerouter]['services'].append(mds_service)
for vpool, storagerouter_info in mds_dict.iteritems():
# 1. First, make sure there's at least one MDS on every StorageRouter that's not overloaded
# If not, create an extra MDS for that StorageRouter
for storagerouter in storagerouter_info:
client = mds_dict[vpool][storagerouter]['client']
mds_services = mds_dict[vpool][storagerouter]['services']
has_room = False
for mds_service in mds_services[:]:
if mds_service.capacity == 0 and len(mds_service.vdisks_guids) == 0:
client = SSHClient(storagerouter)
MDSServiceController.remove_mds_service(mds_service, client, storagerouter, vpool, reload_config=True)
mds_services.remove(mds_service)
for mds_service in mds_services:
_, load = MDSServiceController.get_mds_load(mds_service)
if load < Configuration.get('ovs.storagedriver.mds.maxload'):
has_room = True
break
if has_room is False:
mds_service = MDSServiceController.prepare_mds_service(client, storagerouter, vpool,
fresh_only=False, reload_config=True)
if mds_service is None:
raise RuntimeError('Could not add MDS node')
mds_services.append(mds_service)
mds_config_set = MDSServiceController.get_mds_storagedriver_config_set(vpool)
for storagerouter in mds_dict[vpool]:
client = mds_dict[vpool][storagerouter]['client']
storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
storagedriver_config.load(client)
if storagedriver_config.is_new is False:
storagedriver_config.clean() # Clean out obsolete values
storagedriver_config.configure_filesystem(
fs_metadata_backend_mds_nodes=mds_config_set[storagerouter.guid]
)
storagedriver_config.save(client)
# 2. Per VPool, execute a safety check, making sure the master/slave configuration is optimal.
for vdisk in vpool.vdisks:
MDSServiceController.ensure_safety(vdisk)
if __name__ == '__main__':
from ovs.dal.lists.storagerouterlist import StorageRouterList
try:
while True:
output = ['',
'Open vStorage - MDS debug information',
'=====================================',
'timestamp: {0}'.format(time.time()),
'']
for _sr in StorageRouterList.get_storagerouters():
output.append('+ {0} ({1})'.format(_sr.name, _sr.ip))
vpools = set(sd.vpool for sd in _sr.storagedrivers)
for _vpool in vpools:
output.append(' + {0}'.format(_vpool.name))
for _mds_service in _vpool.mds_services:
if _mds_service.service.storagerouter_guid == _sr.guid:
masters, slaves = 0, 0
for _junction in _mds_service.vdisks:
if _junction.is_master:
masters += 1
else:
slaves += 1
capacity = _mds_service.capacity
if capacity == -1:
capacity = 'infinite'
_load, _ = MDSServiceController.get_mds_load(_mds_service)
if _load == float('inf'):
_load = 'infinite'
else:
_load = '{0}%'.format(round(_load, 2))
output.append(' + {0} - port {1} - {2} master(s), {3} slave(s) - capacity: {4}, load: {5}'.format(
_mds_service.number, _mds_service.service.ports[0], masters, slaves, capacity, _load
))
output += ['',
'Press ^C to exit',
'']
print '\x1b[2J\x1b[H' + '\n'.join(output)
time.sleep(1)
except KeyboardInterrupt:
pass
|
|
#!/usr/bin/env python
"""
NAME: mprun.py
==============
DESCRIPTION
===========
Take a CMD that requires one INPUT and one OUTPUT file, and run
the CMD on a set of files via multiple processes simultaneously.
INSTALLATION
============
1. Download mprun.py
2. Run
USAGE
=====
python mprun.py -p 8 "cat {{INPUT}} | wc -l > temp/{{OUTPUT}}" *.txt
{{INPUT}}
Will be replaced with the files supplied one at a time to create the pool of jobs.
{{OUTPUT}}
Will be the *basename* of the {{INPUT}}-file with an added ".out"-ending.
VERSION HISTORY
===============
0.1.4 2016/05/01 pylint and pep8 cleaned
0.1.3 2016/02/18 Did not call aResults.get(), which led to premature end of program
0.1.2 2016/02/17 Better WARNINGS; stdout and stderr now possible to capture
0.1.1 2016/02/17 Some improvements.
0.1.0 2016/02/17 Initial version.
LICENCE
=======
See supplied LICENCE file.
2016, copyright Sebastian Schmeier ([email protected]), http://sschmeier.com
"""
from timeit import default_timer as timer
from multiprocessing import Pool
import sys
import os
import os.path
import argparse
import time
import subprocess
import re
__version__ = '0.1.4'
__date__ = '2016/05/01'
__email__ = '[email protected]'
__author__ = 'Sebastian Schmeier'
def parse_cmdline():
""" Parse command-line args. """
## parse cmd-line -----------------------------------------------------------
description = 'Run a command on a set of input files via multiple processes.'
version = 'version %s, date %s' % (__version__, __date__)
epilog = 'Copyright %s (%s)' % (__author__, __email__)
parser = argparse.ArgumentParser(description=description, epilog=epilog)
parser.add_argument('--version',
action='version',
version='%s' % (version))
parser.add_argument(
'command',
metavar='CMD',
type=str,
help=
'Command to execute on every {{INPUT}} file. Should contain one '+\
'"{{INPUT}}" and one optional "{{OUTPUT}}" placeholder descriptor in '+\
'the CMD, which are substituted with the filenames supplied, e.g. '+\
'"cat {{INPUT}} | wc -l > temp/{{OUTPUT}}"')
parser.add_argument('files_list',
nargs='+',
metavar='FILE',
type=str,
help='Files to use as {{INPUT}}.')
parser.add_argument(
'--stderr',
type=str,
metavar='PATH',
dest='error_path',
default=None,
help=
'Create a separate error file for each job in the directory at PATH.'+\
' [default: Do not create any error-files, stderr->dev/null]')
parser.add_argument(
'--stdout',
type=str,
metavar='PATH',
dest='stdout_path',
default=None,
help=
'Create a separate stdout-file for each job in the directory at PATH.'+\
' [default: Do not create any stdout-files, stdout->dev/null]')
parser.add_argument(
'--dry',
action='store_true',
dest='do_dryrun',
default=False,
help=
'Only print created commands without running them. [default: False]')
group1 = parser.add_argument_group('Multithreading', 'optional arguments:')
group1.add_argument(
'-p',
'--processes',
metavar='INT',
type=int,
dest='process_num',
default=2,
help=
'Number of sub-processes (workers) to use. It usually makes no sense to'+\
' use more processes than CPUs/cores available. [default: 2]')
group1.add_argument(
'--no-pb',
action='store_true',
dest='hide_progress',
default=False,
help=
'Turn the progress-bar off. A progress-bar will force a "chunksize"'+\
' of 1 in threading. This might slow things down for very large job'+\
' numbers, but allows for a realistic progress-bar. [default: Show'+\
' progress-bar -> chunksize = 1]')
args = parser.parse_args()
return args, parser
def run_command(args):
"""
THIS IS THE ACTUAL WORKHORSE FUNCTION THAT HAS TO BE EXECUTED MULTIPLE TIMES.
This function will be distributed to the processes as requested.
# do stuff
res = ...
return (args, res)
"""
command = args[1] # command to execute
err = args[2] # stderr file
out = args[3] # stdout file
if err:
stderr_filehandle = open(err, 'w')
else:
# standard err to /dev/null
stderr_filehandle = open(os.devnull, 'w')
if out:
stdout_filehandle = open(out, 'w')
else:
# standard out to /dev/null
stdout_filehandle = open(os.devnull, 'w')
returncode = subprocess.call(command,
shell=True,
stdout=stdout_filehandle,
stderr=stderr_filehandle)
stdout_filehandle.close()
stderr_filehandle.close()
# TEST:
# check returncode for non-zero status
if returncode != 0:
sys.stderr.write(
'[mprun WARNING]: *** Non-zero exit codes of child process'+\
' encountered. Better check with --stderr. ***\n')
return (args, returncode)
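# Minimal usage sketch (paths and command are hypothetical): a job tuple holds
# (job number, shell command, stderr file or None, stdout file or None), and the
# function hands back the same tuple together with the command's exit status:
#     run_command((1, 'cat /data/a.txt | wc -l > temp/a.txt.out',
#                  '/tmp/err/a.txt.stderr', '/tmp/out/a.txt.stdout'))
#     -> ((1, 'cat /data/a.txt | wc -l > temp/a.txt.out', ...), 0)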
def main():
""" MAIN """
args, parser = parse_cmdline()
# TEST:
# Supplied file list not empty
if len(args.files_list) < 1:
parser.error('You need to supply at least one file. EXIT.')
files_list = []
for filename in args.files_list:
filename_path = os.path.abspath(os.path.expanduser(filename))
# TEST:
# Test that file exists
if os.path.isfile(filename_path):
files_list.append(filename_path)
else:
parser.error('Input-file "%s" not found. EXIT.' % (filename))
# Check that the CMD contains only one occurrence of {{INPUT}} and {{OUTPUT}}
command = args.command
res1 = re.findall('{{INPUT}}', command)
res2 = re.findall('{{OUTPUT}}', command)
# TEST:
# Test that {{INPUT}} is given as it is required
if len(res1) != 1:
parser.error(
'CMD should contain exactly one occurrence of an {{INPUT}} placeholder. EXIT.')
# this is optional, give warning
if len(res2) == 0:
sys.stderr.write(
'[mprun WARNING]: *** CMD does not contain a {{OUTPUT}} placeholder. ***\n')
# TEST:
# can not be more than one
elif len(res2) > 1:
parser.error(
'CMD should contain at most one occurrence of an {{OUTPUT}} placeholder. EXIT.')
# Stderr-file path
error_path = None
if args.error_path:
# TEST:
# Test if stderr-path exists
if not os.path.isdir(args.error_path):
sys.stderr.write(
'[mprun WARNING]: *** The stderr-path "%s" does not exist.'+\
' Will be ignored and stderr -> dev/null ***\n'
% args.error_path)
else:
error_path = os.path.abspath(os.path.expanduser(args.error_path))
# Stdout-file path
stdout_path = None
if args.stdout_path:
# TEST:
# Test if stdout-path exists
if not os.path.isdir(args.stdout_path):
sys.stderr.write(
'[mprun WARNING]: *** The stdout-path "%s" does not exist.'+\
' Will be ignored and stdout -> dev/null. ***\n'
% args.stdout_path)
else:
stdout_path = os.path.abspath(os.path.expanduser(args.stdout_path))
# ------------------------------------------------------
# THREADING
# ------------------------------------------------------
# get number of subprocesses to use
process_num = args.process_num
# TEST:
# Number of processes cannot be smaller than 1.
if process_num < 1:
parser.error('-p has to be > 0: EXIT.')
# FILL ARRAY WITH PARAMETER SETS TO PROCESS
# this array contains all jobs that have to be run
job_list = []
job_num = 1
# e.g. create jobs based on supplied command+files, here one file = one jobs
for filename in files_list:
# Create the command to execute
command2 = command.replace('{{INPUT}}', filename)
command2 = command2.replace('{{OUTPUT}}',
os.path.basename(filename) + '.out')
# create error-filename
err = None
if error_path:
# create error-file path
err = os.path.join(error_path,
'%s.stderr' % (os.path.basename(filename)))
out = None
if stdout_path:
out = os.path.join(stdout_path,
'%s.stdout' % (os.path.basename(filename)))
job_list.append((job_num, command2, err, out))
job_num += 1
# Number of total jobs
jobs_total = len(job_list)
out = '[mprun OK]: #JOBS TO RUN: %i | #CONCURRENT PROCESSES TO USE: %i\n'
sys.stdout.write(out % (jobs_total, process_num))
# Dry run?
if args.do_dryrun:
sys.stdout.write('[mprun WARNING]: *** DRY RUN: NOT PROCESSING ***\n')
for row in job_list:
sys.stdout.write('%s\n' % row[1])
return
# Timing
start_time = timer() # very crude
# create pool of workers ---------------------
pool = Pool(processes=process_num)
# No progress-bar requested.
if args.hide_progress:
results = pool.map_async(run_command, job_list)
else:
# "chunksize" usually only makes a noticeable performance
# difference for very large iterables
# Here, I set it to one to get the progress bar working nicely.
# Otherwise it will not give the correct number of processes left
# but the chunksize number instead.
chunksize = 1
results = pool.map_async(run_command, job_list, chunksize=chunksize)
# No more work to add to pool
pool.close()
# Progress-bar
if not args.hide_progress:
# Progress bar
#==============================
# This can be changed to make progress-bar bigger or smaller
progress_bar_length = 50
#==============================
while not results.ready():
jobs_not_done = results._number_left
jobs_done = jobs_total - jobs_not_done
bar_done = jobs_done * progress_bar_length // jobs_total  # integer division keeps the bar width an int (Python 2 and 3)
bar_str = ('=' * bar_done).ljust(progress_bar_length)
percent = int(jobs_done * 100 / jobs_total)
sys.stdout.write("[mprun OK]: [%s] %s%%\r" \
%(bar_str, str(percent).rjust(3)))
sys.stdout.flush()
time.sleep(0.1) # wait a bit: we check again every 0.1 seconds
# Finish the progress bar
bar_str = '=' * progress_bar_length
sys.stdout.write("[mprun OK]: [%s] 100%%\r\n" % (bar_str))
# map_async does not produce a real result here, only exit/return-codes;
# however, .get() must be called, otherwise the program does not wait for
# all processes to finish
results = results.get()
# --------------------------------------------
end_time = timer()
# Print the timing
sys.stdout.write('[mprun OK]: RUNTIME(s): %.4f | AVG/JOB: %.4f\n' \
%(end_time - start_time, (end_time - start_time)/jobs_total))
# collect all error return-codes
returncode_list = [returntuple[1] for returntuple in results]
if max(returncode_list) != 0:
sys.stdout.write(
'[mprun WARNING]: END OF PROGRAM. Non-zero error returncodes encountered\n')
else:
sys.stdout.write('[mprun OK]: END OF PROGRAM.\n')
return
if __name__ == '__main__':
sys.exit(main())
|
|
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from Cryptodome.Cipher import AES
import sys
import struct
import hashlib
import binascii
from .ecWrapper import PrivateKey
from ecpy.curves import Curve
import os
LOAD_SEGMENT_CHUNK_HEADER_LENGTH = 3
MIN_PADDING_LENGTH = 1
SCP_MAC_LENGTH = 0xE
BOLOS_TAG_APPNAME = 0x01
BOLOS_TAG_APPVERSION = 0x02
BOLOS_TAG_ICON = 0x03
BOLOS_TAG_DERIVEPATH = 0x04
BOLOS_TAG_DATASIZE = 0x05
BOLOS_TAG_DEPENDENCY = 0x06
def string_to_bytes(x):
return bytes(x, 'ascii')
def encodelv(v):
l = len(v)
s = b""
if l < 128:
s += struct.pack(">B", l)
elif l < 256:
s += struct.pack(">B", 0x81)
s += struct.pack(">B", l)
elif l < 65536:
s += struct.pack(">B", 0x82)
s += struct.pack(">H", l)
else:
raise Exception("Unimplemented LV encoding")
s += v
return s
def encodetlv(t, v):
l = len(v)
s = struct.pack(">B", t)
if l < 128:
s += struct.pack(">B", l)
elif l < 256:
s += struct.pack(">B", 0x81)
s += struct.pack(">B", l)
elif l < 65536:
s += struct.pack(">B", 0x82)
s += struct.pack(">H", l)
else:
raise Exception("Unimplemented TLV encoding")
s += v
return s
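# Illustrative sketch (tag/value chosen for the example): encoding a short value
# uses the one-byte short-form length, so
#     encodetlv(BOLOS_TAG_APPNAME, b'MyApp') == b'\x01\x05MyApp'
# Values of 128..255 bytes get the 0x81 long-form prefix before the length byte,
# and values up to 65535 bytes get 0x82 followed by a big-endian 2-byte length.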
def str2bool(v):
if v is not None:
return v.lower() in ("yes", "true", "t", "1")
return False
SCP_DEBUG = str2bool(os.getenv("SCP_DEBUG"))
class HexLoader:
def scp_derive_key(self, ecdh_secret, keyindex):
retry = 0
# di = sha256(i || retrycounter || ecdh secret)
while True:
sha256 = hashlib.new('sha256')
sha256.update(struct.pack(">IB", keyindex, retry))
sha256.update(ecdh_secret)
# compare di with order
CURVE_SECP256K1 = Curve.get_curve('secp256k1')
if int.from_bytes(sha256.digest(), 'big') < CURVE_SECP256K1.order:
break
#regenerate a new di satisfying order upper bound
retry+=1
# Pi = di*G
privkey = PrivateKey(bytes(sha256.digest()))
pubkey = bytearray(privkey.pubkey.serialize(compressed=False))
# ki = sha256(Pi)
sha256 = hashlib.new('sha256')
sha256.update(pubkey)
#print ("Key " + str (keyindex) + ": " + sha256.hexdigest())
return sha256.digest()
def __init__(self, card, cla=0xF0, secure=False, mutauth_result=None, relative=True, cleardata_block_len=None):
self.card = card
self.cla = cla
self.secure = secure
self.createappParams = None
# legacy insecure SCP (pre nanos-1.4, pre blue-2.1)
self.max_mtu = 0xFE
if not self.card is None:
self.max_mtu = min(self.max_mtu, self.card.apduMaxDataSize())
self.scpVersion = 2
self.key = mutauth_result
self.iv = b'\x00' * 16
self.relative = relative
#store the aligned block len to be transported if requested
self.cleardata_block_len=cleardata_block_len
if not (self.cleardata_block_len is None):
if not self.card is None:
self.cleardata_block_len = min(self.cleardata_block_len, self.card.apduMaxDataSize())
# try:
if type(mutauth_result) is dict and 'ecdh_secret' in mutauth_result:
self.scp_enc_key = self.scp_derive_key(mutauth_result['ecdh_secret'], 0)[0:16]
self.scp_enc_iv = b"\x00" * 16
self.scp_mac_key = self.scp_derive_key(mutauth_result['ecdh_secret'], 1)[0:16]
self.scp_mac_iv = b"\x00" * 16
self.scpVersion = 3
self.max_mtu = 0xFE
if not self.card is None:
self.max_mtu = min(self.max_mtu, self.card.apduMaxDataSize()&0xF0)
def crc16(self, data):
TABLE_CRC16_CCITT = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
]
crc = 0xFFFF
for i in range(0, len(data)):
b = data[i] & 0xff
b = (b ^ ((crc >> 8) & 0xff)) & 0xff
crc = (TABLE_CRC16_CCITT[b] ^ (crc << 8)) & 0xffff
return crc
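# Sanity-check sketch: with the 0xFFFF initial value and the table-driven
# CCITT polynomial above this matches CRC-16/CCITT-FALSE, e.g.
#     crc16(b'123456789') == 0x29B1  (the standard check value for that variant)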
def exchange(self, cla, ins, p1, p2, data):
#wrap
data = self.scpWrap(data)
apdu = bytearray([cla, ins, p1, p2, len(data)]) + bytearray(data)
if self.card == None:
print("%s" % binascii.hexlify(apdu))
else:
# unwrap after exchanged
return self.scpUnwrap(bytes(self.card.exchange(apdu)))
def scpWrap(self, data):
if not self.secure or data is None or len(data) == 0:
return data
if self.scpVersion == 3:
if SCP_DEBUG:
print(binascii.hexlify(data))
# ENC
paddedData = data + b'\x80'
while (len(paddedData) % 16) != 0:
paddedData += b'\x00'
if SCP_DEBUG:
print(binascii.hexlify(paddedData))
cipher = AES.new(self.scp_enc_key, AES.MODE_CBC, self.scp_enc_iv)
encryptedData = cipher.encrypt(paddedData)
self.scp_enc_iv = encryptedData[-16:]
if SCP_DEBUG:
print(binascii.hexlify(encryptedData))
# MAC
cipher = AES.new(self.scp_mac_key, AES.MODE_CBC, self.scp_mac_iv)
macData = cipher.encrypt(encryptedData)
self.scp_mac_iv = macData[-16:]
# only append part of the mac
encryptedData += self.scp_mac_iv[-SCP_MAC_LENGTH:]
if SCP_DEBUG:
print(binascii.hexlify(encryptedData))
else:
paddedData = data + b'\x80'
while (len(paddedData) % 16) != 0:
paddedData += b'\x00'
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
if SCP_DEBUG:
print("wrap_old: "+binascii.hexlify(paddedData))
encryptedData = cipher.encrypt(paddedData)
self.iv = encryptedData[-16:]
#print (">>")
return encryptedData
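# Padding note (sketch, not part of the original code): scpWrap applies
# ISO/IEC 7816-4 style padding -- a single 0x80 byte followed by zero bytes up
# to the 16-byte AES block size -- so a 14-byte payload becomes
# payload + b'\x80' + b'\x00' (16 bytes) before CBC encryption; scpUnwrap below
# strips it again by scanning backwards for the 0x80 marker.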
def scpUnwrap(self, data):
if not self.secure or data is None or len(data) == 0 or len(data) == 2:
return data
padding_char = 0x80
if self.scpVersion == 3:
if SCP_DEBUG:
print(binascii.hexlify(data))
# MAC
cipher = AES.new(self.scp_mac_key, AES.MODE_CBC, self.scp_mac_iv)
macData = cipher.encrypt(bytes(data[0:-SCP_MAC_LENGTH]))
self.scp_mac_iv = macData[-16:]
if self.scp_mac_iv[-SCP_MAC_LENGTH:] != data[-SCP_MAC_LENGTH:] :
raise BaseException("Invalid SCP MAC")
# consume mac
data = data[0:-SCP_MAC_LENGTH]
if SCP_DEBUG:
print(binascii.hexlify(data))
# ENC
cipher = AES.new(self.scp_enc_key, AES.MODE_CBC, self.scp_enc_iv)
self.scp_enc_iv = bytes(data[-16:])
data = cipher.decrypt(bytes(data))
l = len(data) - 1
while data[l] != padding_char:
l-=1
if l == -1:
raise BaseException("Invalid SCP ENC padding")
data = data[0:l]
decryptedData = data
if SCP_DEBUG:
print(binascii.hexlify(data))
else:
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
decryptedData = cipher.decrypt(data)
if SCP_DEBUG:
print("unwrap_old: "+binascii.hexlify(decryptedData))
l = len(decryptedData) - 1
while decryptedData[l] != padding_char:
l-=1
if l == -1:
raise BaseException("Invalid SCP ENC padding")
decryptedData = decryptedData[0:l]
self.iv = data[-16:]
#print ("<<")
return decryptedData
def selectSegment(self, baseAddress):
data = b'\x05' + struct.pack('>I', baseAddress)
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def loadSegmentChunk(self, offset, chunk):
data = b'\x06' + struct.pack('>H', offset) + chunk
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def flushSegment(self):
data = b'\x07'
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def crcSegment(self, offsetSegment, lengthSegment, crcExpected):
data = b'\x08' + struct.pack('>H', offsetSegment) + struct.pack('>I', lengthSegment) + struct.pack('>H', crcExpected)
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def validateTargetId(self, targetId):
data = struct.pack('>I', targetId)
self.exchange(self.cla, 0x04, 0x00, 0x00, data)
def boot(self, bootadr, signature=None):
# Force jump into Thumb mode
bootadr |= 1
data = b'\x09' + struct.pack('>I', bootadr)
if signature != None:
data += struct.pack('>B', len(signature)) + signature
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def commit(self, signature=None):
data = b'\x09'
if signature != None:
data += struct.pack('>B', len(signature)) + signature
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def createAppNoInstallParams(self, appflags, applength, appname, icon=None, path=None, iconOffset=None, iconSize=None, appversion=None):
data = b'\x0B' + struct.pack('>I', applength) + struct.pack('>I', appflags) + struct.pack('>B', len(appname)) + appname
if iconOffset is None:
if not (icon is None):
data += struct.pack('>B', len(icon)) + icon
else:
data += b'\x00'
if not (path is None):
data += struct.pack('>B', len(path)) + path
else:
data += b'\x00'
if not iconOffset is None:
data += struct.pack('>I', iconOffset) + struct.pack('>H', iconSize)
if not appversion is None:
data += struct.pack('>B', len(appversion)) + appversion
# in previous versions, the create-app params are not yet part of the application hash
self.createappParams = None #data[1:]
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def createApp(self, code_length, data_length=0, install_params_length=0, flags=0, bootOffset=1):
#keep the create app parameters to be included in the load app hash
self.createappParams = struct.pack('>IIIII', code_length, data_length, install_params_length, flags, bootOffset)
data = b'\x0B' + self.createappParams
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def deleteApp(self, appname):
data = b'\x0C' + struct.pack('>B',len(appname)) + appname
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def deleteAppByHash(self, appfullhash):
if len(appfullhash) != 32:
raise BaseException("Invalid hash format, sha256 expected")
data = b'\x15' + appfullhash
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def getVersion(self):
data = b'\x10'
response = self.exchange(self.cla, 0x00, 0x00, 0x00, data)
result = {}
offset = 0
result['targetId'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
result['osVersion'] = response[offset + 1 : offset + 1 + response[offset]].decode('utf-8')
offset += 1 + response[offset]
offset += 1
result['flags'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
result['mcuVersion'] = response[offset + 1 : offset + 1 + response[offset] - 1].decode('utf-8')
offset += 1 + response[offset]
if offset < len(response):
result['mcuHash'] = response[offset : offset + 32]
return result
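# Illustrative sketch of the parsed result (field values are hypothetical):
# {'targetId': 0x31100002, 'osVersion': '1.5.5', 'flags': 0,
#  'mcuVersion': '1.7', 'mcuHash': b'<32 bytes>'}
# 'mcuHash' is only present when the device appends it to the response.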
def listApp(self, restart=True):
if self.secure:
if restart:
data = b'\x0E'
else:
data = b'\x0F'
response = self.exchange(self.cla, 0x00, 0x00, 0x00, data)
else:
if restart:
response = self.exchange(self.cla, 0xDE, 0x00, 0x00, b'')
else:
response = self.exchange(self.cla, 0xDF, 0x00, 0x00, b'')
#print binascii.hexlify(response[0])
result = []
offset = 0
if len(response) > 0:
if response[0] != 0x01:
# support old format
while offset != len(response):
item = {}
offset += 1
item['name'] = response[offset + 1 : offset + 1 + response[offset]].decode('utf-8')
offset += 1 + response[offset]
item['flags'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
item['hash'] = response[offset : offset + 32]
offset += 32
result.append(item)
else:
offset += 1
while offset != len(response):
item = {}
#skip the current entry's size
offset += 1
item['flags'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
item['hash_code_data'] = response[offset : offset + 32]
offset += 32
item['hash'] = response[offset : offset + 32]
offset += 32
item['name'] = response[offset + 1 : offset + 1 + response[offset]].decode('utf-8')
offset += 1 + response[offset]
result.append(item)
return result
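# Illustrative sketch of one parsed entry (values are hypothetical):
# {'name': 'MyApp', 'flags': 0x00000000,
#  'hash_code_data': b'<32 bytes>',   # only present in the new list format
#  'hash': b'<32 bytes>'}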
def getMemInfo(self):
response = self.exchange(self.cla, 0x00, 0x00, 0x00, b'\x11')
item = {}
offset = 0
item['systemSize'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
item['applicationsSize'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
item['freeSize'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
item['usedAppSlots'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
offset += 4
item['totalAppSlots'] = (response[offset] << 24) | (response[offset + 1] << 16) | (response[offset + 2] << 8) | response[offset + 3]
return item
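# Illustrative sketch of the parsed memory info (numbers are hypothetical):
# {'systemSize': 323584, 'applicationsSize': 3915776, 'freeSize': 1536000,
#  'usedAppSlots': 4, 'totalAppSlots': 30}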
def load(self, erase_u8, max_length_per_apdu, hexFile, reverse=False, doCRC=True, targetId=None, targetVersion=None):
if max_length_per_apdu > self.max_mtu:
max_length_per_apdu = self.max_mtu
initialAddress = 0
if self.relative:
initialAddress = hexFile.minAddr()
sha256 = hashlib.new('sha256')
# start by hashing the create app params to ensure a complete app signature
if targetId != None and (targetId&0xF) > 3:
if targetVersion == None:
print("Target version is not set, application hash will not match!")
targetVersion=""
# encode targetId as a 4-byte big-endian integer, followed by the version string bytes
sha256.update(struct.pack('>I', targetId) + string_to_bytes(targetVersion))
if self.createappParams:
sha256.update(self.createappParams)
areas = hexFile.getAreas()
if reverse:
areas = reversed(hexFile.getAreas())
for area in areas:
startAddress = area.getStart() - initialAddress
data = area.getData()
self.selectSegment(startAddress)
if len(data) == 0:
continue
if len(data) > 0x10000:
raise Exception("Invalid data size for loader")
crc = self.crc16(bytearray(data))
offset = 0
length = len(data)
if reverse:
offset = length
while length > 0:
if length > max_length_per_apdu - LOAD_SEGMENT_CHUNK_HEADER_LENGTH - MIN_PADDING_LENGTH - SCP_MAC_LENGTH:
chunkLen = max_length_per_apdu - LOAD_SEGMENT_CHUNK_HEADER_LENGTH - MIN_PADDING_LENGTH - SCP_MAC_LENGTH
if (chunkLen%16) != 0:
chunkLen -= (chunkLen%16)
else:
chunkLen = length
if self.cleardata_block_len and chunkLen%self.cleardata_block_len:
if chunkLen < self.cleardata_block_len:
raise Exception("Cannot transport not block aligned data with fixed block len")
chunkLen -= chunkLen%self.cleardata_block_len
# pad with 00's when not complete block and performing NENC
if reverse:
chunk = data[offset-chunkLen : offset]
self.loadSegmentChunk(offset-chunkLen, bytes(chunk))
else:
chunk = data[offset : offset + chunkLen]
sha256.update(chunk)
self.loadSegmentChunk(offset, bytes(chunk))
if reverse:
offset -= chunkLen
else:
offset += chunkLen
length -= chunkLen
self.flushSegment()
if doCRC:
self.crcSegment(0, len(data), crc)
return sha256.hexdigest()
def run(self, bootoffset=1, signature=None):
self.boot(bootoffset, signature)
def resetCustomCA(self):
data = b'\x13'
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def setupCustomCA(self, name, public):
data = b'\x12' + struct.pack('>B', len(name)) + name.encode() + struct.pack('>B', len(public)) + public
self.exchange(self.cla, 0x00, 0x00, 0x00, data)
def runApp(self, name):
data = name
self.exchange(self.cla, 0xD8, 0x00, 0x00, data)
|
|
import logging
from .document import Documents
from .edge import Edges
from .index import Index
from .exceptions import InvalidCollectionId, CollectionIdAlreadyExist, \
InvalidCollection
from .aql import AQLQuery
logger = logging.getLogger(__name__)
__all__ = ("Collection", "Collections")
class Collections(object):
"""connection) for Collections"""
COLLECTION_DOCUMENTS, COLLECTION_EDGES = 2, 3
COLLECTIONS_LIST_URL = "/_api/collection"
def __init__(self, connection):
self.connection = connection
self.collections = {}
def __call__(self, *args, **kwargs):
"""Return list of collections within current database"""
response = self.connection.get(self.COLLECTIONS_LIST_URL)
names = [c.get("name") for c in response.get("collections", [])]
return names
def __getattr__(self, name):
"""
Accessible as property by default.
"""
return self._collection(name)
def __getitem__(self, name):
"""
In case a property name is already used internally by ``Collections``
it's possible to use this dict-like interface instead; for example
``.database`` is used internally as a link to the database instance,
but feel free to use the dict-like interface to
access a collection named ``database``: ``voca["database"]``
"""
return self._collection(name)
def _collection(self, name):
"""Lazy init of collection"""
if name in self.collections:
return self.collections.get(name)
self.collections[name] = self.collections.get(
name,
Collection(connection=self.connection, name=name))
return self.collections.get(name)
@property
def database(self):
return self.connection.database
def rename_collection(self, collection, new_name):
"""
Private method which should be used by ``Collection``
instance itself.
"""
if collection is None or \
not issubclass(collection.__class__, Collection):
raise InvalidCollection(
"Object '{0}' is not subclass of "
"Collection or is None".format(repr(collection))
)
if new_name in self.collections:
raise CollectionIdAlreadyExist(
"Collection with name '{0}' already exist".format(new_name)
)
if collection.cid not in self.collections:
self.collections[collection.cid] = collection
old_name = collection.cid
collection.name = new_name
self.collections[new_name] = collection
del self.collections[old_name]
def __repr__(self):
return "<Collections proxy for {0}>".format(self.connection)
class Collection(object):
"""Represent single collection with certain name"""
TYPE_DOCUMENT, TYPE_EDGE = 2, 3
COLLECTION_DETAILS_PATH = "/_api/collection/{0}/{1}"
CREATE_COLLECTION_PATH = "/_api/collection"
DELETE_COLLECTION_PATH = "/_api/collection/{0}"
LOAD_COLLECTION_PATH = "/_api/collection/{0}/load"
UNLOAD_COLLECTION_PATH = "/_api/collection/{0}/unload"
TRUNCATE_COLLECTION_PATH = "/_api/collection/{0}/truncate"
PROPERTIES_COLLECTION_PATH = "/_api/collection/{0}/properties"
RENAME_COLLECTION_PATH = "/_api/collection/{0}/rename"
INFO_ALLOWED_RESOURCES = ["count", "figures"]
def __init__(self, connection=None, name=None, id=None,
createCollection=True, response=None):
self.connection = connection
self.name = name
self.id = id
self.response = response
self.createCollection = createCollection
self.state_fields = ("connection", "name", "id", "createCollection")
self._documents = None
self._edges = None
self._index = None
def __repr__(self):
return "<Collection '{0}' for {1}>".format(self.name, self.connection)
def __eq__(self, obj):
return self.name == obj.name
@property
def cid(self):
"""
Get collection name
"""
return self.name
@property
def query(self):
"""
.. :py:attr::
Create Query Builder for current collection.
.. testcode::
c.test.create()
c.test.docs.create({"name": "sample"})
assert len(c.test.query.execute()), 1
"""
return AQLQuery(
connection=self.connection,
collection=self.cid)
@property
def index(self):
"""
Get **Indexes** related to Collection
"""
if not self._index:
self._index = Index(self)
return self._index
@property
def documents(self):
"""
Get :ref:`documents` related to Collection.
Technically returns an instance of the :ref:`documents proxy` object
"""
if self._documents is None:
self._documents = Documents(collection=self)
return self._documents
@property
def edges(self):
"""
Get :ref:`edges` related to Collection.
Technically returns an instance of the :ref:`edges proxy` object.
If this method is used to query edges (or called with no arguments)
it may generate exceptions:
* ``DocumentIncompatibleDataType``
In case you have not provided the ``VERTEX`` of the Edge,
which should be an instance or subclass of :ref:`document`
More about :term:`DocumentIncompatibleDataType`
"""
if self._edges is None:
self._edges = Edges(collection=self)
return self._edges
@property
def docs(self):
"""
Shortcut for `documents` property
"""
return self.documents
def info(self, resource=""):
"""
Get information about collection.
Information returns **AS IS** as
raw ``Response`` data
"""
if resource not in self.INFO_ALLOWED_RESOURCES:
resource = ""
return self.connection.get(
self.COLLECTION_DETAILS_PATH.format(self.name, resource)
).data
def create_edges(self, *args, **kwargs):
"""
Create new **Edges Collection** - special
kind of collections to keep information about edges.
"""
kwargs.update({"type": Collections.COLLECTION_EDGES})
return self.create(*args, **kwargs)
def create(self, waitForSync=False,
type=Collections.COLLECTION_DOCUMENTS, **kwargs):
"""
Create new **Collection**. You can specify
``waitForSync`` argument (boolean) to wait until
the collection has been synced to disk
"""
params = {"waitForSync": waitForSync,
"name": self.name,
"type": type}
params.update(kwargs)
response = self.connection.post(
self.CREATE_COLLECTION_PATH,
data=params)
if response.status == 200:
# TODO: update ID/revision for this collection
return self
return None
def count(self):
"""
Get count of all documents in collection
"""
response = self.info(resource="count")
return response.get("count", 0)
def __len__(self):
"""
Exactly the same as ``count``, but it can be
used in a more convenient way
.. testcode::
c.test.create()
assert c.test.count() == len(c.test)
"""
return self.count()
def load(self):
"""
Load collection into memory
"""
return self.connection.put(
self.LOAD_COLLECTION_PATH.format(self.name)
)
def unload(self):
"""
Unload collection from memory
"""
return self.connection.put(
self.UNLOAD_COLLECTION_PATH.format(self.name)
)
def delete(self):
"""
Delete collection
"""
response = self.connection.delete(
self.DELETE_COLLECTION_PATH.format(self.name)
)
if response.status == 200:
return True
return False
def rename(self, name=None):
"""
Change name of Collection to ``name``.
Returns ``True`` on success and ``False`` on
error, respectively.
This method may raise exceptions:
* ``InvalidCollection``
This one may be generated only in case of a
very low-level instantiation of Collection,
when the base collection proxy isn't provided.
More about :term:`InvalidCollection`
* ``CollectionIdAlreadyExist``
If a Collection with the new name already exists,
this exception will be generated.
More about :term:`CollectionIdAlreadyExist`
* ``InvalidCollectionId``
If the Collection is instantiated but the name
is not defined or not set.
More about :term:`InvalidCollectionId`
Sample usage:
.. testcode::
c.test.create()
c.test.rename("test2")
assert "test2" in c()
"""
if name is None or name == "":
raise InvalidCollectionId(
"Please, provide correct collection name")
response = self.connection.post(
self.RENAME_COLLECTION_PATH.format(self.name),
data={"name": name})
if not response.is_error:
# pass new name to connection
# change current id of the collection
self.connection.collection.rename_collection(self, name)
return True
return False
def properties(self, **props):
"""
Set or get collection properties.
If ``**props`` is empty, i.e. no keyword arguments
are specified, then this method returns the properties of
the current **Collection**.
Otherwise the method will set or update the properties
using the values from ``**props``
"""
url = self.PROPERTIES_COLLECTION_PATH.format(self.name)
if not props:
return self.connection.get(url).data
# update fields which should be updated,
# keep old fields as is
origin = self.properties()
if isinstance(origin, dict):
origin.update(props)
return self.connection.put(url, data=origin).data
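# Illustrative usage sketch, reusing the ``c.test`` convention from the
# docstrings above (collection and connection are assumed to exist):
#     c.test.properties()                  # -> current properties as a dict
#     c.test.properties(waitForSync=True)  # merge into existing properties and PUT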
def truncate(self):
"""
Truncate current **Collection**
"""
return self.connection.put(
self.TRUNCATE_COLLECTION_PATH.format(self.name))
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NULicenseStatus(NURESTObject):
""" Represents a LicenseStatus in the VSD
Notes:
None
"""
__rest_name__ = "licensestatus"
__resource_name__ = "licensestatus"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a LicenseStatus instance
Notes:
You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> licensestatus = NULicenseStatus(id=u'xxxx-xxx-xxx-xxx', name=u'LicenseStatus')
>>> licensestatus = NULicenseStatus(data=my_dict)
"""
super(NULicenseStatus, self).__init__()
# Read/Write Attributes
self._accumulate_licenses_enabled = None
self._embedded_metadata = None
self._entity_scope = None
self._total_licensed_avrsgs_count = None
self._total_licensed_avrss_count = None
self._total_licensed_gateways_count = None
self._total_licensed_nics_count = None
self._total_licensed_nsgs_count = None
self._total_licensed_ovrss_count = None
self._total_licensed_used_avrsgs_count = None
self._total_licensed_used_avrss_count = None
self._total_licensed_used_nics_count = None
self._total_licensed_used_nsgs_count = None
self._total_licensed_used_ovrss_count = None
self._total_licensed_used_vdfgs_count = None
self._total_licensed_used_vdfs_count = None
self._total_licensed_used_vms_count = None
self._total_licensed_used_vrsgs_count = None
self._total_licensed_used_vrss_count = None
self._total_licensed_vdfgs_count = None
self._total_licensed_vdfs_count = None
self._total_licensed_vms_count = None
self._total_licensed_vrsgs_count = None
self._total_licensed_vrss_count = None
self._total_used_gateways_count = None
self._external_id = None
self.expose_attribute(local_name="accumulate_licenses_enabled", remote_name="accumulateLicensesEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="total_licensed_avrsgs_count", remote_name="totalLicensedAVRSGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_avrss_count", remote_name="totalLicensedAVRSsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_gateways_count", remote_name="totalLicensedGatewaysCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_nics_count", remote_name="totalLicensedNICsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_nsgs_count", remote_name="totalLicensedNSGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_ovrss_count", remote_name="totalLicensedOVRSsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_avrsgs_count", remote_name="totalLicensedUsedAVRSGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_avrss_count", remote_name="totalLicensedUsedAVRSsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_nics_count", remote_name="totalLicensedUsedNICsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_nsgs_count", remote_name="totalLicensedUsedNSGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_ovrss_count", remote_name="totalLicensedUsedOVRSsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_vdfgs_count", remote_name="totalLicensedUsedVDFGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_vdfs_count", remote_name="totalLicensedUsedVDFsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_vms_count", remote_name="totalLicensedUsedVMsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_vrsgs_count", remote_name="totalLicensedUsedVRSGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_used_vrss_count", remote_name="totalLicensedUsedVRSsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_vdfgs_count", remote_name="totalLicensedVDFGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_vdfs_count", remote_name="totalLicensedVDFsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_vms_count", remote_name="totalLicensedVMsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_vrsgs_count", remote_name="totalLicensedVRSGsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_licensed_vrss_count", remote_name="totalLicensedVRSsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="total_used_gateways_count", remote_name="totalUsedGatewaysCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def accumulate_licenses_enabled(self):
""" Get accumulate_licenses_enabled value.
Notes:
Whether the various VRS license flavours should be merged in one pool
This attribute is named `accumulateLicensesEnabled` in VSD API.
"""
return self._accumulate_licenses_enabled
@accumulate_licenses_enabled.setter
def accumulate_licenses_enabled(self, value):
""" Set accumulate_licenses_enabled value.
Notes:
Whether the various VRS license flavours should be merged in one pool
This attribute is named `accumulateLicensesEnabled` in VSD API.
"""
self._accumulate_licenses_enabled = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def total_licensed_avrsgs_count(self):
""" Get total_licensed_avrsgs_count value.
Notes:
Indicates total AVRSG count for all the licenses in the system
This attribute is named `totalLicensedAVRSGsCount` in VSD API.
"""
return self._total_licensed_avrsgs_count
@total_licensed_avrsgs_count.setter
def total_licensed_avrsgs_count(self, value):
""" Set total_licensed_avrsgs_count value.
Notes:
Indicates total AVRSG count for all the licenses in the system
This attribute is named `totalLicensedAVRSGsCount` in VSD API.
"""
self._total_licensed_avrsgs_count = value
@property
def total_licensed_avrss_count(self):
""" Get total_licensed_avrss_count value.
Notes:
Indicates total AVRS count for all the licenses in the system
This attribute is named `totalLicensedAVRSsCount` in VSD API.
"""
return self._total_licensed_avrss_count
@total_licensed_avrss_count.setter
def total_licensed_avrss_count(self, value):
""" Set total_licensed_avrss_count value.
Notes:
Indicates total AVRS count for all the licenses in the system
This attribute is named `totalLicensedAVRSsCount` in VSD API.
"""
self._total_licensed_avrss_count = value
@property
def total_licensed_gateways_count(self):
""" Get total_licensed_gateways_count value.
Notes:
Indicates total VRS+VRSG+VRSB licenses licensed in the system
This attribute is named `totalLicensedGatewaysCount` in VSD API.
"""
return self._total_licensed_gateways_count
@total_licensed_gateways_count.setter
def total_licensed_gateways_count(self, value):
""" Set total_licensed_gateways_count value.
Notes:
Indicates total VRS+VRSG+VRSB licenses licensed in the system
This attribute is named `totalLicensedGatewaysCount` in VSD API.
"""
self._total_licensed_gateways_count = value
@property
def total_licensed_nics_count(self):
""" Get total_licensed_nics_count value.
Notes:
Indicates total NIC count for all the licenses in the system
This attribute is named `totalLicensedNICsCount` in VSD API.
"""
return self._total_licensed_nics_count
@total_licensed_nics_count.setter
def total_licensed_nics_count(self, value):
""" Set total_licensed_nics_count value.
Notes:
Indicates total NIC count for all the licenses in the system
This attribute is named `totalLicensedNICsCount` in VSD API.
"""
self._total_licensed_nics_count = value
@property
def total_licensed_nsgs_count(self):
""" Get total_licensed_nsgs_count value.
Notes:
Indicates total NSG count for all the licenses in the system
This attribute is named `totalLicensedNSGsCount` in VSD API.
"""
return self._total_licensed_nsgs_count
@total_licensed_nsgs_count.setter
def total_licensed_nsgs_count(self, value):
""" Set total_licensed_nsgs_count value.
Notes:
Indicates total NSG count for all the licenses in the system
This attribute is named `totalLicensedNSGsCount` in VSD API.
"""
self._total_licensed_nsgs_count = value
@property
def total_licensed_ovrss_count(self):
""" Get total_licensed_ovrss_count value.
Notes:
Indicates total OVRSs count for all the licenses in the system.
This attribute is named `totalLicensedOVRSsCount` in VSD API.
"""
return self._total_licensed_ovrss_count
@total_licensed_ovrss_count.setter
def total_licensed_ovrss_count(self, value):
""" Set total_licensed_ovrss_count value.
Notes:
Indicates total OVRSs count for all the licenses in the system.
This attribute is named `totalLicensedOVRSsCount` in VSD API.
"""
self._total_licensed_ovrss_count = value
@property
def total_licensed_used_avrsgs_count(self):
""" Get total_licensed_used_avrsgs_count value.
Notes:
Indicates total used AVRSG count for all the licenses in the system
This attribute is named `totalLicensedUsedAVRSGsCount` in VSD API.
"""
return self._total_licensed_used_avrsgs_count
@total_licensed_used_avrsgs_count.setter
def total_licensed_used_avrsgs_count(self, value):
""" Set total_licensed_used_avrsgs_count value.
Notes:
Indicates total used AVRSG count for all the licenses in the system
This attribute is named `totalLicensedUsedAVRSGsCount` in VSD API.
"""
self._total_licensed_used_avrsgs_count = value
@property
def total_licensed_used_avrss_count(self):
""" Get total_licensed_used_avrss_count value.
Notes:
Indicates total used AVRS count for all the licenses in the system
This attribute is named `totalLicensedUsedAVRSsCount` in VSD API.
"""
return self._total_licensed_used_avrss_count
@total_licensed_used_avrss_count.setter
def total_licensed_used_avrss_count(self, value):
""" Set total_licensed_used_avrss_count value.
Notes:
Indicates total used AVRS count for all the licenses in the system
This attribute is named `totalLicensedUsedAVRSsCount` in VSD API.
"""
self._total_licensed_used_avrss_count = value
@property
def total_licensed_used_nics_count(self):
""" Get total_licensed_used_nics_count value.
Notes:
Indicates total used NIC count for all the licenses in the system
This attribute is named `totalLicensedUsedNICsCount` in VSD API.
"""
return self._total_licensed_used_nics_count
@total_licensed_used_nics_count.setter
def total_licensed_used_nics_count(self, value):
""" Set total_licensed_used_nics_count value.
Notes:
Indicates total used NIC count for all the licenses in the system
This attribute is named `totalLicensedUsedNICsCount` in VSD API.
"""
self._total_licensed_used_nics_count = value
@property
def total_licensed_used_nsgs_count(self):
""" Get total_licensed_used_nsgs_count value.
Notes:
Indicates total used NSG count for all the licenses in the system
This attribute is named `totalLicensedUsedNSGsCount` in VSD API.
"""
return self._total_licensed_used_nsgs_count
@total_licensed_used_nsgs_count.setter
def total_licensed_used_nsgs_count(self, value):
""" Set total_licensed_used_nsgs_count value.
Notes:
Indicates total used NSG count for all the licenses in the system
This attribute is named `totalLicensedUsedNSGsCount` in VSD API.
"""
self._total_licensed_used_nsgs_count = value
@property
def total_licensed_used_ovrss_count(self):
""" Get total_licensed_used_ovrss_count value.
Notes:
Indicates total used OVRSs count for all the licenses in the system.
This attribute is named `totalLicensedUsedOVRSsCount` in VSD API.
"""
return self._total_licensed_used_ovrss_count
@total_licensed_used_ovrss_count.setter
def total_licensed_used_ovrss_count(self, value):
""" Set total_licensed_used_ovrss_count value.
Notes:
Indicates total used OVRSs count for all the licenses in the system.
This attribute is named `totalLicensedUsedOVRSsCount` in VSD API.
"""
self._total_licensed_used_ovrss_count = value
@property
def total_licensed_used_vdfgs_count(self):
""" Get total_licensed_used_vdfgs_count value.
Notes:
Indicates total used VDFG count for all the licenses in the system.
This attribute is named `totalLicensedUsedVDFGsCount` in VSD API.
"""
return self._total_licensed_used_vdfgs_count
@total_licensed_used_vdfgs_count.setter
def total_licensed_used_vdfgs_count(self, value):
""" Set total_licensed_used_vdfgs_count value.
Notes:
Indicates total used VDFG count for all the licenses in the system.
This attribute is named `totalLicensedUsedVDFGsCount` in VSD API.
"""
self._total_licensed_used_vdfgs_count = value
@property
def total_licensed_used_vdfs_count(self):
""" Get total_licensed_used_vdfs_count value.
Notes:
Indicates total used VDF count for all the licenses in the system.
This attribute is named `totalLicensedUsedVDFsCount` in VSD API.
"""
return self._total_licensed_used_vdfs_count
@total_licensed_used_vdfs_count.setter
def total_licensed_used_vdfs_count(self, value):
""" Set total_licensed_used_vdfs_count value.
Notes:
Indicates total used VDF count for all the licenses in the system.
This attribute is named `totalLicensedUsedVDFsCount` in VSD API.
"""
self._total_licensed_used_vdfs_count = value
@property
def total_licensed_used_vms_count(self):
""" Get total_licensed_used_vms_count value.
Notes:
Indicates total used VM count for all the licenses in the system
This attribute is named `totalLicensedUsedVMsCount` in VSD API.
"""
return self._total_licensed_used_vms_count
@total_licensed_used_vms_count.setter
def total_licensed_used_vms_count(self, value):
""" Set total_licensed_used_vms_count value.
Notes:
Indicates total used VM count for all the licenses in the system
This attribute is named `totalLicensedUsedVMsCount` in VSD API.
"""
self._total_licensed_used_vms_count = value
@property
def total_licensed_used_vrsgs_count(self):
""" Get total_licensed_used_vrsgs_count value.
Notes:
Indicates total used VRSG count for all the licenses in the system
This attribute is named `totalLicensedUsedVRSGsCount` in VSD API.
"""
return self._total_licensed_used_vrsgs_count
@total_licensed_used_vrsgs_count.setter
def total_licensed_used_vrsgs_count(self, value):
""" Set total_licensed_used_vrsgs_count value.
Notes:
Indicates total used VRSG count for all the licenses in the system
This attribute is named `totalLicensedUsedVRSGsCount` in VSD API.
"""
self._total_licensed_used_vrsgs_count = value
@property
def total_licensed_used_vrss_count(self):
""" Get total_licensed_used_vrss_count value.
Notes:
Indicates total used VRS count for all the licenses in the system
This attribute is named `totalLicensedUsedVRSsCount` in VSD API.
"""
return self._total_licensed_used_vrss_count
@total_licensed_used_vrss_count.setter
def total_licensed_used_vrss_count(self, value):
""" Set total_licensed_used_vrss_count value.
Notes:
Indicates total used VRS count for all the licenses in the system
This attribute is named `totalLicensedUsedVRSsCount` in VSD API.
"""
self._total_licensed_used_vrss_count = value
@property
def total_licensed_vdfgs_count(self):
""" Get total_licensed_vdfgs_count value.
Notes:
Indicates total VDFG count for all the licenses in the system
This attribute is named `totalLicensedVDFGsCount` in VSD API.
"""
return self._total_licensed_vdfgs_count
@total_licensed_vdfgs_count.setter
def total_licensed_vdfgs_count(self, value):
""" Set total_licensed_vdfgs_count value.
Notes:
Indicates total VDFG count for all the licenses in the system
This attribute is named `totalLicensedVDFGsCount` in VSD API.
"""
self._total_licensed_vdfgs_count = value
@property
def total_licensed_vdfs_count(self):
""" Get total_licensed_vdfs_count value.
Notes:
Indicates total VDF count for all the licenses in the system
This attribute is named `totalLicensedVDFsCount` in VSD API.
"""
return self._total_licensed_vdfs_count
@total_licensed_vdfs_count.setter
def total_licensed_vdfs_count(self, value):
""" Set total_licensed_vdfs_count value.
Notes:
Indicates total VDF count for all the licenses in the system
This attribute is named `totalLicensedVDFsCount` in VSD API.
"""
self._total_licensed_vdfs_count = value
@property
def total_licensed_vms_count(self):
""" Get total_licensed_vms_count value.
Notes:
Indicates total VM count for all the licenses in the system
This attribute is named `totalLicensedVMsCount` in VSD API.
"""
return self._total_licensed_vms_count
@total_licensed_vms_count.setter
def total_licensed_vms_count(self, value):
""" Set total_licensed_vms_count value.
Notes:
Indicates total VM count for all the licenses in the system
This attribute is named `totalLicensedVMsCount` in VSD API.
"""
self._total_licensed_vms_count = value
@property
def total_licensed_vrsgs_count(self):
""" Get total_licensed_vrsgs_count value.
Notes:
Indicates total VRSG count for all the licenses in the system
This attribute is named `totalLicensedVRSGsCount` in VSD API.
"""
return self._total_licensed_vrsgs_count
@total_licensed_vrsgs_count.setter
def total_licensed_vrsgs_count(self, value):
""" Set total_licensed_vrsgs_count value.
Notes:
Indicates total VRSG count for all the licenses in the system
This attribute is named `totalLicensedVRSGsCount` in VSD API.
"""
self._total_licensed_vrsgs_count = value
@property
def total_licensed_vrss_count(self):
""" Get total_licensed_vrss_count value.
Notes:
Indicates total VRS count for all the licenses in the system
This attribute is named `totalLicensedVRSsCount` in VSD API.
"""
return self._total_licensed_vrss_count
@total_licensed_vrss_count.setter
def total_licensed_vrss_count(self, value):
""" Set total_licensed_vrss_count value.
Notes:
Indicates total VRS count for all the licenses in the system
This attribute is named `totalLicensedVRSsCount` in VSD API.
"""
self._total_licensed_vrss_count = value
@property
def total_used_gateways_count(self):
""" Get total_used_gateways_count value.
Notes:
Indicates total VRS+VRSG+VRSB+VDF+VDFG licenses used in the system
This attribute is named `totalUsedGatewaysCount` in VSD API.
"""
return self._total_used_gateways_count
@total_used_gateways_count.setter
def total_used_gateways_count(self, value):
""" Set total_used_gateways_count value.
Notes:
Indicates total VRS+VRSG+VRSB+VDF+VDFG licenses used in the system
This attribute is named `totalUsedGatewaysCount` in VSD API.
"""
self._total_used_gateways_count = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
|
# -*- coding: utf-8 -*-
"""
Contains all core validation functions.
"""
import os
import re
import socket
import logging
import tempfile
import traceback
import basedefs
from . import utils
from .setup_controller import Controller
from .exceptions import ParamValidationError
__all__ = ('ParamValidationError', 'validate_integer', 'validate_float',
'validate_regexp', 'validate_port', 'validate_not_empty',
'validate_options', 'validate_ip', 'validate_multi_ip',
'validate_file', 'validate_ping', 'validate_ssh',
'validate_multi_ssh')
def validate_integer(param, options=None):
"""
Raises ParamValidationError if given param is not integer.
"""
options = options or []
try:
int(param)
except ValueError:
logging.debug('validate_integer(%s, options=%s) failed.' %
(param, options))
msg = 'Given value is not an integer: %s'
raise ParamValidationError(msg % param)
def validate_float(param, options=None):
"""
Raises ParamValidationError if given param is not a float.
"""
options = options or []
try:
float(param)
except ValueError:
logging.debug('validate_float(%s, options=%s) failed.' %
(param, options))
msg = 'Given value is not a float: %s'
raise ParamValidationError(msg % param)
def validate_regexp(param, options=None):
"""
    Raises ParamValidationError if given param doesn't match at least
    one of the regular expressions given in options.
"""
options = options or []
for regex in options:
if re.search(regex, param):
break
else:
logging.debug('validate_regexp(%s, options=%s) failed.' %
(param, options))
msg = 'Given value does not match required regular expression: %s'
raise ParamValidationError(msg % param)
def validate_multi_regexp(param, options=None):
"""
Raises ParamValidationError if any of the comma separated values given
in param doesn't match one of the regular expressions given in options.
"""
options = options or []
for i in param.split(','):
validate_regexp(i.strip(), options=options)
def validate_port(param, options=None):
"""
    Raises ParamValidationError if given param is not a decimal number
    in the range 0-65535.
"""
options = options or []
validate_integer(param, options)
port = int(param)
    if not (0 <= port <= 65535):
logging.debug('validate_port(%s, options=%s) failed.' %
(param, options))
        msg = 'Given value is outside the range 0-65535: %s'
raise ParamValidationError(msg % param)
def validate_not_empty(param, options=None):
"""
Raises ParamValidationError if given param is empty.
"""
options = options or []
if not param and param is not False:
logging.debug('validate_not_empty(%s, options=%s) failed.' %
(param, options))
msg = 'Given value is not allowed: %s'
raise ParamValidationError(msg % param)
def validate_options(param, options=None):
"""
Raises ParamValidationError if given param is not member of options.
"""
options = options or []
# TO-DO: to be more flexible, remove this and exit in case param is empty
validate_not_empty(param, options)
if param not in options:
logging.debug('validate_options(%s, options=%s) failed.' %
(param, options))
msg = 'Given value is not member of allowed values %s: %s'
raise ParamValidationError(msg % (options, param))
def validate_multi_options(param, options=None):
"""
Validates if comma separated values given in params are members
of options.
"""
if not param:
return
options = options or []
for i in param.split(','):
validate_options(i.strip(), options=options)
def validate_ip(param, options=None):
"""
    Raises ParamValidationError if the given parameter value is not a valid
    IPv4 or IPv6 address.
"""
for family in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(family, param)
break
except socket.error:
continue
else:
logging.debug('validate_ip(%s, options=%s) failed.' %
(param, options))
msg = 'Given host is not in IP address format: %s'
raise ParamValidationError(msg % param)
def validate_multi_ip(param, options=None):
"""
    Raises ParamValidationError if any of the comma separated IP addresses
    given in the parameter value is not a valid IPv4 or IPv6 address.
"""
for host in param.split(','):
host = host.split('/', 1)[0]
validate_ip(host.strip(), options)
def validate_file(param, options=None):
"""
Raises ParamValidationError if provided file in param does not exist.
"""
options = options or []
# TO-DO: to be more flexible, remove this and exit in case param is empty
validate_not_empty(param)
if not os.path.isfile(param):
logging.debug('validate_file(%s, options=%s) failed.' %
(param, options))
msg = 'Given file does not exist: %s'
raise ParamValidationError(msg % param)
def validate_ping(param, options=None):
"""
Raises ParamValidationError if provided host does not answer to ICMP
echo request.
"""
options = options or []
# TO-DO: to be more flexible, remove this and exit in case param is empty
validate_not_empty(param)
rc, out = utils.execute(['/bin/ping', '-c', '1', str(param)],
can_fail=False)
if rc != 0:
logging.debug('validate_ping(%s, options=%s) failed.' %
(param, options))
msg = 'Given host is unreachable: %s'
raise ParamValidationError(msg % param)
def validate_multi_ping(param, options=None):
"""
    Raises ParamValidationError if any of the comma separated hosts given
    in param does not answer to ICMP echo request.
"""
options = options or []
# TO-DO: to be more flexible, remove this and exit in case param is empty
validate_not_empty(param)
for host in param.split(","):
validate_ping(host.strip())
_tested_ports = []
def touch_port(host, port):
"""
Check that provided host is listening on provided port.
"""
key = "%s:%d" % (host, port)
if key in _tested_ports:
return
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.shutdown(socket.SHUT_RDWR)
s.close()
_tested_ports.append(key)
def validate_ssh(param, options=None):
"""
Raises ParamValidationError if provided host does not listen
on port 22.
"""
options = options or []
try:
touch_port(param.strip(), 22)
except socket.error:
logging.debug('validate_ssh(%s, options=%s) failed.' %
(param, options))
msg = 'Given host does not listen on port 22: %s'
raise ParamValidationError(msg % param)
def validate_multi_ssh(param, options=None):
"""
    Raises ParamValidationError if any of the comma separated hosts provided
    in param does not listen on port 22.
"""
options = options or []
for host in param.split(","):
validate_ssh(host)
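# Usage sketch (illustrative; the values below are hypothetical). The module
# uses relative imports, so this guarded block only demonstrates the validator
# API defined above rather than serving as a standalone script.
if __name__ == '__main__':
    try:
        validate_port('8080')
        validate_options('b', options=['a', 'b', 'c'])
        validate_multi_ip('192.0.2.1, 2001:db8::1')
    except ParamValidationError as ex:
        logging.error('Validation failed: %s', ex)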
|
|
# Pretty-printer commands.
# Copyright (C) 2010-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with pretty-printers."""
import copy
import gdb
import re
def parse_printer_regexps(arg):
"""Internal utility to parse a pretty-printer command argv.
Arguments:
arg: The arguments to the command. The format is:
[object-regexp [name-regexp]].
Individual printers in a collection are named as
printer-name;subprinter-name.
Returns:
The result is a 3-tuple of compiled regular expressions, except that
the resulting compiled subprinter regexp is None if not provided.
Raises:
SyntaxError: an error processing ARG
"""
    argv = gdb.string_to_argv(arg)
argc = len(argv)
object_regexp = "" # match everything
name_regexp = "" # match everything
subname_regexp = None
if argc > 3:
raise SyntaxError("too many arguments")
if argc >= 1:
object_regexp = argv[0]
if argc >= 2:
name_subname = argv[1].split(";", 1)
name_regexp = name_subname[0]
if len(name_subname) == 2:
subname_regexp = name_subname[1]
# That re.compile raises SyntaxError was determined empirically.
# We catch it and reraise it to provide a slightly more useful
# error message for the user.
try:
object_re = re.compile(object_regexp)
except SyntaxError:
raise SyntaxError("invalid object regexp: %s" % object_regexp)
try:
name_re = re.compile (name_regexp)
except SyntaxError:
raise SyntaxError("invalid name regexp: %s" % name_regexp)
if subname_regexp is not None:
try:
subname_re = re.compile(subname_regexp)
except SyntaxError:
raise SyntaxError("invalid subname regexp: %s" % subname_regexp)
else:
subname_re = None
    return (object_re, name_re, subname_re)
def printer_enabled_p(printer):
"""Internal utility to see if printer (or subprinter) is enabled."""
if hasattr(printer, "enabled"):
return printer.enabled
else:
return True
class InfoPrettyPrinter(gdb.Command):
"""GDB command to list all registered pretty-printers.
Usage: info pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to list.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__ (self):
super(InfoPrettyPrinter, self).__init__("info pretty-printer",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(printer):
"""Return "" if PRINTER is enabled, otherwise " [disabled]"."""
if printer_enabled_p(printer):
return ""
else:
return " [disabled]"
@staticmethod
def printer_name(printer):
"""Return the printer's name."""
if hasattr(printer, "name"):
return printer.name
if hasattr(printer, "__name__"):
return printer.__name__
# This "shouldn't happen", but the public API allows for
# direct additions to the pretty-printer list, and we shouldn't
# crash because someone added a bogus printer.
# Plus we want to give the user a way to list unknown printers.
return "unknown"
def list_pretty_printers(self, pretty_printers, name_re, subname_re):
"""Print a list of pretty-printers."""
# A potential enhancement is to provide an option to list printers in
# "lookup order" (i.e. unsorted).
sorted_pretty_printers = sorted (copy.copy(pretty_printers),
key = self.printer_name)
for printer in sorted_pretty_printers:
name = self.printer_name(printer)
enabled = self.enabled_string(printer)
if name_re.match(name):
print (" %s%s" % (name, enabled))
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
sorted_subprinters = sorted (copy.copy(printer.subprinters),
key = self.printer_name)
for subprinter in sorted_subprinters:
if (not subname_re or
subname_re.match(subprinter.name)):
print (" %s%s" %
(subprinter.name,
self.enabled_string(subprinter)))
def invoke1(self, title, printer_list,
obj_name_to_match, object_re, name_re, subname_re):
"""Subroutine of invoke to simplify it."""
if printer_list and object_re.match(obj_name_to_match):
print (title)
self.list_pretty_printers(printer_list, name_re, subname_re)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
self.invoke1("global pretty-printers:", gdb.pretty_printers,
"global", object_re, name_re, subname_re)
cp = gdb.current_progspace()
self.invoke1("progspace %s pretty-printers:" % cp.filename,
cp.pretty_printers, "progspace",
object_re, name_re, subname_re)
for objfile in gdb.objfiles():
self.invoke1(" objfile %s pretty-printers:" % objfile.filename,
objfile.pretty_printers, objfile.filename,
object_re, name_re, subname_re)
def count_enabled_printers(pretty_printers):
"""Return a 2-tuple of number of enabled and total printers."""
enabled = 0
total = 0
for printer in pretty_printers:
if (hasattr(printer, "subprinters")
and printer.subprinters is not None):
if printer_enabled_p(printer):
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
enabled += 1
total += len(printer.subprinters)
else:
if printer_enabled_p(printer):
enabled += 1
total += 1
return (enabled, total)
def count_all_enabled_printers():
"""Return a 2-tuble of the enabled state and total number of all printers.
This includes subprinters.
"""
enabled_count = 0
total_count = 0
(t_enabled, t_total) = count_enabled_printers(gdb.pretty_printers)
enabled_count += t_enabled
total_count += t_total
(t_enabled, t_total) = count_enabled_printers(gdb.current_progspace().pretty_printers)
enabled_count += t_enabled
total_count += t_total
for objfile in gdb.objfiles():
(t_enabled, t_total) = count_enabled_printers(objfile.pretty_printers)
enabled_count += t_enabled
total_count += t_total
return (enabled_count, total_count)
def pluralize(text, n, suffix="s"):
"""Return TEXT pluralized if N != 1."""
if n != 1:
return "%s%s" % (text, suffix)
else:
return text
def show_pretty_printer_enabled_summary():
"""Print the number of printers enabled/disabled.
We count subprinters individually.
"""
(enabled_count, total_count) = count_all_enabled_printers()
print ("%d of %d printers enabled" % (enabled_count, total_count))
def do_enable_pretty_printer_1 (pretty_printers, name_re, subname_re, flag):
"""Worker for enabling/disabling pretty-printers.
Arguments:
pretty_printers: list of pretty-printers
name_re: regular-expression object to select printers
subname_re: regular expression object to select subprinters or None
if all are affected
flag: True for Enable, False for Disable
Returns:
The number of printers affected.
This is just for informational purposes for the user.
"""
total = 0
for printer in pretty_printers:
if (hasattr(printer, "name") and name_re.match(printer.name) or
hasattr(printer, "__name__") and name_re.match(printer.__name__)):
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
total += 1
# NOTE: We preserve individual subprinter settings.
printer.enabled = flag
else:
# NOTE: Whether this actually disables the subprinter
# depends on whether the printer's lookup function supports
# the "enable" API. We can only assume it does.
for subprinter in printer.subprinters:
if subname_re.match(subprinter.name):
# Only record printers that change state.
if (printer_enabled_p(printer) and
printer_enabled_p(subprinter) != flag):
total += 1
subprinter.enabled = flag
else:
# This printer has no subprinters.
# If the user does "disable pretty-printer .* .* foo"
# should we disable printers that don't have subprinters?
# How do we apply "foo" in this context? Since there is no
# "foo" subprinter it feels like we should skip this printer.
# There's still the issue of how to handle
# "disable pretty-printer .* .* .*", and every other variation
# that can match everything. For now punt and only support
# "disable pretty-printer .* .*" (i.e. subname is elided)
# to disable everything.
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
total += 1
printer.enabled = flag
return total
def do_enable_pretty_printer (arg, flag):
"""Internal worker for enabling/disabling pretty-printers."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
total = 0
if object_re.match("global"):
total += do_enable_pretty_printer_1(gdb.pretty_printers,
name_re, subname_re, flag)
cp = gdb.current_progspace()
if object_re.match("progspace"):
total += do_enable_pretty_printer_1(cp.pretty_printers,
name_re, subname_re, flag)
for objfile in gdb.objfiles():
if object_re.match(objfile.filename):
total += do_enable_pretty_printer_1(objfile.pretty_printers,
name_re, subname_re, flag)
if flag:
state = "enabled"
else:
state = "disabled"
print ("%d %s %s" % (total, pluralize("printer", total), state))
# Print the total list of printers currently enabled/disabled.
# This is to further assist the user in determining whether the result
    # is expected. Since we use regexps to select, this is useful.
show_pretty_printer_enabled_summary()
# Enable/Disable one or more pretty-printers.
#
# This is intended for use when a broken pretty-printer is shipped/installed
# and the user wants to disable that printer without disabling all the other
# printers.
#
# A useful addition would be -v (verbose) to show each printer affected.
class EnablePrettyPrinter (gdb.Command):
"""GDB command to enable the specified pretty-printer.
Usage: enable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(EnablePrettyPrinter, self).__init__("enable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, True)
class DisablePrettyPrinter (gdb.Command):
"""GDB command to disable the specified pretty-printer.
Usage: disable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(DisablePrettyPrinter, self).__init__("disable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, False)
def register_pretty_printer_commands():
"""Call from a top level script to install the pretty-printer commands."""
InfoPrettyPrinter()
EnablePrettyPrinter()
DisablePrettyPrinter()
register_pretty_printer_commands()
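# Example usage from within a GDB session (illustrative; the printer and
# subprinter names shown depend on what is actually registered for the
# inferior's objfiles):
#
#   (gdb) info pretty-printer
#   (gdb) disable pretty-printer global libstdc++-v6;std::vector
#   (gdb) enable pretty-printer global libstdc++-v6
#
# The enable/disable commands print the number of printers affected and then
# the enabled/total summary from show_pretty_printer_enabled_summary().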
|
|
from sympy.core import Basic, S, Function, diff, Number, sympify, Tuple
from sympy.core.relational import Equality, Relational
from sympy.logic.boolalg import Boolean
from sympy.core.sets import Set
from sympy.core.symbol import Dummy
class ExprCondPair(Tuple):
"""Represents an expression, condition pair."""
true_sentinel = Dummy('True')
def __new__(cls, expr, cond):
if cond is True:
cond = ExprCondPair.true_sentinel
return Tuple.__new__(cls, expr, cond)
@property
def expr(self):
"""
Returns the expression of this pair.
"""
return self.args[0]
@property
def cond(self):
"""
Returns the condition of this pair.
"""
if self.args[1] == ExprCondPair.true_sentinel:
return True
return self.args[1]
@property
def free_symbols(self):
"""
Return the free symbols of this pair.
"""
# Overload Basic.free_symbols because self.args[1] may contain non-Basic
result = self.expr.free_symbols
if hasattr(self.cond, 'free_symbols'):
result |= self.cond.free_symbols
return result
def __iter__(self):
yield self.expr
yield self.cond
class Piecewise(Function):
"""
Represents a piecewise function.
Usage:
Piecewise( (expr,cond), (expr,cond), ... )
        - Each argument is a 2-tuple defining an expression and a condition
- The conds are evaluated in turn returning the first that is True.
If any of the evaluated conds are not determined explicitly False,
e.g. x < 1, the function is returned in symbolic form.
- If the function is evaluated at a place where all conditions are False,
a ValueError exception will be raised.
        - Pairs where the cond is explicitly False will be removed.
Examples
========
>>> from sympy import Piecewise, log
>>> from sympy.abc import x
>>> f = x**2
>>> g = log(x)
>>> p = Piecewise( (0, x<-1), (f, x<=1), (g, True))
>>> p.subs(x,1)
1
>>> p.subs(x,5)
log(5)
See Also
========
piecewise_fold
"""
nargs = None
is_Piecewise = True
def __new__(cls, *args, **options):
# (Try to) sympify args first
newargs = []
for ec in args:
pair = ExprCondPair(*ec)
cond = pair.cond
if cond is False:
continue
if not isinstance(cond, (bool, Relational, Set, Boolean)):
raise TypeError(
"Cond %s is of type %s, but must be a bool," \
" Relational, Number or Set" % (cond, type(cond)))
newargs.append(pair)
if cond is ExprCondPair.true_sentinel:
break
if options.pop('evaluate', True):
r = cls.eval(*newargs)
else:
r = None
if r is None:
return Basic.__new__(cls, *newargs, **options)
else:
return r
@classmethod
def eval(cls, *args):
from sympy import Or
# Check for situations where we can evaluate the Piecewise object.
# 1) Hit an unevaluable cond (e.g. x<1) -> keep object
# 2) Hit a true condition -> return that expr
# 3) Remove false conditions, if no conditions left -> raise ValueError
all_conds_evaled = True # Do all conds eval to a bool?
piecewise_again = False # Should we pass args to Piecewise again?
non_false_ecpairs = []
or1 = Or(*[cond for (_, cond) in args if cond is not True])
for expr, cond in args:
# Check here if expr is a Piecewise and collapse if one of
# the conds in expr matches cond. This allows the collapsing
# of Piecewise((Piecewise(x,x<0),x<0)) to Piecewise((x,x<0)).
# This is important when using piecewise_fold to simplify
# multiple Piecewise instances having the same conds.
# Eventually, this code should be able to collapse Piecewise's
# having different intervals, but this will probably require
# using the new assumptions.
if isinstance(expr, Piecewise):
or2 = Or(*[c for (_, c) in expr.args if c is not True])
for e, c in expr.args:
# Don't collapse if cond is "True" as this leads to
# incorrect simplifications with nested Piecewises.
if c == cond and (or1 == or2 or cond is not True):
expr = e
piecewise_again = True
cond_eval = cls.__eval_cond(cond)
if cond_eval is None:
all_conds_evaled = False
non_false_ecpairs.append( (expr, cond) )
elif cond_eval:
if all_conds_evaled:
return expr
non_false_ecpairs.append( (expr, cond) )
if len(non_false_ecpairs) != len(args) or piecewise_again:
return Piecewise(*non_false_ecpairs)
return None
def doit(self, **hints):
"""
Evaluate this piecewise function.
"""
newargs = []
for e, c in self.args:
if hints.get('deep', True):
if isinstance(e, Basic):
e = e.doit(**hints)
if isinstance(c, Basic):
c = c.doit(**hints)
newargs.append((e, c))
return Piecewise(*newargs)
@property
def is_commutative(self):
return all(expr.is_commutative for expr, _ in self.args)
def _eval_integral(self,x):
from sympy.integrals import integrate
return Piecewise(*[(integrate(e, x), c) for e, c in self.args])
def _eval_interval(self, sym, a, b):
"""Evaluates the function along the sym in a given interval ab"""
# FIXME: Currently complex intervals are not supported. A possible
# replacement algorithm, discussed in issue 2128, can be found in the
# following papers;
# http://portal.acm.org/citation.cfm?id=281649
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.70.4127&rep=rep1&type=pdf
int_expr = []
mul = 1
if a > b:
a, b, mul = b, a, -1
default = None
# Determine what intervals the expr,cond pairs affect.
# 1) If cond is True, then log it as default
# 1.1) Currently if cond can't be evaluated, throw NotImplementedError.
# 2) For each inequality, if previous cond defines part of the interval
# update the new conds interval.
# - eg x < 1, x < 3 -> [oo,1],[1,3] instead of [oo,1],[oo,3]
# 3) Sort the intervals to make it easier to find correct exprs
for expr, cond in self.args:
if cond is True:
default = expr
break
elif isinstance(cond, Equality):
continue
lower, upper = cond.lts, cond.gts # part 1: initialize with givens
if cond.lts.has(sym): # part 1a: expand the side ...
lower = S.NegativeInfinity # e.g. x <= 0 ---> -oo <= 0
elif cond.gts.has(sym): # part 1a: ... that can be expanded
upper = S.Infinity # e.g. x >= 0 ---> oo >= 0
else:
raise NotImplementedError(
"Unable to handle interval evaluation of expression.")
# part 1b: Reduce (-)infinity to what was passed in.
lower, upper = max(a, lower), min(b, upper)
for n in xrange(len(int_expr)):
# Part 2: remove any interval overlap. For any conflicts, the
            # interval already there wins, and the incoming interval updates
# its bounds accordingly.
if self.__eval_cond(lower < int_expr[n][1]) and \
self.__eval_cond(lower >= int_expr[n][0]):
lower = int_expr[n][1]
if self.__eval_cond(upper > int_expr[n][0]) and \
self.__eval_cond(upper <= int_expr[n][1]):
upper = int_expr[n][0]
if self.__eval_cond(lower < upper): # Is it still an interval?
int_expr.append((lower, upper, expr))
int_expr.sort(key=lambda x:x[0])
# Add holes to list of intervals if there is a default value,
# otherwise raise a ValueError.
holes = []
curr_low = a
for int_a, int_b, expr in int_expr:
if curr_low < int_a:
holes.append([curr_low, min(b, int_a), default])
curr_low = int_b
if curr_low > b:
break
if curr_low < b:
holes.append([curr_low, b, default])
if holes and default is not None:
int_expr.extend(holes)
        elif holes and default is None:
raise ValueError("Called interval evaluation over piecewise " \
"function on undefined intervals %s" % \
", ".join([str((h[0], h[1])) for h in holes]))
# Finally run through the intervals and sum the evaluation.
ret_fun = 0
for int_a, int_b, expr in int_expr:
ret_fun += expr._eval_interval(sym, max(a, int_a), min(b, int_b))
return mul * ret_fun
def _eval_derivative(self, s):
return Piecewise(*[(diff(e, s), c) for e, c in self.args])
def _eval_subs(self, old, new):
"""
Piecewise conditions may contain Sets whose modifications
requires the use of contains rather than substitution. They
may also contain bool which are not of Basic type.
"""
args = list(self.args)
for i, (e, c) in enumerate(args):
try:
e = e._subs(old, new)
except TypeError:
if e != old:
continue
e = new
if isinstance(c, bool):
pass
elif isinstance(c, Set):
                # What do we do if there is more than one symbolic
                # variable? Which one do we pass to Set.contains?
c = c.contains(new)
elif isinstance(c, Basic):
c = c._subs(old, new)
args[i] = e, c
return Piecewise(*args)
def _eval_nseries(self, x, n, logx):
args = map(lambda ec: (ec.expr._eval_nseries(x, n, logx), ec.cond), \
self.args)
return self.func(*args)
def _eval_as_leading_term(self, x):
# This is completely wrong, cf. issue 3110
return self.args[0][0].as_leading_term(x)
@classmethod
def __eval_cond(cls, cond):
"""Return the truth value of the condition."""
if cond is True:
return True
return None
def piecewise_fold(expr):
"""
Takes an expression containing a piecewise function and returns the
expression in piecewise form.
Examples
========
>>> from sympy import Piecewise, piecewise_fold, sympify as S
>>> from sympy.abc import x
>>> p = Piecewise((x, x < 1), (1, S(1) <= x))
>>> piecewise_fold(x*p)
Piecewise((x**2, x < 1), (x, 1 <= x))
See Also
========
Piecewise
"""
if not isinstance(expr, Basic) or not expr.has(Piecewise):
return expr
new_args = map(piecewise_fold, expr.args)
if expr.func is ExprCondPair:
return ExprCondPair(*new_args)
piecewise_args = []
for n, arg in enumerate(new_args):
if arg.func is Piecewise:
piecewise_args.append(n)
if len(piecewise_args) > 0:
n = piecewise_args[0]
new_args = [(expr.func(*(new_args[:n] + [e] + new_args[n+1:])), c) \
for e, c in new_args[n].args]
if len(piecewise_args) > 1:
return piecewise_fold(Piecewise(*new_args))
return Piecewise(*new_args)
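# Doctest-style usage sketch for the classes above (results are what this
# implementation is expected to produce):
#
#   >>> from sympy import Piecewise, piecewise_fold, Symbol
#   >>> x = Symbol('x')
#   >>> p = Piecewise((0, x < 0), (x**2, True))
#   >>> p.subs(x, 3)
#   9
#   >>> piecewise_fold(2*p)
#   Piecewise((0, x < 0), (2*x**2, True))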
|
|
from __future__ import unicode_literals
from collections import deque
from djblets.util.compat.django.template.loader import render_to_string
from reviewboard.reviews.errors import DepthLimitExceededError
#: The maximum depth limit of any action instance.
MAX_DEPTH_LIMIT = 2
#: The mapping of all action IDs to their corresponding action instances.
_all_actions = {}
#: All top-level action IDs (in their left-to-right order of appearance).
_top_level_ids = deque()
#: Determines if the default action instances have been populated yet.
_populated = False
class BaseReviewRequestAction(object):
"""A base class for an action that can be applied to a review request.
Creating an action requires subclassing :py:class:`BaseReviewRequestAction`
and overriding any fields/methods as desired. Different instances of the
same subclass can also override the class fields with their own instance
fields.
Example:
.. code-block:: python
class UsedOnceAction(BaseReviewRequestAction):
action_id = 'once'
label = 'This is used once.'
class UsedMultipleAction(BaseReviewRequestAction):
def __init__(self, action_id, label):
super(UsedMultipleAction, self).__init__()
self.action_id = 'repeat-' + action_id
                    self.label = 'This is used multiple times.'
Note:
Since the same action will be rendered for multiple different users in
a multithreaded environment, the action state should not be modified
after initialization. If we want different action attributes at
runtime, then we can override one of the getter methods (such as
:py:meth:`get_label`), which by default will simply return the original
attribute from initialization.
"""
#: The ID of this action. Must be unique across all types of actions and
#: menu actions, at any depth.
action_id = None
#: The label that displays this action to the user.
label = None
#: The URL to invoke if this action is clicked.
url = '#'
#: Determines if this action should be initially hidden to the user.
hidden = False
def __init__(self):
"""Initialize this action.
By default, actions are top-level and have no children.
"""
self._parent = None
self._max_depth = 0
def copy_to_dict(self, context):
"""Copy this action instance to a dictionary.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
dict: The corresponding dictionary.
"""
return {
'action_id': self.action_id,
'label': self.get_label(context),
'url': self.get_url(context),
'hidden': self.get_hidden(context),
}
def get_label(self, context):
"""Return this action's label.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The label that displays this action to the user.
"""
return self.label
def get_url(self, context):
"""Return this action's URL.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The URL to invoke if this action is clicked.
"""
return self.url
def get_hidden(self, context):
"""Return whether this action should be initially hidden to the user.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
bool: Whether this action should be initially hidden to the user.
"""
return self.hidden
def should_render(self, context):
"""Return whether or not this action should render.
The default implementation is to always render the action everywhere.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
return True
@property
def max_depth(self):
"""Lazily compute the max depth of any action contained by this action.
Top-level actions have a depth of zero, and child actions have a depth
that is one more than their parent action's depth.
Algorithmically, the notion of max depth is equivalent to the notion of
height in the context of trees (from graph theory). We decided to use
this term instead so as not to confuse it with the dimensional height
of a UI element.
Returns:
int: The max depth of any action contained by this action.
"""
return self._max_depth
def reset_max_depth(self):
"""Reset the max_depth of this action and all its ancestors to zero."""
self._max_depth = 0
if self._parent:
self._parent.reset_max_depth()
def render(self, context, action_key='action',
template_name='reviews/action.html'):
"""Render this action instance and return the content as HTML.
Args:
context (django.template.Context):
The collection of key-value pairs that is passed to the
template in order to render this action.
action_key (unicode, optional):
The key to be used for this action in the context map.
template_name (unicode, optional):
The name of the template to be used for rendering this action.
Returns:
unicode: The action rendered in HTML.
"""
content = ''
if self.should_render(context):
context.push()
try:
context[action_key] = self.copy_to_dict(context)
content = render_to_string(template_name, context)
finally:
context.pop()
return content
def register(self, parent=None):
"""Register this review request action instance.
Note:
Newly registered top-level actions are appended to the left of the
other previously registered top-level actions. So if we intend to
register a collection of top-level actions in a certain order, then
we likely want to iterate through the actions in reverse.
Args:
parent (BaseReviewRequestMenuAction, optional):
The parent action instance of this action instance.
Raises:
KeyError:
A second registration is attempted (action IDs must be unique
across all types of actions and menu actions, at any depth).
DepthLimitExceededError:
The maximum depth limit is exceeded.
"""
_populate_defaults()
if self.action_id in _all_actions:
raise KeyError('%s already corresponds to a registered review '
'request action' % self.action_id)
if self.max_depth > MAX_DEPTH_LIMIT:
raise DepthLimitExceededError(self.action_id, MAX_DEPTH_LIMIT)
if parent:
parent.child_actions.append(self)
self._parent = parent
else:
_top_level_ids.appendleft(self.action_id)
_all_actions[self.action_id] = self
def unregister(self):
"""Unregister this review request action instance.
Note:
This method can mutate its parent's child actions. So if we are
iteratively unregistering a parent's child actions, then we should
consider first making a clone of the list of children.
Raises:
            KeyError: The action is not registered, so it cannot be unregistered.
"""
_populate_defaults()
try:
del _all_actions[self.action_id]
except KeyError:
raise KeyError('%s does not correspond to a registered review '
'request action' % self.action_id)
if self._parent:
self._parent.child_actions.remove(self)
else:
_top_level_ids.remove(self.action_id)
self.reset_max_depth()
class BaseReviewRequestMenuAction(BaseReviewRequestAction):
"""A base class for an action with a dropdown menu.
Note:
A menu action's child actions must always be pre-registered.
"""
def __init__(self, child_actions=None):
"""Initialize this menu action.
Args:
child_actions (list of BaseReviewRequestAction, optional):
The list of child actions to be contained by this menu action.
Raises:
KeyError:
A second registration is attempted (action IDs must be unique
across all types of actions and menu actions, at any depth).
DepthLimitExceededError:
The maximum depth limit is exceeded.
"""
super(BaseReviewRequestMenuAction, self).__init__()
self.child_actions = []
child_actions = child_actions or []
for child_action in child_actions:
child_action.register(self)
def copy_to_dict(self, context):
"""Copy this menu action instance to a dictionary.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
dict: The corresponding dictionary.
"""
dict_copy = {
'child_actions': self.child_actions,
}
dict_copy.update(super(BaseReviewRequestMenuAction, self).copy_to_dict(
context))
return dict_copy
@property
def max_depth(self):
"""Lazily compute the max depth of any action contained by this action.
Returns:
int: The max depth of any action contained by this action.
"""
if self.child_actions and self._max_depth == 0:
self._max_depth = 1 + max(child_action.max_depth
for child_action in self.child_actions)
return self._max_depth
def render(self, context, action_key='menu_action',
template_name='reviews/menu_action.html'):
"""Render this menu action instance and return the content as HTML.
Args:
context (django.template.Context):
The collection of key-value pairs that is passed to the
template in order to render this menu action.
action_key (unicode, optional):
The key to be used for this menu action in the context map.
template_name (unicode, optional):
The name of the template to be used for rendering this menu
action.
Returns:
unicode: The action rendered in HTML.
"""
return super(BaseReviewRequestMenuAction, self).render(
context, action_key, template_name)
def unregister(self):
"""Unregister this review request action instance.
This menu action recursively unregisters its child action instances.
Raises:
            KeyError: The action is not registered, so it cannot be unregistered.
"""
super(BaseReviewRequestMenuAction, self).unregister()
# Unregistration will mutate self.child_actions, so we make a copy.
for child_action in list(self.child_actions):
child_action.unregister()
# TODO: Convert all this to use djblets.registries.
def _populate_defaults():
"""Populate the default action instances."""
global _populated
if not _populated:
_populated = True
from reviewboard.reviews.default_actions import get_default_actions
for default_action in reversed(get_default_actions()):
default_action.register()
def get_top_level_actions():
"""Return a generator of all top-level registered action instances.
Yields:
BaseReviewRequestAction:
All top-level registered review request action instances.
"""
_populate_defaults()
return (_all_actions[action_id] for action_id in _top_level_ids)
def register_actions(actions, parent_id=None):
"""Register the given actions as children of the corresponding parent.
If no parent_id is given, then the actions are assumed to be top-level.
Args:
actions (iterable of BaseReviewRequestAction):
The collection of action instances to be registered.
parent_id (unicode, optional):
The action ID of the parent of each action instance to be
registered.
Raises:
KeyError:
The parent action cannot be found or a second registration is
attempted (action IDs must be unique across all types of actions
and menu actions, at any depth).
DepthLimitExceededError:
The maximum depth limit is exceeded.
"""
_populate_defaults()
if parent_id is None:
parent = None
else:
try:
parent = _all_actions[parent_id]
except KeyError:
raise KeyError('%s does not correspond to a registered review '
'request action' % parent_id)
for action in reversed(actions):
action.register(parent)
if parent:
parent.reset_max_depth()
def unregister_actions(action_ids):
"""Unregister each of the actions corresponding to the given IDs.
Args:
action_ids (iterable of unicode):
The collection of action IDs corresponding to the actions to be
removed.
Raises:
        KeyError: One of the given action IDs does not correspond to a registered action.
"""
_populate_defaults()
for action_id in action_ids:
try:
action = _all_actions[action_id]
except KeyError:
raise KeyError('%s does not correspond to a registered review '
'request action' % action_id)
action.unregister()
def clear_all_actions():
"""Clear all registered actions.
This method is really only intended to be used by unit tests. We might be
able to remove this hack once we convert to djblets.registries.
Warning:
This will clear **all** actions, even if they were registered in
separate extensions.
"""
global _populated
_all_actions.clear()
_top_level_ids.clear()
_populated = False
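# Illustrative sketch of defining and registering a custom action (the action
# IDs, labels and URL below are hypothetical, not shipped defaults). Shown as
# a comment because registration imports the default actions as a side effect:
#
#     class ExampleAction(BaseReviewRequestAction):
#         action_id = 'example'
#         label = 'Example'
#         url = '/example/'
#
#     class ExampleMenuAction(BaseReviewRequestMenuAction):
#         action_id = 'example-menu'
#         label = 'Example Menu'
#
#     # Registers the menu (with its pre-registered child) as a top-level
#     # action; unregister_actions(['example-menu']) removes both again.
#     register_actions([ExampleMenuAction([ExampleAction()])])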
|
|
from enum import IntEnum
from couchbase.exceptions import CouchbaseException
from typing import *
class MixedAuthException(CouchbaseException):
"""
Cannot use old and new style auth together in the same cluster
"""
pass
class NoBucketException(CouchbaseException):
"""
Operation requires at least a single bucket to be open
"""
# TODO: refactor this into base class perhaps?
def _recursive_creds_merge(base, overlay):
for k, v in overlay.items():
base_k = base.get(k, None)
if not base_k:
base[k] = v
continue
if isinstance(v, dict):
if isinstance(base_k, dict):
base[k] = _recursive_creds_merge(base_k, v)
else:
raise Exception("Cannot merge dict and {}".format(v))
else:
raise Exception("Cannot merge non dicts")
return base
class Authenticator(object):
def __init__(self, cert_path=None):
"""
:param cert_path: Path for SSL certificate (last in chain if multiple)
"""
self._cert_path = cert_path
def get_credentials(self, bucket=None):
"""
Gets the credentials for a specified bucket. If bucket is
`None`, gets the username and password for the entire cluster, if
different.
:param bucket: The bucket to act as context
        :return: A dictionary of (optional) scheme and credentials e.g. `{'scheme':'couchbases', 'options':{'username':'fred', 'password':'opensesame'}}`
"""
return self.get_auto_credentials(bucket)
@classmethod
def unwanted_keys(cls):
"""
The set of option keys that are definitely incompatible with this authentication style.
"""
return set()
@classmethod
def unique_keys(cls):
"""
The set of option keys, if any, that this authenticator uniquely possesses.
"""
return set(cls.get_unique_creds_dict().keys())
@classmethod
def get_unique_creds_dict(cls):
"""
User overridable
A dictionary of authenticator-unique options and functions/lambdas of the form:
function(self):
return self.password
e.g.
{'certpath': lambda self: self.certpath}
"""
return {}
def _base_options(self, bucket, overlay):
base_dict = {'options': {'certpath': self._cert_path}
if self._cert_path else {}}
return _recursive_creds_merge(base_dict, overlay)
def get_cred_bucket(self, bucket, **overlay):
"""
:param bucket:
:return: returns the non-unique parts of the credentials for bucket authentication,
as a dictionary of functions, e.g.:
            {'options': {'username': self.username}, 'scheme': 'couchbases'}
"""
return self._base_options(bucket, overlay)
def get_cred_not_bucket(self, **overlay):
"""
        :return: returns the non-unique parts of the credentials for admin access
as a dictionary of functions, e.g.:
{'options':{'password': self.password}}
"""
return self._base_options(None, overlay)
def get_auto_credentials(self, bucket):
"""
:param bucket:
:return: returns a dictionary of credentials for bucket/admin
authentication
"""
result = {k: v(self) for k, v in self.get_unique_creds_dict().items()}
if bucket:
result.update(self.get_cred_bucket(bucket))
else:
result.update(self.get_cred_not_bucket())
return result
def supports_tls(self):
return True
def supports_non_tls(self):
return True
class PasswordAuthenticator(Authenticator):
def __init__(self,
username, # type: str
password, # type: str
cert_path=None # type: str
):
"""
This class uses a single credential pair of username and password, and
is designed to be used either with cluster management operations or
with Couchbase 5.0 style usernames with role based access control.
:param str username: username to use for auth.
:param str password: password for the user.
:param str cert_path: Path to the CA key.
"""
super(PasswordAuthenticator, self).__init__(cert_path=cert_path)
self.username = username
self.password = password
def get_cred_bucket(self, bucket, **overlay):
return self.get_cred_not_bucket(**overlay)
def get_cred_not_bucket(self, **overlay):
merged = _recursive_creds_merge(
{'options': {'username': self.username, 'password': self.password}}, overlay)
return super(PasswordAuthenticator, self).get_cred_not_bucket(**merged)
@classmethod
def unwanted_keys(cls):
return {'password'}
class ClassicAuthenticator(Authenticator):
def __init__(self, cluster_username=None,
cluster_password=None,
buckets=None,
cert_path=None):
"""
Classic authentication mechanism.
:param cluster_username:
Global cluster username. Only required for management operations
:type cluster_username: str
:param cluster_password:
Global cluster password. Only required for management operations
:param buckets:
A dictionary of `{bucket_name: bucket_password}`.
:param cert_path:
Path of the CA key
"""
super(ClassicAuthenticator, self).__init__(cert_path=cert_path)
self.username = cluster_username
self.password = cluster_password
self.buckets = buckets if buckets else {}
def get_cred_not_bucket(self):
return super(ClassicAuthenticator, self).get_cred_not_bucket(
**{'options': {'username': self.username, 'password': self.password}})
def get_cred_bucket(self, bucket, **overlay):
merged = _recursive_creds_merge(
{'options': {'password': self.buckets.get(bucket)}}, overlay)
return super(ClassicAuthenticator, self).get_cred_bucket(
bucket, **merged)
class CertAuthenticator(Authenticator):
def __init__(self,
cert_path=None, # type: str
key_path=None, # type: str
trust_store_path=None, # type: str
cluster_username=None, # type: str
cluster_password=None # type: str
):
"""
Certificate authentication mechanism.
:param str cluster_username: Global cluster username. Only required for management operations
:param str cluster_password: Global cluster password. Only required for management operations
:param str cert_path: Path to the CA key
:param str key_path: Path to the key
:param str trust_store_path: Path of the certificate trust store.
"""
super(CertAuthenticator, self).__init__(cert_path=cert_path)
self.username = cluster_username
self.password = cluster_password
self.keypath = key_path
self.trust_store_path = trust_store_path
@classmethod
def get_unique_creds_dict(clazz):
return {'keypath': lambda self: self.keypath,
'truststorepath': lambda self: self.trust_store_path}
def get_cred_bucket(self, bucket, **overlay):
merged = _recursive_creds_merge(
{'options': {'username': self.username}, 'scheme': 'couchbases'},
overlay)
return super(CertAuthenticator, self).get_cred_bucket(bucket, **merged)
def get_cred_not_bucket(self):
return super(CertAuthenticator, self).get_cred_not_bucket(
**{'options': {'password': self.password}})
def supports_non_tls(self):
return False
@classmethod
def unwanted_keys(cls):
return {'password'}
def get_credentials(self, bucket=None):
return self.get_auto_credentials(bucket)
class AuthDomain(IntEnum):
"""
The Authentication domain for a user.
Local: Users managed by Couchbase Server.
External: Users managed by an external resource, eg LDAP.
"""
Local = 0
External = 1
@classmethod
def to_str(cls, value):
if value == cls.External:
return "external"
else:
return "local"
@classmethod
def from_str(cls, value):
if value == "external":
return cls.External
else:
return cls.Local
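# Minimal usage sketch exercising only what is defined in this module (the
# credentials are placeholders):
if __name__ == '__main__':
    auth = PasswordAuthenticator('fred', 'opensesame')
    # For RBAC-style users the bucket and cluster credentials are identical.
    print(auth.get_credentials())
    # -> {'options': {'username': 'fred', 'password': 'opensesame'}}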
|
|
#FIXME Service changes?
#FIXME redo model serialization API
#FIXME Notifiers
#FIXME ACLs
#FIXME locks
#FIXME isDirty
#FIXME SingleUserMode
#Represents a single Trike model
class TModel(object):
def __init__(self):
self.objects = TObjectSet(self)
self.changes = TChangeSet(self)
self.branches = TBranchSet(self)
self._ID = GUID()
        self.config = TConfig(self)
self._dirty = False #Have we changed since load or writeout
self._busy = False #Lock for serialization
pass
# {{{ model management
#FIXME locks
def load(self, target):
pass #returns the most recent ChangeID
#FIXME locks
#FIXME isDirty
#FIXME ACLModelAdmin
#FIXME SingleUser
def unload(self, force=False): #WEBMETHOD
#shuts down model, removes objects from memory
self.changes.unload()
self.branches.unload()
self.objects.unload()
pass
#FIXME ACLModelRead
def getName(self): #WEBMETHOD
pass
#FIXME ACLModelAdmin
#FIXME isDirty
#FIXME SingleUser
def setName(self, name): #WEBMETHOD
pass
#FIXME ACLModelRead
def getModelCodeVersion(self): #WEBMETHOD
pass #what Trike version is this data for?
#FIXME ACLModelAdmin
#FIXME isDirty
#FIXME SingleUser
def setConfig(self, config): #WEBMETHOD
pass
#FIXME ACLModelRead
def getConfigItem(self, name): #WEBMETHOD
pass
#FIXME ACLModelAdmin
#FIXME isDirty
#FIXME SingleUser
def setConfigItem(self, name, value): #WEBMETHOD
pass
#FIXME ACLModelAdmin
#FIXME isDirty
#FIXME SingleUser
def setACL(self, newACL): #WEBMETHOD
pass
#FIXME ACLModelRead
def getACL(self): #WEBMETHOD
pass
#FIXME ACLModelAdmin
    #FIXME SingleUser
def lockSingleUser(self, user, session, force=False): #WEBMETHOD
pass
#FIXME ACLModelAdmin
#FIXME SingleUser
    def unlockSingleUser(self, user=None, session=None, force=False): #WEBMETHOD
pass
#FIXME ACLModelRead
def isSingleUser(self): #WEBMETHOD
pass
#FIXME ACLModelRead
def notify(self): #WEBMETHOD
pass #returns when anything changes in the model, for UI coordination
#FIXME ACLModelRead
def isDirty(self): #WEBMETHOD
pass #have we changed since we were loaded
def makeDirty(self):
pass
#FIXME ACLModelWrite
def save(self): #WEBMETHOD
pass #Save ourselves to disk
def isBusy(self):
pass #lock used for serialization
def getBusy(self): #Acquire lock
pass
def relax(self): #Drop lock
pass
def getModelAPIDoc(self): #WEBMETHOD
#Returns documentation about the API
pass
def getTrikeDoc(self): #WEBMETHOD
#Returns documentation about Trike
pass
def getTypeConstants(self): #WEBMETHOD
#Returns the GUIDs used for messages, events, TObject types, and TObject handlers
pass
# }}}
# {{{ change management
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
    def receiveChange(self, isCreation = False, targetID = None, targetKey = None,
                      targetTypeID = None, eventTypeID = None, eventParams = None,
                      branchID = None): #WEBMETHOD
self.changes.enqueue(isCreation, targetID, targetKey, targetTypeID,
eventTypeID, eventParams, branchID)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def rollbackToChange(self, changeID): #WEBMETHOD
"""This hard-deletes all changes below this one from all branches.
Most of the time you want undo."""
self.changes.rollbackTo(changeID)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def prune(self, changeID): #WEBMETHOD
self.changes.prune(changeID)
pass
# }}}
# {{{ branch management
#FIXME ACLModelRead
def getAllNamedBranches(self): #WEBMETHOD
"""Returns name, ID pairs for all named branches."""
self.branches.getNamed()
pass
#FIXME ACLModelRead
def getAllBranches(self): #WEBMETHOD
"""Returns all branch IDs."""
self.branches.getAllIDs()
pass
#FIXME ACLModelRead
def getCurrentLeafForBranch(self, branchID): #WEBMETHOD
self.branches.get(branchID).getLeaf()
pass
#FIXME ACLModelRead
def getBranchName(self, branchID): #WEBMETHOD
self.branches.getName(self.branches.get(branchID))
pass
#FIXME ACLModelRead
def getBranchByName(self, branchName): #WEBMETHOD
"""Returns the ID of a branch by name."""
self.branches.getByName(branchName).getID()
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def setBranchName(self, branchID, name): #WEBMETHOD
self.branches.get(branchID).setName(name)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def undoChangesBefore(self, branchID, changeID): #WEBMETHOD
"""Returns the ID of the new undoBranch and the new leaf ID of the main branch."""
self.branches.get(branchID).undoBefore(changeID)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def redoChanges(self, branchID, changeID): #WEBMETHOD
self.branches.get(branchID).redo(changeID)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def portChange(self, changeID, branchID): #WEBMETHOD
self.branches.port(changeID, branchID)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def receiveBranch(self, name, attachToChangeID, changeSet): #WEBMETHOD
self.branches.receive(name, attachToChangeID, changeSet)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
    def branchFrom(self, changeID, name = None): #WEBMETHOD
        self.branches.branchFrom(changeID, name)
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def deleteBranch(self, branchID): #WEBMETHOD
self.branches.get(branchID).delete()
pass
#FIXME locks
#FIXME ACLModelWrite
#FIXME SingleUser
def deleteUnamedBranches(self): #WEBMETHOD
self.branches.deleteUnamed()
pass
# }}}
# {{{ model serialization
#FIXME ACLModelRead
#FIXME ACLModelWrite
#FIXME locks
#FIXME serialization
def serializeModelLifetime(self, clean=False): #WEBMETHOD
"""Serialize out everything about this model, including all state,
changes, change results, branches, and configuration. If clean is
set, assume we're flushing ourselves to disk, require ACLModelWrite
and clear isDirty."""
pass
#FIXME ACLModelRead
#FIXME locks
#FIXME serialization
def serializeModelAt(self, changeID = None, branchID = None): #WEBMETHOD
"""Serialize the state of the model at either a specific change or at
the leaf of a specific branch. At least one of these two must be
specified. Does not include configuration or any history."""
pass
#FIXME ACLModelRead
def serializeBranches(self): #WEBMETHOD
"""Serialize the root and leaf IDs and names of all branches."""
self.branches.serialize()
pass
#FIXME ACLModelRead
#FIXME serialization
def getChangeIDsInBranch(self, branchID): #WEBMETHOD
"""Return all of the changeIDs in the given branch in order, from root
to leaf."""
pass
#FIXME ACLModelRead
#FIXME serialization
def getChangeIDsAfterXInBranch(self, branchID, changeID): #WEBMETHOD
"""Return a list of all changeIDs more recent than the given one in a
given branch. We walk up the branch from the leaf, on the assumption
        that this will normally be used to find changes near the leaf."""
pass
#FIXME ACLModelRead
def serializeChange(self, changeID, getResults = False): #WEBMETHOD
"""Serialize a change in the same form it is submitted to a model. If
results are asked for, serialize the results of the change as well."""
        self.changes.get(changeID).serialize(getResults)
pass
#FIXME ACLModelRead
def serializeChangeResults(self, changeID): #WEBMETHOD
"""Serialize just the results of a change."""
self.changes.get(changeID).getResults()
pass
#FIXME ACLModelRead
#FIXME serialization
def serializeChangesInBranch(self, branchID, getResults = False): #WEBMETHOD
"""Serialize all changes in a branch from root to leaf, optionally
including their results."""
pass
#FIXME ACLModelRead
#FIXME serialization
def serializeChangesInBranchAfterX(self, branchID, changeID, getResults): #WEBMETHOD
"""Serialize all changes in a branch after the specified change,
optionally including their results."""
pass
#FIXME ACLModelRead
#FIXME locks
    def getObjectIDsAt(self, changeID = None, branchID = None, typeID = None): #WEBMETHOD
"""Return all objectIDs at a given change or at the leaf of a given
branch (at least one must be specified). Optionally, restrict the set
to objects of the specified type."""
if changeID is not None:
self.changes.get(changeID).getRelevantObjectIDs(typeID)
else:
if branchID is not None:
self.changes.get(self.branches.get(branchID).getLeafID()).getRelevantObjectIDs(typeID)
else:
                raise ValueError("Either changeID or branchID must be specified")
pass
#FIXME ACLModelRead
def serializeObjectLifetime(self, objectID): #WEBMETHOD
"""Serialize the entire lifetime of the given object."""
        return self.objects.get(objectID).serialize()
#FIXME ACLModelRead
def serializeObjectAt(self, objectID, changeID = None, branchID = None): #WEBMETHOD
"""Serialize the specified object at either the given change or at the
leaf of the given branch."""
        if changeID is not None:
            return self.objects.get(objectID).serialize(changeID)
        elif branchID is not None:
            return self.objects.get(objectID).serialize(self.branches.get(branchID).getLeafID())
        else:
            raise ValueError("either changeID or branchID must be specified")
#FIXME ACLModelRead
#FIXME locks
#FIXME Exceptions
def serializeObjectsByTypeAt(self, typeID, changeID = None, branchID = None): #WEBMETHOD
"""Serialize all objects of a given type at either a specific change
or at the leaf of the specified branch. If objects of all types are
desired, use SerializeModelAt() instead."""
        if changeID is not None:
            return self.objects.serialize(changeID, typeID)
        elif branchID is not None:
            return self.objects.serialize(self.branches.get(branchID).getLeafID(), typeID)
        else:
            raise ValueError("either changeID or branchID must be specified")
#FIXME ACLModelRead
def serializeConfig(self): #WEBMETHOD
"""Serialize the current configuration of the model."""
        return self.config.serialize()
# }}}
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_concurrency import lockutils
from tempest import clients
from tempest import config
from tempest.lib import auth
from tempest.lib.common import dynamic_creds
from tempest.lib.common import preprov_creds
from tempest.lib import exceptions
CONF = config.CONF
"""This module provides factories of credential and credential providers
Credentials providers and clients are (going to be) part of tempest.lib,
and so they may not hold any dependency on tempest configuration.
Methods in this module collect the relevant configuration details and pass
them to credentials providers and clients, so that tests can have easy
access to these features.
Client managers with hard-coded configured credentials are also moved here,
to avoid circular dependencies."""
# === Credential Providers
# Subset of the parameters of credential providers that depend on configuration
def _get_common_provider_params(identity_version):
if identity_version == 'v3':
identity_uri = CONF.identity.uri_v3
elif identity_version == 'v2':
identity_uri = CONF.identity.uri
else:
raise exceptions.InvalidIdentityVersion(
identity_version=identity_version)
return {
'identity_version': identity_version,
'identity_uri': identity_uri,
'credentials_domain': CONF.auth.default_credentials_domain_name,
'admin_role': CONF.identity.admin_role
}
def get_dynamic_provider_params(identity_version, admin_creds=None):
"""Dynamic provider parameters setup from config
    This helper returns a dict of parameters that can be used to initialise
a `DynamicCredentialProvider` according to tempest configuration.
Parameters that are not configuration specific (name, network_resources)
are not returned.
:param identity_version: 'v2' or 'v3'
:param admin_creds: An object of type `auth.Credentials`. If None, it
is built from the configuration file as well.
:return: A dict with the parameters
"""
_common_params = _get_common_provider_params(identity_version)
admin_creds = admin_creds or get_configured_admin_credentials(
fill_in=True, identity_version=identity_version)
if identity_version == 'v3':
endpoint_type = CONF.identity.v3_endpoint_type
elif identity_version == 'v2':
endpoint_type = CONF.identity.v2_admin_endpoint_type
return dict(_common_params, **dict([
('admin_creds', admin_creds),
('identity_admin_domain_scope', CONF.identity.admin_domain_scope),
('identity_admin_role', CONF.identity.admin_role),
('extra_roles', CONF.auth.tempest_roles),
('neutron_available', CONF.service_available.neutron),
('project_network_cidr', CONF.network.project_network_cidr),
('project_network_mask_bits', CONF.network.project_network_mask_bits),
('public_network_id', CONF.network.public_network_id),
('create_networks', (CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network)),
('resource_prefix', 'tempest'),
('identity_admin_endpoint_type', endpoint_type)
]))
def get_preprov_provider_params(identity_version):
"""Pre-provisioned provider parameters setup from config
    This helper returns a dict of parameters that can be used to initialise
a `PreProvisionedCredentialProvider` according to tempest configuration.
Parameters that are not configuration specific (name) are not returned.
:param identity_version: 'v2' or 'v3'
:return: A dict with the parameters
"""
_common_params = _get_common_provider_params(identity_version)
reseller_admin_role = CONF.object_storage.reseller_admin_role
return dict(_common_params, **dict([
('accounts_lock_dir', lockutils.get_lock_path(CONF)),
('test_accounts_file', CONF.auth.test_accounts_file),
('object_storage_operator_role', CONF.object_storage.operator_role),
('object_storage_reseller_admin_role', reseller_admin_role)
]))
def get_credentials_provider(name, network_resources=None,
force_tenant_isolation=False,
identity_version=None):
"""Return the right implementation of CredentialProvider based on config
This helper returns the right implementation of CredentialProvider based on
config and on the value of force_tenant_isolation.
:param name: When provided, it makes it possible to associate credential
artifacts back to the owner (test class).
:param network_resources: Dictionary of network resources to be allocated
for each test account. Only valid for the dynamic
credentials provider.
:param force_tenant_isolation: Always return a `DynamicCredentialProvider`,
regardless of the configuration.
:param identity_version: Use the specified identity API version, regardless
of the configuration. Valid values are 'v2', 'v3'.
"""
# If a test requires a new account to work, it can have it via forcing
# dynamic credentials. A new account will be produced only for that test.
# In case admin credentials are not available for the account creation,
# the test should be skipped else it would fail.
identity_version = identity_version or CONF.identity.auth_version
if CONF.auth.use_dynamic_credentials or force_tenant_isolation:
return dynamic_creds.DynamicCredentialProvider(
name=name,
network_resources=network_resources,
**get_dynamic_provider_params(identity_version))
else:
if CONF.auth.test_accounts_file:
# Most params are not relevant for pre-created accounts
return preprov_creds.PreProvisionedCredentialProvider(
name=name,
**get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
'A valid credential provider is needed')
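# A minimal usage sketch, not part of the upstream module: a test would
# typically request a provider keyed by its class name and then pull primary
# credentials from it. 'ExampleTest' is an arbitrary placeholder name.
def _example_get_primary_creds():
    provider = get_credentials_provider(name='ExampleTest')
    # Both dynamic and pre-provisioned providers implement get_primary_creds().
    return provider.get_primary_creds()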
def is_admin_available(identity_version):
"""Helper to check for admin credentials
Helper function to check if a set of admin credentials is available so we
can do a single call from skip_checks.
This helper depends on identity_version as there may be admin credentials
available for v2 but not for v3.
:param identity_version: 'v2' or 'v3'
"""
is_admin = True
# If dynamic credentials is enabled admin will be available
if CONF.auth.use_dynamic_credentials:
return is_admin
# Check whether test accounts file has the admin specified or not
elif CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
name='check_admin',
**get_preprov_provider_params(identity_version))
if not check_accounts.admin_available():
is_admin = False
else:
try:
get_configured_admin_credentials(fill_in=False,
identity_version=identity_version)
except exceptions.InvalidConfiguration:
is_admin = False
return is_admin
def is_alt_available(identity_version):
"""Helper to check for alt credentials
Helper function to check if a second set of credentials is available (aka
alt credentials) so we can do a single call from skip_checks.
This helper depends on identity_version as there may be alt credentials
available for v2 but not for v3.
:param identity_version: 'v2' or 'v3'
"""
# If dynamic credentials is enabled alt will be available
if CONF.auth.use_dynamic_credentials:
return True
    # Check whether the test accounts file provides a second (alt) set of users
if CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
name='check_alt',
**get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
'A valid credential provider is needed')
try:
if not check_accounts.is_multi_user():
return False
else:
return True
except exceptions.InvalidConfiguration:
return False
# === Credentials
# Type of credentials available from configuration
CREDENTIAL_TYPES = {
'identity_admin': ('auth', 'admin'),
'user': ('identity', None),
'alt_user': ('identity', 'alt')
}
def get_configured_admin_credentials(fill_in=True, identity_version=None):
"""Get admin credentials from the config file
Read credentials from configuration, builds a Credentials object based on
the specified or configured version
:param fill_in: If True, a request to the Token API is submitted, and the
credential object is filled in with all names and IDs from
the token API response.
:param identity_version: The identity version to talk to and the type of
credentials object to be created. 'v2' or 'v3'.
:returns: An object of a sub-type of `auth.Credentials`
"""
identity_version = identity_version or CONF.identity.auth_version
if identity_version not in ('v2', 'v3'):
raise exceptions.InvalidConfiguration(
'Unsupported auth version: %s' % identity_version)
conf_attributes = ['username', 'password',
'project_name']
if identity_version == 'v3':
conf_attributes.append('domain_name')
# Read the parts of credentials from config
params = config.service_client_config()
for attr in conf_attributes:
params[attr] = getattr(CONF.auth, 'admin_' + attr)
# Build and validate credentials. We are reading configured credentials,
# so validate them even if fill_in is False
credentials = get_credentials(fill_in=fill_in,
identity_version=identity_version, **params)
if not fill_in:
if not credentials.is_valid():
msg = ("The admin credentials are incorrectly set in the config "
"file for identity version %s. Double check that all "
"required values are assigned.")
raise exceptions.InvalidConfiguration(msg % identity_version)
return credentials
def get_credentials(fill_in=True, identity_version=None, **kwargs):
"""Get credentials from dict based on config
Wrapper around auth.get_credentials to use the configured identity version
if none is specified.
:param fill_in: If True, a request to the Token API is submitted, and the
credential object is filled in with all names and IDs from
the token API response.
:param identity_version: The identity version to talk to and the type of
credentials object to be created. 'v2' or 'v3'.
:param kwargs: Attributes to be used to build the Credentials object.
:returns: An object of a sub-type of `auth.Credentials`
"""
params = dict(config.service_client_config(), **kwargs)
identity_version = identity_version or CONF.identity.auth_version
# In case of "v3" add the domain from config if not specified
    # To honour the "default_credentials_domain_name", if no domain
    # field is specified at all, add it to the credentials dict.
if identity_version == 'v3':
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
if 'domain' in x)
if not domain_fields.intersection(kwargs.keys()):
domain_name = CONF.auth.default_credentials_domain_name
# NOTE(andreaf) Setting domain_name implicitly sets user and
# project domain names, if they are None
params['domain_name'] = domain_name
auth_url = CONF.identity.uri_v3
else:
auth_url = CONF.identity.uri
return auth.get_credentials(auth_url,
fill_in=fill_in,
identity_version=identity_version,
**params)
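# A minimal usage sketch, not part of the upstream module: build a Credentials
# object from explicit placeholder values without contacting the Token API
# (fill_in=False skips the token request).
def _example_build_credentials():
    return get_credentials(fill_in=False,
                           username='demo',
                           password='secretpass',
                           project_name='demo')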
# === Credential / client managers
class AdminManager(clients.Manager):
"""Manager that uses admin credentials for its managed client objects"""
def __init__(self):
super(AdminManager, self).__init__(
credentials=get_configured_admin_credentials())
|
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
==========================
fMRI: SPM nested workflows
==========================
The fmri_spm_nested.py script integrates several interfaces to perform a first
and second level analysis on a two-subject data set. The tutorial can
be found in the examples folder. Run the tutorial from inside the
nipype tutorial directory::
python fmri_spm_nested.py
Import necessary modules from nipype."""
from __future__ import print_function
from builtins import range
import os.path as op # system functions
from nipype.interfaces import io as nio # Data i/o
from nipype.interfaces import spm as spm # spm
# from nipype.interfaces import matlab as mlab # how to run matlab
from nipype.interfaces import fsl as fsl # fsl
from nipype.interfaces import utility as niu # utility
from nipype.pipeline import engine as pe # pypeline engine
from nipype.algorithms import rapidart as ra # artifact detection
from nipype.algorithms import modelgen as model # model specification
"""
Preliminaries
-------------
Set any package specific configuration. The output file format
for FSL routines is being set to uncompressed NIFTI and a specific
version of matlab is being used. The uncompressed format is required
because SPM does not handle compressed NIFTI.
"""
# Tell fsl to generate all output in uncompressed nifti format
fsl.FSLCommand.set_default_output_type('NIFTI')
# Set the way matlab should be called
# mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
# mlab.MatlabCommand.set_default_paths('/software/spm8')
"""
Setting up workflows
--------------------
In this tutorial we will be setting up a hierarchical workflow for spm
analysis. This will demonstrate how pre-defined workflows can be setup
and shared across users, projects and labs.
Example of how to inline functions in connect()
-----------------------------------------------
"""
def _template_path(in_data):
import os.path as op
return op.abspath(op.join(in_data, 'nipype-tutorial/data/T1.nii'))
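# The helper above is meant to be passed inline in a connect() call; the
# preproc workflow below does exactly that. Shown here only as a reference
# sketch (commented out, since preproc/inputnode/normalize are defined later):
#
#   preproc.connect([(inputnode, normalize,
#                     [(('in_data', _template_path), 'template')])])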
"""
Set-up preprocessing workflow
-----------------------------
This is a generic preprocessing workflow that can be used by different analyses
"""
preproc = pe.Workflow(name='preproc')
"""
A node called :code:`inputnode` is set to designate the path in which input data
are located:
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_data']), name='inputnode')
"""Use :class:`nipype.interfaces.spm.Realign` for motion correction
and register all images to the mean image.
"""
realign = pe.Node(spm.Realign(), name="realign")
realign.inputs.register_to_mean = True
"""Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
art = pe.Node(ra.ArtifactDetect(), name="art")
art.inputs.use_differences = [True, False]
art.inputs.use_norm = True
art.inputs.norm_threshold = 1
art.inputs.zintensity_threshold = 3
art.inputs.mask_type = 'file'
art.inputs.parameter_source = 'SPM'
"""Skull strip structural images using
:class:`nipype.interfaces.fsl.BET`.
"""
skullstrip = pe.Node(fsl.BET(), name="skullstrip")
skullstrip.inputs.mask = True
"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid
body registration of the functional data to the structural data.
"""
coregister = pe.Node(spm.Coregister(), name="coregister")
coregister.inputs.jobtype = 'estimate'
"""Warp functional and structural data to SPM's T1 template using
:class:`nipype.interfaces.spm.Normalize`. The tutorial data set
includes the template image, T1.nii.
"""
normalize = pe.Node(spm.Normalize(), name="normalize")
"""Smooth the functional data using
:class:`nipype.interfaces.spm.Smooth`.
"""
smooth = pe.Node(spm.Smooth(), name="smooth")
fwhmlist = [4]
smooth.iterables = ('fwhm', fwhmlist)
preproc.connect([(inputnode, normalize, [(('in_data', _template_path), 'template')]),
(realign, coregister, [('mean_image', 'source'),
('realigned_files', 'apply_to_files')]),
(coregister, normalize, [('coregistered_files', 'apply_to_files')]),
(normalize, smooth, [('normalized_files', 'in_files')]),
(normalize, skullstrip, [('normalized_source', 'in_file')]),
(realign, art, [('realignment_parameters', 'realignment_parameters')]),
(normalize, art, [('normalized_files', 'realigned_files')]),
(skullstrip, art, [('mask_file', 'mask_file')]),
])
"""
Set up analysis workflow
------------------------
"""
l1analysis = pe.Workflow(name='analysis')
"""Generate SPM-specific design information using
:class:`nipype.interfaces.spm.SpecifyModel`.
"""
modelspec = pe.Node(model.SpecifySPMModel(), name="modelspec")
modelspec.inputs.concatenate_runs = True
"""Generate a first level SPM.mat file for analysis
:class:`nipype.interfaces.spm.Level1Design`.
"""
level1design = pe.Node(spm.Level1Design(), name="level1design")
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the
parameters of the model.
"""
level1estimate = pe.Node(spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical': 1}
"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the
first level contrasts specified in a few steps above.
"""
contrastestimate = pe.Node(spm.EstimateContrast(), name="contrastestimate")
"""Use :class:`nipype.interfaces.utility.Select` to select each contrast for
reporting.
"""
selectcontrast = pe.Node(niu.Select(), name="selectcontrast")
"""Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of
the contrast estimate and a background image into one volume.
"""
overlaystats = pe.Node(fsl.Overlay(), name="overlaystats")
overlaystats.inputs.stat_thresh = (3, 10)
overlaystats.inputs.show_negative_stats = True
overlaystats.inputs.auto_thresh_bg = True
"""Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid
statistical volumes for a report of the first-level results.
"""
slicestats = pe.Node(fsl.Slicer(), name="slicestats")
slicestats.inputs.all_axial = True
slicestats.inputs.image_width = 750
l1analysis.connect([(modelspec, level1design, [('session_info', 'session_info')]),
(level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
(level1estimate, contrastestimate, [('spm_mat_file', 'spm_mat_file'),
('beta_images', 'beta_images'),
('residual_image', 'residual_image')]),
(contrastestimate, selectcontrast, [('spmT_images', 'inlist')]),
(selectcontrast, overlaystats, [('out', 'stat_image')]),
(overlaystats, slicestats, [('out_file', 'in_file')])
])
"""
Preproc + Analysis pipeline
---------------------------
"""
l1pipeline = pe.Workflow(name='firstlevel')
l1pipeline.connect([(preproc, l1analysis, [('realign.realignment_parameters',
'modelspec.realignment_parameters'),
('smooth.smoothed_files',
'modelspec.functional_runs'),
('art.outlier_files',
'modelspec.outlier_files'),
('skullstrip.mask_file',
'level1design.mask_image'),
('normalize.normalized_source',
'overlaystats.background_image')]),
])
"""
Data specific components
------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes (f3.nii, f5.nii, f7.nii, f10.nii) and
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the subject directories
subject_list = ['s1', 's3']
# Map field names to individual subject runs.
info = dict(func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
struct=[['subject_id', 'struct']])
infosource = pe.Node(niu.IdentityInterface(fields=['subject_id']), name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.NodeWrapper` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name='datasource')
datasource.inputs.template = 'nipype-tutorial/data/%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Experimental paradigm specific components
-----------------------------------------
Here we create a function that returns subject-specific information
about the experimental paradigm. This is used by the
:class:`nipype.interfaces.spm.SpecifyModel` to create the information
necessary to generate an SPM design matrix. In this tutorial, the same
paradigm was used for every participant.
"""
def subjectinfo(subject_id):
from nipype.interfaces.base import Bunch
from copy import deepcopy
print("Subject ID: %s\n" % str(subject_id))
output = []
names = ['Task-Odd', 'Task-Even']
for r in range(4):
onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))]
output.insert(r,
Bunch(conditions=names,
onsets=deepcopy(onsets),
durations=[[15] for s in names],
amplitudes=None,
tmod=None,
pmod=None,
regressor_names=None,
regressors=None))
return output
"""Setup the contrast structure that needs to be evaluated. This is a
list of lists. The inner list specifies the contrasts and has the
following format - [Name, Stat, [list of condition names], [weights on
those conditions]]. The condition names must match the `names` listed
in the `subjectinfo` function described above.
"""
cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5])
cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1])
contrasts = [cont1, cont2]
# set up node specific inputs
modelspecref = l1pipeline.inputs.analysis.modelspec
modelspecref.input_units = 'secs'
modelspecref.output_units = 'secs'
modelspecref.time_repetition = 3.
modelspecref.high_pass_filter_cutoff = 120
l1designref = l1pipeline.inputs.analysis.level1design
l1designref.timing_units = modelspecref.output_units
l1designref.interscan_interval = modelspecref.time_repetition
l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts
# Iterate over each contrast and create report images.
selectcontrast.iterables = ('index', [[i] for i in range(len(contrasts))])
"""
Setup the pipeline
------------------
The nodes created above do not describe the flow of data. They merely
describe the parameters used for each function. In this section we
setup the connections between the nodes such that appropriate outputs
from nodes are piped into appropriate inputs of other nodes.
Use the :class:`nipype.pipeline.engine.Pipeline` to create a
graph-based execution pipeline for first level analysis. The config
options tells the pipeline engine to use `workdir` as the disk
location to use when running the processes and keeping their
outputs. The `use_parameterized_dirs` tells the engine to create
sub-directories under `workdir` corresponding to the iterables in the
pipeline. Thus for this pipeline there will be subject specific
sub-directories.
The ``nipype.pipeline.engine.Pipeline.connect`` function creates the
links between the processes, i.e., how data should flow in and out of
the processing nodes.
"""
level1 = pe.Workflow(name="level1")
level1.base_dir = op.abspath('spm_tutorial2/workingdir')
level1.connect([(inputnode, datasource, [('in_data', 'base_directory')]),
(infosource, datasource, [('subject_id', 'subject_id')]),
(datasource, l1pipeline, [('func', 'preproc.realign.in_files'),
('struct', 'preproc.coregister.target'),
('struct', 'preproc.normalize.source')]),
(infosource, l1pipeline, [(('subject_id', subjectinfo),
'analysis.modelspec.subject_info')]),
])
"""
Setup storage results
---------------------
Use :class:`nipype.interfaces.io.DataSink` to store selected outputs
from the pipeline in a specific location. This allows the user to
selectively choose important output bits from the analysis and keep
them.
The first step is to create a datasink node and then to connect
outputs from the modules above to storage locations. These take the
following form directory_name[.[@]subdir] where parts between [] are
optional. For example 'realign.@mean' below creates a directory called
realign in 'l1output/subject_id/' and stores the mean image output
from the Realign process in the realign directory. If the @ is left
out, then a sub-directory with the name 'mean' would be created and
the mean image would be copied to that directory.
"""
datasink = pe.Node(nio.DataSink(), name="datasink")
datasink.inputs.base_directory = op.abspath('spm_tutorial2/l1output')
report = pe.Node(nio.DataSink(), name='report')
report.inputs.base_directory = op.abspath('spm_tutorial2/report')
report.inputs.parameterization = False
def getstripdir(subject_id):
import os.path as op
return op.join(op.abspath('spm_tutorial2/workingdir'), '_subject_id_%s' % subject_id)
# store relevant outputs from various stages of the 1st level analysis
level1.connect([(infosource, datasink, [('subject_id', 'container'),
(('subject_id', getstripdir), 'strip_dir')]),
(l1pipeline, datasink, [('analysis.contrastestimate.con_images', 'contrasts.@con'),
('analysis.contrastestimate.spmT_images', 'contrasts.@T')]),
(infosource, report, [('subject_id', 'container'),
(('subject_id', getstripdir), 'strip_dir')]),
(l1pipeline, report, [('analysis.slicestats.out_file', '@report')]),
])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures
with appropriate parameters and the connectivity between the
processes, but does not generate any output. To actually run the
analysis on the data the ``nipype.pipeline.engine.Pipeline.Run``
function needs to be called.
"""
if __name__ == '__main__':
level1.run('MultiProc')
level1.write_graph()
"""
Setup level 2 pipeline
----------------------
Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast
images across a group of first level subjects. Unlike the previous
pipeline that iterated over subjects, this pipeline will iterate over
contrasts.
"""
# collect all the con images for each contrast.
contrast_ids = list(range(1, len(contrasts) + 1))
l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source")
# we use .*i* to capture both .img (SPM8) and .nii (SPM12)
l2source.inputs.template = op.abspath('spm_tutorial2/l1output/*/con*/*/_fwhm_%d/con_%04d.*i*')
# iterate over all contrast images
l2source.iterables = [('fwhm', fwhmlist),
('con', contrast_ids)]
l2source.inputs.sort_filelist = True
"""Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a
simple statistical analysis of the contrasts from the group of
subjects (n=2 in this example).
"""
# setup a 1-sample t-test node
onesamplettestdes = pe.Node(spm.OneSampleTTestDesign(), name="onesampttestdes")
l2estimate = pe.Node(spm.EstimateModel(), name="level2estimate")
l2estimate.inputs.estimation_method = {'Classical': 1}
l2conestimate = pe.Node(spm.EstimateContrast(), name="level2conestimate")
cont1 = ('Group', 'T', ['mean'], [1])
l2conestimate.inputs.contrasts = [cont1]
l2conestimate.inputs.group_contrast = True
"""As before, we setup a pipeline to connect these two nodes (l2source
-> onesamplettest).
"""
l2pipeline = pe.Workflow(name="level2")
l2pipeline.base_dir = op.abspath('spm_tutorial2/l2output')
l2pipeline.connect([(l2source, onesamplettestdes, [('outfiles', 'in_files')]),
(onesamplettestdes, l2estimate, [('spm_mat_file', 'spm_mat_file')]),
(l2estimate, l2conestimate, [('spm_mat_file', 'spm_mat_file'),
('beta_images', 'beta_images'),
('residual_image', 'residual_image')]),
])
"""
Execute the second level pipeline
---------------------------------
"""
if __name__ == '__main__':
l2pipeline.run('MultiProc')
|
|
import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability, DEPLOYMENT_IMAGE_INFO
@pytest.fixture()
def task_vars():
return dict(
openshift_is_atomic=False,
openshift_is_containerized=False,
openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
)
@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
("openshift-enterprise", False, [], False),
("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True),
("openshift-enterprise", False, ["oo_etcd_to_config"], False),
("origin", True, ["nfs"], False),
("openshift-enterprise", True, ["lb"], False),
])
def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active):
task_vars['openshift_deployment_type'] = deployment_type
task_vars['openshift_is_containerized'] = openshift_is_containerized
task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [
(True, True),
(False, False),
(True, False),
(False, True),
])
def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
assert module_args['name']
return {
'images': [module_args['name']],
}
task_vars['openshift_is_containerized'] = openshift_is_containerized
task_vars['openshift_is_atomic'] = openshift_is_atomic
result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@pytest.mark.parametrize("available_locally", [
False,
True,
])
def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
return {}
task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"]
task_vars['openshift_image_tag'] = 'v3.4'
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
result = check.run()
assert not result.get('failed', False)
def test_all_images_unavailable(task_vars):
def execute_module(module_name=None, *args):
if module_name == "wait_for":
return {}
elif module_name == "command":
return {'failed': True}
return {} # docker_image_facts failure
task_vars['openshift_docker_additional_registries'] = ["docker.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
task_vars['openshift_image_tag'] = 'latest'
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
actual = check.run()
assert actual['failed']
assert "required container images are not available" in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
(
"docker image update failure",
["docker image update failure"],
),
(
"No package matching 'skopeo' found available, installed or updated",
["dependencies can be installed via `yum`"]
),
])
def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
}
return {}
task_vars['openshift_docker_additional_registries'] = ["unknown.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
@pytest.mark.parametrize(
"image, registries, connection_test_failed, skopeo_failed, "
"expect_success, expect_registries_reached", [
(
"spam/eggs:v1", ["test.reg"],
True, True,
False,
{"test.reg": False, "docker.io": False},
),
(
"spam/eggs:v1", ["test.reg"],
False, True,
False,
{"test.reg": True, "docker.io": True},
),
(
"eggs.reg/spam/eggs:v1", ["test.reg"],
False, False,
True,
{"eggs.reg": True},
),
])
def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
if module_name == "wait_for":
return dict(msg="msg", failed=connection_test_failed)
elif module_name == "command":
return dict(msg="msg", failed=skopeo_failed)
tv = task_vars()
tv.update({"openshift_docker_additional_registries": registries})
check = DockerImageAvailability(execute_module, tv)
check._module_retry_interval = 0
available = check.is_available_skopeo_image(image)
assert available == expect_success
assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
"origin", False, ['oo_nodes_to_config'], "",
set([
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
'openshift/origin-docker-registry:vtest',
'openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes:latest', # origin version of registry-console
])
),
( # set a different URL for images
"origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}',
set([
'foo.io/openshift/origin-pod:vtest',
'foo.io/openshift/origin-deployer:vtest',
'foo.io/openshift/origin-docker-registry:vtest',
'foo.io/openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes:latest', # AFAICS this is not built from the URL
])
),
(
"origin", True, ['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], "",
set([
# images running on top of openshift
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
'openshift/origin-docker-registry:vtest',
'openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes:latest',
# containerized component images
'openshift/origin:vtest',
'openshift/node:vtest',
'openshift/openvswitch:vtest',
'registry.access.redhat.com/rhel7/etcd',
])
),
( # enterprise images
"openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'foo.io/openshift3/ose-pod:f13ac45',
'foo.io/openshift3/ose-deployer:f13ac45',
'foo.io/openshift3/ose-docker-registry:f13ac45',
'foo.io/openshift3/ose-haproxy-router:f13ac45',
# registry-console is not constructed/versioned the same as the others.
'openshift3/registry-console:vtest',
# containerized images aren't built from oreg_url
'openshift3/node:vtest',
'openshift3/openvswitch:vtest',
])
),
(
"openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'registry.access.redhat.com/rhel7/etcd',
# lb does not yet come in a containerized version
])
),
])
def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected):
task_vars = dict(
openshift_is_containerized=openshift_is_containerized,
openshift_is_atomic=False,
openshift_deployment_type=deployment_type,
group_names=groups,
oreg_url=oreg_url,
openshift_image_tag='vtest',
)
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
@pytest.mark.parametrize("task_vars, expected", [
(
dict(
openshift_deployment_type="origin",
openshift_image_tag="vtest",
),
"cockpit/kubernetes:latest",
), (
dict(
openshift_deployment_type="openshift-enterprise",
openshift_image_tag="vtest",
),
"openshift3/registry-console:vtest",
), (
dict(
openshift_deployment_type="openshift-enterprise",
openshift_image_tag="v3.7.0-alpha.0",
openshift_cockpit_deployer_prefix="registry.example.com/spam/",
),
"registry.example.com/spam/registry-console:v3.7",
), (
dict(
openshift_deployment_type="origin",
openshift_image_tag="v3.7.0-alpha.0",
openshift_cockpit_deployer_prefix="registry.example.com/eggs/",
openshift_cockpit_deployer_version="spam",
),
"registry.example.com/eggs/kubernetes:spam",
),
])
def test_registry_console_image(task_vars, expected):
info = DEPLOYMENT_IMAGE_INFO[task_vars["openshift_deployment_type"]]
tag = task_vars["openshift_image_tag"]
assert expected == DockerImageAvailability(task_vars=task_vars)._registry_console_image(tag, info)
@pytest.mark.parametrize("task_vars, expected", [
(
dict(
group_names=['oo_nodes_to_config'],
osn_ovs_image='spam/ovs',
openshift_image_tag="veggs",
),
set([
'spam/ovs', 'openshift/node:veggs', 'cockpit/kubernetes:latest',
'openshift/origin-haproxy-router:veggs', 'openshift/origin-deployer:veggs',
'openshift/origin-docker-registry:veggs', 'openshift/origin-pod:veggs',
]),
), (
dict(
group_names=['oo_masters_to_config'],
),
set(['openshift/origin:latest']),
), (
dict(
group_names=['oo_etcd_to_config'],
),
set(['registry.access.redhat.com/rhel7/etcd']),
), (
dict(
group_names=['oo_etcd_to_config'],
osm_etcd_image='spam/etcd',
),
set(['spam/etcd']),
),
])
def test_containerized(task_vars, expected):
task_vars.update(dict(
openshift_is_containerized=True,
openshift_deployment_type="origin",
))
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
|
|
from collections.abc import Iterable
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
logger = logger.bind(name='from_imdb')
class FromIMDB:
"""
This plugin enables generating entries based on an entity, an entity being a person, character or company.
    It's based on IMDBpy, which is required (pip install imdbpy). The basic config requires just an IMDB ID of the
required entity.
For example:
from_imdb: ch0001354
Schema description:
Other than ID, all other properties are meant to filter the full list that the entity generates.
id: string that relates to a supported entity type. For example: 'nm0000375'. Required.
job_types: a string or list with job types from job_types. Default is 'actor'.
content_types: A string or list with content types from content_types. Default is 'movie'.
    max_entries: The maximum number of entries that can be returned. This value's purpose is basically flood protection
        against unruly configurations that will return too many results. Default is 200.
    match_type: 'strict' verifies each returned item's kind against the requested content_types (one extra lookup
        per item); 'loose' accepts all items. Default is 'strict'.
Advanced config example:
dynamic_movie_queue:
from_imdb:
id: co0051941
job_types:
- actor
- director
content_types: tv series
accept_all: yes
movie_queue: add
"""
job_types = [
'actor',
'actress',
'director',
'producer',
'writer',
'self',
'editor',
'miscellaneous',
'editorial department',
'cinematographer',
'visual effects',
'thanks',
'music department',
'in development',
'archive footage',
'soundtrack',
]
content_types = [
'movie',
'tv series',
'tv mini series',
'video game',
'video movie',
'tv movie',
'episode',
]
content_type_conversion = {
'movie': 'movie',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video game',
}
character_content_type_conversion = {
'movie': 'feature',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video-game',
}
jobs_without_content_type = ['actor', 'actress', 'self', 'in development', 'archive footage']
imdb_pattern = one_or_more(
{
'type': 'string',
'pattern': r'(nm|co|ch)\d{7,8}',
'error_pattern': 'Get the id from the url of the person/company you want to use,'
' e.g. http://imdb.com/text/<id here>/blah',
},
unique_items=True,
)
schema = {
'oneOf': [
imdb_pattern,
{
'type': 'object',
'properties': {
'id': imdb_pattern,
'job_types': one_or_more(
{'type': 'string', 'enum': job_types}, unique_items=True
),
'content_types': one_or_more(
{'type': 'string', 'enum': content_types}, unique_items=True
),
'max_entries': {'type': 'integer'},
'match_type': {'type': 'string', 'enum': ['strict', 'loose']},
},
'required': ['id'],
'additionalProperties': False,
},
]
}
def prepare_config(self, config):
"""
Converts config to dict form and sets defaults if needed
"""
if isinstance(config, str):
config = {'id': [config]}
elif isinstance(config, list):
config = {'id': config}
if isinstance(config, dict) and not isinstance(config['id'], list):
config['id'] = [config['id']]
config.setdefault('content_types', [self.content_types[0]])
config.setdefault('job_types', [self.job_types[0]])
config.setdefault('max_entries', 200)
config.setdefault('match_type', 'strict')
if isinstance(config.get('content_types'), str):
logger.debug('Converted content type from string to list.')
config['content_types'] = [config['content_types']]
if isinstance(config['job_types'], str):
logger.debug('Converted job type from string to list.')
config['job_types'] = [config['job_types']]
# Special case in case user meant to add actress instead of actor (different job types in IMDB)
if 'actor' in config['job_types'] and 'actress' not in config['job_types']:
config['job_types'].append('actress')
return config
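    # For example (sketch, not executed): prepare_config('nm0000375') yields
    # {'id': ['nm0000375'], 'job_types': ['actor', 'actress'],
    #  'content_types': ['movie'], 'max_entries': 200, 'match_type': 'strict'}.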
def get_items(self, config):
items = []
for id in config['id']:
try:
entity_type, entity_object = self.get_entity_type_and_object(id)
except Exception as e:
logger.error(
'Could not resolve entity via ID: {}. '
'Either error in config or unsupported entity. Error:{}',
id,
e,
)
continue
items += self.get_items_by_entity(
entity_type,
entity_object,
config.get('content_types'),
config.get('job_types'),
config.get('match_type'),
)
return set(items)
def get_entity_type_and_object(self, imdb_id):
"""
Return a tuple of entity type and entity object
:param imdb_id: string which contains IMDB id
:return: entity type, entity object (person, company, etc.)
"""
if imdb_id.startswith('nm'):
person = self.ia.get_person(imdb_id[2:])
logger.info('Starting to retrieve items for person: {}', person)
return 'Person', person
elif imdb_id.startswith('co'):
company = self.ia.get_company(imdb_id[2:])
logger.info('Starting to retrieve items for company: {}', company)
return 'Company', company
elif imdb_id.startswith('ch'):
character = self.ia.get_character(imdb_id[2:])
logger.info('Starting to retrieve items for Character: {}', character)
return 'Character', character
def get_items_by_entity(
self, entity_type, entity_object, content_types, job_types, match_type
):
"""
Gets entity object and return movie list using relevant method
"""
if entity_type == 'Company':
return self.items_by_company(entity_object)
if entity_type == 'Character':
return self.items_by_character(entity_object, content_types, match_type)
elif entity_type == 'Person':
return self.items_by_person(entity_object, job_types, content_types, match_type)
def flatten_list(self, _list):
"""
Gets a list of lists and returns a flat list
"""
for el in _list:
if isinstance(el, Iterable) and not isinstance(el, str):
for sub in self.flatten_list(el):
yield sub
else:
yield el
def flat_list(self, non_flat_list, remove_none=False):
flat_list = self.flatten_list(non_flat_list)
if remove_none:
flat_list = [_f for _f in flat_list if _f]
return flat_list
def filtered_items(self, unfiltered_items, content_types, match_type):
items = []
unfiltered_items = set(unfiltered_items)
for item in sorted(unfiltered_items):
if match_type == 'strict':
logger.debug(
'Match type is strict, verifying item type to requested content types'
)
self.ia.update(item)
if item['kind'] in content_types:
logger.verbose(
'Adding item "{}" to list. Item kind is "{}"', item, item['kind']
)
items.append(item)
else:
                    logger.verbose('Rejecting item "{}". Item kind is "{}"', item, item['kind'])
else:
logger.debug('Match type is loose, all items are being added')
items.append(item)
return items
def items_by_person(self, person, job_types, content_types, match_type):
"""
Return item list for a person object
"""
unfiltered_items = self.flat_list(
[self.items_by_job_type(person, job_type, content_types) for job_type in job_types],
remove_none=True,
)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_content_type(self, person, job_type, content_type):
return [
_f
for _f in (person.get(job_type + ' ' + self.content_type_conversion[content_type], []))
if _f
]
def items_by_job_type(self, person, job_type, content_types):
items = (
person.get(job_type, [])
if job_type in self.jobs_without_content_type
else [
person.get(job_type + ' ' + 'documentary', [])
                + person.get(job_type + ' ' + 'short', [])
                + self.items_by_content_type(person, job_type, content_type)
if content_type == 'movie'
else self.items_by_content_type(person, job_type, content_type)
for content_type in content_types
]
)
return [_f for _f in items if _f]
def items_by_character(self, character, content_types, match_type):
"""
Return items list for a character object
:param character: character object
:param content_types: content types as defined in config
:return:
"""
unfiltered_items = self.flat_list(
[
character.get(self.character_content_type_conversion[content_type])
for content_type in content_types
],
remove_none=True,
)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_company(self, company):
"""
Return items list for a company object
:param company: company object
:return: company items list
"""
return company.get('production companies')
@cached('from_imdb', persist='2 hours')
def on_task_input(self, task, config):
try:
from imdb import IMDb
self.ia = IMDb()
except ImportError:
logger.error(
'IMDBPY is required for this plugin. Please install using "pip install imdbpy"'
)
return
entries = []
config = self.prepare_config(config)
items = self.get_items(config)
if not items:
logger.error('Could not get IMDB item list, check your configuration.')
return
for item in items:
entry = Entry(
title=item['title'],
imdb_id='tt' + self.ia.get_imdbID(item),
url='',
imdb_url=self.ia.get_imdbURL(item),
)
if entry.isvalid():
if entry not in entries:
entries.append(entry)
if entry and task.options.test:
logger.info("Test mode. Entry includes:")
for key, value in list(entry.items()):
logger.info(' {}: {}', key.capitalize(), value)
else:
logger.error('Invalid entry created? {}', entry)
if len(entries) <= config.get('max_entries'):
return entries
else:
logger.warning(
'Number of entries ({}) exceeds maximum allowed value {}. '
'Edit your filters or raise the maximum value by entering a higher "max_entries"',
len(entries),
config.get('max_entries'),
)
return
@event('plugin.register')
def register_plugin():
plugin.register(FromIMDB, 'from_imdb', api_ver=2)
|
|
import unittest
import numpy as np
from pgmpy.factors import Factor
from pgmpy.models import FactorGraph
from pgmpy.models import MarkovModel
from pgmpy.models import JunctionTree
from pgmpy.tests import help_functions as hf
from pgmpy.exceptions import CardinalityError
from pgmpy.extern.six.moves import range
class TestFactorGraphCreation(unittest.TestCase):
def setUp(self):
self.graph = FactorGraph()
def test_class_init_without_data(self):
self.assertIsInstance(self.graph, FactorGraph)
def test_class_init_data_string(self):
self.graph = FactorGraph([('a', 'phi1'), ('b', 'phi1')])
self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'phi1'])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'phi1'], ['b', 'phi1']])
def test_add_single_node(self):
self.graph.add_node('phi1')
self.assertEqual(self.graph.nodes(), ['phi1'])
def test_add_multiple_nodes(self):
self.graph.add_nodes_from(['a', 'b', 'phi1'])
self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'phi1'])
def test_add_single_edge(self):
self.graph.add_edge('a', 'phi1')
self.assertListEqual(sorted(self.graph.nodes()), ['a', 'phi1'])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'phi1']])
def test_add_multiple_edges(self):
self.graph.add_edges_from([('a', 'phi1'), ('b', 'phi1')])
self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'phi1'])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'phi1'], ['b', 'phi1']])
def test_add_self_loop_raises_error(self):
self.assertRaises(ValueError, self.graph.add_edge, 'a', 'a')
def tearDown(self):
del self.graph
class TestFactorGraphFactorOperations(unittest.TestCase):
def setUp(self):
self.graph = FactorGraph()
def test_add_single_factor(self):
self.graph.add_edges_from([('a', 'phi1'), ('b', 'phi1')])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1)
self.assertListEqual(self.graph.get_factors(), [phi1])
def test_add_multiple_factors(self):
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.assertEqual(self.graph.get_factors(node=phi1), phi1)
self.assertEqual(self.graph.get_factors(node=phi2), phi2)
def test_remove_factors(self):
self.graph.add_edges_from([('a', 'phi1'), ('b', 'phi1'),
('b', 'phi2'), ('c', 'phi2')])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1, phi2)
self.graph.remove_factors(phi1)
self.assertListEqual(self.graph.get_factors(), [phi2])
def test_get_partition_function(self):
phi1 = Factor(['a', 'b'], [2, 2], range(4))
phi2 = Factor(['b', 'c'], [2, 2], range(4))
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.assertEqual(self.graph.get_partition_function(), 22.0)
def tearDown(self):
del self.graph
class TestFactorGraphMethods(unittest.TestCase):
def setUp(self):
self.graph = FactorGraph()
def test_get_cardinality(self):
self.graph.add_edges_from([('a', 'phi1'), ('b', 'phi1'),
('c', 'phi2'), ('d', 'phi2'),
('a', 'phi3'), ('d', 'phi3')])
self.assertDictEqual(self.graph.get_cardinality(), {})
phi1 = Factor(['a', 'b'], [1, 2], np.random.rand(2))
self.graph.add_factors(phi1)
self.assertDictEqual(self.graph.get_cardinality(), {'a': 1, 'b': 2})
self.graph.remove_factors(phi1)
self.assertDictEqual(self.graph.get_cardinality(), {})
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['c', 'd'], [1, 2], np.random.rand(2))
self.graph.add_factors(phi1, phi2)
self.assertDictEqual(self.graph.get_cardinality(), {'d': 2, 'a': 2, 'b': 2, 'c': 1})
phi3 = Factor(['d', 'a'], [1, 2], np.random.rand(2))
self.graph.add_factors(phi3)
self.assertDictEqual(self.graph.get_cardinality(), {'d': 1, 'c': 1, 'b': 2, 'a': 2})
self.graph.remove_factors(phi1, phi2, phi3)
self.assertDictEqual(self.graph.get_cardinality(), {})
# def test_get_factor_nodes(self):
# phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
# phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
# self.graph.add_edges_from([('a', phi1), ('b', phi1),
# ('b', phi2), ('c', phi2)])
# self.graph.add_factors(phi1, phi2)
# self.assertListEqual(sorted(self.graph.get_factor_nodes()),
# ([phi1, phi2]))
def test_get_variable_nodes(self):
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.assertListEqual(sorted(self.graph.get_variable_nodes()),
['a', 'b', 'c'])
def test_get_variable_nodes_raises_error(self):
self.graph.add_edges_from([('a', 'phi1'), ('b', 'phi1'),
('b', 'phi2'), ('c', 'phi2')])
self.assertRaises(ValueError, self.graph.get_variable_nodes)
def test_to_markov_model(self):
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
mm = self.graph.to_markov_model()
self.assertIsInstance(mm, MarkovModel)
self.assertListEqual(sorted(mm.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(mm.edges()),
[['a', 'b'], ['b', 'c']])
self.assertListEqual(sorted(mm.get_factors(),
key=lambda x: x.scope()), [phi1, phi2])
def test_to_junction_tree(self):
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
jt = self.graph.to_junction_tree()
self.assertIsInstance(jt, JunctionTree)
self.assertListEqual(hf.recursive_sorted(jt.nodes()),
[['a', 'b'], ['b', 'c']])
self.assertEqual(len(jt.edges()), 1)
def test_check_model(self):
self.graph.add_nodes_from(['a', 'b', 'c'])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.assertTrue(self.graph.check_model())
self.graph.remove_factors(phi1)
self.graph.remove_node(phi1)
phi1 = Factor(['a', 'b'], [4, 2], np.random.rand(8))
self.graph.add_factors(phi1)
self.graph.add_edges_from([('a', phi1)])
self.assertTrue(self.graph.check_model())
def test_check_model1(self):
self.graph.add_nodes_from(['a', 'b', 'c', 'd'])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.assertRaises(ValueError, self.graph.check_model)
self.graph.remove_node('d')
self.assertTrue(self.graph.check_model())
def test_check_model2(self):
self.graph.add_nodes_from(['a', 'b', 'c'])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.graph.add_edges_from([('a', 'b')])
self.assertRaises(ValueError, self.graph.check_model)
self.graph.add_edges_from([(phi1, phi2)])
self.assertRaises(ValueError, self.graph.check_model)
self.graph.remove_edges_from([('a', 'b'), (phi1, phi2)])
self.assertTrue(self.graph.check_model())
def test_check_model3(self):
self.graph.add_nodes_from(['a', 'b', 'c'])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [2, 2], np.random.rand(4))
phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2, phi3)
self.assertRaises(ValueError, self.graph.check_model)
self.graph.remove_factors(phi3)
self.assertTrue(self.graph.check_model())
def test_check_model4(self):
self.graph.add_nodes_from(['a', 'b', 'c'])
phi1 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = Factor(['b', 'c'], [3, 2], np.random.rand(6))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([('a', phi1), ('b', phi1),
('b', phi2), ('c', phi2)])
self.graph.add_factors(phi1, phi2)
self.assertRaises(CardinalityError, self.graph.check_model)
self.graph.remove_factors(phi2)
self.graph.remove_node(phi2)
phi3 = Factor(['c', 'a'], [4, 4], np.random.rand(16))
self.graph.add_factors(phi3)
self.graph.add_edges_from([('a', phi3), ('c', phi3)])
self.assertRaises(CardinalityError, self.graph.check_model)
def tearDown(self):
del self.graph
|
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fakeredis
import logging
from c7n_mailer.ldap_lookup import LdapLookup, Redis
from ldap3 import Server, Connection, MOCK_SYNC
from ldap3.strategy import mockBase
logger = logging.getLogger('custodian.mailer')
PETER = (
'uid=peter,cn=users,dc=initech,dc=com',
{
'uid': ['peter'],
'manager': 'uid=bill_lumbergh,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'displayName': 'Peter',
'objectClass': 'person'
}
)
BILL = (
'uid=bill_lumbergh,cn=users,dc=initech,dc=com',
{
'uid': ['bill_lumbergh'],
'mail': '[email protected]',
'displayName': 'Bill Lumberg',
'objectClass': 'person'
}
)
MAILER_CONFIG = {
'smtp_port': 25,
'from_address': '[email protected]',
'contact_tags': ['OwnerEmail', 'SupportEmail'],
'queue_url': 'https://sqs.us-east-1.amazonaws.com/xxxx/cloudcustodian-mailer',
'region': 'us-east-1',
'ldap_uri': 'ldap.initech.com',
'smtp_server': 'smtp.inittech.com',
'cache_engine': 'sqlite',
'role': 'arn:aws:iam::xxxx:role/cloudcustodian-mailer',
'ldap_uid_tags': ['CreatorName', 'Owner'],
}
MAILER_CONFIG_AZURE = {
'queue_url': 'asq://storageaccount.queue.core.windows.net/queuename',
'from_address': '[email protected]',
'sendgrid_api_key': 'SENDGRID_API_KEY'
}
RESOURCE_1 = {
'AvailabilityZone': 'us-east-1a',
'Attachments': [],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
},
{
'Value': 'peter',
'Key': 'CreatorName'
}
],
'VolumeId': 'vol-01a0e6ea6b89f0099'
}
RESOURCE_2 = {
'AvailabilityZone': 'us-east-1c',
'Attachments': [],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
},
{
'Value': 'peter',
'Key': 'CreatorName'
}
],
'VolumeId': 'vol-21a0e7ea9b19f0043',
'Size': 8
}
SQS_MESSAGE_1 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'to': ['resource-owner', 'ldap_uid_tags'],
'email_ldap_username_manager': True,
'template': '',
'priority_header': '1',
'type': 'notify',
'transport': {'queue': 'xxx', 'type': 'sqs'},
'subject': '{{ account }} AWS EBS Volumes will be DELETED in 15 DAYS!'
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'to': ['resource-owner', 'ldap_uid_tags'],
'email_ldap_username_manager': True,
'template': '',
'priority_header': '1',
'type': 'notify',
'subject': 'EBS Volumes will be DELETED in 15 DAYS!'
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_1]
}
SQS_MESSAGE_2 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size']
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size']
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_1, RESOURCE_2]
}
SQS_MESSAGE_3 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size&metric_value_tag=Size']
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size&metric_value_tag=Size']
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_2]
}
ASQ_MESSAGE = '''{
"account":"subscription",
"account_id":"ee98974b-5d2a-4d98-a78a-382f3715d07e",
"region":"all",
"action":{
"to":[
"[email protected]"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
},
"policy":{
"resource":"azure.keyvault",
"name":"test-notify-for-keyvault",
"actions":[
{
"to":[
"[email protected]"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
}
]
},
"event":null,
"resources":[
{
"name":"cckeyvault1",
"tags":{
},
"resourceGroup":"test_keyvault",
"location":"southcentralus",
"type":"Microsoft.KeyVault/vaults",
"id":"/subscriptions/ee98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_keyvault/providers/Microsoft.KeyVault/vaults/cckeyvault1"
}
]
}'''
# Monkey-patch ldap3 to work around a bytes/text handling bug.
_safe_rdn = mockBase.safe_rdn
def safe_rdn(*a, **kw):
return [(k, mockBase.to_raw(v)) for k, v in _safe_rdn(*a, **kw)]
mockBase.safe_rdn = safe_rdn
def get_fake_ldap_connection():
server = Server('my_fake_server')
connection = Connection(
server,
client_strategy=MOCK_SYNC
)
connection.bind()
connection.strategy.add_entry(PETER[0], PETER[1])
connection.strategy.add_entry(BILL[0], BILL[1])
return connection
def get_ldap_lookup(cache_engine=None, uid_regex=None):
if cache_engine == 'sqlite':
config = {
'cache_engine': 'sqlite',
'ldap_cache_file': ':memory:'
}
elif cache_engine == 'redis':
config = {
'cache_engine': 'redis',
'redis_host': 'localhost'
}
if uid_regex:
config['ldap_uid_regex'] = uid_regex
ldap_lookup = MockLdapLookup(config, logger)
michael_bolton = {
'dn': 'CN=Michael Bolton,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'manager': 'CN=Milton,cn=users,dc=initech,dc=com',
'displayName': 'Michael Bolton'
}
milton = {
'uid': '123456',
'dn': 'CN=Milton,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'manager': 'CN=cthulhu,cn=users,dc=initech,dc=com',
'displayName': 'Milton'
}
bob_porter = {
'dn': 'CN=Bob Porter,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'manager': 'CN=Bob Slydell,cn=users,dc=initech,dc=com',
'displayName': 'Bob Porter'
}
ldap_lookup.base_dn = 'cn=users,dc=initech,dc=com'
ldap_lookup.uid_key = 'uid'
ldap_lookup.attributes.append('uid')
ldap_lookup.caching.set('michael_bolton', michael_bolton)
ldap_lookup.caching.set(bob_porter['dn'], bob_porter)
ldap_lookup.caching.set('123456', milton)
ldap_lookup.caching.set(milton['dn'], milton)
return ldap_lookup
class MockLdapLookup(LdapLookup):
# allows us to instantiate this object and not need a redis daemon
def get_redis_connection(self, redis_host, redis_port):
return MockRedisLookup()
    # allows us to instantiate this object and not have ldap3 try to connect
    # to anything or raise exceptions in unit tests; we replace the connection with a mock
def get_connection(self, ignore, these, params):
return get_fake_ldap_connection()
class MockRedisLookup(Redis):
def __init__(self):
self.connection = fakeredis.FakeStrictRedis()
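# A usage sketch, not part of the original fixtures: it relies only on the mock
# directory entries defined above and the standard ldap3 search API, and shows
# how a test could query Peter's record through the MOCK_SYNC connection. The
# helper name is hypothetical.
def _example_search_peter():
    connection = get_fake_ldap_connection()
    # Search the in-memory directory for Peter's entry and fetch the
    # attributes the mailer resolves recipients from.
    connection.search(
        search_base='cn=users,dc=initech,dc=com',
        search_filter='(uid=peter)',
        attributes=['mail', 'manager', 'displayName'],
    )
    # connection.entries holds the matching mock entries (Peter's mail and
    # manager attributes as defined in PETER above).
    return connection.entries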
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for compute API."""
import copy
import datetime
import iso8601
import mox
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.objects import instance_info_cache
from nova.objects import migration as migration_obj
from nova.objects import service as service_obj
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_migration
from nova.tests.objects import test_service
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
class _ComputeAPIUnitTestMixIn(object):
def setUp(self):
super(_ComputeAPIUnitTestMixIn, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
if not exclude_states:
exclude_states = set()
return vm_state - exclude_states
def _create_flavor(self, params=None):
flavor = {'id': 1,
'flavorid': 1,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'vcpu_weight': None,
'root_gb': 1,
'ephemeral_gb': 0,
'rxtx_factor': 1,
'swap': 0,
'deleted': 0,
'disabled': False,
'is_public': True,
}
if params:
flavor.update(params)
return flavor
def _create_instance_obj(self, params=None, flavor=None):
"""Create a test instance."""
if not params:
params = {}
if flavor is None:
flavor = self._create_flavor()
def make_fake_sys_meta():
sys_meta = params.pop("system_metadata", {})
for key in flavors.system_metadata_flavor_props:
sys_meta['instance_type_%s' % key] = flavor[key]
return sys_meta
now = timeutils.utcnow()
instance = instance_obj.Instance()
instance.metadata = {}
instance.metadata.update(params.pop('metadata', {}))
instance.system_metadata = make_fake_sys_meta()
instance.system_metadata.update(params.pop('system_metadata', {}))
instance._context = self.context
instance.id = 1
instance.uuid = uuidutils.generate_uuid()
instance.cell_name = 'api!child'
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.image_ref = FAKE_IMAGE_REF
instance.reservation_id = 'r-fakeres'
instance.user_id = self.user_id
instance.project_id = self.project_id
instance.host = 'fake_host'
instance.node = NODENAME
instance.instance_type_id = flavor['id']
instance.ami_launch_index = 0
instance.memory_mb = 0
instance.vcpus = 0
instance.root_gb = 0
instance.ephemeral_gb = 0
instance.architecture = 'x86_64'
instance.os_type = 'Linux'
instance.locked = False
instance.created_at = now
instance.updated_at = now
instance.launched_at = now
instance.disable_terminate = False
instance.info_cache = instance_info_cache.InstanceInfoCache()
if params:
instance.update(params)
instance.obj_reset_changes()
return instance
def test_create_quota_exceeded_messages(self):
image_href = "image_href"
image_id = 0
instance_type = self._create_flavor()
self.mox.StubOutWithMock(self.compute_api, "_get_image")
self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
quotas = {'instances': 1, 'cores': 1, 'ram': 1}
usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in
['instances', 'cores', 'ram'])
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
quota_exception = exception.OverQuota(quotas=quotas,
usages=usages, overs=['instances'], headroom=headroom)
for _unused in range(2):
self.compute_api._get_image(self.context, image_href).AndReturn(
(image_id, {}))
quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int))
quota.QUOTAS.reserve(self.context, instances=40,
cores=mox.IsA(int),
ram=mox.IsA(int)).AndRaise(quota_exception)
self.mox.ReplayAll()
for min_count, message in [(20, '20-40'), (40, '40')]:
try:
self.compute_api.create(self.context, instance_type,
"image_href", min_count=min_count,
max_count=40)
except exception.TooManyInstances as e:
self.assertEqual(message, e.kwargs['req'])
else:
self.fail("Exception not raised")
def test_suspend(self):
# Ensure instance can be suspended.
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertEqual(instance.task_state, None)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
instance.save(expected_task_state=None)
self.compute_api._record_action_start(self.context,
instance, instance_actions.SUSPEND)
rpcapi.suspend_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.suspend(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertEqual(task_states.SUSPENDING,
instance.task_state)
def _test_suspend_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.suspend,
self.context, instance)
def test_suspend_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_suspend_fails(state)
def test_resume(self):
# Ensure instance can be resumed (if suspended).
instance = self._create_instance_obj(
params=dict(vm_state=vm_states.SUSPENDED))
self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
self.assertEqual(instance.task_state, None)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'resume_instance')
instance.save(expected_task_state=None)
self.compute_api._record_action_start(self.context,
instance, instance_actions.RESUME)
rpcapi.resume_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.resume(self.context, instance)
self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
self.assertEqual(task_states.RESUMING,
instance.task_state)
def test_start(self):
params = dict(vm_state=vm_states.STOPPED)
instance = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
instance.save(expected_task_state=None)
self.compute_api._record_action_start(self.context,
instance, instance_actions.START)
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'start_instance')
rpcapi.start_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.start(self.context, instance)
self.assertEqual(task_states.POWERING_ON,
instance.task_state)
def test_start_invalid_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.start,
self.context, instance)
def test_start_no_host(self):
params = dict(vm_state=vm_states.STOPPED, host='')
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.start,
self.context, instance)
def _test_stop(self, vm_state, force=False):
# Make sure 'progress' gets reset
params = dict(task_state=None, progress=99, vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
instance.save(expected_task_state=None)
self.compute_api._record_action_start(self.context,
instance, instance_actions.STOP)
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'stop_instance')
rpcapi.stop_instance(self.context, instance, do_cast=True)
self.mox.ReplayAll()
if force:
self.compute_api.force_stop(self.context, instance)
else:
self.compute_api.stop(self.context, instance)
self.assertEqual(task_states.POWERING_OFF,
instance.task_state)
self.assertEqual(0, instance.progress)
def test_stop(self):
self._test_stop(vm_states.ACTIVE)
def test_stop_stopped_instance_with_bypass(self):
self._test_stop(vm_states.STOPPED, force=True)
def _test_stop_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.ERROR]))
for state in invalid_vm_states:
self._test_stop_invalid_state(state)
def test_stop_a_stopped_inst(self):
params = {'vm_state': vm_states.STOPPED}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_no_host(self):
params = {'host': ''}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.stop,
self.context, instance)
def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
# Ensure instance can be soft rebooted.
inst = self._create_instance_obj()
inst.vm_state = vm_state
inst.task_state = task_state
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api, 'update')
self.mox.StubOutWithMock(inst, 'save')
inst.save(expected_task_state=[None, task_states.REBOOTING])
self.context.elevated().AndReturn(self.context)
self.compute_api._record_action_start(self.context, inst,
instance_actions.REBOOT)
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
rpcapi.reboot_instance(self.context, instance=inst,
block_device_info=None,
reboot_type=reboot_type)
self.mox.ReplayAll()
self.compute_api.reboot(self.context, inst, reboot_type)
def _test_reboot_type_fails(self, reboot_type, **updates):
inst = self._create_instance_obj()
inst.update(updates)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
self.context, inst, reboot_type)
def test_reboot_hard_active(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD')
def test_reboot_hard_error(self):
self._test_reboot_type(vm_states.ERROR, 'HARD')
def test_reboot_hard_rebooting(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOTING)
def test_reboot_hard_rescued(self):
self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
def test_reboot_hard_error_not_launched(self):
self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
launched_at=None)
def test_reboot_soft(self):
self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
def test_reboot_soft_error(self):
self._test_reboot_type(vm_states.ERROR, 'SOFT')
def test_reboot_soft_rebooting(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
def test_reboot_soft_rescued(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
def test_reboot_soft_error_not_launched(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
launched_at=None)
def _test_delete_resized_part(self, inst):
migration = migration_obj.Migration._from_db_object(
self.context, migration_obj.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(migration_obj.Migration,
'get_by_instance_and_status')
self.context.elevated().AndReturn(self.context)
migration_obj.Migration.get_by_instance_and_status(
self.context, inst.uuid, 'finished').AndReturn(migration)
self.compute_api._downsize_quota_delta(self.context, inst
).AndReturn('deltas')
self.compute_api._reserve_quota_delta(self.context, 'deltas'
).AndReturn('rsvs')
self.compute_api._record_action_start(
self.context, inst, instance_actions.CONFIRM_RESIZE)
self.compute_api.compute_rpcapi.confirm_resize(
self.context, inst, migration,
migration['source_compute'], 'rsvs', cast=False)
def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
inst.info_cache.delete()
compute_utils.notify_about_instance_usage(
mox.IgnoreArg(), self.context, inst,
'%s.start' % delete_type)
self.context.elevated().AndReturn(self.context)
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
db.instance_system_metadata_get(self.context,
inst.uuid).AndReturn('sys-meta')
state = ('soft' in delete_type and vm_states.SOFT_DELETED or
vm_states.DELETED)
updates.update({'vm_state': state,
'task_state': None,
'terminated_at': delete_time})
inst.save()
db.instance_destroy(self.context, inst.uuid, constraint=None)
compute_utils.notify_about_instance_usage(
mox.IgnoreArg(),
self.context, inst, '%s.end' % delete_type,
system_metadata='sys-meta')
def _test_delete(self, delete_type, **attrs):
reservations = 'fake-resv'
inst = self._create_instance_obj()
inst.update(attrs)
inst._context = self.context
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(delete_time)
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
'service_is_up')
self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(inst.info_cache, 'delete')
self.mox.StubOutWithMock(self.compute_api.network_api,
'deallocate_for_instance')
self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
self.mox.StubOutWithMock(db, 'instance_destroy')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
db.block_device_mapping_get_all_by_instance(
self.context, inst.uuid).AndReturn([])
inst.save()
self.compute_api._create_reservations(
self.context, inst, inst.instance_type_id, inst.project_id,
inst.user_id).AndReturn(reservations)
# NOTE(comstud): This is getting messy. But what we are wanting
# to test is:
# If cells is enabled and we're the API cell:
# * Cast to cells_rpcapi.<method> with reservations=None
# * Commit reservations
# Otherwise:
# * Check for downed host
# * If downed host:
# * Clean up instance, destroying it, sending notifications.
# (Tested in _test_downed_host_part())
# * Commit reservations
# * If not downed host:
# * Record the action start.
# * Cast to compute_rpcapi.<method> with the reservations
cast = True
commit_quotas = True
if not self.is_cells:
if inst.vm_state == vm_states.RESIZED:
self._test_delete_resized_part(inst)
self.context.elevated().AndReturn(self.context)
db.service_get_by_compute_host(
self.context, inst.host).AndReturn(
test_service.fake_service)
self.compute_api.servicegroup_api.service_is_up(
mox.IsA(service_obj.Service)).AndReturn(
inst.host != 'down-host')
if inst.host == 'down-host':
self._test_downed_host_part(inst, updates, delete_time,
delete_type)
cast = False
else:
# Happens on the manager side
commit_quotas = False
if cast:
if not self.is_cells:
self.compute_api._record_action_start(self.context, inst,
instance_actions.DELETE)
if commit_quotas:
cast_reservations = None
else:
cast_reservations = reservations
if delete_type == 'soft_delete':
rpcapi.soft_delete_instance(self.context, inst,
reservations=cast_reservations)
elif delete_type in ['delete', 'force_delete']:
rpcapi.terminate_instance(self.context, inst, [],
reservations=cast_reservations)
if commit_quotas:
# Local delete or when is_cells is True.
quota.QUOTAS.commit(self.context, reservations,
project_id=inst.project_id,
user_id=inst.user_id)
self.mox.ReplayAll()
getattr(self.compute_api, delete_type)(self.context, inst)
for k, v in updates.items():
self.assertEqual(inst[k], v)
def test_delete(self):
self._test_delete('delete')
def test_delete_if_not_launched(self):
self._test_delete('delete', launched_at=None)
def test_delete_in_resizing(self):
self._test_delete('delete', task_state=task_states.RESIZE_FINISH)
def test_delete_in_resized(self):
self._test_delete('delete', vm_state=vm_states.RESIZED)
def test_delete_with_down_host(self):
self._test_delete('delete', host='down-host')
def test_delete_soft_with_down_host(self):
self._test_delete('soft_delete', host='down-host')
def test_delete_soft(self):
self._test_delete('soft_delete')
def test_delete_forced(self):
self._test_delete('force_delete', vm_state=vm_states.SOFT_DELETED)
def test_delete_fast_if_host_not_set(self):
inst = self._create_instance_obj()
inst.host = ''
updates = {'progress': 0, 'task_state': task_states.DELETING}
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(db, 'constraint')
self.mox.StubOutWithMock(db, 'instance_destroy')
self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
db.block_device_mapping_get_all_by_instance(self.context,
inst.uuid).AndReturn([])
inst.save()
self.compute_api._create_reservations(self.context,
inst, inst.instance_type_id,
inst.project_id, inst.user_id
).AndReturn(None)
if self.is_cells:
rpcapi.terminate_instance(self.context, inst, [],
reservations=None)
else:
compute_utils.notify_about_instance_usage(mox.IgnoreArg(),
self.context,
inst,
'delete.start')
db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
db.instance_destroy(self.context, inst.uuid,
constraint='constraint')
compute_utils.notify_about_instance_usage(
mox.IgnoreArg(), self.context, inst, 'delete.end',
system_metadata=inst.system_metadata)
self.mox.ReplayAll()
self.compute_api.delete(self.context, inst)
for k, v in updates.items():
self.assertEqual(inst[k], v)
def test_delete_disabled(self):
inst = self._create_instance_obj()
inst.disable_terminate = True
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.ReplayAll()
self.compute_api.delete(self.context, inst)
def test_delete_soft_rollback(self):
inst = self._create_instance_obj()
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(inst, 'save')
delete_time = datetime.datetime(1955, 11, 5)
timeutils.set_time_override(delete_time)
db.block_device_mapping_get_all_by_instance(
self.context, inst.uuid).AndReturn([])
inst.save().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute_api.soft_delete, self.context, inst)
def test_is_volume_backed_being_true_if_root_is_block_device(self):
bdms = [{'device_name': '/dev/xvda1', 'volume_id': 'volume_id',
'snapshot_id': 'snapshot_id'}]
params = {'image_ref': 'some-image-ref', 'root_device_name':
'/dev/xvda1'}
instance = self._create_instance_obj(params=params)
self.assertTrue(self.compute_api.is_volume_backed_instance(
self.context,
instance, bdms))
def test_is_volume_backed_being_false_if_root_is_not_block_device(self):
bdms = [{'device_name': '/dev/xvda1', 'volume_id': 'volume_id',
'snapshot_id': 'snapshot_id'}]
params = {'image_ref': 'some-image-ref', 'root_device_name':
'/dev/xvdd1'}
instance = self._create_instance_obj(params=params)
self.assertFalse(self.compute_api.is_volume_backed_instance(
self.context,
instance, bdms))
def test_is_volume_backed_being_false_if_root_device_is_not_set(self):
bdms = [{'device_name': None}]
params = {'image_ref': 'some-image-ref', 'root_device_name': None}
instance = self._create_instance_obj(params=params)
self.assertFalse(self.compute_api.is_volume_backed_instance(
self.context,
instance, bdms))
def _test_confirm_resize(self, mig_ref_passed=False):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = migration_obj.Migration._from_db_object(
self.context, migration_obj.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(migration_obj.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_mig, 'save')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'confirm_resize')
self.context.elevated().AndReturn(self.context)
if not mig_ref_passed:
migration_obj.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(
fake_mig)
self.compute_api._downsize_quota_delta(self.context,
fake_inst).AndReturn('deltas')
resvs = ['resvs']
self.compute_api._reserve_quota_delta(self.context,
'deltas').AndReturn(resvs)
def _check_mig(expected_task_state=None):
self.assertEqual('confirming', fake_mig.status)
fake_mig.save().WithSideEffects(_check_mig)
if self.is_cells:
quota.QUOTAS.commit(self.context, resvs)
resvs = []
self.compute_api._record_action_start(self.context, fake_inst,
'confirmResize')
self.compute_api.compute_rpcapi.confirm_resize(
self.context, fake_inst, fake_mig, 'compute-source', resvs)
self.mox.ReplayAll()
if mig_ref_passed:
self.compute_api.confirm_resize(self.context, fake_inst,
migration=fake_mig)
else:
self.compute_api.confirm_resize(self.context, fake_inst)
def test_confirm_resize(self):
self._test_confirm_resize()
def test_confirm_resize_with_migration_ref(self):
self._test_confirm_resize(mig_ref_passed=True)
def _test_revert_resize(self):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = migration_obj.Migration._from_db_object(
self.context, migration_obj.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(migration_obj.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(self.compute_api,
'_reverse_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.mox.StubOutWithMock(fake_mig, 'save')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'revert_resize')
self.context.elevated().AndReturn(self.context)
migration_obj.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(
fake_mig)
self.compute_api._reverse_upsize_quota_delta(
self.context, fake_mig).AndReturn('deltas')
resvs = ['resvs']
self.compute_api._reserve_quota_delta(self.context,
'deltas').AndReturn(resvs)
def _check_state(expected_task_state=None):
self.assertEqual(task_states.RESIZE_REVERTING,
fake_inst.task_state)
fake_inst.save(expected_task_state=None).WithSideEffects(
_check_state)
def _check_mig(expected_task_state=None):
self.assertEqual('reverting', fake_mig.status)
fake_mig.save().WithSideEffects(_check_mig)
if self.is_cells:
quota.QUOTAS.commit(self.context, resvs)
resvs = []
self.compute_api._record_action_start(self.context, fake_inst,
'revertResize')
self.compute_api.compute_rpcapi.revert_resize(
self.context, fake_inst, fake_mig, 'compute-dest', resvs)
self.mox.ReplayAll()
self.compute_api.revert_resize(self.context, fake_inst)
def test_revert_resize(self):
self._test_revert_resize()
def _test_resize(self, flavor_id_passed=True,
same_host=False, allow_same_host=False,
allow_mig_same_host=False,
project_id=None,
extra_kwargs=None):
if extra_kwargs is None:
extra_kwargs = {}
self.flags(allow_resize_to_same_host=allow_same_host,
allow_migrate_to_same_host=allow_mig_same_host)
params = {}
if project_id is not None:
# To test instance w/ different project id than context (admin)
params['project_id'] = project_id
fake_inst = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
current_flavor = flavors.extract_flavor(fake_inst)
if flavor_id_passed:
new_flavor = dict(id=200, flavorid='new-flavor-id',
name='new_flavor', disabled=False)
flavors.get_flavor_by_flavor_id(
'new-flavor-id',
read_deleted='no').AndReturn(new_flavor)
else:
new_flavor = current_flavor
resvs = ['resvs']
self.compute_api._upsize_quota_delta(
self.context, new_flavor,
current_flavor).AndReturn('deltas')
self.compute_api._reserve_quota_delta(self.context, 'deltas',
project_id=fake_inst['project_id']).AndReturn(resvs)
def _check_state(expected_task_state=None):
self.assertEqual(task_states.RESIZE_PREP, fake_inst.task_state)
self.assertEqual(fake_inst.progress, 0)
for key, value in extra_kwargs.items():
self.assertEqual(value, getattr(fake_inst, key))
fake_inst.save(expected_task_state=None).WithSideEffects(
_check_state)
if allow_same_host:
filter_properties = {'ignore_hosts': []}
else:
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if not flavor_id_passed and not allow_mig_same_host:
filter_properties['ignore_hosts'].append(fake_inst['host'])
if self.is_cells:
quota.QUOTAS.commit(self.context, resvs,
project_id=fake_inst['project_id'])
resvs = []
mig = migration_obj.Migration()
def _get_migration():
return mig
def _check_mig(ctxt):
self.assertEqual(fake_inst.uuid, mig.instance_uuid)
self.assertEqual(current_flavor['id'],
mig.old_instance_type_id)
self.assertEqual(new_flavor['id'],
mig.new_instance_type_id)
self.assertEqual('finished', mig.status)
self.stubs.Set(migration_obj, 'Migration', _get_migration)
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(mig, 'create')
self.context.elevated().AndReturn(self.context)
mig.create(self.context).WithSideEffects(_check_mig)
self.compute_api._record_action_start(self.context, fake_inst,
'resize')
scheduler_hint = {'filter_properties': filter_properties}
self.compute_api.compute_task_api.resize_instance(
self.context, fake_inst, extra_kwargs,
scheduler_hint=scheduler_hint,
flavor=new_flavor, reservations=resvs)
self.mox.ReplayAll()
if flavor_id_passed:
self.compute_api.resize(self.context, fake_inst,
flavor_id='new-flavor-id',
**extra_kwargs)
else:
self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
def _test_migrate(self, *args, **kwargs):
        self._test_resize(*args, flavor_id_passed=False, **kwargs)
def test_resize(self):
self._test_resize()
def test_resize_with_kwargs(self):
self._test_resize(extra_kwargs=dict(cow='moo'))
def test_resize_same_host_and_allowed(self):
self._test_resize(same_host=True, allow_same_host=True)
def test_resize_same_host_and_not_allowed(self):
self._test_resize(same_host=True, allow_same_host=False)
def test_resize_different_project_id(self):
self._test_resize(project_id='different')
def test_migrate(self):
self._test_migrate()
def test_migrate_with_kwargs(self):
self._test_migrate(extra_kwargs=dict(cow='moo'))
def test_migrate_same_host_and_allowed(self):
self._test_migrate(same_host=True, allow_same_host=True)
def test_migrate_same_host_and_not_allowed(self):
self._test_migrate(same_host=True, allow_same_host=False)
def test_migrate_different_project_id(self):
self._test_migrate(project_id='different')
def test_resize_invalid_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(self.compute_api, 'update')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
exc = exception.FlavorNotFound(flavor_id='flavor-id')
flavors.get_flavor_by_flavor_id('flavor-id',
read_deleted='no').AndRaise(exc)
self.mox.ReplayAll()
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
def test_resize_disabled_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(self.compute_api, 'update')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
disabled=True)
flavors.get_flavor_by_flavor_id(
'flavor-id', read_deleted='no').AndReturn(fake_flavor)
self.mox.ReplayAll()
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
def test_resize_same_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(self.compute_api, 'update')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
fake_flavor = flavors.extract_flavor(fake_inst)
flavors.get_flavor_by_flavor_id(
fake_flavor['flavorid'],
read_deleted='no').AndReturn(fake_flavor)
self.mox.ReplayAll()
        # Pass in a flavor_id that is the same as the current flavor.
self.assertRaises(exception.CannotResizeToSameFlavor,
self.compute_api.resize, self.context,
fake_inst, flavor_id=fake_flavor['flavorid'])
def test_resize_quota_exceeds_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
# Should never reach these.
self.mox.StubOutWithMock(self.compute_api, 'update')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
current_flavor = flavors.extract_flavor(fake_inst)
fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
disabled=False)
flavors.get_flavor_by_flavor_id(
'flavor-id', read_deleted='no').AndReturn(fake_flavor)
deltas = dict(resource=0)
self.compute_api._upsize_quota_delta(
self.context, fake_flavor,
current_flavor).AndReturn(deltas)
usage = dict(in_use=0, reserved=0)
quotas = {'resource': 0}
usages = {'resource': usage}
overs = ['resource']
headroom = {'resource': quotas['resource'] -
(usages['resource']['in_use'] + usages['resource']['reserved'])}
over_quota_args = dict(quotas=quotas,
usages=usages,
overs=overs,
headroom=headroom)
self.compute_api._reserve_quota_delta(self.context, deltas,
project_id=fake_inst['project_id']).AndRaise(
exception.OverQuota(**over_quota_args))
self.mox.ReplayAll()
self.assertRaises(exception.TooManyInstances,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
def test_pause(self):
# Ensure instance can be paused.
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertEqual(instance.task_state, None)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'pause_instance')
instance.save(expected_task_state=None)
self.compute_api._record_action_start(self.context,
instance, instance_actions.PAUSE)
rpcapi.pause_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.pause(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertEqual(task_states.PAUSING,
instance.task_state)
def _test_pause_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.pause,
self.context, instance)
def test_pause_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_pause_fails(state)
def test_unpause(self):
# Ensure instance can be unpaused.
params = dict(vm_state=vm_states.PAUSED)
instance = self._create_instance_obj(params=params)
self.assertEqual(instance.vm_state, vm_states.PAUSED)
self.assertEqual(instance.task_state, None)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.is_cells:
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
instance.save(expected_task_state=None)
self.compute_api._record_action_start(self.context,
instance, instance_actions.UNPAUSE)
rpcapi.unpause_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.unpause(self.context, instance)
self.assertEqual(vm_states.PAUSED, instance.vm_state)
self.assertEqual(task_states.UNPAUSING, instance.task_state)
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
# and that volumes return to previous states in case of error.
def fake_vol_api_begin_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
volumes[volume_id]['status'] = 'detaching'
def fake_vol_api_roll_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'detaching':
volumes[volume_id]['status'] = 'in-use'
def fake_vol_api_reserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
self.assertEqual(volumes[volume_id]['status'], 'available')
volumes[volume_id]['status'] = 'attaching'
def fake_vol_api_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
def fake_swap_volume_exc(context, instance, old_volume_id,
new_volume_id):
raise AttributeError # Random exception
# Should fail if VM state is not valid
instance = {'vm_state': vm_states.BUILDING,
'launched_at': timeutils.utcnow(),
'locked': False,
'availability_zone': 'fake_az',
'uuid': 'fake'}
volumes = {}
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'attach_status': 'attached',
'instance_uuid': 'fake',
'size': 5,
'status': 'in-use'}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'attach_status': 'detached',
'instance_uuid': None,
'size': 5,
'status': 'available'}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
instance['vm_state'] = vm_states.ACTIVE
# Should fail if old volume is not attached
volumes[old_volume_id]['attach_status'] = 'detached'
self.assertRaises(exception.VolumeUnattached,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEquals(volumes[old_volume_id]['status'], 'in-use')
self.assertEquals(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['attach_status'] = 'attached'
# Should fail if old volume's instance_uuid is not that of the instance
volumes[old_volume_id]['instance_uuid'] = 'fake2'
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEquals(volumes[old_volume_id]['status'], 'in-use')
self.assertEquals(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['instance_uuid'] = 'fake'
# Should fail if new volume is attached
volumes[new_volume_id]['attach_status'] = 'attached'
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEquals(volumes[old_volume_id]['status'], 'in-use')
self.assertEquals(volumes[new_volume_id]['status'], 'available')
volumes[new_volume_id]['attach_status'] = 'detached'
# Should fail if new volume is smaller than the old volume
volumes[new_volume_id]['size'] = 4
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEquals(volumes[old_volume_id]['status'], 'in-use')
self.assertEquals(volumes[new_volume_id]['status'], 'available')
volumes[new_volume_id]['size'] = 5
# Fail call to swap_volume
self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
fake_vol_api_begin_detaching)
self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
fake_vol_api_roll_detaching)
self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
fake_vol_api_reserve)
self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
fake_vol_api_unreserve)
self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
fake_swap_volume_exc)
self.assertRaises(AttributeError,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEquals(volumes[old_volume_id]['status'], 'in-use')
self.assertEquals(volumes[new_volume_id]['status'], 'available')
# Should succeed
self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
lambda c, instance, old_volume_id, new_volume_id: True)
self.compute_api.swap_volume(self.context, instance,
volumes[old_volume_id],
volumes[new_volume_id])
def _test_snapshot_and_backup(self, is_snapshot=True,
with_base_ref=False, min_ram=None,
min_disk=None,
create_fails=False):
# 'cache_in_nova' is for testing non-inheritable properties
# 'user_id' should also not be carried from sys_meta into
# image property...since it should be set explicitly by
# _create_image() in compute api.
fake_sys_meta = dict(image_foo='bar', blah='bug?',
image_cache_in_nova='dropped',
cache_in_nova='dropped',
user_id='meow')
if with_base_ref:
fake_sys_meta['image_base_image_ref'] = 'fake-base-ref'
params = dict(system_metadata=fake_sys_meta)
instance = self._create_instance_obj(params=params)
fake_sys_meta.update(instance.system_metadata)
extra_props = dict(cow='moo', cat='meow')
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(self.compute_api.image_service,
'create')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'snapshot_instance')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'backup_instance')
image_type = is_snapshot and 'snapshot' or 'backup'
expected_sys_meta = dict(fake_sys_meta)
expected_sys_meta.pop('cache_in_nova')
expected_sys_meta.pop('image_cache_in_nova')
expected_sys_meta.pop('user_id')
expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo')
if with_base_ref:
expected_sys_meta['base_image_ref'] = expected_sys_meta.pop(
'image_base_image_ref')
expected_props = {'instance_uuid': instance.uuid,
'user_id': self.context.user_id,
'image_type': image_type}
expected_props.update(extra_props)
expected_props.update(expected_sys_meta)
expected_meta = {'name': 'fake-name',
'is_public': False,
'properties': expected_props}
if is_snapshot:
if min_ram is not None:
expected_meta['min_ram'] = min_ram
if min_disk is not None:
expected_meta['min_disk'] = min_disk
else:
expected_props['backup_type'] = 'fake-backup-type'
compute_utils.get_image_metadata(
self.context, self.compute_api.image_service,
FAKE_IMAGE_REF, instance).AndReturn(expected_meta)
fake_image = dict(id='fake-image-id')
mock_method = self.compute_api.image_service.create(
self.context, expected_meta)
if create_fails:
mock_method.AndRaise(test.TestingException())
else:
mock_method.AndReturn(fake_image)
def check_state(expected_task_state=None):
expected_state = (is_snapshot and task_states.IMAGE_SNAPSHOT or
task_states.IMAGE_BACKUP)
self.assertEqual(expected_state, instance.task_state)
if not create_fails:
instance.save(expected_task_state=None).WithSideEffects(
check_state)
if is_snapshot:
self.compute_api.compute_rpcapi.snapshot_instance(
self.context, instance, fake_image['id'])
else:
self.compute_api.compute_rpcapi.backup_instance(
self.context, instance, fake_image['id'],
'fake-backup-type', 'fake-rotation')
self.mox.ReplayAll()
got_exc = False
try:
if is_snapshot:
res = self.compute_api.snapshot(self.context, instance,
'fake-name',
extra_properties=extra_props)
else:
res = self.compute_api.backup(self.context, instance,
'fake-name',
'fake-backup-type',
'fake-rotation',
extra_properties=extra_props)
self.assertEqual(fake_image, res)
except test.TestingException:
got_exc = True
self.assertEqual(create_fails, got_exc)
def test_snapshot(self):
self._test_snapshot_and_backup()
def test_snapshot_fails(self):
self._test_snapshot_and_backup(create_fails=True)
def test_snapshot_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
def test_snapshot_with_base_image_ref(self):
self._test_snapshot_and_backup(with_base_ref=True)
def test_snapshot_min_ram(self):
self._test_snapshot_and_backup(min_ram=42)
def test_snapshot_min_disk(self):
self._test_snapshot_and_backup(min_disk=42)
def test_backup(self):
self._test_snapshot_and_backup(is_snapshot=False)
def test_backup_fails(self):
self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
def test_backup_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
def test_backup_with_base_image_ref(self):
self._test_snapshot_and_backup(is_snapshot=False,
with_base_ref=True)
def test_snapshot_volume_backed(self):
instance = self._create_instance_obj()
instance['root_device_name'] = 'vda'
instance_bdms = []
image_meta = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away',
}
expect_meta = {
'name': 'test-snapshot',
'properties': {'root_device_name': 'vda', 'mappings': 'DONTCARE'},
'size': 0,
'is_public': False
}
def fake_get_instance_bdms(context, instance):
return copy.deepcopy(instance_bdms)
def fake_image_create(context, image_meta, data):
self.assertThat(image_meta, matchers.DictMatches(expect_meta))
def fake_volume_get(context, volume_id):
return {'id': volume_id, 'display_description': ''}
def fake_volume_create_snapshot(context, volume_id, name, description):
return {'id': '%s-snapshot' % volume_id}
self.stubs.Set(self.compute_api, 'get_instance_bdms',
fake_get_instance_bdms)
self.stubs.Set(self.compute_api.image_service, 'create',
fake_image_create)
self.stubs.Set(self.compute_api.volume_api, 'get',
fake_volume_get)
self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
fake_volume_create_snapshot)
# No block devices defined
self.compute_api.snapshot_volume_backed(
self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
bdm = {'no_device': False, 'volume_id': '1',
'connection_info': 'inf', 'device_name': '/dev/vda'}
for key in block_device.BlockDeviceDict._db_only_fields:
bdm[key] = 'MUST DELETE'
instance_bdms.append(bdm)
expect_meta['properties']['block_device_mapping'] = []
expect_meta['properties']['block_device_mapping'].append(
{'no_device': False, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda'})
# All the db_only fields and the volume ones are removed
self.compute_api.snapshot_volume_backed(
self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
image_mappings = [{'device': 'vda', 'virtual': 'ephemeral0'},
{'device': 'vdb', 'virtual': 'swap'},
{'device': 'vdc', 'virtual': 'ephemeral1'}]
image_meta['properties']['mappings'] = image_mappings
expect_meta['properties']['block_device_mapping'].extend([
{'no_device': True, 'device_name': '/dev/vdb'},
{'no_device': True, 'device_name': '/dev/vdc'}])
        # Check that the mappings from the image properties are included
self.compute_api.snapshot_volume_backed(
self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
def test_volume_snapshot_create(self):
volume_id = '1'
create_info = {'id': 'eyedee'}
fake_bdm = {
'instance': {
'uuid': 'fake_uuid',
'vm_state': vm_states.ACTIVE,
},
}
def fake_get_bdm(context, _volume_id, columns_to_join):
self.assertEqual(volume_id, _volume_id)
return fake_bdm
self.stubs.Set(self.compute_api.db,
'block_device_mapping_get_by_volume_id', fake_get_bdm)
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'volume_snapshot_create')
self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
fake_bdm['instance'], volume_id, create_info)
self.mox.ReplayAll()
snapshot = self.compute_api.volume_snapshot_create(self.context,
volume_id, create_info)
expected_snapshot = {
'snapshot': {
'id': create_info['id'],
'volumeId': volume_id,
},
}
self.assertEqual(snapshot, expected_snapshot)
def test_volume_snapshot_delete(self):
volume_id = '1'
snapshot_id = '2'
fake_bdm = {
'instance': {
'uuid': 'fake_uuid',
'vm_state': vm_states.ACTIVE,
},
}
def fake_get_bdm(context, _volume_id, columns_to_join):
self.assertEqual(volume_id, _volume_id)
return fake_bdm
self.stubs.Set(self.compute_api.db,
'block_device_mapping_get_by_volume_id', fake_get_bdm)
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'volume_snapshot_delete')
self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
fake_bdm['instance'], volume_id, snapshot_id, {})
self.mox.ReplayAll()
self.compute_api.volume_snapshot_delete(self.context, volume_id,
snapshot_id, {})
def _create_instance_with_disabled_disk_config(self):
sys_meta = {"image_auto_disk_config": "Disabled"}
params = {"system_metadata": sys_meta}
return obj_base.obj_to_primitive(self._create_instance_obj(
params=params))
def _setup_fake_image_with_disabled_disk_config(self):
self.fake_image = {
'id': 1,
'name': 'fake_name',
'status': 'active',
'properties': {"auto_disk_config": "Disabled"},
}
def fake_show(obj, context, image_id):
return self.fake_image
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
return self.fake_image['id']
def test_resize_with_disabled_auto_disk_config_fails(self):
fake_inst = self._create_instance_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.resize,
self.context, fake_inst,
auto_disk_config=True)
def test_create_with_disabled_auto_disk_config_fails(self):
image_id = self._setup_fake_image_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.create, self.context,
"fake_flavor", image_id, auto_disk_config=True)
def test_rebuild_with_disabled_auto_disk_config_fails(self):
fake_inst = self._create_instance_with_disabled_disk_config()
image_id = self._setup_fake_image_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.rebuild,
self.context,
fake_inst,
image_id,
"new password",
auto_disk_config=True)
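# The mix-in above is exercised twice: ComputeAPIUnitTestCase runs it against
# the plain compute API, while ComputeCellsAPIUnitTestCase runs the same tests
# against the cells-aware API, with self.is_cells available for tests that
# need to branch on the difference.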
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
super(ComputeAPIUnitTestCase, self).setUp()
self.compute_api = compute_api.API()
self.is_cells = False
class ComputeCellsAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
super(ComputeCellsAPIUnitTestCase, self).setUp()
self.flags(cell_type='api', enable=True, group='cells')
self.compute_api = compute_cells_api.ComputeCellsAPI()
self.is_cells = True
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class InboundNatRulesOperations(object):
"""InboundNatRulesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def list(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of InboundNatRule
:rtype:
~azure.mgmt.network.v2017_10_01.models.InboundNatRulePaged[~azure.mgmt.network.v2017_10_01.models.InboundNatRule]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.InboundNatRulePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.InboundNatRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'}
def _delete_initial(
self, resource_group_name, load_balancer_name, inbound_nat_rule_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, load_balancer_name, inbound_nat_rule_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}
def get(
self, resource_group_name, load_balancer_name, inbound_nat_rule_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: InboundNatRule or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_10_01.models.InboundNatRule or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}
def _create_or_update_initial(
self, resource_group_name, load_balancer_name, inbound_nat_rule_name, inbound_nat_rule_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, load_balancer_name, inbound_nat_rule_name, inbound_nat_rule_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create
or update inbound nat rule operation.
:type inbound_nat_rule_parameters:
~azure.mgmt.network.v2017_10_01.models.InboundNatRule
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns InboundNatRule or
ClientRawResponse<InboundNatRule> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.InboundNatRule]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.InboundNatRule]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('InboundNatRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}
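# Hedged usage sketch (not part of the generated client): these operations are
# normally reached through a NetworkManagementClient rather than by
# instantiating this class directly. 'my-rg', 'my-lb', 'my-rule' and the
# credentials object below are placeholders.
#
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(credentials, subscription_id)
#   for rule in client.inbound_nat_rules.list('my-rg', 'my-lb'):
#       print(rule.name)
#   poller = client.inbound_nat_rules.delete('my-rg', 'my-lb', 'my-rule')
#   poller.wait()  # delete/create_or_update return an LROPoller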
|
|
"""Data Equivalence Tests"""
from __future__ import print_function
# Author: Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import inspect
import os.path as op
import shutil
import warnings
from nose.tools import assert_equal, assert_raises, assert_true
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from mne.utils import _TempDir, run_tests_if_main
from mne import pick_types, find_events
from mne.io.constants import FIFF
from mne.io import read_raw_fif, read_raw_brainvision
from mne.io.tests.test_raw import _test_raw_reader
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
vhdr_path = op.join(data_dir, 'test.vhdr')
vmrk_path = op.join(data_dir, 'test.vmrk')
vhdr_partially_disabled_hw_filter_path = op.join(data_dir,
'test_partially_disabled'
'_hw_filter.vhdr')
vhdr_old_path = op.join(data_dir,
'test_old_layout_latin1_software_filter.vhdr')
vmrk_old_path = op.join(data_dir,
'test_old_layout_latin1_software_filter.vmrk')
vhdr_v2_path = op.join(data_dir, 'testv2.vhdr')
vmrk_v2_path = op.join(data_dir, 'testv2.vmrk')
vhdr_highpass_path = op.join(data_dir, 'test_highpass.vhdr')
vhdr_mixed_highpass_path = op.join(data_dir, 'test_mixed_highpass.vhdr')
vhdr_highpass_hz_path = op.join(data_dir, 'test_highpass_hz.vhdr')
vhdr_mixed_highpass_hz_path = op.join(data_dir, 'test_mixed_highpass_hz.vhdr')
# Not a typo: we can reuse the highpass file for the lowpass (Hz) test
vhdr_lowpass_path = op.join(data_dir, 'test_highpass.vhdr')
vhdr_mixed_lowpass_path = op.join(data_dir, 'test_mixed_lowpass.vhdr')
vhdr_lowpass_s_path = op.join(data_dir, 'test_lowpass_s.vhdr')
vhdr_mixed_lowpass_s_path = op.join(data_dir, 'test_mixed_lowpass_s.vhdr')
montage = op.join(data_dir, 'test.hpts')
eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
eog = ['HL', 'HR', 'Vb']
event_id = {'Sync On': 5}
warnings.simplefilter('always')
def test_ascii():
"""Test ASCII BV reading."""
raw = read_raw_brainvision(vhdr_path, event_id=event_id)
tempdir = _TempDir()
ascii_vhdr_path = op.join(tempdir, op.split(vhdr_path)[-1])
# copy marker file
shutil.copy(vhdr_path.replace('.vhdr', '.vmrk'),
ascii_vhdr_path.replace('.vhdr', '.vmrk'))
# modify header file
skipping = False
with open(ascii_vhdr_path, 'wb') as fout:
with open(vhdr_path, 'rb') as fin:
for line in fin:
# Common Infos section
if line.startswith(b'DataFormat'):
line = b'DataFormat=ASCII\n'
elif line.startswith(b'DataFile='):
line = b'DataFile=test.dat\n'
                # Replace the "Binary Infos" section
elif line.startswith(b'[Binary Infos]'):
skipping = True
fout.write(b'[ASCII Infos]\nDecimalSymbol=.\nSkipLines=1\n'
b'SkipColumns=0\n\n')
elif skipping and line.startswith(b'['):
skipping = False
if not skipping:
fout.write(line)
# create the .dat file
data, times = raw[:]
with open(ascii_vhdr_path.replace('.vhdr', '.dat'), 'wb') as fid:
fid.write(b' '.join(ch_name.encode('ASCII')
for ch_name in raw.ch_names) + b'\n')
fid.write(b'\n'.join(b' '.join(b'%.3f' % dd for dd in d)
for d in data[:-1].T / raw._cals[:-1]))
raw = read_raw_brainvision(ascii_vhdr_path, event_id=event_id)
data_new, times_new = raw[:]
assert_allclose(data_new, data, atol=1e-15)
assert_allclose(times_new, times)
def test_brainvision_data_highpass_filters():
"""Test reading raw Brain Vision files with amplifier filter settings."""
# Homogeneous highpass in seconds (default measurement unit)
with warnings.catch_warnings(record=True) as w: # event parsing
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_highpass_path,
montage=montage, eog=eog)
assert_true(all('parse triggers that' in str(ww.message) for ww in w))
assert_equal(raw.info['highpass'], 0.1)
assert_equal(raw.info['lowpass'], 250.)
# Heterogeneous highpass in seconds (default measurement unit)
with warnings.catch_warnings(record=True) as w: # event parsing
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_path,
montage=montage, eog=eog, event_id=event_id)
lowpass_warning = ['different lowpass filters' in str(ww.message)
for ww in w]
highpass_warning = ['different highpass filters' in str(ww.message)
for ww in w]
expected_warnings = zip(lowpass_warning, highpass_warning)
assert_true(all(any([lp, hp]) for lp, hp in expected_warnings))
assert_equal(raw.info['highpass'], 0.1)
assert_equal(raw.info['lowpass'], 250.)
# Homogeneous highpass in Hertz
with warnings.catch_warnings(record=True): # filter settings
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_highpass_hz_path,
montage=montage, eog=eog, event_id=event_id)
assert_equal(raw.info['highpass'], 10.)
assert_equal(raw.info['lowpass'], 250.)
# Heterogeneous highpass in Hertz
with warnings.catch_warnings(record=True): # filter settings
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_hz_path,
montage=montage, eog=eog, event_id=event_id)
trigger_warning = ['parse triggers that' in str(ww.message)
for ww in w]
lowpass_warning = ['different lowpass filters' in str(ww.message)
for ww in w]
highpass_warning = ['different highpass filters' in str(ww.message)
for ww in w]
expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning)
assert_true(all(any([trg, lp, hp]) for trg, lp, hp in expected_warnings))
assert_equal(raw.info['highpass'], 5.)
assert_equal(raw.info['lowpass'], 250.)
def test_brainvision_data_lowpass_filters():
"""Test files with amplifier LP filter settings."""
# Homogeneous lowpass in Hertz (default measurement unit)
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_lowpass_path,
montage=montage, eog=eog, event_id=event_id)
assert_equal(raw.info['highpass'], 0.1)
assert_equal(raw.info['lowpass'], 250.)
# Heterogeneous lowpass in Hertz (default measurement unit)
with warnings.catch_warnings(record=True) as w: # event parsing
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_mixed_lowpass_path,
montage=montage, eog=eog, event_id=event_id)
lowpass_warning = ['different lowpass filters' in str(ww.message)
for ww in w]
highpass_warning = ['different highpass filters' in str(ww.message)
for ww in w]
expected_warnings = zip(lowpass_warning, highpass_warning)
assert_true(all(any([lp, hp]) for lp, hp in expected_warnings))
assert_equal(raw.info['highpass'], 0.1)
assert_equal(raw.info['lowpass'], 250.)
# Homogeneous lowpass in seconds
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_lowpass_s_path,
montage=montage, eog=eog, event_id=event_id)
assert_equal(raw.info['highpass'], 0.1)
assert_equal(raw.info['lowpass'], 250.)
# Heterogeneous lowpass in seconds
with warnings.catch_warnings(record=True) as w: # filter settings
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_mixed_lowpass_s_path,
montage=montage, eog=eog, event_id=event_id)
lowpass_warning = ['different lowpass filters' in str(ww.message)
for ww in w]
highpass_warning = ['different highpass filters' in str(ww.message)
for ww in w]
expected_warnings = zip(lowpass_warning, highpass_warning)
assert_true(all(any([lp, hp]) for lp, hp in expected_warnings))
assert_equal(raw.info['highpass'], 0.1)
assert_equal(raw.info['lowpass'], 250.)
def test_brainvision_data_partially_disabled_hw_filters():
"""Test heterogeneous filter settings including non-numeric values."""
with warnings.catch_warnings(record=True) as w: # event parsing
raw = _test_raw_reader(
read_raw_brainvision,
vhdr_fname=vhdr_partially_disabled_hw_filter_path,
montage=montage, eog=eog)
trigger_warning = ['parse triggers that' in str(ww.message)
for ww in w]
lowpass_warning = ['different lowpass filters' in str(ww.message)
for ww in w]
highpass_warning = ['different highpass filters' in str(ww.message)
for ww in w]
expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning)
assert_true(all(any([trg, lp, hp]) for trg, lp, hp in expected_warnings))
assert_equal(raw.info['highpass'], 0.)
assert_equal(raw.info['lowpass'], 500.)
def test_brainvision_data_software_filters_latin1_global_units():
"""Test reading raw Brain Vision files."""
with warnings.catch_warnings(record=True) as w: # event parsing
raw = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_old_path,
eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), misc=("A2",))
assert_true(all('software filter detected' in str(ww.message) for ww in w))
assert_equal(raw.info['highpass'], 1. / 0.9)
assert_equal(raw.info['lowpass'], 50.)
def test_brainvision_data():
"""Test reading raw Brain Vision files."""
assert_raises(IOError, read_raw_brainvision, vmrk_path)
assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,
preload=True, scale="foo")
raw_py = _test_raw_reader(
read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,
eog=eog, misc='auto', event_id=event_id)
assert_true('RawBrainVision' in repr(raw_py))
assert_equal(raw_py.info['highpass'], 0.)
assert_equal(raw_py.info['lowpass'], 250.)
picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
data_py, times_py = raw_py[picks]
# compare with a file that was generated using MNE-C
raw_bin = read_raw_fif(eeg_bin, preload=True)
picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
data_bin, times_bin = raw_bin[picks]
assert_array_almost_equal(data_py, data_bin)
assert_array_almost_equal(times_py, times_bin)
# Make sure EOG channels are marked correctly
for ch in raw_py.info['chs']:
if ch['ch_name'] in eog:
assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)
elif ch['ch_name'] == 'STI 014':
assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)
elif ch['ch_name'] in ('CP5', 'CP6'):
assert_equal(ch['kind'], FIFF.FIFFV_MISC_CH)
assert_equal(ch['unit'], FIFF.FIFF_UNIT_NONE)
elif ch['ch_name'] == 'ReRef':
assert_equal(ch['kind'], FIFF.FIFFV_MISC_CH)
assert_equal(ch['unit'], FIFF.FIFF_UNIT_CEL)
elif ch['ch_name'] in raw_py.info['ch_names']:
assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
assert_equal(ch['unit'], FIFF.FIFF_UNIT_V)
else:
raise RuntimeError("Unknown Channel: %s" % ch['ch_name'])
# test loading v2
read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,
response_trig_shift=1000)
def test_brainvision_vectorized_data():
"""Test reading BrainVision data files with vectorized data."""
raw = read_raw_brainvision(vhdr_old_path, preload=True)
assert_array_equal(raw._data.shape, (30, 251))
first_two_samples_all_chs = np.array([[+5.22000008e-06, +5.10000000e-06],
[+2.10000000e-06, +2.27000008e-06],
[+1.15000000e-06, +1.33000002e-06],
[+4.00000000e-07, +4.00000000e-07],
[-3.02999992e-06, -2.82000008e-06],
[+2.71000004e-06, +2.45000000e-06],
[+2.41000004e-06, +2.36000004e-06],
[+1.01999998e-06, +1.18000002e-06],
[-1.33999996e-06, -1.25000000e-06],
[-2.60000000e-06, -2.46000004e-06],
[+6.80000019e-07, +8.00000000e-07],
[+1.48000002e-06, +1.48999996e-06],
[+1.61000004e-06, +1.51000004e-06],
[+7.19999981e-07, +8.60000038e-07],
[-3.00000000e-07, -4.00000006e-08],
[-1.20000005e-07, +6.00000024e-08],
[+8.19999981e-07, +9.89999962e-07],
[+1.13000002e-06, +1.28000002e-06],
[+1.08000002e-06, +1.33999996e-06],
[+2.20000005e-07, +5.69999981e-07],
[-4.09999990e-07, +4.00000006e-08],
[+5.19999981e-07, +9.39999962e-07],
[+1.01000004e-06, +1.51999998e-06],
[+1.01000004e-06, +1.55000000e-06],
[-1.43000002e-06, -1.13999996e-06],
[+3.65000000e-06, +3.65999985e-06],
[+4.15999985e-06, +3.79000015e-06],
[+9.26999969e-06, +8.95999985e-06],
[-7.35999985e-06, -7.18000031e-06],
[+0.00000000e+00, +0.00000000e+00]])
assert_array_almost_equal(raw._data[:, :2], first_two_samples_all_chs)
def test_events():
"""Test reading and modifying events."""
tempdir = _TempDir()
    # check that events are read and stim channel is synthesized correctly
raw = read_raw_brainvision(vhdr_path, eog=eog, event_id=event_id)
events = raw._get_brainvision_events()
events = events[events[:, 2] != event_id['Sync On']]
assert_array_equal(events, [[487, 1, 253],
[497, 1, 255],
[1770, 1, 254],
[1780, 1, 255],
[3253, 1, 254],
[3263, 1, 255],
[4936, 1, 253],
[4946, 1, 255],
[6000, 1, 255],
[6620, 1, 254],
[6630, 1, 255]])
    # check that events are read and stim channel is synthesized correctly and
    # response triggers are shifted like they're supposed to be.
raw = read_raw_brainvision(vhdr_path, eog=eog,
response_trig_shift=1000, event_id=event_id)
events = raw._get_brainvision_events()
events = events[events[:, 2] != event_id['Sync On']]
assert_array_equal(events, [[487, 1, 253],
[497, 1, 255],
[1770, 1, 254],
[1780, 1, 255],
[3253, 1, 254],
[3263, 1, 255],
[4936, 1, 253],
[4946, 1, 255],
[6000, 1, 1255],
[6620, 1, 254],
[6630, 1, 255]])
    # check that events are read and stim channel is synthesized correctly and
    # response triggers are ignored.
with warnings.catch_warnings(record=True): # ignored events
raw = read_raw_brainvision(vhdr_path, eog=eog,
response_trig_shift=None)
events = raw._get_brainvision_events()
events = events[events[:, 2] != event_id['Sync On']]
assert_array_equal(events, [[487, 1, 253],
[497, 1, 255],
[1770, 1, 254],
[1780, 1, 255],
[3253, 1, 254],
[3263, 1, 255],
[4936, 1, 253],
[4946, 1, 255],
[6620, 1, 254],
[6630, 1, 255]])
# check that events are read properly when event_id is specified for
# auxiliary events
with warnings.catch_warnings(record=True): # dropped events
raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
response_trig_shift=None,
event_id=event_id)
events = raw._get_brainvision_events()
assert_array_equal(events, [[487, 1, 253],
[497, 1, 255],
[1770, 1, 254],
[1780, 1, 255],
[3253, 1, 254],
[3263, 1, 255],
[4936, 1, 253],
[4946, 1, 255],
[6620, 1, 254],
[6630, 1, 255],
[7630, 1, 5]])
assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
preload=True, response_trig_shift=0.1)
assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
preload=True, response_trig_shift=np.nan)
    # Test that both response_trig_shift and event_id can be set
read_raw_brainvision(vhdr_path, eog=eog, preload=False,
response_trig_shift=100, event_id=event_id)
mne_events = find_events(raw, stim_channel='STI 014')
assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
# modify events and check that stim channel is updated
index = events[:, 2] == 255
events = events[index]
raw._set_brainvision_events(events)
mne_events = find_events(raw, stim_channel='STI 014')
assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
# remove events
nchan = raw.info['nchan']
ch_name = raw.info['chs'][-2]['ch_name']
events = np.empty((0, 3))
raw._set_brainvision_events(events)
assert_equal(raw.info['nchan'], nchan)
assert_equal(len(raw._data), nchan)
assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
assert_equal(len(find_events(raw, 'STI 014')), 0)
assert_allclose(raw[-1][0], 0.)
fname = op.join(tempdir, 'evt_raw.fif')
raw.save(fname)
# add events back in
events = [[10, 1, 2]]
raw._set_brainvision_events(events)
assert_equal(raw.info['nchan'], nchan)
assert_equal(len(raw._data), nchan)
assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
run_tests_if_main()
|
|
#!/Users/toma/python278i/bin/python
# Tom Anderson
# Thermal simulation prototype
# Sun Jul 13 22:30:26 PDT 2014
#
# Thermonous pertains to stimulation by heat.
# The literal ancient Greek is hot minded.
#
# Thermonice is like spice. Thermospice.
#
# TODO:
# Make the spice netlist generation use a string buffer and a file.
# Create test harness for sweeps of problem size.
# Hook up PNG files.
# Hook up HDF5 files
# Create ASCII files for layers, materials, and mesh parameters
# Make problem 3D
# Make tests for 2D, put modules into separate files so that code is
# shared with 3D.
# Separate the 2D-specific code in Solver2D.py.
# Separate the 2D-specific code in Spice2D.py.
# Create test harnesses for each module
# Measure xyce memory usage with
# http://stackoverflow.com/questions/13607391/subprocess-memory-usage-in-python
# Xyce uses about 7-10 times the memory and takes about 3 times as long as the raw matrix.
# 826M
# 26 seconds to 108 seconds by adding Xyce.
from PIL import Image, ImageDraw
import subprocess, os
import pstats
import cProfile
import numpy as np
import Layers
import Matls
import Mesh2D
import Solver2D
import Spice2D
import MatrixDiagnostic
import interactivePlot
# This can scale by using a PNG input instead of code
def defineScalableProblem(lyr, matls, x, y):
"""
  defineScalableProblem(Layers lyr, Matls matls, int x, int y)
Create a sample test problem for thermal analysis that can scale
to a wide variety of sizes.
It initializes the mesh based on fractions of the size of the mesh.
The conductivities in the problem are based on the material properties
in the matls object.
"""
mesh = Mesh2D.Mesh(x, y, lyr, matls)
# Heat source
hsx= 0.5
hsy= 0.5
hswidth= 0.25
hsheight= 0.25
heat= 10.0
srcl= round(mesh.width*(hsx-hswidth*0.5))
srcr= round(mesh.width*(hsx+hswidth*0.5))
srct= round(mesh.height*(hsy-hsheight*0.5))
srcb= round(mesh.height*(hsy+hsheight*0.5))
numHeatCells= (srcr - srcl)*(srcb-srct)
heatPerCell= heat/numHeatCells
print "Heat per cell = ", heatPerCell
mesh.field[srcl:srcr, srct:srcb, lyr.heat] = heatPerCell
mesh.field[srcl:srcr, srct:srcb, lyr.resis] = matls.copperResistancePerSquare
# Boundary conditions
mesh.field[0, 0:mesh.height, lyr.isodeg] = 25.0
mesh.field[mesh.width-1, 0:mesh.height, lyr.isodeg] = 25.0
mesh.field[0:mesh.width, 0, lyr.isodeg] = 25.0
mesh.field[0:mesh.width, mesh.height-1, lyr.isodeg] = 25.0
mesh.ifield[0, 0:mesh.height, lyr.isoflag] = 1
mesh.ifield[mesh.width-1, 0:mesh.height, lyr.isoflag] = 1
mesh.ifield[0:mesh.width, 0, lyr.isoflag] = 1
mesh.ifield[0:mesh.width, mesh.height-1, lyr.isoflag] = 1
# Thermal conductors
condwidth= 0.05
cond1l= round(mesh.width*hsx - mesh.width*condwidth*0.5)
cond1r= round(mesh.width*hsx + mesh.width*condwidth*0.5)
cond1t= round(mesh.height*hsy - mesh.height*condwidth*0.5)
cond1b= round(mesh.height*hsy + mesh.height*condwidth*0.5)
mesh.field[0:mesh.width, cond1t:cond1b, lyr.resis] = matls.copperResistancePerSquare
mesh.field[cond1l:cond1r, 0:mesh.height, lyr.resis] = matls.copperResistancePerSquare
return mesh
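# Hedged usage sketch mirroring the commented-out call in Main() below:
#
#   mesh = defineScalableProblem(lyr, matls, 20, 20)
#   mesh.mapMeshToSolutionMatrix(lyr)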
def definePNGProblem(fn, lyr, matls):
"""
Read a PNG file and load the data structure
"""
heatPerCell= 48e-6
pngproblem = Image.open(fn, mode='r')
xysize= pngproblem.size
width= xysize[0]
height= xysize[1]
print "Width: " + str(width) + " Height: " + str(height)
mesh = Mesh2D.Mesh(width, height, lyr, matls)
pix = pngproblem.load()
copperCellCount=0
padCellCount=0
isoCellCount=0
for xn in range(0,width-1):
for tyn in range(0, height-1):
# Graphing package has +y up, png has it down
yn= height - 1 - tyn
if pix[xn,yn][0] > 0:
mesh.field[xn, tyn, lyr.resis] = matls.copperResistancePerSquare
mesh.field[xn, tyn, lyr.heat] = heatPerCell
copperCellCount += 1
padCellCount += 1
if pix[xn,yn][1] > 0:
mesh.field[xn, tyn, lyr.resis] = matls.copperResistancePerSquare
copperCellCount += 1
if pix[xn,yn][2] > 0:
mesh.ifield[xn, tyn, lyr.isoflag] = 1
mesh.field[xn, tyn, lyr.isodeg] = 25.0
isoCellCount += 1
print "Copper px: " + str(copperCellCount) + " Pad px: " + str(padCellCount) + " Iso px: " + str(isoCellCount)
return mesh
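# Hedged sketch of producing an input image for definePNGProblem above: the
# red channel marks heated copper (pads), green marks plain copper, and blue
# marks isothermal boundary cells. Sizes and file name are only examples.
#
#   img = Image.new('RGB', (50, 50))
#   draw = ImageDraw.Draw(img)
#   draw.rectangle([20, 20, 30, 30], fill=(255, 0, 0))  # heated pad
#   draw.rectangle([0, 24, 49, 26], fill=(0, 255, 0))   # copper trace
#   draw.line([(0, 0), (0, 49)], fill=(0, 0, 255))      # isothermal edge
#   img.save("Layout4.png")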
def defineTinyProblem(lyr, matls):
"""
  defineTinyProblem(Layers lyr, Matls matls)
Create a tiny test problem.
"""
mesh = Mesh2D.Mesh(3, 3, lyr, matls)
mesh.ifield[0:3, 0, lyr.isoflag] = 1
mesh.field[1, 1, lyr.heat] = 2.0
print "Mesh: " + str(mesh)
return mesh
def solveAmesos(solv, mesh, lyr):
solv.solveMatrixAmesos()
solv.loadSolutionIntoMesh(lyr, mesh)
solv.checkEnergyBalance(lyr, mesh)
def solveAztecOO(solv, mesh, lyr):
solv.solveMatrixAztecOO(400000)
solv.loadSolutionIntoMesh(lyr, mesh)
solv.checkEnergyBalance(lyr, mesh)
def solveSpice(spice, mesh, lyr):
spice.finishSpiceNetlist()
proc= spice.runSpiceNetlist()
proc.wait()
spice.readSpiceRawFile(lyr, mesh)
def solveSetup(solv):
solv.debug = False
solv.useSpice = True
solv.aztec = False
solv.amesos = True
solv.eigen = False
def Main():
lyr = Layers.Layers()
matls = Matls.Matls()
spice= Spice2D.Spice()
showPlots= True
useTinyProblem= False
mesh = definePNGProblem("Layout4.png", lyr, matls)
#if useTinyProblem:
#mesh = defineTinyProblem(lyr, matls)
#else:
#mesh = defineScalableProblem(lyr, matls, 20, 20)
mesh.mapMeshToSolutionMatrix(lyr)
solv = Solver2D.Solver(lyr, mesh)
solveSetup(solv)
if (solv.useSpice == True):
solv.spiceSim= Spice2D.Spice()
solv.initDebug()
solv.loadMatrix(lyr, mesh, matls, spice)
if (solv.eigen == True):
print "Solving for eigenvalues"
solv.solveEigen()
print "Finished solving for eigenvalues"
if (solv.useSpice == True):
solveSpice(spice, mesh, lyr)
if (solv.aztec == True):
solveAztecOO(solv, mesh, lyr)
if (solv.amesos == True):
solveAmesos(solv, mesh, lyr)
if (solv.debug == True):
webpage = MatrixDiagnostic.MatrixDiagnosticWebpage(solv, lyr, mesh)
webpage.createWebPage()
if (showPlots == True):
plots= interactivePlot.interactivePlot(lyr, mesh)
plots.plotTemperature()
if (solv.useSpice == True):
plots.plotSpicedeg()
plots.plotLayerDifference(lyr.spicedeg, lyr.deg)
plots.show()
showProfile= True
if showProfile == True:
cProfile.run('Main()', 'restats')
p = pstats.Stats('restats')
p.sort_stats('cumulative').print_stats(30)
else:
Main()
# Times without printing much.
# Printing overhead is probably about 10% in this case.
# 10000 iterations
# 100X100 12sec
# 200x200 69sec
# 300x300 154sec
# 1000 iterations
# 200x200 14sec
# 300x300 34 sec
#
# Design notes:
# The Mesh class
# Has a rectangular Numpy field that represents the problem geometry.
# The Mesh elements are squares in a layered 2D field.
#   The field has layers that are described by the Layers object.
# The layers represent details about the geometry of the materials and boundary conditions.
# Has the size of the problem, such as length, width, and the number of elements.
# Is decorated with material properties from Matls.
# Is decorated with the solution to the problem.
# The Layer class
# Has enumerations that describe the layers in the Mesh
# The Map class
# Includes a Numpy grid that is the size of the Solver.
# Is used to access Solver information
# Because the solver information is not always available on the local node,
# the Map class has a local copy of the Solver input data. Some of this
# data is only needed for debugging and can be turned off to save space.
# The Solver class
#   Loads the problem matrix and calls the Trilinos solvers.
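# Hedged illustration of the field layout described above, using the same
# layer indices the problem-definition functions use:
#   mesh.field[x, y, lyr.heat]      heat injected into cell (x, y)
#   mesh.field[x, y, lyr.resis]     thermal resistance per square
#   mesh.field[x, y, lyr.isodeg]    boundary temperature in degrees C
#   mesh.ifield[x, y, lyr.isoflag]  1 if the cell is an isothermal boundary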
#
# This is from http://trilinos.sandia.gov/packages/pytrilinos/UsersGuide.pdf pg 20
# self.x = Epetra.Vector(self.Map)
# self.A.FillComplete()
# MLList = {
# "max levels" : 3,
# "output" : 10,
# "smoother: type" : "symmetric Gauss-Seidel",
# "aggregation: type" : "Uncoupled"
# };
# # Then, we create the preconditioner and compute it,
# Prec = ML.MultiLevelPreconditioner(self.A, False)
# Prec.SetParameterList(MLList)
# Prec.ComputePreconditioner()
# # Finally, we set up the solver, and specifies to use Prec as preconditioner:
# solver = AztecOO.AztecOO(self.A, self.x, self.b)
# solver.SetPrecOperator(Prec)
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_cg);
# solver.SetAztecOption(AztecOO.AZ_output, 16);
# solver.Iterate(1550, 1e-5)
# This segfaults:
# solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_dom_decomp)
# This does not fail but the solution says that there is no preconditioner
# solver.SetAztecOption(AztecOO.AZ_subdomain_solve, AztecOO.AZ_ilu)
# Complains and fails
# solver.SetParameters({"precond": "dom_decomp",
# "subdomain_solve": "ilu",
# "overlap": 1,
# "graph_fill": 1})
# This complains and fails
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_cg)
# This is incredibly fast but complains some:
# This appears to be the default and it works:
# solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_none)
# Solutions on infinite resistor grids:
# http://www.mathpages.com/home/kmath668/kmath668.htm
# Example slides, interesting python code:
# http://trilinos.org/oldsite/packages/pytrilinos/PyTrilinosTutorial.pdf
|
|
"""
plot_points.py
Class instance used to plot information over a set of points.
"""
# Load the needed packages
import numpy as np
import os
import pyart
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as \
NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.colors import Normalize as mlabNormalize
from matplotlib.colorbar import ColorbarBase as mlabColorbarBase
from matplotlib.pyplot import cm
from ..core import Variable, Component, common, VariableChoose, QtGui, QtCore
# read_points_csv / write_points_csv are used by openTable / saveTable below;
# the import path here is an assumption about the package layout, so it is
# guarded to avoid breaking module import if the helpers live elsewhere.
try:
    from ..core.points import read_points_csv, write_points_csv
except ImportError:
    pass
# Save image file type and DPI (resolution)
IMAGE_EXT = 'png'
DPI = 200
# ========================================================================
class PointsDisplay(Component):
'''
Class to create a display plot, using data from a Points instance.
'''
Vpoints = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
Vlims = None #: see :ref:`shared_variable`
Vcmap = None #: see :ref:`shared_variable`
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class'''
kwargs, independent = \
common._SimplePluginStart("PointsDisplay").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, Vpoints=None, Vfield=None, Vlims=None, Vcmap=None,
plot_type="histogram", name="PointsDisplay", parent=None):
'''
Initialize the class to create display.
Parameters
----------
[Optional]
Vpoints : :py:class:`~artview.core.core.Variable` instance
Points signal variable. If None start new one with None.
Vfield : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None start new one with empty string.
Vlims : :py:class:`~artview.core.core.Variable` instance
Limits signal variable.
A value of None will instantiate a limits variable.
Vcmap : :py:class:`~artview.core.core.Variable` instance
Colormap signal variable.
A value of None will instantiate a colormap variable.
plot_type : str
Type of plot to produce (e.g. histogram, statistics, table).
name : string
Display window name.
parent : PyQt instance
Parent instance to associate to Display window.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(PointsDisplay, self).__init__(name=name, parent=parent)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
if Vpoints is None:
self.Vpoints = Variable(None)
else:
self.Vpoints = Vpoints
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
if Vlims is None:
self.Vlims = Variable({})
else:
self.Vlims = Vlims
if Vcmap is None:
self.Vcmap = Variable(None)
else:
self.Vcmap = Vcmap
self.sharedVariables = {"Vpoints": self.NewPoints,
"Vfield": self.NewField,
"Vlims": self.NewLims,
"Vcmap": self.NewCmap,
}
# Connect the components
self.connectAllVariables()
# Set plot title and colorbar units to defaults
self.title = self._get_default_title()
self.units = self._get_default_units()
# Find the PyArt colormap names
self.cm_names = ["pyart_" + m for m in pyart.graph.cm.datad
if not m.endswith("_r")]
self.cm_names.sort()
# Create a figure for output
self._set_fig_ax()
# Launch the GUI interface
self.LaunchGUI()
# Set up Default limits and cmap
if Vcmap is None:
self._set_default_cmap(strong=False)
if Vlims is None:
self._set_default_limits(strong=False)
self.plot_type = None
self.changePlotType(plot_type)
self.show()
####################
# GUI methods #
####################
def LaunchGUI(self):
'''Launches a GUI interface.'''
# Create layout
self.layout = QtGui.QVBoxLayout()
# Create the widget
self.central_widget = QtGui.QWidget()
self.setCentralWidget(self.central_widget)
self._set_figure_canvas()
self.central_widget.setLayout(self.layout)
# Add Menu
self.addStandardMenu()
# Set the status bar to display messages
self.statusbar = self.statusBar()
def addStandardMenu(self):
'''Add Standard Menus.'''
self.menubar = self.menuBar()
self.filemenu = self.menubar.addMenu('File')
openCSV = self.filemenu.addAction('Open Tabular Data')
openCSV.setStatusTip('Open a Region Data CSV file')
openCSV.triggered.connect(self.openTable)
saveCSV = self.filemenu.addAction('Save Tabular Data')
saveCSV.setStatusTip('Save a Region Data CSV file')
saveCSV.triggered.connect(self.saveTable)
self.plotTypeMenu = self.menubar.addMenu('Plot Type')
hist = self.plotTypeMenu.addAction('Histogram')
hist.setStatusTip('Plot histogram of Data ')
hist.triggered.connect(lambda: self.changePlotType('histogram'))
stats = self.plotTypeMenu.addAction('Statistics')
stats.setStatusTip('Show basic statistics of Data')
stats.triggered.connect(lambda: self.changePlotType('statistics'))
table = self.plotTypeMenu.addAction('Table')
table.setStatusTip('Show data in a Table')
table.triggered.connect(lambda: self.changePlotType('table'))
self.displayMenu = self.menubar.addMenu('Display')
##################################
# User display interface methods #
##################################
#############################
# Functionality methods #
#############################
def changePlotType(self, plot_type):
try:
if self.plot_type == 'histogram':
self.layout.removeWidget(self.canvas)
self.canvas.hide()
elif self.plot_type == 'statistics':
self.layout.removeWidget(self.statistics)
self.statistics.close()
elif self.plot_type == 'table':
self.layout.removeWidget(self.table)
self.table.close()
except:
pass
self.plot_type = plot_type
self.displayMenu.clear()
if plot_type == 'histogram':
self._fill_histogram_menu()
# Add the widget to the canvas
self.layout.addWidget(self.canvas, 0)
self.canvas.show()
elif plot_type == 'statistics':
pass
elif plot_type == 'table':
pass
self._update_plot()
def _open_LimsDialog(self):
'''Open a dialog box to change display limits.'''
from .limits import limits_dialog
limits, cmap, change = limits_dialog(
self.Vlims.value, self.Vcmap.value, self.name)
if change == 1:
self.Vcmap.change(cmap)
self.Vlims.change(limits)
def _title_input(self):
'''Retrieve new plot title.'''
val, entry = common.string_dialog_with_reset(
self.title, "Plot Title", "Title:", self._get_default_title())
if entry is True:
self.title = val
self._update_plot()
def _units_input(self):
'''Retrieve new plot units.'''
val, entry = common.string_dialog_with_reset(
self.units, "Plot Units", "Units:", self._get_default_units())
if entry is True:
self.units = val
self._update_plot()
def _fill_histogram_menu(self):
'''Create the Display Options Button menu.'''
self.dispButton = QtGui.QPushButton("Display Options")
self.dispButton.setToolTip("Adjust display properties")
self.dispButton.setFocusPolicy(QtCore.Qt.NoFocus)
dispmenu = QtGui.QMenu(self)
dispLimits = self.displayMenu.addAction("Adjust Display Limits")
dispLimits.setToolTip("Set data, X, and Y range limits")
# dispTitle = dispmenu.addAction("Change Title")
# dispTitle.setToolTip("Change plot title")
# dispUnit = dispmenu.addAction("Change Units")
# dispUnit.setToolTip("Change units string")
dispSaveFile = self.displayMenu.addAction("Save Image")
dispSaveFile.setShortcut("Ctrl+S")
dispSaveFile.setStatusTip("Save Image using dialog")
# dispHelp = self.displayMenu.addAction("Help")
dispLimits.triggered.connect(self._open_LimsDialog)
# dispTitle.triggered.connect(self._title_input)
# dispUnit.triggered.connect(self._units_input)
dispSaveFile.triggered.connect(self._savefile)
        # dispHelp.triggered.connect(self._displayHelp) #XXX help is outdated
def _displayHelp(self):
text = (
"<b>Using the PlotPoints Display</b><br><br>"
"<i>Purpose</i>:<br>"
"Display a plot of selected points.<br><br>"
"The limits dialog is a common format that allows the user "
"change:<br>"
"<i>X and Y limits<br>"
"Data limits</i><br>"
"However, not all plots take each argument.<br>"
"For example, a simple line plot has no data min/max data "
"value.<br>")
common.ShowLongText(text, set_html=True)
def NewPoints(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Update fields and tilts lists and MenuBoxes
* Check radar scan type and reset limits if needed
* Reset units and title
* If strong update: update plot
'''
# test for None
if self.Vpoints.value is None:
# self.fieldBox.clear()
return
# Get field names
self.fieldnames = self.Vpoints.value.fields.keys()
# self._fillFieldBox()
self.units = self._get_default_units()
self.title = self._get_default_title()
if strong:
self._update_plot()
# self._update_infolabel()
def NewField(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Reset colormap
* Reset units
* Update fields MenuBox
* If strong update: update plot
'''
self._set_default_cmap(strong=False)
self.units = self._get_default_units()
self.title = self._get_default_title()
# idx = self.fieldBox.findText(value)
# self.fieldBox.setCurrentIndex(idx)
if strong:
self._update_plot()
# self._update_infolabel()
def NewLims(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vlims <artview.core.core.Variable>`.
This will:
* If strong update: update axes
'''
if strong:
self._update_axes()
def NewCmap(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vcmap <artview.core.core.Variable>`.
This will:
* If strong update: update plot
'''
if strong:
pass
# self._update_plot()
########################
    # Selection methods    #
########################
####################
# Plotting methods #
####################
def _set_fig_ax(self):
'''Set the figure and axis to plot.'''
self.XSIZE = 5
self.YSIZE = 5
self.fig = Figure(figsize=(self.XSIZE, self.YSIZE))
self.ax = self.fig.add_axes([0.2, 0.2, 0.7, 0.7])
def _set_figure_canvas(self):
'''Set the figure canvas to draw in window area.'''
self.canvas = FigureCanvasQTAgg(self.fig)
def _update_plot(self):
'''Draw/Redraw the plot.'''
if self.Vpoints.value is None:
return
# Create the plot with PyArt PlotDisplay
self.ax.cla() # Clear the plot axes
# Reset to default title if user entered nothing w/ Title button
colorbar_flag = False
points = self.Vpoints.value
field = self.Vfield.value
cmap = self.Vcmap.value
if field not in points.fields.keys():
self.canvas.draw()
self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
"background:rgba(255,0,0,255);" +
"color:black;font-weight:bold;}")
self.statusbar.showMessage("Field not Found", msecs=5000)
return
else:
self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
"background:rgba(0,0,0,0);" +
"color:black;font-weight:bold;}")
self.statusbar.clearMessage()
if self.plot_type == "histogram":
self.plot = self.ax.hist(
points.fields[field]['data'], bins=25,
range=(cmap['vmin'], cmap['vmax']),
figure=self.fig)
self.ax.set_ylabel("Counts")
# If limits exists, update the axes otherwise retrieve
# self._update_axes()
self._update_limits()
# If the colorbar flag is thrown, create it
if colorbar_flag:
# Clear the colorbar axes
self.cax.cla()
self.cax = self.fig.add_axes([0.2, 0.10, 0.7, 0.02])
norm = mlabNormalize(vmin=cmap['vmin'],
vmax=cmap['vmax'])
self.cbar = mlabColorbarBase(self.cax, cmap=self.cm_name,
norm=norm,
orientation='horizontal')
# colorbar - use specified units or default depending on
# what has or has not been entered
self.cbar.set_label(self.units)
self.canvas.draw()
elif self.plot_type == 'statistics':
if (self.Vpoints.value is None or
self.Vfield.value not in self.Vpoints.value.fields):
common.ShowWarning("Please select Region and Field first")
else:
points = self.Vpoints.value
field = self.Vfield.value
SelectRegionstats = common._array_stats(
points.fields[field]['data'])
text = "<b>Basic statistics for the selected Region</b><br><br>"
for stat in SelectRegionstats:
text += ("<i>%s</i>: %5.2f<br>" %
(stat, SelectRegionstats[stat]))
self.statistics = QtGui.QDialog()
layout = QtGui.QGridLayout(self.statistics)
self.statistics = QtGui.QTextEdit("")
self.statistics.setAcceptRichText(True)
self.statistics.setReadOnly(True)
self.statistics.setText(text)
self.layout.addWidget(self.statistics, 0)
elif self.plot_type == "table":
if self.Vpoints.value is not None:
# Instantiate Table
self.table = common.CreateTable(self.Vpoints.value)
self.layout.addWidget(self.table, 0)
self.table.display()
# Show the table
self.table.show()
else:
common.ShowWarning("Please select or open Region first")
def _update_axes(self):
'''Change the Plot Axes.'''
limits = self.Vlims.value
self.ax.set_xlim(limits['xmin'], limits['xmax'])
self.ax.set_ylim(limits['ymin'], limits['ymax'])
self.canvas.draw()
def _update_limits(self):
limits = self.Vlims.value
ax = self.ax.get_xlim()
limits['xmin'] = ax[0]
limits['xmax'] = ax[1]
ax = self.ax.get_ylim()
limits['ymin'] = ax[0]
limits['ymax'] = ax[1]
self.Vlims.update()
def _set_default_cmap(self, strong=True):
''' Set colormap to pre-defined default.'''
cmap = pyart.config.get_field_colormap(self.Vfield.value)
d = {}
d['cmap'] = cmap
lims = pyart.config.get_field_limits(self.Vfield.value,
self.Vpoints.value)
if lims != (None, None):
d['vmin'] = lims[0]
d['vmax'] = lims[1]
else:
d['vmin'] = -10
d['vmax'] = 65
self.Vcmap.change(d, False)
def _set_default_limits(self, strong=True):
''' Set limits to pre-defined default.'''
cmap = self.Vcmap.value
d = {}
d['xmin'] = cmap['vmin']
d['xmax'] = cmap['vmax']
d['ymin'] = 0
d['ymax'] = 1000
self.Vlims.change(d, False)
def _get_default_title(self):
'''Get default title.'''
if (self.Vpoints.value is None or
self.Vfield.value not in self.Vpoints.value.fields):
return ''
return 'Points Plot'
# pyart.graph.common.generate_title(self.Vpoints.value,
# self.Vfield.value,
# 0)
def _get_default_units(self):
'''Get default units for current radar and field.'''
if self.Vpoints.value is not None:
try:
return self.Vpoints.value.fields[self.Vfield.value]['units']
except:
return ''
else:
return ''
########################
# Image save methods #
########################
def _savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display using PyQt dialog interface.'''
file_choices = "PNG (*.png)|*.png"
path = unicode(QtGui.QFileDialog.getSaveFileName(
self, 'Save file', ' ', file_choices))
if path:
self.canvas.print_figure(path, dpi=DPI)
self.statusbar.showMessage('Saved to %s' % path)
def openTable(self):
'''Open a saved table of SelectRegion points from a CSV file.'''
path = QtGui.QFileDialog.getOpenFileName(
self, 'Open File', '', 'CSV(*.csv)')
if path == '':
return
points = read_points_csv(path)
self.Vpoints.change(points)
def saveTable(self):
'''Save a Table of SelectRegion points to a CSV file.'''
points = self.Vpoints.value
if points is not None:
fsuggest = ('SelectRegion_' + self.Vfield.value + '_' +
str(points.axes['x_disp']['data'][:].mean()) + '_' +
str(points.axes['y_disp']['data'][:].mean())+'.csv')
path = QtGui.QFileDialog.getSaveFileName(
self, 'Save CSV Table File', fsuggest, 'CSV(*.csv)')
if not path.isEmpty():
write_points_csv(path, points)
else:
common.ShowWarning("No gate selected, no data to save!")
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from ripcord.openstack.common.gettextutils import _
"""
import copy
import gettext
import logging
import os
import re
try:
import UserString as _userString
except ImportError:
import collections as _userString
from babel import localedata
import six
_localedir = os.environ.get('ripcord'.upper() + '_LOCALEDIR')
_t = gettext.translation('ripcord', localedir=_localedir, fallback=True)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
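# Hedged usage sketch: a project importing _ directly (as in the module
# docstring above) can switch to lazy Message objects at startup:
#
#   from ripcord.openstack.common import gettextutils
#   gettextutils.enable_lazy()
#   from ripcord.openstack.common.gettextutils import _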
def _(msg):
if USE_LAZY:
return Message(msg, 'ripcord')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
#
# Also included below is an example LocaleHandler that translates
# Messages to an associated locale, effectively allowing many logs,
# each with their own locale.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
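# A minimal usage sketch (not part of the original module): it only calls the
# enable_lazy() and _() helpers defined above. The message text and the
# substitution parameter are illustrative.
def _example_enable_lazy_translation():
    """Hypothetical helper: switch _() to deferred translation and use it."""
    enable_lazy()
    # With lazy mode on, _() returns a Message; '%' stores the parameters so
    # the string can be translated later, once a locale is known.
    msg = _('something went wrong: %s') % 'details'
    return msg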
class Message(_userString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
self._msg = msg
self._left_extra_msg = ''
self._right_extra_msg = ''
self._locale = None
self.params = None
self.domain = domain
@property
def data(self):
# NOTE(mrodden): this should always resolve to a unicode string
# that best represents the state of the message currently
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
if self.locale:
lang = gettext.translation(self.domain,
localedir=localedir,
languages=[self.locale],
fallback=True)
else:
# use system locale for translations
lang = gettext.translation(self.domain,
localedir=localedir,
fallback=True)
if six.PY3:
ugettext = lang.gettext
else:
ugettext = lang.ugettext
full_msg = (self._left_extra_msg +
ugettext(self._msg) +
self._right_extra_msg)
if self.params is not None:
full_msg = full_msg % self.params
return six.text_type(full_msg)
@property
def locale(self):
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
if not self.params:
return
# This Message object may have been constructed with one or more
# Message objects as substitution parameters, given as a single
# Message, or a tuple or Map containing some, so when setting the
# locale for this Message we need to set it for those Messages too.
if isinstance(self.params, Message):
self.params.locale = value
return
if isinstance(self.params, tuple):
for param in self.params:
if isinstance(param, Message):
param.locale = value
return
if isinstance(self.params, dict):
for param in self.params.values():
if isinstance(param, Message):
param.locale = value
def _save_dictionary_parameter(self, dict_param):
full_msg = self.data
# look for %(blah) fields in string;
# ignore %% and deal with the
# case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
# if we don't find any %(blah) blocks but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
# apparently the full dictionary is the parameter
params = copy.deepcopy(dict_param)
else:
params = {}
for key in keys:
try:
params[key] = copy.deepcopy(dict_param[key])
except TypeError:
# cast uncopyable thing to unicode string
params[key] = six.text_type(dict_param[key])
return params
def _save_parameters(self, other):
# we check for None later to see if
# we actually have parameters to inject,
# so encapsulate if our parameter is actually None
if other is None:
self.params = (other, )
elif isinstance(other, dict):
self.params = self._save_dictionary_parameter(other)
else:
# fallback to casting to unicode,
# this will handle the problematic python code-like
# objects that cannot be deep-copied
try:
self.params = copy.deepcopy(other)
except TypeError:
self.params = six.text_type(other)
return self
# overrides to be more string-like
def __unicode__(self):
return self.data
def __str__(self):
if six.PY3:
return self.__unicode__()
return self.data.encode('utf-8')
def __getstate__(self):
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
'domain', 'params', '_locale']
new_dict = self.__dict__.fromkeys(to_copy)
for attr in to_copy:
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
return new_dict
def __setstate__(self, state):
for (k, v) in state.items():
setattr(self, k, v)
# operator overloads
def __add__(self, other):
copied = copy.deepcopy(self)
copied._right_extra_msg += other.__str__()
return copied
def __radd__(self, other):
copied = copy.deepcopy(self)
copied._left_extra_msg += other.__str__()
return copied
def __mod__(self, other):
# do a format string to catch and raise
# any possible KeyErrors from missing parameters
self.data % other
copied = copy.deepcopy(self)
return copied._save_parameters(other)
def __mul__(self, other):
return self.data * other
def __rmul__(self, other):
return other * self.data
def __getitem__(self, key):
return self.data[key]
def __getslice__(self, start, end):
return self.data.__getslice__(start, end)
def __getattribute__(self, name):
# NOTE(mrodden): handle lossy operations that we can't deal with yet
# These override the UserString implementation, since UserString
# uses our __class__ attribute to try and build a new message
# after running the inner data string through the operation.
# At that point, we have lost the gettext message id and can just
# safely resolve to a string instead.
ops = ['capitalize', 'center', 'decode', 'encode',
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
if name in ops:
return getattr(self.data, name)
else:
return _userString.UserString.__getattribute__(self, name)
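# A minimal sketch of the deferred-translation flow implemented by Message
# (not part of the original module). The 'es' locale and the msgid are
# illustrative assumptions; resolution falls back to the original en_US text
# when no matching catalog is installed.
def _example_message_resolution():
    """Hypothetical helper: build a Message, bind parameters, then localize."""
    msg = Message('Instance %(name)s not found', 'ripcord') % {'name': 'demo'}
    msg.locale = 'es'  # also propagates to any Message substitution parameters
    return six.text_type(msg)  # translated text, or the en_US fallback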
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and all projects update
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def get_localized_message(message, user_locale):
"""Gets a localized version of the given message in the given locale."""
if isinstance(message, Message):
if user_locale:
message.locale = user_locale
return six.text_type(message)
else:
return message
class LocaleHandler(logging.Handler):
"""Handler that can have a locale associated to translate Messages.
A quick example of how to utilize the Message class above.
LocaleHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating the internal Message.
"""
def __init__(self, locale, target):
"""Initialize a LocaleHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
logging.Handler.__init__(self)
self.locale = locale
self.target = target
def emit(self, record):
if isinstance(record.msg, Message):
# set the locale and resolve to a string
record.msg.locale = self.locale
self.target.emit(record)
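# A minimal wiring sketch for LocaleHandler (not part of the original module).
# The logger name, locale and StreamHandler target are illustrative
# assumptions; any logging.Handler could serve as the target.
def _example_locale_handler(locale='fr', target=None):
    """Hypothetical helper: translate Message log records before emitting."""
    target = target if target is not None else logging.StreamHandler()
    logger = logging.getLogger('ripcord.example')
    logger.addHandler(LocaleHandler(locale, target))
    return logger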
|
|
"""
An API consists of a number of endpoints. Typically a model in a database has one API with two endpoints.
The API handles the CRUD operations for that model. The first endpoint is used to list a model's objects, the
second returns a specific object by ID.
The first endpoint (the list endpoint) allows for its results to be filtered by specifying querystring
parameters that map to field names. When calling the API endpoint via an HTTP request, developers can prefix a
field name with a - (minus) symbol to exclude, rather than filter by, that value. So for example, the URL
``/polls/question.json?-id=1`` would do this::
Poll.objects.exclude(id = 1)
Submitting a 'POST' request to the first endpoint creates a new object, populated by the data submitted in
the POST.
The second endpoint (the object endpoint) takes a single required argument: the object ID, unless it has a
parent API, in which case it also requires the ID of the parent object. Submitting a 'PUT' request to this
endpoint will update the current object, and a 'DELETE' request will delete the object.
APIs are defined almost exactly like ``ModelAdmin`` classes, complete with inlines for foreign key
relationships.
User restrictions
-----------------
By default there are no restrictions placed on endpoints, so it's completely up to you to determine whether a
particular user is permitted to update, delete or even access certain objects. You can limit access easily by
overriding the ``get_query_set()`` function within your API, but it's your responsibility to lock down the
rest.
"""
from django.conf.urls import patterns, url, include
from django.utils.functional import curry
from django.utils.timezone import now
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.db import models, transaction
from django.forms import Form, ModelForm
from django.forms.models import modelform_factory
from django.http import Http404
from bambu_api import helpers
from bambu_api import decorators
from bambu_api.transformers import library
from bambu_api.exceptions import APIException
class API(object):
"""
Defines a standard API, where the developer must specify all the URLs that make up each endpoint.
The class is instantiated by ``bambu_api.site.register``.
"""
parent = None
"""The parent ``API`` object (default is ``None``)"""
form = Form
"""The ``Form`` class used to interact with this API"""
def __init__(self, api_site):
self.api_site = api_site
    def get_urls(self):
"""
Returns a list of URL patterns, where each view is a method of this class.
"""
return patterns('')
@property
def urls(self):
return self.get_urls()
def get_form(self, request, **kwargs):
"""
Return an instance of a ``Form`` class that can be used to supply data to the API.
"""
return self.form()
class ModelAPI(API):
"""
Defines an API that derives its endpoints' URLs and functionality from a particular Django model.
Like ``API``, it's instantiated by ``bambu_api.site.register``.
"""
form = ModelForm
exclude = ()
"""A tuple of field names not to expose to the API consumer"""
fields = ()
"""A tuple of field names to expose to the API consumer"""
inlines = []
"""A list of classes deriving from ``ModelInline`` that specify child models in foreign key relationships"""
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
"""A list of permitted HTTP verbs"""
allowed_formats = ('xml', 'json')
"""A list of permitted data formats"""
raw_id_fields = ()
readonly_fields = ()
"""A tuple of fieldnames whose data cannot be modified by API consumers"""
return_values = {}
def __init__(self, model, api_site):
super(ModelAPI, self).__init__(api_site)
self.model = model
self.inline_instances = []
for inline_class in self.inlines:
fks_to_parent = [
f for f in inline_class.model._meta.fields
if isinstance(f, models.ForeignKey) and (
f.rel.to == self.model
or f.rel.to in self.model._meta.get_parent_list()
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
rel_name = fk.rel.related_name or '%s_set' % (
inline_class.model._meta.module_name
)
self.inline_instances.append(
inline_class(
inline_class.model, self, fk,
rel_name, self.api_site
)
)
elif len(fks_to_parent) == 0:
raise Exception(
'%s has no ForeignKey to %s' % (
inline_class.model, self.model
)
)
else:
raise Exception(
'%s has more than 1 ForeignKey to %s' % (
inline_class.model, self.model
)
)
@property
def list_allowed_methods(self):
"""Returns a list of the allowed HTTP verbs for object list endpoints"""
return [m for m in self.allowed_methods if m in ('GET', 'POST')]
@property
def object_allowed_methods(self):
"""Returns a list of the allowed HTTP verbs for single object endpoints"""
return [m for m in self.allowed_methods if m in ('GET', 'PUT', 'DELETE')]
def example_object(self, index = 0):
"""Provides a dictionary of sample data used for documentation"""
return {}
def example_list_response(self, count = 3):
"""Provides a list of sample dictionaries by calling ``example_object`` ``count`` number of times"""
""":param count: The number of sample objects to return"""
return [self.example_object(i) for i in range(0, count)]
def example_object_response(self):
"""An alias for ``example_object``"""
return self.example_object()
def get_urls(self):
"""
Automatically creates URL patterns for the model the class is registered to, and then runs
through each ``ModelInline`` specified in ``inlines`` to add its URL patterns to this list.
It automatically provides basic descriptions for each argument, and creates required arguments
for each inline API. For example, the 1 in the URL ``/api/polls/question/1/choices.json`` is a
required argument for ``ChoiceInline.get_query_set()``. The relationship between the ``Choice``
and ``Question`` models is automatically discovered and the correct name given to the argument.
For now, the argument is always considered to be an integer (the primary key of the parent
object).
"""
info = self.model._meta.app_label, self.model._meta.module_name
singular = unicode(self.model._meta.verbose_name)
plural = unicode(self.model._meta.verbose_name_plural)
if singular.islower():
singular = singular.capitalize()
if plural.islower():
plural = plural.capitalize()
this = self
while this.parent:
parent_singular = unicode(this.parent.model._meta.verbose_name)
if parent_singular.islower():
parent_singular = parent_singular.capitalize()
singular = '%s: %s' % (parent_singular, singular)
plural = '%s: %s' % (parent_singular, plural)
this = this.parent
plural = '%s: %s' % (self.model._meta.app_label.capitalize(), plural)
singular = '%s: %s' % (self.model._meta.app_label.capitalize(), singular)
plural_view = helpers.wrap_api_function(
self.api_site, self.list_view, 1,
self.list_allowed_methods, self.prepare_output_data,
plural
)
single_view = helpers.wrap_api_function(
self.api_site, self.object_view, 2,
self.object_allowed_methods, self.prepare_output_data,
singular
)
single_view = decorators.argument('object_id', 'id', u'The ID of the %s to return' % self.model._meta.verbose_name)(single_view)
single_view = decorators.argument('format', 'str', u'The data format to return')(single_view)
plural_view = decorators.argument('format', 'str', u'The data format to return')(plural_view)
if self.fields is None or not any(self.fields):
fields = []
else:
fields = list(self.fields)
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
returns = {}
for f in self.model._meta.local_fields:
if f.name in exclude:
continue
if any(fields) and not f.name in fields:
continue
ft = 'str'
if isinstance(f, models.IntegerField):
ft = 'int'
elif isinstance(f, models.DecimalField):
ft = 'float'
elif isinstance(f, models.BooleanField):
ft = 'bool'
elif isinstance(f, models.ForeignKey):
ft = 'int'
elif isinstance(f, models.ManyToManyField):
ft = 'list'
description = self.return_values.get(f.name,
f.help_text or (u'The %s\'s %s' % (self.model._meta.verbose_name, f.verbose_name))
)
returns[f.name] = (ft, description)
single_view = decorators.returns(returns)(single_view)
plural_view = decorators.returns(returns)(plural_view)
urlpatterns = patterns('',
url(r'^\.(?P<format>' + '|'.join(self.allowed_formats) + ')$',
plural_view,
name = '%s_%s_list' % info
),
url(r'^/(?P<object_id>\d+)\.(?P<format>' + '|'.join(self.allowed_formats) + ')$',
single_view,
name = '%s_%s_single' % info
)
)
for inline in self.inline_instances:
vn = inline.rel_field.rel.to._meta.verbose_name
urlpatterns += patterns('',
decorators.argument(
inline.rel_field.name, 'int', u'The ID of the parent %s' % vn
)(
url(
r'^/(?P<' + inline.rel_field.name + '>\d+)/' + inline.rel_name,
include(inline.get_urls())
)
)
)
return urlpatterns
def get_form(self, request, obj = None, **kwargs):
if self.fields is None or not any(self.fields):
fields = None
else:
fields = list(self.fields)
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(kwargs.get("exclude", []))
exclude.extend(self.readonly_fields)
exclude = exclude or None
defaults = {
'form': self.form,
'fields': fields,
'exclude': exclude,
'formfield_callback': curry(
self.formfield_for_dbfield,
request = request
)
}
defaults.update(kwargs)
return modelform_factory(self.model, **defaults)
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Works in the same way as ``django.contrib.admin.ModelAdmin`` instances.
"""
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
if isinstance(db_field, models.ForeignKey):
return self.formfield_for_foreignkey(db_field, request, **kwargs)
if isinstance(db_field, models.ManyToManyField):
return self.formfield_for_manytomany(db_field, request, **kwargs)
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Works in the same way as ``django.contrib.admin.ModelAdmin`` instances.
"""
return db_field.formfield(**kwargs)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Works in the same way as ``django.contrib.admin.ModelAdmin`` instances.
"""
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Works in the same way as ``django.contrib.admin.ModelAdmin`` instances.
"""
if not db_field.rel.through._meta.auto_created:
return None
return db_field.formfield(**kwargs)
def save_form(self, request, form, obj = None):
"""Save the data from the ``Form`` instance in memory, but not to the database"""
return form.save(commit = False)
def save_object(self, request, obj):
"""Save the data from the object to the database"""
obj.save()
def prepare_initial_data(self, form_class, obj = None, **kwargs):
"""Populates the model's form with data from the specified object"""
return helpers.form_initial_data(form_class, obj, **kwargs)
def get_query_set(self, request, **kwargs):
"""
Filter the API's model manager queryset using the specified kwargs. Override this function
to limit the results returned (ie: only showing data relating to the authenticated user)
"""
return self.model.objects.filter(**kwargs)
def prepare_output_data(self, request, obj, max_detail_level = 1):
"""
Transform the data from a ``Model`` instance found by ``get_query_set()`` into a dict so it
can be serialised
"""
return library.transform(
obj, max_detail_level,
fields = self.fields,
exclude = self.exclude
)
def add_field_to_include_filter(self, queryset, field, value):
"""
Filters a queryset where the value of the ``field`` column is set to that of ``value``.
If ``value`` is a list, the query filters out any row where ``field`` contains a value
found within the ``value`` list.
"""
if len(value) > 1:
return queryset.filter(
**{
'%s__in' % field: value
}
)
else:
return queryset.filter(
**{
field: value[0]
}
)
def add_field_to_exclude_filter(self, queryset, field, value):
"""
Excludes rows from a query where the value of the ``field`` column is set to that of ``value``.
If ``value`` is a list, each row wherein ``field`` contains a value found in the ``value`` list
is excluded.
"""
if len(value) > 1:
return queryset.exclude(
**{
'%s__in' % field: value
}
)
else:
return queryset.exclude(
**{
field: value[0]
}
)
def list_view(self, request, **kwargs):
"""
The main view for the first (list) endpoint. This typically accepts the 'GET' and 'POST'
HTTP verbs.
        Where 'GET' is the verb, a queryset is generated by combining the specified kwargs with the
        querystring, and a list of matching objects is returned.
        Where 'POST' is the verb, a ``Form`` class is instantiated and the data from the POST added to it.
The form is saved, and if successful, the saved object is returned. If not, an ``APIException``
is raised, with the descriptions of the offending fields' validation exceptions contained within.
"""
if request.method == 'GET':
include = {}
exclude = {}
fields = [f.name for f in self.model._meta.local_fields]
order_by = []
qs = self.get_query_set(request, **kwargs)
for key, value in request.GET.items():
values = request.GET.getlist(key)
if key == 'order':
order_by = values
elif key.startswith('-'):
if not key[1:] in fields:
continue
qs = self.add_field_to_exclude_filter(qs, key[1:], values)
else:
if key.startswith('+'):
key = key[1:]
if not key in fields:
continue
qs = self.add_field_to_include_filter(qs, key, values)
if hasattr(qs.query, 'select_fields'):
fields = [f.name for f in qs.query.select_fields] + list(qs.query.aggregate_select.keys())
else:
fields = [f.field.name for f in qs.query.select] + list(qs.query.aggregate_select.keys())
if not any(fields):
fields = [f.name for f in self.model._meta.local_fields]
if any(include):
qs = qs.filter(**include)
if any(exclude):
qs = qs.exclude(**exclude)
if any(order_by):
orders = []
for order in order_by:
direction = ''
while order.startswith('-'):
if direction == '':
direction = '-'
else:
direction = ''
order = order[1:]
if order in fields or order == '?':
if order == '?' and direction == '-':
raise APIException(u'Cannot order negatively by random')
                    if order == '?' and any(orders):
raise APIException(u'Cannot order by random when already ordering by other fields')
else:
raise APIException(u'Cannot order by %s' % order)
if '?' in orders:
raise APIException(u'Cannot order by random when already ordering by other fields')
orders.append(direction + order)
qs = qs.order_by(*orders)
return qs
elif request.method == 'POST':
form_class = self.get_form(request)
data = self.prepare_initial_data(form_class, **kwargs)
for key, value in request.POST.items():
data[key] = value
form = form_class(data, request.FILES)
if form.is_valid():
obj = self.save_form(request, form)
self.save_object(request, obj)
return obj
errors = []
for error in form.non_field_errors():
errors.append(error)
for field in form:
if field.errors and any(field.errors):
inline_errors = list([e for e in field.errors])
errors.append(
{
field.name: ', '.join(inline_errors)
}
)
raise APIException(errors)
def get_object(self, request, object_id, **kwargs):
"""
Returns a single object by ID, with the specified kwargs also added to the query. Kwargs are most
commonly specified where the API is a child of another.
"""
try:
return self.get_query_set(request, **kwargs).get(pk = object_id)
except self.model.DoesNotExist:
raise Http404('Object not found.')
def object_view(self, request, object_id, **kwargs):
"""
The view for the second (object) endpoint, wherein a 'GET' request returns an object matching the
given ID and kwargs, a 'PUT' updates the object and a 'DELETE' removes it.
When a 'PUT' request is given, the values of the posted data are given to a newly-instantiated
``Form``. If the data is correct, the updated object is returned. If invalid, an ``APIException`` is
raised, with the descriptions of the offending fields' validation exceptions contained within.
When a 'DELETE' request is given and the operation successful, the value 'OK' is returned.
"""
obj = self.get_object(request, object_id, **kwargs)
if request.method == 'DELETE':
with transaction.commit_on_success():
obj.delete()
return ['OK']
elif request.method == 'PUT':
request.method = 'POST'
request._load_post_and_files()
request.method = 'PUT'
form_class = self.get_form(request, obj)
data = self.prepare_initial_data(form_class, obj)
for key, value in request.POST.items():
data[key] = value
form = form_class(data, request.FILES, instance = obj)
if form.is_valid():
obj = self.save_form(request, form, obj)
self.save_object(request, obj)
return obj
errors = []
for error in form.non_field_errors():
errors.append(error)
for field in form:
if field.errors and any(field.errors):
inline_errors = list([e for e in field.errors])
errors.append(
{
field.name: inline_errors
}
)
            raise APIException(errors)
return obj
class ModelInline(ModelAPI):
"""
A child API. This does not need to be registered with an API site; instead it should be referenced in the
``inlines`` property of the parent ``ModelAPI`` class.
"""
def __init__(self, model, parent, rel_field, rel_name, api_site):
super(ModelInline, self).__init__(model, api_site)
self.parent = parent
self.rel_field = rel_field
self.rel_name = rel_name
def prepare_initial_data(self, form_class, obj = None, **kwargs):
data = helpers.form_initial_data(form_class, obj)
if not obj:
data[self.rel_field.name] = kwargs.get(self.rel_field.name)
return data
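# A minimal registration sketch for the classes above, written as a comment
# because it assumes names that are not defined in this module (the
# Question/Choice models from the module docstring's polls example and the
# ``bambu_api.site`` registry mentioned in the ``API`` docstring):
#
#     from bambu_api import site
#     from polls.models import Question, Choice
#
#     class ChoiceInline(ModelInline):
#         model = Choice
#
#     class QuestionAPI(ModelAPI):
#         inlines = [ChoiceInline]
#
#         def get_query_set(self, request, **kwargs):
#             # restrict results here if per-user access control is needed
#             return super(QuestionAPI, self).get_query_set(request, **kwargs)
#
#     site.register(Question, QuestionAPI)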
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses that are useful for training GANs.
Most of these losses belong to one of two main groups, but some do not:
1) xxxxx_generator_loss
2) xxxxx_discriminator_loss
Example:
1) wasserstein_generator_loss
2) wasserstein_discriminator_loss
Other example:
wasserstein_gradient_penalty
All losses must be able to accept 1D or 2D Tensors, so as to be compatible with
patchGAN style losses (https://arxiv.org/abs/1611.07004).
To make these losses usable in the TFGAN framework, please create a tuple
version of the losses with `losses_utils.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as contrib_variables_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.distributions import distribution as ds
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.losses import util
from tensorflow.python.summary import summary
__all__ = [
'acgan_discriminator_loss',
'acgan_generator_loss',
'least_squares_discriminator_loss',
'least_squares_generator_loss',
'modified_discriminator_loss',
'modified_generator_loss',
'minimax_discriminator_loss',
'minimax_generator_loss',
'wasserstein_discriminator_loss',
'wasserstein_generator_loss',
'wasserstein_gradient_penalty',
'mutual_information_penalty',
'combine_adversarial_loss',
]
# Wasserstein losses from `Wasserstein GAN` (https://arxiv.org/abs/1701.07875).
def wasserstein_generator_loss(
discriminator_gen_outputs,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Wasserstein generator loss for GANs.
See `Wasserstein GAN` (https://arxiv.org/abs/1701.07875) for more details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add detailed summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'generator_wasserstein_loss', (
discriminator_gen_outputs, weights)) as scope:
discriminator_gen_outputs = math_ops.to_float(discriminator_gen_outputs)
loss = - discriminator_gen_outputs
loss = losses.compute_weighted_loss(
loss, weights, scope, loss_collection, reduction)
if add_summaries:
summary.scalar('generator_wass_loss', loss)
return loss
def wasserstein_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Wasserstein discriminator loss for GANs.
See `Wasserstein GAN` (https://arxiv.org/abs/1701.07875) for more details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
real_weights: A scalar or a `Tensor` of size [batch_size, K] used to rescale
the real loss.
generated_weights: A scalar or a `Tensor` of size [batch_size, K] used to
rescale the generated loss.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'discriminator_wasserstein_loss', (
discriminator_real_outputs, discriminator_gen_outputs, real_weights,
generated_weights)) as scope:
discriminator_real_outputs = math_ops.to_float(discriminator_real_outputs)
discriminator_gen_outputs = math_ops.to_float(discriminator_gen_outputs)
discriminator_real_outputs.shape.assert_is_compatible_with(
discriminator_gen_outputs.shape)
loss_on_generated = losses.compute_weighted_loss(
discriminator_gen_outputs, generated_weights, scope,
loss_collection=None, reduction=reduction)
loss_on_real = losses.compute_weighted_loss(
discriminator_real_outputs, real_weights, scope, loss_collection=None,
reduction=reduction)
loss = loss_on_generated - loss_on_real
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_wass_loss', loss_on_generated)
summary.scalar('discriminator_real_wass_loss', loss_on_real)
summary.scalar('discriminator_wass_loss', loss)
return loss
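# A minimal pairing sketch (not part of the original module): `d_real` and
# `d_fake` are assumed to be discriminator logit Tensors for a real and a
# generated batch, matching the argument descriptions above.
def _example_wasserstein_losses(d_real, d_fake):
  """Hypothetical helper: build both Wasserstein losses for one train step."""
  gen_loss = wasserstein_generator_loss(d_fake)
  dis_loss = wasserstein_discriminator_loss(d_real, d_fake)
  return gen_loss, dis_loss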
# ACGAN losses from `Conditional Image Synthesis With Auxiliary Classifier GANs`
# (https://arxiv.org/abs/1610.09585).
def acgan_discriminator_loss(
discriminator_gen_classification_logits,
discriminator_real_classification_logits,
one_hot_labels,
label_smoothing=0.0,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""ACGAN loss for the discriminator.
The ACGAN loss adds a classification loss to the conditional discriminator.
Therefore, the discriminator must output a tuple consisting of
(1) the real/fake prediction and
(2) the logits for the classification (usually the last conv layer,
flattened).
For more details:
ACGAN: https://arxiv.org/abs/1610.09585
Args:
discriminator_gen_classification_logits: Classification logits for generated
data.
discriminator_real_classification_logits: Classification logits for real
data.
one_hot_labels: A Tensor holding one-hot labels for the batch.
label_smoothing: A float in [0, 1]. If greater than 0, smooth the labels for
"discriminator on real data" as suggested in
https://arxiv.org/pdf/1701.00160
real_weights: A scalar or a `Tensor` of size [batch_size, K] used to rescale
the real loss.
generated_weights: A scalar or a `Tensor` of size [batch_size, K] used to
rescale the generated loss.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. Shape depends on `reduction`.
Raises:
TypeError: If the discriminator does not output a tuple.
"""
loss_on_generated = losses.softmax_cross_entropy(
one_hot_labels, discriminator_gen_classification_logits,
weights=generated_weights, scope=scope, loss_collection=None,
reduction=reduction)
loss_on_real = losses.softmax_cross_entropy(
one_hot_labels, discriminator_real_classification_logits,
weights=real_weights, label_smoothing=label_smoothing, scope=scope,
loss_collection=None, reduction=reduction)
loss = loss_on_generated + loss_on_real
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_ac_loss', loss_on_generated)
summary.scalar('discriminator_real_ac_loss', loss_on_real)
summary.scalar('discriminator_ac_loss', loss)
return loss
def acgan_generator_loss(
discriminator_gen_classification_logits,
one_hot_labels,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""ACGAN loss for the generator.
The ACGAN loss adds a classification loss to the conditional discriminator.
Therefore, the discriminator must output a tuple consisting of
(1) the real/fake prediction and
(2) the logits for the classification (usually the last conv layer,
flattened).
For more details:
ACGAN: https://arxiv.org/abs/1610.09585
Args:
discriminator_gen_classification_logits: Classification logits for generated
data.
one_hot_labels: A Tensor holding one-hot labels for the batch.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. Shape depends on `reduction`.
Raises:
    ValueError: if arg module is neither `generator` nor `discriminator`
TypeError: if the discriminator does not output a tuple.
"""
loss = losses.softmax_cross_entropy(
one_hot_labels, discriminator_gen_classification_logits, weights=weights,
scope=scope, loss_collection=loss_collection, reduction=reduction)
if add_summaries:
summary.scalar('generator_ac_loss', loss)
return loss
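# A minimal ACGAN sketch (not part of the original module): the classification
# logits for the generated and real batches and the shared one-hot labels are
# assumed inputs, as described in the docstrings above.
def _example_acgan_losses(gen_logits, real_logits, one_hot_labels):
  """Hypothetical helper: build both ACGAN classification losses."""
  dis_loss = acgan_discriminator_loss(gen_logits, real_logits, one_hot_labels)
  gen_loss = acgan_generator_loss(gen_logits, one_hot_labels)
  return gen_loss, dis_loss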
# Wasserstein Gradient Penalty losses from `Improved Training of Wasserstein
# GANs` (https://arxiv.org/abs/1704.00028).
# TODO(joelshor): Figure out why this function can't be inside a name scope.
def wasserstein_gradient_penalty(
generated_data,
real_data,
generator_inputs,
discriminator_fn,
discriminator_scope,
epsilon=1e-10,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""The gradient penalty for the Wasserstein discriminator loss.
See `Improved Training of Wasserstein GANs`
(https://arxiv.org/abs/1704.00028) for more details.
Args:
generated_data: Output of the generator.
real_data: Real data.
generator_inputs: Exact argument to pass to the generator, which is used
as optional conditioning to the discriminator.
discriminator_fn: A discriminator function that conforms to TFGAN API.
discriminator_scope: If not `None`, reuse discriminators from this scope.
epsilon: A small positive number added for numerical stability when
computing the gradient norm.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
Raises:
ValueError: If the rank of data Tensors is unknown.
"""
if generated_data.shape.ndims is None:
raise ValueError('`generated_data` can\'t have unknown rank.')
if real_data.shape.ndims is None:
raise ValueError('`real_data` can\'t have unknown rank.')
differences = generated_data - real_data
batch_size = differences.shape[0].value or array_ops.shape(differences)[0]
alpha_shape = [batch_size] + [1] * (differences.shape.ndims - 1)
alpha = random_ops.random_uniform(shape=alpha_shape)
interpolates = real_data + (alpha * differences)
# Reuse variables if a discriminator scope already exists.
reuse = False if discriminator_scope is None else True
with variable_scope.variable_scope(discriminator_scope, 'gpenalty_dscope',
reuse=reuse):
disc_interpolates = discriminator_fn(interpolates, generator_inputs)
if isinstance(disc_interpolates, tuple):
# ACGAN case: disc outputs more than one tensor
disc_interpolates = disc_interpolates[0]
gradients = gradients_impl.gradients(disc_interpolates, interpolates)[0]
gradient_squares = math_ops.reduce_sum(
math_ops.square(gradients), axis=list(range(1, gradients.shape.ndims)))
# Propagate shape information, if possible.
if isinstance(batch_size, int):
gradient_squares.set_shape([
batch_size] + gradient_squares.shape.as_list()[1:])
# For numerical stability, add epsilon to the sum before taking the square
# root. Note tf.norm does not add epsilon.
slopes = math_ops.sqrt(gradient_squares + epsilon)
penalties = math_ops.square(slopes - 1.0)
penalty = losses.compute_weighted_loss(
penalties, weights, scope=scope, loss_collection=loss_collection,
reduction=reduction)
if add_summaries:
summary.scalar('gradient_penalty_loss', penalty)
return penalty
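# A minimal WGAN-GP sketch (not part of the original module): the
# discriminator_fn, its variable scope, the logit Tensors and the penalty
# coefficient 10.0 are illustrative assumptions taken from the referenced
# paper rather than values fixed by this module.
def _example_wgan_gp_discriminator_loss(real_data, generated_data,
                                        generator_inputs, discriminator_fn,
                                        dis_scope, d_real, d_fake):
  """Hypothetical helper: Wasserstein discriminator loss plus the penalty."""
  penalty = wasserstein_gradient_penalty(
      generated_data, real_data, generator_inputs, discriminator_fn, dis_scope)
  return wasserstein_discriminator_loss(d_real, d_fake) + 10.0 * penalty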
# Original losses from `Generative Adversarial Nets`
# (https://arxiv.org/abs/1406.2661).
def minimax_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
label_smoothing=0.25,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Original minimax discriminator loss for GANs, with label smoothing.
Note that the authors don't recommend using this loss. A more practically
useful loss is `modified_discriminator_loss`.
L = - real_weights * log(sigmoid(D(x)))
- generated_weights * log(1 - sigmoid(D(G(z))))
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
real_weights: A scalar or a `Tensor` of size [batch_size, K] used to rescale
the real loss.
generated_weights: A scalar or a `Tensor` of size [batch_size, K] used to
rescale the generated loss.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'discriminator_minimax_loss', (
discriminator_real_outputs, discriminator_gen_outputs, real_weights,
generated_weights, label_smoothing)) as scope:
    # -log(sigmoid(D(x))), with the real labels smoothed to (1 - label_smoothing)
loss_on_real = losses.sigmoid_cross_entropy(
array_ops.ones_like(discriminator_real_outputs),
discriminator_real_outputs, real_weights, label_smoothing, scope,
loss_collection=None, reduction=reduction)
    # -log(1 - sigmoid(D(G(z))))
loss_on_generated = losses.sigmoid_cross_entropy(
array_ops.zeros_like(discriminator_gen_outputs),
discriminator_gen_outputs, generated_weights, scope=scope,
loss_collection=None, reduction=reduction)
loss = loss_on_real + loss_on_generated
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_minimax_loss', loss_on_generated)
summary.scalar('discriminator_real_minimax_loss', loss_on_real)
summary.scalar('discriminator_minimax_loss', loss)
return loss
def minimax_generator_loss(
discriminator_gen_outputs,
label_smoothing=0.0,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Original minimax generator loss for GANs.
Note that the authors don't recommend using this loss. A more practically
useful loss is `modified_generator_loss`.
L = log(sigmoid(D(x))) + log(1 - sigmoid(D(G(z))))
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
weights: A scalar or a `Tensor` of size [batch_size, K] used to rescale
the loss.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'generator_minimax_loss') as scope:
loss = - minimax_discriminator_loss(
array_ops.ones_like(discriminator_gen_outputs),
discriminator_gen_outputs, label_smoothing, weights, weights, scope,
loss_collection, reduction, add_summaries=False)
if add_summaries:
summary.scalar('generator_minimax_loss', loss)
return loss
def modified_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
label_smoothing=0.25,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Same as minimax discriminator loss.
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
real_weights: A scalar or a `Tensor` of size [batch_size, K] used to rescale
the real loss.
generated_weights: A scalar or a `Tensor` of size [batch_size, K] used to
rescale the generated loss.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
return minimax_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
label_smoothing,
real_weights,
generated_weights,
scope or 'discriminator_modified_loss',
loss_collection,
reduction,
add_summaries)
def modified_generator_loss(
discriminator_gen_outputs,
label_smoothing=0.0,
weights=1.0,
scope='generator_modified_loss',
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Modified generator loss for GANs.
L = -log(sigmoid(D(G(z))))
This is the trick used in the original paper to avoid vanishing gradients
early in training. See `Generative Adversarial Nets`
(https://arxiv.org/abs/1406.2661) for more details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
loss = losses.sigmoid_cross_entropy(
array_ops.ones_like(discriminator_gen_outputs), discriminator_gen_outputs,
weights, label_smoothing, scope, loss_collection, reduction)
if add_summaries:
summary.scalar('generator_modified_loss', loss)
return loss
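# A minimal non-saturating GAN sketch (not part of the original module):
# `d_real` and `d_fake` are assumed discriminator logit Tensors. The modified
# generator loss above is paired with modified_discriminator_loss, which
# simply forwards to the (label-smoothed) minimax discriminator loss.
def _example_modified_gan_losses(d_real, d_fake):
  """Hypothetical helper: the common non-saturating loss pairing."""
  gen_loss = modified_generator_loss(d_fake)
  dis_loss = modified_discriminator_loss(d_real, d_fake)
  return gen_loss, dis_loss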
# Least Squares loss from `Least Squares Generative Adversarial Networks`
# (https://arxiv.org/abs/1611.04076).
def least_squares_generator_loss(
discriminator_gen_outputs,
real_label=1,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Least squares generator loss.
This loss comes from `Least Squares Generative Adversarial Networks`
(https://arxiv.org/abs/1611.04076).
L = 1/2 * (D(G(z)) - `real_label`) ** 2
where D(y) are discriminator logits.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
real_label: The value that the generator is trying to get the discriminator
to output on generated data.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'lsq_generator_loss',
(discriminator_gen_outputs, real_label)) as scope:
discriminator_gen_outputs = math_ops.to_float(discriminator_gen_outputs)
loss = math_ops.squared_difference(
discriminator_gen_outputs, real_label) / 2.0
loss = losses.compute_weighted_loss(
loss, weights, scope, loss_collection, reduction)
if add_summaries:
summary.scalar('generator_lsq_loss', loss)
return loss
def least_squares_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
real_label=1,
fake_label=0,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Least squares generator loss.
This loss comes from `Least Squares Generative Adversarial Networks`
(https://arxiv.org/abs/1611.04076).
  L = 1/2 * (D(x) - `real_label`) ** 2 +
1/2 * (D(G(z)) - `fake_label`) ** 2
where D(y) are discriminator logits.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
real_label: The value that the discriminator tries to output for real data.
fake_label: The value that the discriminator tries to output for fake data.
real_weights: A scalar or a `Tensor` of size [batch_size, K] used to rescale
the real loss.
generated_weights: A scalar or a `Tensor` of size [batch_size, K] used to
rescale the generated loss.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'lsq_discriminator_loss',
(discriminator_gen_outputs, real_label)) as scope:
discriminator_real_outputs = math_ops.to_float(discriminator_real_outputs)
discriminator_gen_outputs = math_ops.to_float(discriminator_gen_outputs)
discriminator_real_outputs.shape.assert_is_compatible_with(
discriminator_gen_outputs.shape)
real_losses = math_ops.squared_difference(
discriminator_real_outputs, real_label) / 2.0
fake_losses = math_ops.squared_difference(
discriminator_gen_outputs, fake_label) / 2.0
loss_on_real = losses.compute_weighted_loss(
real_losses, real_weights, scope, loss_collection=None,
reduction=reduction)
loss_on_generated = losses.compute_weighted_loss(
fake_losses, generated_weights, scope, loss_collection=None,
reduction=reduction)
loss = loss_on_real + loss_on_generated
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_lsq_loss', loss_on_generated)
summary.scalar('discriminator_real_lsq_loss', loss_on_real)
summary.scalar('discriminator_lsq_loss', loss)
return loss
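# A minimal LSGAN sketch (not part of the original module): `d_real` and
# `d_fake` are assumed discriminator logit Tensors, and the default
# real_label=1 / fake_label=0 targets from the signatures above are kept.
def _example_least_squares_losses(d_real, d_fake):
  """Hypothetical helper: build the least-squares loss pair."""
  gen_loss = least_squares_generator_loss(d_fake)
  dis_loss = least_squares_discriminator_loss(d_real, d_fake)
  return gen_loss, dis_loss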
# InfoGAN loss from `InfoGAN: Interpretable Representation Learning by
# `Information Maximizing Generative Adversarial Nets`
# https://arxiv.org/abs/1606.03657
def _validate_distributions(distributions):
if not isinstance(distributions, (list, tuple)):
    raise ValueError('`distributions` must be a list or tuple. Instead, '
                     'found %s.' % type(distributions))
for x in distributions:
if not isinstance(x, ds.Distribution):
      raise ValueError('`distributions` must be a list of `Distributions`. '
                       'Instead, found %s.' % type(x))
def _validate_information_penalty_inputs(
structured_generator_inputs, predicted_distributions):
"""Validate input to `mutual_information_penalty`."""
_validate_distributions(predicted_distributions)
if len(structured_generator_inputs) != len(predicted_distributions):
raise ValueError('`structured_generator_inputs` length %i must be the same '
'as `predicted_distributions` length %i.' % (
len(structured_generator_inputs),
len(predicted_distributions)))
def mutual_information_penalty(
structured_generator_inputs,
predicted_distributions,
weights=1.0,
scope='generator_modified_loss',
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Returns a penalty on the mutual information in an InfoGAN model.
This loss comes from an InfoGAN paper https://arxiv.org/abs/1606.03657.
Args:
structured_generator_inputs: A list of Tensors representing the random noise
that must have high mutual information with the generator output. List
length should match `predicted_distributions`.
predicted_distributions: A list of tf.Distributions. Predicted by the
recognizer, and used to evaluate the likelihood of the structured noise.
List length should match `structured_generator_inputs`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A scalar Tensor representing the mutual information loss.
"""
_validate_information_penalty_inputs(
structured_generator_inputs, predicted_distributions)
# Calculate the negative log-likelihood of the reconstructed noise.
log_probs = [math_ops.reduce_mean(dist.log_prob(noise)) for dist, noise in
zip(predicted_distributions, structured_generator_inputs)]
loss = -1 * losses.compute_weighted_loss(
log_probs, weights, scope, loss_collection=loss_collection,
reduction=reduction)
if add_summaries:
summary.scalar('mutual_information_penalty', loss)
return loss
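# A minimal InfoGAN sketch (not part of the original module): the categorical
# recognizer distribution, its logits and the structured noise Tensor (integer
# category samples) are illustrative assumptions.
def _example_mutual_information_penalty(structured_noise, recognizer_logits):
  """Hypothetical helper: penalty for a single categorical latent code."""
  from tensorflow.python.ops.distributions import categorical
  predicted = categorical.Categorical(logits=recognizer_logits)
  return mutual_information_penalty([structured_noise], [predicted])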
def _numerically_stable_global_norm(tensor_list):
"""Compute the global norm of a list of Tensors, with improved stability.
The global norm computation sometimes overflows due to the intermediate L2
step. To avoid this, we divide by a cheap-to-compute max over the
matrix elements.
Args:
tensor_list: A list of tensors, or `None`.
Returns:
A scalar tensor with the global norm.
"""
if np.all([x is None for x in tensor_list]):
return 0.0
list_max = math_ops.reduce_max([math_ops.reduce_max(math_ops.abs(x)) for x in
tensor_list if x is not None])
return list_max * clip_ops.global_norm([x / list_max for x in tensor_list
if x is not None])
def _used_weight(weights_list):
for weight in weights_list:
if weight is not None:
return tensor_util.constant_value(ops.convert_to_tensor(weight))
def _validate_args(losses_list, weight_factor, gradient_ratio):
for loss in losses_list:
loss.shape.assert_is_compatible_with([])
if weight_factor is None and gradient_ratio is None:
raise ValueError(
'`weight_factor` and `gradient_ratio` cannot both be `None.`')
if weight_factor is not None and gradient_ratio is not None:
raise ValueError(
'`weight_factor` and `gradient_ratio` cannot both be specified.')
# TODO(joelshor): Add ability to pass in gradients, to avoid recomputing.
def combine_adversarial_loss(main_loss,
adversarial_loss,
weight_factor=None,
gradient_ratio=None,
gradient_ratio_epsilon=1e-6,
variables=None,
scalar_summaries=True,
gradient_summaries=True,
scope=None):
"""Utility to combine main and adversarial losses.
This utility combines the main and adversarial losses in one of two ways.
1) Fixed coefficient on adversarial loss. Use `weight_factor` in this case.
2) Fixed ratio of gradients. Use `gradient_ratio` in this case. This is often
used to make sure both losses affect weights roughly equally, as in
https://arxiv.org/pdf/1705.05823.
One can optionally also visualize the scalar and gradient behavior of the
losses.
Args:
main_loss: A floating scalar Tensor indicating the main loss.
adversarial_loss: A floating scalar Tensor indication the adversarial loss.
weight_factor: If not `None`, the coefficient by which to multiply the
adversarial loss. Exactly one of this and `gradient_ratio` must be
non-None.
gradient_ratio: If not `None`, the ratio of the magnitude of the gradients.
Specifically,
gradient_ratio = grad_mag(main_loss) / grad_mag(adversarial_loss)
Exactly one of this and `weight_factor` must be non-None.
gradient_ratio_epsilon: An epsilon to add to the adversarial loss
coefficient denominator, to avoid division-by-zero.
variables: List of variables to calculate gradients with respect to. If not
present, defaults to all trainable variables.
scalar_summaries: Create scalar summaries of losses.
gradient_summaries: Create gradient summaries of losses.
scope: Optional name scope.
Returns:
A floating scalar Tensor indicating the desired combined loss.
Raises:
ValueError: Malformed input.
"""
_validate_args([main_loss, adversarial_loss], weight_factor, gradient_ratio)
if variables is None:
variables = contrib_variables_lib.get_trainable_variables()
with ops.name_scope(scope, 'adversarial_loss',
values=[main_loss, adversarial_loss]):
# Compute gradients if we will need them.
if gradient_summaries or gradient_ratio is not None:
main_loss_grad_mag = _numerically_stable_global_norm(
gradients_impl.gradients(main_loss, variables))
adv_loss_grad_mag = _numerically_stable_global_norm(
gradients_impl.gradients(adversarial_loss, variables))
# Add summaries, if applicable.
if scalar_summaries:
summary.scalar('main_loss', main_loss)
summary.scalar('adversarial_loss', adversarial_loss)
if gradient_summaries:
summary.scalar('main_loss_gradients', main_loss_grad_mag)
summary.scalar('adversarial_loss_gradients', adv_loss_grad_mag)
# Combine losses in the appropriate way.
# If `weight_factor` is always `0`, avoid computing the adversarial loss
# tensor entirely.
if _used_weight((weight_factor, gradient_ratio)) == 0:
final_loss = main_loss
elif weight_factor is not None:
final_loss = (main_loss +
array_ops.stop_gradient(weight_factor) * adversarial_loss)
elif gradient_ratio is not None:
grad_mag_ratio = main_loss_grad_mag / (
adv_loss_grad_mag + gradient_ratio_epsilon)
adv_coeff = grad_mag_ratio / gradient_ratio
summary.scalar('adversarial_coefficient', adv_coeff)
final_loss = (main_loss +
array_ops.stop_gradient(adv_coeff) * adversarial_loss)
return final_loss
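# A minimal combination sketch (not part of the original module): the
# reconstruction-style main loss and the 0.001 adversarial weight are
# illustrative assumptions; it exercises the fixed-coefficient mode of
# combine_adversarial_loss.
def _example_combine_losses(reconstruction_loss, adversarial_loss):
  """Hypothetical helper: fixed-weight combination of the two losses."""
  return combine_adversarial_loss(
      reconstruction_loss, adversarial_loss, weight_factor=0.001,
      gradient_summaries=False)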
|