<|file_name|>0006_auto_20150916_0219.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainsite', '0005_auto_20150909_0246'),
]
operations = [
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(max_length=100)),
('email_address', models.CharField(max_length=200)),
],
),
migrations.RemoveField(
model_name='email',
name='customer',
),
migrations.RemoveField(
model_name='email',
name='location',
),
migrations.RenameField(
model_name='customer',
old_name='user',
new_name='owner',
),
migrations.RenameField(
model_name='location',
old_name='user',
new_name='owner',
),
migrations.RenameField(
model_name='session',
old_name='user',
new_name='owner',
),
migrations.RenameField(
model_name='sessiontype',
old_name='user',
new_name='owner',
),
migrations.AddField(
model_name='address',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='link',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='phone',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.DeleteModel(
name='Email',
),
migrations.AddField(
model_name='emailaddress',
name='customer',
field=models.ForeignKey(to='mainsite.Customer', null=True, blank=True),
),
migrations.AddField(
model_name='emailaddress',
name='location',
field=models.ForeignKey(to='mainsite.Location', null=True, blank=True),
),
migrations.AddField(
model_name='emailaddress',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]<|fim▁end|>
<|file_name|>RowLockManager.cpp<|end_file_name|><|fim▁begin|>#include "include/global.h"
#include "alinous.lock/ConcurrentGate.h"
#include "alinous.db.table.lockmonitor/IDatabaseLock.h"
#include "alinous.db.table.lockmonitor/RowLock.h"
#include "alinous.db.table.lockmonitor/IThreadLocker.h"
#include "alinous.db.table/IDatabaseTable.h"
#include "alinous.db.table.lockmonitor.db/RowLockManager.h"
namespace alinous {namespace db {namespace table {namespace lockmonitor {namespace db {
bool RowLockManager::__init_done = __init_static_variables();
bool RowLockManager::__init_static_variables(){
Java2CppSystem::getSelf();
ThreadContext* ctx = ThreadContext::newThreadContext();
{
GCNotifier __refobj1(ctx, __FILEW__, __LINE__, L"RowLockManager", L"__init_static_variables");
}
ctx->localGC();
delete ctx;
return true;
}
RowLockManager::RowLockManager(IDatabaseTable* table, long long oid, ConcurrentGate* concurrentGate, ThreadContext* ctx) throw() : IObject(ctx), table(nullptr), oid(0), list(GCUtils<ArrayList<RowLock> >::ins(this, (new(ctx) ArrayList<RowLock>(ctx)), ctx, __FILEW__, __LINE__, L"")), concurrentGate(nullptr)
{
__GC_MV(this, &(this->table), table, IDatabaseTable);
this->oid = oid;
__GC_MV(this, &(this->concurrentGate), concurrentGate, ConcurrentGate);
}
void RowLockManager::__construct_impl(IDatabaseTable* table, long long oid, ConcurrentGate* concurrentGate, ThreadContext* ctx) throw()
{
__GC_MV(this, &(this->table), table, IDatabaseTable);
this->oid = oid;
__GC_MV(this, &(this->concurrentGate), concurrentGate, ConcurrentGate);
}
RowLockManager::~RowLockManager() throw()
{
ThreadContext *ctx = ThreadContext::getCurentContext();
if(ctx != nullptr){ctx->incGcDenial();}
__releaseRegerences(false, ctx);
if(ctx != nullptr){ctx->decGcDenial();}
}
void RowLockManager::__releaseRegerences(bool prepare, ThreadContext* ctx) throw()
{
ObjectEraser __e_obj1(ctx, __FILEW__, __LINE__, L"RowLockManager", L"~RowLockManager");
__e_obj1.add(this->table, this);
table = nullptr;
__e_obj1.add(this->list, this);
list = nullptr;
__e_obj1.add(this->concurrentGate, this);
concurrentGate = nullptr;
if(!prepare){
return;
}
}
ConcurrentGate* RowLockManager::getConcurrentGate(ThreadContext* ctx) throw()
{
return concurrentGate;
}
RowLock* RowLockManager::newLock(IThreadLocker* locker, bool update, ThreadContext* ctx) throw()
{
ArrayList<RowLock>* list = this->list;
int maxLoop = list->size(ctx);
for(int i = 0; i != maxLoop; ++i)
{
RowLock* lock = list->get(i, ctx);
if(lock->locker == locker)
{
return lock;
}
}
RowLock* lock = (new(ctx) RowLock(this->table, this->oid, update, locker, this->concurrentGate, ctx));
this->list->add(lock, ctx);
return lock;
}
RowLock* RowLockManager::releaseLock(IThreadLocker* locker, ThreadContext* ctx) throw()
{
ArrayList<RowLock>* list = this->list;
int maxLoop = list->size(ctx);
for(int i = 0; i != maxLoop; ++i)
{
RowLock* lock = list->get(i, ctx);
if(lock->locker == locker)
{
if(lock->count == 1)
{
this->list->remove(i, ctx);
}
return lock;
}
}
return nullptr;
}
void RowLockManager::__cleanUp(ThreadContext* ctx){
}
}}}}}<|fim▁end|>
<|file_name|>auth_saml.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
from openerp.osv import fields
from openerp.osv import osv
import lasso
import simplejson
class auth_saml_provider(osv.osv):
"""Class defining the configuration values of an Saml2 provider"""<|fim▁hole|>
_name = 'auth.saml.provider'
_description = 'SAML2 provider'
_order = 'name'
def _get_lasso_for_provider(self, cr, uid, provider_id, context=None):
"""internal helper to get a configured lasso.Login object for the
given provider id"""
provider = self.browse(cr, uid, provider_id, context=context)
# TODO: we should cache those results somewhere because it is
# really costly to always recreate a login variable from buffers
server = lasso.Server.newFromBuffers(
provider.sp_metadata,
provider.sp_pkey
)
server.addProviderFromBuffer(
lasso.PROVIDER_ROLE_IDP,
provider.idp_metadata
)
return lasso.Login(server)
def _get_matching_attr_for_provider(
self, cr, uid, provider_id, context=None
):
"""internal helper to fetch the matching attribute for this SAML
provider. Returns a unicode object.
"""
provider = self.browse(cr, uid, provider_id, context=context)
return provider.matching_attribute
def _get_auth_request(self, cr, uid, id_, state, context=None):
"""build an authentication request and give it back to our client
WARNING: this method cannot be used for multiple ids
"""
login = self._get_lasso_for_provider(cr, uid, id_, context=context)
# ! -- this is the part that MUST be performed on each call and
# cannot be cached
login.initAuthnRequest()
login.request.nameIdPolicy.format = None
login.request.nameIdPolicy.allowCreate = True
login.msgRelayState = simplejson.dumps(state)
login.buildAuthnRequestMsg()
# msgUrl is a fully encoded url ready for redirect use
# obtained after the buildAuthnRequestMsg() call
return login.msgUrl
_columns = {
# Name of the SAML2 entity: authentic, xcg...
'name': fields.char('Provider name'),
'idp_metadata': fields.text('IDP Configuration'),
'sp_metadata': fields.text('SP Configuration'),
'sp_pkey': fields.text(
'Private key of our service provider (this openerpserver)'
),
'matching_attribute': fields.text('Matching Attribute', required=True),
'enabled': fields.boolean('Enabled'),
'css_class': fields.char('CSS class'),
'body': fields.char(
'Body',
required=True,
),
'sequence': fields.integer(),
}
_defaults = {
'enabled': False,
'matching_attribute': "subject.nameId",
'css_class': 'zocial saml',
'body': 'Authentic',
}<|fim▁end|>
<|file_name|>ensure_exception_handled.py<|end_file_name|><|fim▁begin|>import traceback
class EnsureExceptionHandledGuard:
"""Helper for ensuring that Future's exceptions were handled.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the EnsureExceptionHandledGuard,
and then the EnsureExceptionHandledGuard would be included in a cycle,
which is what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since usually a Future has at least
one callback (typically set by 'yield from') and usually that
callback extracts the exception, thereby removing the need to
format the exception.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ['exc', 'tb', 'hndl', 'cls']
def __init__(self, exc, handler):
self.exc = exc
self.hndl = handler
self.cls = type(exc)
self.tb = None
def activate(self):
exc = self.exc
if exc is not None:
self.exc = None
self.tb = traceback.format_exception(exc.__class__, exc,
exc.__traceback__)
def clear(self):
self.exc = None
self.tb = None
def __del__(self):
if self.tb:
self.hndl(self.cls, self.tb)<|fim▁end|>
<|file_name|>tech-docs.js<|end_file_name|><|fim▁begin|>'use strict';
exports.baseTechPath = require.resolve('./level-proto.js');<|fim▁end|>
<|file_name|>SkScan_AAAPath.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2016 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "include/core/SkPath.h"
#include "include/core/SkRegion.h"
#include "include/private/SkTemplates.h"
#include "include/private/SkTo.h"
#include "src/core/SkAnalyticEdge.h"
#include "src/core/SkAntiRun.h"
#include "src/core/SkAutoMalloc.h"
#include "src/core/SkBlitter.h"
#include "src/core/SkEdge.h"
#include "src/core/SkEdgeBuilder.h"
#include "src/core/SkGeometry.h"
#include "src/core/SkQuadClipper.h"
#include "src/core/SkRasterClip.h"
#include "src/core/SkScan.h"
#include "src/core/SkScanPriv.h"
#include "src/core/SkTSort.h"
#include <utility>
#if defined(SK_DISABLE_AAA)
void SkScan::AAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
SkDEBUGFAIL("AAA Disabled");
return;
}
#else
/*
The following is a high-level overview of our analytic anti-aliasing
algorithm. We consider a path as a collection of line segments, as
quadratic/cubic curves are converted to small line segments. Without loss of
generality, let's assume that the draw region is [0, W] x [0, H].
Our algorithm is based on horizontal scan lines (y = c_i) as the previous
sampling-based algorithm did. However, our algorithm uses non-equal-spaced
scan lines, while the previous method always uses equal-spaced scan lines,
such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
16-supersampling AA algorithm.
Our algorithm contains scan lines y = c_i for c_i that is either:
1. an integer between [0, H]
2. the y value of a line segment endpoint
3. the y value of an intersection of two line segments
For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
the coverage of this horizontal strip of our path on each pixel. This can be
done very efficiently because the strip of our path now only consists of
trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
rectangles and triangles as special cases).
We now describe how the coverage of single pixel is computed against such a
trapezoid. That coverage is essentially the intersection area of a rectangle
(e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
could be complicated, as shown in the example region A below:
+-----------\----+
| \ C|
| \ |
\ \ |
|\ A \|
| \ \
| \ |
| B \ |
+----\-----------+
However, we don't have to compute the area of A directly. Instead, we can
compute the excluded area, which are B and C, quite easily, because they're
just triangles. In fact, we can prove that an excluded region (take B as an
example) is either itself a simple trapezoid (including rectangles, triangles,
and empty regions), or its opposite (the opposite of B is A + C) is a simple
trapezoid. In any case, we can compute its area efficiently.
In summary, our algorithm has a higher quality because it generates ground-
truth coverages analytically. It is also faster because it has much fewer
unnecessary horizontal scan lines. For example, given a triangle path, the
number of scan lines in our algorithm is only about 3 + H while the
16-supersampling algorithm has about 4H scan lines.
*/
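// ---------------------------------------------------------------------------
// A minimal standalone sketch (not Skia API; all names are illustrative) of
// the strip-coverage idea described above, in plain doubles instead of
// SkFixed. It assumes the two boundary lines do not cross the vertical edges
// of the pixel column inside the strip; the real code below subdivides and
// uses the triangle formulas to handle those partial cells exactly.
static double sketch_clamp01(double v) { return v < 0 ? 0 : (v > 1 ? 1 : v); }
static double sketch_column_coverage(double px, double h,
                                     double l0, double l1,  // left line x at strip top/bottom
                                     double r0, double r1) { // right line x at strip top/bottom
    // Area inside the column [px, px + 1] lying to the left of the line from
    // (x0, 0) to (x1, h): the clamped width varies linearly in y under the
    // assumption above, so the trapezoid rule is exact.
    auto areaLeftOf = [px, h](double x0, double x1) {
        return 0.5 * (sketch_clamp01(x0 - px) + sketch_clamp01(x1 - px)) * h;
    };
    // Coverage = (area left of the right boundary) - (area left of the left boundary).
    return areaLeftOf(r0, r1) - areaLeftOf(l0, l1);
}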
static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
SkASSERT(*alpha + delta <= 256);
*alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
}
static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
*alpha = SkTMin(0xFF, *alpha + delta);
}
class AdditiveBlitter : public SkBlitter {
public:
~AdditiveBlitter() override {}
virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;
virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;
void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
}
void blitV(int x, int y, int height, SkAlpha alpha) override {
SkDEBUGFAIL("Please call real blitter's blitV instead.");
}
void blitH(int x, int y, int width) override {
SkDEBUGFAIL("Please call real blitter's blitH instead.");
}
void blitRect(int x, int y, int width, int height) override {
SkDEBUGFAIL("Please call real blitter's blitRect instead.");
}
void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
override {
SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
}
virtual int getWidth() = 0;
// Flush the additive alpha cache if floor(y) and floor(nextY) is different
// (i.e., we'll start working on a new pixel row).
virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};
// We need this mask blitter because it significantly accelerates small path filling.
class MaskAdditiveBlitter : public AdditiveBlitter {
public:
MaskAdditiveBlitter(SkBlitter* realBlitter,
const SkIRect& ir,
const SkIRect& clipBounds,
bool isInverse);
~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }
// Most of the time, we still consider this mask blitter as the real blitter
// so we can accelerate blitRect and others. But sometimes we want to return
// the absolute real blitter (e.g., when we fall back to the old code path).
SkBlitter* getRealBlitter(bool forceRealBlitter) override {
return forceRealBlitter ? fRealBlitter : this;
}
// Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
// The following methods are used to blit rectangles during aaa_walk_convex_edges.
// Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
void blitAntiH(int x, int y, const SkAlpha alpha) override;
void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
void blitV(int x, int y, int height, SkAlpha alpha) override;
void blitRect(int x, int y, int width, int height) override;
void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
override;
// The flush is only needed for RLE (RunBasedAdditiveBlitter)
void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
int getWidth() override { return fClipRect.width(); }
static bool CanHandleRect(const SkIRect& bounds) {
int width = bounds.width();
if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
return false;
}
int64_t rb = SkAlign4(width);
// use 64bits to detect overflow
int64_t storage = rb * bounds.height();
return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
(storage <= MaskAdditiveBlitter::kMAX_STORAGE);
}
// Return a pointer where pointer[x] corresponds to the alpha of (x, y)
uint8_t* getRow(int y) {
if (y != fY) {
fY = y;
fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
}
return fRow;
}
private:
// so we don't try to do very wide things, where the RLE blitter would be faster
static const int kMAX_WIDTH = 32;
static const int kMAX_STORAGE = 1024;
SkBlitter* fRealBlitter;
SkMask fMask;
SkIRect fClipRect;
// we add 2 because we can write 1 extra byte at either end due to precision error
uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];
uint8_t* fRow;
int fY;
};
MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
const SkIRect& ir,
const SkIRect& clipBounds,
bool isInverse) {
SkASSERT(CanHandleRect(ir));
SkASSERT(!isInverse);
fRealBlitter = realBlitter;
fMask.fImage = (uint8_t*)fStorage + 1; // There's 1 extra byte at either end of fStorage
fMask.fBounds = ir;
fMask.fRowBytes = ir.width();
fMask.fFormat = SkMask::kA8_Format;
fY = ir.fTop - 1;
fRow = nullptr;
fClipRect = ir;
if (!fClipRect.intersect(clipBounds)) {
SkASSERT(0);
fClipRect.setEmpty();
}
memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
}
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
SK_ABORT("Don't use this; directly add alphas to the mask.");
}
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
SkASSERT(x >= fMask.fBounds.fLeft - 1);
add_alpha(&this->getRow(y)[x], alpha);
}
void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
SkASSERT(x >= fMask.fBounds.fLeft - 1);
uint8_t* row = this->getRow(y);
for (int i = 0; i < width; ++i) {
add_alpha(&row[x + i], alpha);
}
}
void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
if (alpha == 0) {
return;
}
SkASSERT(x >= fMask.fBounds.fLeft - 1);
// This must be called as if this is a real blitter.
// So we directly set alpha rather than adding it.
uint8_t* row = this->getRow(y);
for (int i = 0; i < height; ++i) {
row[x] = alpha;
row += fMask.fRowBytes;
}
}
void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
SkASSERT(x >= fMask.fBounds.fLeft - 1);
// This must be called as if this is a real blitter.
// So we directly set alpha rather than adding it.
uint8_t* row = this->getRow(y);
for (int i = 0; i < height; ++i) {
memset(row + x, 0xFF, width);
row += fMask.fRowBytes;
}
}
void MaskAdditiveBlitter::blitAntiRect(int x,
int y,
int width,
int height,
SkAlpha leftAlpha,
SkAlpha rightAlpha) {
blitV(x, y, height, leftAlpha);
blitV(x + 1 + width, y, height, rightAlpha);
blitRect(x + 1, y, width, height);
}
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
RunBasedAdditiveBlitter(SkBlitter* realBlitter,
const SkIRect& ir,
const SkIRect& clipBounds,
bool isInverse);
~RunBasedAdditiveBlitter() override { this->flush(); }
SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
void blitAntiH(int x, int y, const SkAlpha alpha) override;
void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
int getWidth() override { return fWidth; }
void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
this->flush();
}
}
protected:
SkBlitter* fRealBlitter;
int fCurrY; // Current y coordinate.
int fWidth; // Widest row of region to be blitted
int fLeft; // Leftmost x coordinate in any row
int fTop; // Initial y coordinate (top of bounds)
// The next three variables are used to track a circular buffer that
// contains the values used in SkAlphaRuns. These variables should only
// ever be updated in advanceRuns(), and fRuns should always point to
// a valid SkAlphaRuns...
int fRunsToBuffer;
void* fRunsBuffer;
int fCurrentRun;
SkAlphaRuns fRuns;
int fOffsetX;
bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }
// extra one to store the zero at the end
int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }
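// Layout of one circular-buffer slot: (fWidth + 1) int16_t run lengths,
// immediately followed by (fWidth + 1) SkAlpha bytes, which occupy
// (fWidth + 2) / 2 int16_t-sized slots (rounded up); see advanceRuns().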
// This function updates the fRuns variable to point to the next buffer space
// with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
// and resets fRuns to point to an empty scanline.
void advanceRuns() {
const size_t kRunsSz = this->getRunsSz();
fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
fCurrentRun * kRunsSz);
fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
fRuns.reset(fWidth);
}
// Blitting 0xFF and 0 is much faster so we snap alphas close to them
SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }
void flush() {
if (fCurrY >= fTop) {
SkASSERT(fCurrentRun < fRunsToBuffer);
for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
// It seems that blitting 255 or 0 is much faster than blitting 254 or 1
fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
}
if (!fRuns.empty()) {
// SkDEBUGCODE(fRuns.dump();)
fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
this->advanceRuns();
fOffsetX = 0;
}
fCurrY = fTop - 1;
}
}
void checkY(int y) {
if (y != fCurrY) {
this->flush();
fCurrY = y;
}
}
};
RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
const SkIRect& ir,
const SkIRect& clipBounds,
bool isInverse) {
fRealBlitter = realBlitter;
SkIRect sectBounds;
if (isInverse) {
// We use the clip bounds instead of the ir, since we may be asked to
// draw outside of the rect when we're an inverse filltype
sectBounds = clipBounds;
} else {
if (!sectBounds.intersect(ir, clipBounds)) {
sectBounds.setEmpty();
}
}
const int left = sectBounds.left();
const int right = sectBounds.right();
fLeft = left;
fWidth = right - left;
fTop = sectBounds.top();
fCurrY = fTop - 1;
fRunsToBuffer = realBlitter->requestRowsPreserved();
fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
fCurrentRun = -1;
this->advanceRuns();
fOffsetX = 0;
}
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
checkY(y);
x -= fLeft;
if (x < 0) {
len += x;
antialias -= x;
x = 0;
}
len = SkTMin(len, fWidth - x);
SkASSERT(check(x, len));
if (x < fOffsetX) {
fOffsetX = 0;
}
fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
fRuns.fRuns[x + i + j] = 1;
fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
}
fRuns.fRuns[x + i] = 1;
}
for (int i = 0; i < len; ++i) {
add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
}
}
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
checkY(y);
x -= fLeft;
if (x < fOffsetX) {
fOffsetX = 0;
}
if (this->check(x, 1)) {
fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
}
}
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
checkY(y);
x -= fLeft;
if (x < fOffsetX) {
fOffsetX = 0;
}
if (this->check(x, width)) {
fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
}
}
// This exists specifically for concave path filling.
// In those cases, we can easily accumulate alpha greater than 0xFF.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
const SkIRect& ir,
const SkIRect& clipBounds,
bool isInverse)
: RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
void blitAntiH(int x, int y, const SkAlpha alpha) override;
void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
};
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
checkY(y);
x -= fLeft;
if (x < 0) {
len += x;
antialias -= x;
x = 0;
}
len = SkTMin(len, fWidth - x);
SkASSERT(check(x, len));
if (x < fOffsetX) {
fOffsetX = 0;
}
fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
fRuns.fRuns[x + i + j] = 1;
fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
}
fRuns.fRuns[x + i] = 1;
}
for (int i = 0; i < len; ++i) {
safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
}
}
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
checkY(y);
x -= fLeft;
if (x < fOffsetX) {
fOffsetX = 0;
}
if (check(x, 1)) {
// Break the run
fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
safely_add_alpha(&fRuns.fAlpha[x], alpha);
}
}
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
checkY(y);
x -= fLeft;
if (x < fOffsetX) {
fOffsetX = 0;
}
if (check(x, width)) {
// Break the run
fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
safely_add_alpha(&fRuns.fAlpha[i], alpha);
}
}
}
// Return the alpha of a trapezoid whose height is 1
static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
SkASSERT(l1 >= 0 && l2 >= 0);
SkFixed area = (l1 + l2) / 2;
return SkTo<SkAlpha>(area >> 8);
}
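// Worked example: l1 == l2 == SK_Fixed1 / 2 (a height-1 strip covering half a
// pixel at both its top and bottom edges) gives area == 0x8000, and
// area >> 8 == 128, i.e. ~50% alpha.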
// The alpha of right-triangle (a, a*b)
static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
SkASSERT(a <= SK_Fixed1);
#if 0
// TODO(mtklein): skia:8877
SkASSERT(b <= SK_Fixed1);
#endif
// Approximating...
// SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);
#if 0
// TODO(mtklein): skia:8877
return SkTo<SkAlpha>(area >> 8);
#else
return SkTo<SkAlpha>((area >> 8) & 0xFF);
#endif
}
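// Sanity check: a == b == SK_Fixed1 gives (32 * 32 * 32) >> 8 == 128, i.e.
// ~50% alpha, matching the exact area 1/2 of a right triangle with legs of
// length 1 (the >> 11 shifts trade precision for a cheap multiply).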
static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
}
static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
return (alpha * fullAlpha) >> 8;
}
// For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
// For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
// This is rarely the problem so we'll only use this for blitting rectangles.
static SkAlpha fixed_to_alpha(SkFixed f) {
SkASSERT(f <= SK_Fixed1);
return get_partial_alpha(0xFF, f);
}
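// This resolves to the SkFixed overload of get_partial_alpha above, so e.g.
// fixed_to_alpha(SK_Fixed1) == SkFixedRoundToInt(0xFF * SK_Fixed1) == 255.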
// Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
// approximate (very coarsely) the x coordinate of the intersection.
static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
if (l1 > r1) {
std::swap(l1, r1);
}
if (l2 > r2) {
std::swap(l2, r2);
}
return (SkTMax(l1, l2) + SkTMin(r1, r2)) / 2;
}
// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
static void compute_alpha_above_line(SkAlpha* alphas,
SkFixed l,
SkFixed r,
SkFixed dY,
SkAlpha fullAlpha) {
SkASSERT(l <= r);
SkASSERT(l >> 16 == 0);
int R = SkFixedCeilToInt(r);
if (R == 0) {
return;
} else if (R == 1) {
alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
} else {
SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
for (int i = 1; i < R - 1; ++i) {
alphas[i] = alpha16 >> 8;
alpha16 += dY;
}
alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
}
}
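// e.g. a 45-degree edge spanning exactly one pixel (l == 0, r == SK_Fixed1,
// dY == SK_Fixed1) takes the R == 1 branch: ((1 << 17) - 0 - SK_Fixed1) >> 9
// == 128, so about half of the pixel is counted as above the line.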
// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
static void compute_alpha_below_line(SkAlpha* alphas,
SkFixed l,
SkFixed r,
SkFixed dY,
SkAlpha fullAlpha) {
SkASSERT(l <= r);
SkASSERT(l >> 16 == 0);
int R = SkFixedCeilToInt(r);
if (R == 0) {
return;
} else if (R == 1) {
alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
} else {
SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle
alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle
for (int i = R - 2; i > 0; i--) {
alphas[i] = (alpha16 >> 8) & 0xFF;
alpha16 += dY;
}
alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
}
}
// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter,
int y,
int x,
SkAlpha alpha,
SkAlpha fullAlpha,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter,
bool needSafeCheck) {
if (isUsingMask) {
if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
maskRow[x] = alpha;
} else if (needSafeCheck) {
safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
} else {
add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
}
} else {
if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitV(x, y, 1, alpha);
} else {
blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
}
}
}
static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter,
int y,
int x,
SkAlpha a1,
SkAlpha a2,
SkAlpha fullAlpha,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter,
bool needSafeCheck) {
if (isUsingMask) {
if (needSafeCheck) {
safely_add_alpha(&maskRow[x], a1);
safely_add_alpha(&maskRow[x + 1], a2);
} else {
add_alpha(&maskRow[x], a1);
add_alpha(&maskRow[x + 1], a2);
}
} else {
if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
} else {
blitter->blitAntiH(x, y, a1);
blitter->blitAntiH(x + 1, y, a2);
}
}
}
static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter,
int y,
int x,
int len,
SkAlpha fullAlpha,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter,
bool needSafeCheck) {
if (isUsingMask) {
for (int i = 0; i < len; ++i) {
if (needSafeCheck) {
safely_add_alpha(&maskRow[x + i], fullAlpha);
} else {
add_alpha(&maskRow[x + i], fullAlpha);
}
}
} else {
if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitH(x, y, len);
} else {
blitter->blitAntiH(x, y, len, fullAlpha);
}
}
}
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
int y,
SkFixed ul,
SkFixed ur,
SkFixed ll,
SkFixed lr,
SkFixed lDY,
SkFixed rDY,
SkAlpha fullAlpha,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter,
bool needSafeCheck) {
int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
int len = R - L;
if (len == 1) {
SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
blit_single_alpha(blitter,
y,
L,
alpha,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
return;
}
const int kQuickLen = 31;
char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
SkAlpha* alphas;
if (len <= kQuickLen) {
alphas = (SkAlpha*)quickMemory;
} else {
alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
}
SkAlpha* tempAlphas = alphas + len + 1;
int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
for (int i = 0; i < len; ++i) {
runs[i] = 1;
alphas[i] = fullAlpha;
}
runs[len] = 0;
int uL = SkFixedFloorToInt(ul);
int lL = SkFixedCeilToInt(ll);
if (uL + 2 == lL) { // We only need to compute two triangles, accelerate this special case
SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
SkFixed second = ll - ul - first;
SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
} else {
compute_alpha_below_line(
tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
for (int i = uL; i < lL; ++i) {
if (alphas[i - L] > tempAlphas[i - L]) {
alphas[i - L] -= tempAlphas[i - L];
} else {
alphas[i - L] = 0;
}
}
}
int uR = SkFixedFloorToInt(ur);
int lR = SkFixedCeilToInt(lr);
if (uR + 2 == lR) { // We only need to compute two triangles, accelerate this special case
SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
SkFixed second = lr - ur - first;
SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
} else {
compute_alpha_above_line(
tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
for (int i = uR; i < lR; ++i) {
if (alphas[i - L] > tempAlphas[i - L]) {
alphas[i - L] -= tempAlphas[i - L];
} else {
alphas[i - L] = 0;
}
}
}
if (isUsingMask) {
for (int i = 0; i < len; ++i) {
if (needSafeCheck) {
safely_add_alpha(&maskRow[L + i], alphas[i]);
} else {
add_alpha(&maskRow[L + i], alphas[i]);
}
}
} else {
if (fullAlpha == 0xFF && !noRealBlitter) {
// Real blitter is faster than RunBasedAdditiveBlitter
blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
} else {
blitter->blitAntiH(L, y, alphas, len);
}
}
if (len > kQuickLen) {
delete[] alphas;
}
}
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter,
int y,
SkFixed ul,
SkFixed ur,
SkFixed ll,
SkFixed lr,
SkFixed lDY,
SkFixed rDY,
SkAlpha fullAlpha,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter = false,
bool needSafeCheck = false) {
SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolute value
if (ul > ur) {
return;
}
// Edge crosses. Approximate it. This should only happen due to the precision limit,
// so the approximation could be very coarse.
if (ll > lr) {
ll = lr = approximate_intersection(ul, ll, ur, lr);
}
if (ul == ur && ll == lr) {
return; // empty trapezoid
}
// We're going to use the left line ul-ll and the rite line ur-lr
// to exclude the area that's not covered by the path.
// Swapping (ul, ll) or (ur, lr) won't affect that exclusion
// so we'll do that for simplicity.
if (ul > ll) {
std::swap(ul, ll);
}
if (ur > lr) {
std::swap(ur, lr);
}
SkFixed joinLeft = SkFixedCeilToFixed(ll);
SkFixed joinRite = SkFixedFloorToFixed(ur);
if (joinLeft <= joinRite) { // There's a rect from joinLeft to joinRite that we can blit
if (ul < joinLeft) {
int len = SkFixedCeilToInt(joinLeft - ul);
if (len == 1) {
SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
blit_single_alpha(blitter,
y,
ul >> 16,
alpha,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
} else if (len == 2) {
SkFixed first = joinLeft - SK_Fixed1 - ul;
SkFixed second = ll - ul - first;
SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
blit_two_alphas(blitter,
y,
ul >> 16,
a1,
a2,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
} else {
blit_aaa_trapezoid_row(blitter,
y,
ul,
joinLeft,
ll,
joinLeft,
lDY,
SK_MaxS32,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
}
}
// SkAAClip requires that we blit from left to right.
// Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
if (joinLeft < joinRite) {
blit_full_alpha(blitter,
y,
SkFixedFloorToInt(joinLeft),
SkFixedFloorToInt(joinRite - joinLeft),
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
}
if (lr > joinRite) {
int len = SkFixedCeilToInt(lr - joinRite);
if (len == 1) {
SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
blit_single_alpha(blitter,
y,
joinRite >> 16,
alpha,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
} else if (len == 2) {
SkFixed first = joinRite + SK_Fixed1 - ur;
SkFixed second = lr - ur - first;
SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
blit_two_alphas(blitter,
y,
joinRite >> 16,
a1,
a2,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
} else {
blit_aaa_trapezoid_row(blitter,
y,
joinRite,
ur,
joinRite,
lr,
SK_MaxS32,
rDY,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
}
}
} else {
blit_aaa_trapezoid_row(blitter,
y,
ul,
ur,
ll,
lr,
lDY,
rDY,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter,
needSafeCheck);
}
}
static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
int valuea = a.fUpperY;
int valueb = b.fUpperY;
if (valuea == valueb) {
valuea = a.fX;
valueb = b.fX;
}
if (valuea == valueb) {
valuea = a.fDX;
valueb = b.fDX;
}
return valuea < valueb;
}
static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
SkTQSort(list, list + count - 1);
// now make the edges linked in sorted order
for (int i = 1; i < count; ++i) {
list[i - 1]->fNext = list[i];
list[i]->fPrev = list[i - 1];
}
*last = list[count - 1];
return list[0];
}
static void validate_sort(const SkAnalyticEdge* edge) {
#ifdef SK_DEBUG
SkFixed y = SkIntToFixed(-32768);
while (edge->fUpperY != SK_MaxS32) {
edge->validate();
SkASSERT(y <= edge->fUpperY);
y = edge->fUpperY;
edge = (SkAnalyticEdge*)edge->fNext;
}
#endif
}
// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
// relatively large compared to fQDDx/fCDDx and fQDDy/fCDDy
static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
if (thisEdge->fCurveCount < 0) {
const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
int ddshift = cEdge.fCurveShift;
return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
// current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
(cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
} else if (thisEdge->fCurveCount > 0) {
const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
// current Dy is (fQDy - fQDDy) >> shift
(qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
}
return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 && // DDx should be small
nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1; // Dy should be large
}
// Check if the leftE and riteE are changing smoothly in terms of fDX.
// If yes, we can later skip the fractional y and directly jump to integer y.
static bool is_smooth_enough(SkAnalyticEdge* leftE,
SkAnalyticEdge* riteE,
SkAnalyticEdge* currE,
int stop_y) {
if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
return false; // We're at the end so we won't skip anything
}
if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
return is_smooth_enough(leftE, currE, stop_y); // Only leftE is changing
} else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
return is_smooth_enough(riteE, currE, stop_y); // Only riteE is changing
}
// Now both edges are changing, find the second next edge
SkAnalyticEdge* nextCurrE = currE->fNext;
if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
return false;
}
// Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
if (nextCurrE->fUpperX < currE->fUpperX) {
std::swap(currE, nextCurrE);
}
return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
}
static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
AdditiveBlitter* blitter,
int start_y,
int stop_y,
SkFixed leftBound,
SkFixed riteBound,
bool isUsingMask) {
validate_sort((SkAnalyticEdge*)prevHead->fNext);
SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;
SkFixed y = SkTMax(leftE->fUpperY, riteE->fUpperY);
for (;;) {
// We have to check fLowerY first because some edges might be alone (e.g., there's only
// a left edge but no right edge in a given y scan line) due to precision limit.
while (leftE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
if (!leftE->update(y)) {
if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
goto END_WALK;
}
leftE = currE;
currE = (SkAnalyticEdge*)currE->fNext;
}
}
while (riteE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
if (!riteE->update(y)) {
if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
goto END_WALK;
}
riteE = currE;
currE = (SkAnalyticEdge*)currE->fNext;
}
}
SkASSERT(leftE);
SkASSERT(riteE);
// check our bottom clip
if (SkFixedFloorToInt(y) >= stop_y) {
break;
}
SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);
leftE->goY(y);
riteE->goY(y);
if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
std::swap(leftE, riteE);
}
SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY);
if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
}
local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y));
SkFixed left = SkTMax(leftBound, leftE->fX);
SkFixed dLeft = leftE->fDX;
SkFixed rite = SkTMin(riteBound, riteE->fX);
SkFixed dRite = riteE->fDX;
if (0 == (dLeft | dRite)) {
int fullLeft = SkFixedCeilToInt(left);
int fullRite = SkFixedFloorToInt(rite);
SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
SkFixed partialRite = rite - SkIntToFixed(fullRite);
int fullTop = SkFixedCeilToInt(y);
int fullBot = SkFixedFloorToInt(local_bot_fixed);
SkFixed partialTop = SkIntToFixed(fullTop) - y;
SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
if (fullTop > fullBot) { // The rectangle is within one pixel height...
partialTop -= (SK_Fixed1 - partialBot);
partialBot = 0;
}
if (fullRite >= fullLeft) {
if (partialTop > 0) { // blit first partial row
if (partialLeft > 0) {
blitter->blitAntiH(fullLeft - 1,
fullTop - 1,
fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
}
blitter->blitAntiH(
fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
if (partialRite > 0) {
blitter->blitAntiH(fullRite,
fullTop - 1,
fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
}
blitter->flush_if_y_changed(y, y + partialTop);
}
// Blit all full-height rows from fullTop to fullBot
if (fullBot > fullTop &&
// SkAAClip cannot handle the empty rect so check the non-emptiness here
// (bug chromium:662800)
(fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
fixed_to_alpha(partialRite) > 0)) {
blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
fullTop,
fullRite - fullLeft,
fullBot - fullTop,
fixed_to_alpha(partialLeft),
fixed_to_alpha(partialRite));
}
if (partialBot > 0) { // blit last partial row
if (partialLeft > 0) {
blitter->blitAntiH(fullLeft - 1,
fullBot,
fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
}
blitter->blitAntiH(
fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
if (partialRite > 0) {
blitter->blitAntiH(fullRite,
fullBot,
fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
}
}
} else { // left and rite are within the same pixel
if (partialTop > 0) {
blitter->blitAntiH(fullLeft - 1,
fullTop - 1,
1,
fixed_to_alpha(SkFixedMul(partialTop, rite - left)));
blitter->flush_if_y_changed(y, y + partialTop);
}
if (fullBot > fullTop) {
blitter->getRealBlitter()->blitV(
fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(rite - left));
}
if (partialBot > 0) {
blitter->blitAntiH(fullLeft - 1,
fullBot,
1,
fixed_to_alpha(SkFixedMul(partialBot, rite - left)));
}
}
y = local_bot_fixed;
} else {
// The following constants are used to snap X
// We snap X mainly for speedup (no tiny triangle) and
// avoiding edge cases caused by precision errors
const SkFixed kSnapDigit = SK_Fixed1 >> 4;
const SkFixed kSnapHalf = kSnapDigit >> 1;
const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
left += kSnapHalf;
rite += kSnapHalf; // For fast rounding
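// e.g. with SK_Fixed1 == 0x10000: kSnapDigit == 0x1000 (1/16 of a pixel) and
// kSnapHalf == 0x800, so the (x + kSnapHalf) & kSnapMask pattern used below
// rounds an x value to its nearest multiple of 1/16 of a pixel.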
// Number of blit_trapezoid_row calls we'll have
int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);
// If we're using mask blitter, we advance the mask row in this function
// to save some "if" condition checks.
SkAlpha* maskRow = nullptr;
if (isUsingMask) {
maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
}
// Instead of writing one loop that handles both partial-row blit_trapezoid_row
// and full-row trapezoid_row together, we use the following 3-stage flow to
// handle partial-row blit and full-row blit separately. It will save us much time
// on changing y, left, and rite.
if (count > 1) {
if ((int)(y & 0xFFFF0000) != y) { // There's a partial-row on the top
count--;
SkFixed nextY = SkFixedCeilToFixed(y + 1);
SkFixed dY = nextY - y;
SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
SkFixed nextRite = rite + SkFixedMul(dRite, dY);
SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
(nextLeft & kSnapMask) >= leftBound &&
(nextRite & kSnapMask) <= riteBound);
blit_trapezoid_row(blitter,
y >> 16,
left & kSnapMask,
rite & kSnapMask,
nextLeft & kSnapMask,
nextRite & kSnapMask,
leftE->fDY,
riteE->fDY,
get_partial_alpha(0xFF, dY),
maskRow,
isUsingMask);
blitter->flush_if_y_changed(y, nextY);
left = nextLeft;
rite = nextRite;
y = nextY;
}
while (count > 1) { // Full rows in the middle
count--;
if (isUsingMask) {
maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
}
SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
(nextLeft & kSnapMask) >= leftBound &&
(nextRite & kSnapMask) <= riteBound);
blit_trapezoid_row(blitter,
y >> 16,
left & kSnapMask,
rite & kSnapMask,
nextLeft & kSnapMask,
nextRite & kSnapMask,
leftE->fDY,
riteE->fDY,
0xFF,
maskRow,
isUsingMask);
blitter->flush_if_y_changed(y, nextY);
left = nextLeft;
rite = nextRite;
y = nextY;
}
}
if (isUsingMask) {
maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
}
SkFixed dY = local_bot_fixed - y; // partial-row on the bottom
SkASSERT(dY <= SK_Fixed1);
// Smooth jumping to integer y may make the last nextLeft/nextRite out of bounds.
// Take them back into the bounds here.
// Note that we subtract kSnapHalf later so we have to add kSnapHalf to leftBound/riteBound
SkFixed nextLeft = SkTMax(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
SkFixed nextRite = SkTMin(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
(nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
blit_trapezoid_row(blitter,
y >> 16,
left & kSnapMask,
rite & kSnapMask,
nextLeft & kSnapMask,
nextRite & kSnapMask,
leftE->fDY,
riteE->fDY,
get_partial_alpha(0xFF, dY),
maskRow,
isUsingMask);
blitter->flush_if_y_changed(y, local_bot_fixed);
left = nextLeft;
rite = nextRite;
y = local_bot_fixed;
left -= kSnapHalf;
rite -= kSnapHalf;
}
leftE->fX = left;
riteE->fX = rite;
leftE->fY = riteE->fY = y;
}
END_WALK:;
}
static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
*nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
}
static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
*nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
}
}
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
if (newEdge->fUpperY > y) {
update_next_next_y(newEdge->fUpperY, y, nextNextY);
return;
}
SkAnalyticEdge* prev = newEdge->fPrev;
if (prev->fX <= newEdge->fX) {
while (newEdge->fUpperY <= y) {
check_intersection(newEdge, y, nextNextY);
update_next_next_y(newEdge->fLowerY, y, nextNextY);
newEdge = newEdge->fNext;
}
update_next_next_y(newEdge->fUpperY, y, nextNextY);
return;
}
// find first x pos to insert
SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
// insert the lot, fixing up the links as we go
do {
SkAnalyticEdge* next = newEdge->fNext;
do {
if (start->fNext == newEdge) {
goto nextEdge;
}
SkAnalyticEdge* after = start->fNext;
if (after->fX >= newEdge->fX) {
break;
}
SkASSERT(start != after);
start = after;
} while (true);
remove_edge(newEdge);
insert_edge_after(newEdge, start);
nextEdge:
check_intersection(newEdge, y, nextNextY);
update_next_next_y(newEdge->fLowerY, y, nextNextY);
start = newEdge;
newEdge = next;
} while (newEdge->fUpperY <= y);
update_next_next_y(newEdge->fUpperY, y, nextNextY);
}
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
while (edge->fUpperY <= y) {
SkASSERT(edge->fPrev && edge->fNext);
SkASSERT(edge->fPrev->fNext == edge);
SkASSERT(edge->fNext->fPrev == edge);
SkASSERT(edge->fUpperY <= edge->fLowerY);
SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
edge = edge->fNext;
}
#endif
}
// Return true if prev->fX, next->fX are too close in the current pixel row.
static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
// When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
// even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
// next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
// edges prev and next are within one pixel.
constexpr SkFixed SLACK = SK_Fixed1;
// Note that even if the following test failed, the edges might still be very close to each
// other at some point within the current pixel row because of prev->fDX and next->fDX.
// However, to handle that case, we have to sacrafice more performance.
// I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
// so I'll ignore fDX for performance tradeoff.
return next && prev && next->fUpperY < lowerY &&
prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
// The following is more accurate but also slower.
// return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
// prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
}
// This function exists for the case where the previous rite edge is removed because
// its fLowerY <= nextY
static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
}
static void blit_saved_trapezoid(SkAnalyticEdge* leftE,
SkFixed lowerY,
SkFixed lowerLeft,
SkFixed lowerRite,
AdditiveBlitter* blitter,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter,
SkFixed leftClip,
SkFixed rightClip) {
SkAnalyticEdge* riteE = leftE->fRiteE;
SkASSERT(riteE);
SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
int y = SkFixedFloorToInt(leftE->fSavedY);
// Instead of using fixed_to_alpha(lowerY - leftE->fSavedY), we use the following fullAlpha
// to eliminate cumulative error: if there are many fractional y scan lines within the
// same row, the former may accumulate the rounding error while the latter won't.
SkAlpha fullAlpha = fixed_to_alpha(lowerY - SkIntToFixed(y)) -
fixed_to_alpha(leftE->fSavedY - SkIntToFixed(y));
blit_trapezoid_row(blitter,
y,
SkTMax(leftE->fSavedX, leftClip),
SkTMin(riteE->fSavedX, rightClip),
SkTMax(lowerLeft, leftClip),
SkTMin(lowerRite, rightClip),
leftE->fSavedDY,
riteE->fSavedDY,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter || (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) ||
edges_too_close(riteE, riteE->fNext, lowerY))),
true);
leftE->fRiteE = nullptr;
}
static void deferred_blit(SkAnalyticEdge* leftE,
SkAnalyticEdge* riteE,
SkFixed left,
SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated
SkFixed y,
SkFixed nextY,
bool isIntegralNextY,
bool leftEnds,
bool riteEnds,
AdditiveBlitter* blitter,
SkAlpha* maskRow,
bool isUsingMask,
bool noRealBlitter,
SkFixed leftClip,
SkFixed rightClip,
int yShift) {
if (leftE->fRiteE && leftE->fRiteE != riteE) {
// leftE's right edge changed. Blit the saved trapezoid.
SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
blit_saved_trapezoid(leftE,
y,
left,
leftE->fRiteE->fX,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip);
}
if (!leftE->fRiteE) {
// Save and defer blitting the trapezoid
SkASSERT(riteE->fRiteE == nullptr);
SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
leftE->saveXY(left, y, leftDY);
riteE->saveXY(riteE->fX, y, riteE->fDY);
leftE->fRiteE = riteE;
}
SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
riteE->goY(nextY, yShift);
// Always blit when edges end or nextY is integral
if (isIntegralNextY || leftEnds || riteEnds) {
blit_saved_trapezoid(leftE,
nextY,
leftE->fX,
riteE->fX,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip);
}
}
static void aaa_walk_edges(SkAnalyticEdge* prevHead,
SkAnalyticEdge* nextTail,
SkPathFillType fillType,
AdditiveBlitter* blitter,
int start_y,
int stop_y,
SkFixed leftClip,
SkFixed rightClip,
bool isUsingMask,
bool forceRLE,
bool useDeferred,
bool skipIntersect) {
prevHead->fX = prevHead->fUpperX = leftClip;
nextTail->fX = nextTail->fUpperX = rightClip;
SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
SkFixed nextNextY = SK_MaxS32;
{
SkAnalyticEdge* edge;
for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
edge->goY(y);
update_next_next_y(edge->fLowerY, y, &nextNextY);
}
update_next_next_y(edge->fUpperY, y, &nextNextY);
}
int windingMask = SkPathFillType_IsEvenOdd(fillType) ? 1 : -1;
bool isInverse = SkPathFillType_IsInverse(fillType);
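// For a winding fill, windingMask == -1 keeps the full winding count; for
// even-odd, only the low bit matters. !(w & windingMask) means "outside the
// path", so in_interval below flips its meaning for inverse fill types.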
if (isInverse && SkIntToFixed(start_y) != y) {
int width = SkFixedFloorToInt(rightClip - leftClip);
if (SkFixedFloorToInt(y) != start_y) {
blitter->getRealBlitter()->blitRect(
SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
start_y = SkFixedFloorToInt(y);
}
SkAlpha* maskRow =
isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
blit_full_alpha(blitter,
start_y,
SkFixedFloorToInt(leftClip),
width,
fixed_to_alpha(y - SkIntToFixed(start_y)),
maskRow,
isUsingMask,
false,
false);
}
while (true) {
int w = 0;
bool in_interval = isInverse;
SkFixed prevX = prevHead->fX;
SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
SkAnalyticEdge* currE = prevHead->fNext;
SkAnalyticEdge* leftE = prevHead;
SkFixed left = leftClip;
SkFixed leftDY = 0;
bool leftEnds = false;
int prevRite = SkFixedFloorToInt(leftClip);
nextNextY = SK_MaxS32;
SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
int yShift = 0;
if ((nextY - y) & (SK_Fixed1 >> 2)) {
yShift = 2;
nextY = y + (SK_Fixed1 >> 2);
} else if ((nextY - y) & (SK_Fixed1 >> 1)) {
yShift = 1;
SkASSERT(nextY == y + (SK_Fixed1 >> 1));
}
SkAlpha fullAlpha = fixed_to_alpha(nextY - y);
// If we're using mask blitter, we advance the mask row in this function
// to save some "if" condition checks.
SkAlpha* maskRow = nullptr;
if (isUsingMask) {
maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
}
SkASSERT(currE->fPrev == prevHead);
validate_edges_for_y(currE, y);
// Even if nextY - y == SK_Fixed1, we can still break the left-to-right order requirement
// of the SkAAClip: |\| (two trapezoids with overlapping middle wedges)
bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);
while (currE->fUpperY <= y) {
SkASSERT(currE->fLowerY >= nextY);
SkASSERT(currE->fY == y);
w += currE->fWinding;
bool prev_in_interval = in_interval;
in_interval = !(w & windingMask) == isInverse;
bool isLeft = in_interval && !prev_in_interval;
bool isRite = !in_interval && prev_in_interval;
bool currEnds = currE->fLowerY == nextY;
if (useDeferred) {
if (currE->fRiteE && !isLeft) {
// currE is a left edge previously, but now it's not.
// Blit the trapezoid between fSavedY and y.
SkASSERT(currE->fRiteE->fY == y);
blit_saved_trapezoid(currE,
y,
currE->fX,
currE->fRiteE->fX,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip);
}
if (leftE->fRiteE == currE && !isRite) {
// currE is a right edge previously, but now it's not.
// Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
// in the previous if clause). Hence we blit the trapezoid.
blit_saved_trapezoid(leftE,
y,
left,
currE->fX,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip);
}
}
if (isRite) {
if (useDeferred) {
deferred_blit(leftE,
currE,
left,
leftDY,
y,
nextY,
isIntegralNextY,
leftEnds,
currEnds,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip,
yShift);
} else {
SkFixed rite = currE->fX;
currE->goY(nextY, yShift);
SkFixed nextLeft = SkTMax(leftClip, leftE->fX);
rite = SkTMin(rightClip, rite);
SkFixed nextRite = SkTMin(rightClip, currE->fX);
blit_trapezoid_row(
blitter,
y >> 16,
left,
rite,
nextLeft,
nextRite,
leftDY,
currE->fDY,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter || (fullAlpha == 0xFF &&
(edges_too_close(prevRite, left, leftE->fX) ||
edges_too_close(currE, currE->fNext, nextY))),
true);
prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
}
} else {
if (isLeft) {
left = SkTMax(currE->fX, leftClip);
leftDY = currE->fDY;
leftE = currE;
leftEnds = leftE->fLowerY == nextY;
}
currE->goY(nextY, yShift);
}
SkAnalyticEdge* next = currE->fNext;
SkFixed newX;
while (currE->fLowerY <= nextY) {
if (currE->fCurveCount < 0) {
SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
cubicEdge->keepContinuous();
if (!cubicEdge->updateCubic()) {
break;
}
} else if (currE->fCurveCount > 0) {
SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
quadEdge->keepContinuous();
if (!quadEdge->updateQuadratic()) {
break;
}
} else {
break;
}
}
SkASSERT(currE->fY == nextY);
if (currE->fLowerY <= nextY) {
remove_edge(currE);
} else {
update_next_next_y(currE->fLowerY, nextY, &nextNextY);
newX = currE->fX;
SkASSERT(currE->fLowerY > nextY);
if (newX < prevX) { // ripple currE backwards until it is x-sorted
// If the crossing edge is a right edge, blit the saved trapezoid.
if (leftE->fRiteE == currE && useDeferred) {
SkASSERT(leftE->fY == nextY && currE->fY == nextY);
blit_saved_trapezoid(leftE,
nextY,
leftE->fX,
currE->fX,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip);
}
backward_insert_edge_based_on_x(currE);
} else {
prevX = newX;
}
if (!skipIntersect) {
check_intersection(currE, nextY, &nextNextY);
}
}
currE = next;
SkASSERT(currE);
}
// was our right-edge culled away?
if (in_interval) {
if (useDeferred) {
deferred_blit(leftE,
nextTail,
left,
leftDY,
y,
nextY,
isIntegralNextY,
leftEnds,
false,
blitter,
maskRow,
isUsingMask,
noRealBlitter,
leftClip,
rightClip,
yShift);
} else {
blit_trapezoid_row(blitter,
y >> 16,
left,
rightClip,
SkTMax(leftClip, leftE->fX),
rightClip,
leftDY,
0,
fullAlpha,
maskRow,
isUsingMask,
noRealBlitter || (fullAlpha == 0xFF &&
edges_too_close(leftE->fPrev, leftE, nextY)),
true);
}
}
if (forceRLE) {
((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
}
y = nextY;
if (y >= SkIntToFixed(stop_y)) {
break;
}
// now currE points to the first edge with a fUpperY larger than the previous y
insert_new_edges(currE, y, &nextNextY);
}
}
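// aaa_fill_path builds analytic edges for the path (clipping them unless the
// path is fully contained in the clip), links them between sentinel head/tail
// edges, and dispatches to the convex fast path or the general edge walker.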
static SK_ALWAYS_INLINE void aaa_fill_path(
const SkPath& path,
const SkIRect& clipRect,
AdditiveBlitter* blitter,
int start_y,
int stop_y,
bool pathContainedInClip,
bool isUsingMask,
bool forceRLE) { // forceRLE implies that SkAAClip is calling us
SkASSERT(blitter);
SkAnalyticEdgeBuilder builder;
int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
SkAnalyticEdge** list = builder.analyticEdgeList();
SkIRect rect = clipRect;
if (0 == count) {
if (path.isInverseFillType()) {
/*
* Since we are in inverse-fill, our caller has already drawn above
* our top (start_y) and will draw below our bottom (stop_y). Thus
* we need to restrict our drawing to the intersection of the clip
* and those two limits.
*/
if (rect.fTop < start_y) {
rect.fTop = start_y;
}
if (rect.fBottom > stop_y) {
rect.fBottom = stop_y;
}
if (!rect.isEmpty()) {
blitter->getRealBlitter()->blitRect(
rect.fLeft, rect.fTop, rect.width(), rect.height());
}
}
return;
}
SkAnalyticEdge headEdge, tailEdge, *last;
    // this returns the first and last edge after they're sorted into a doubly-linked list
SkAnalyticEdge* edge = sort_edges(list, count, &last);
headEdge.fRiteE = nullptr;
headEdge.fPrev = nullptr;
headEdge.fNext = edge;
headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
headEdge.fX = SK_MinS32;
headEdge.fDX = 0;
headEdge.fDY = SK_MaxS32;
headEdge.fUpperX = SK_MinS32;
edge->fPrev = &headEdge;
tailEdge.fRiteE = nullptr;
tailEdge.fPrev = last;
tailEdge.fNext = nullptr;
tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
tailEdge.fX = SK_MaxS32;
tailEdge.fDX = 0;
tailEdge.fDY = SK_MaxS32;
tailEdge.fUpperX = SK_MaxS32;
last->fNext = &tailEdge;
    // now edge is the head of the sorted linked list
if (!pathContainedInClip && start_y < clipRect.fTop) {
start_y = clipRect.fTop;
}
if (!pathContainedInClip && stop_y > clipRect.fBottom) {
stop_y = clipRect.fBottom;
}
SkFixed leftBound = SkIntToFixed(rect.fLeft);
SkFixed rightBound = SkIntToFixed(rect.fRight);
if (isUsingMask) {
// If we're using mask, then we have to limit the bound within the path bounds.
// Otherwise, the edge drift may access an invalid address inside the mask.
SkIRect ir;
path.getBounds().roundOut(&ir);
leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
}
if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
aaa_walk_convex_edges(
&headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
} else {
// Only use deferred blitting if there are many edges.
bool useDeferred =
count >
(SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
// We skip intersection computation if there are many points which probably already
// give us enough fractional scan lines.
bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;
aaa_walk_edges(&headEdge,
&tailEdge,
path.getFillType(),
blitter,
start_y,
stop_y,
leftBound,
rightBound,
isUsingMask,
forceRLE,
useDeferred,
skipIntersect);
}
}
void SkScan::AAAFillPath(const SkPath& path,
SkBlitter* blitter,
const SkIRect& ir,
const SkIRect& clipBounds,
bool forceRLE) {
bool containedInClip = clipBounds.contains(ir);
bool isInverse = path.isInverseFillType();
// The mask blitter (where we store intermediate alpha values directly in a mask, and then call
// the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
// the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
// region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
// use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
// rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
// blitFatAntiRect to avoid the mask and its overhead.
if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
// blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
// Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path,
clipBounds,
&additiveBlitter,
ir.fTop,
ir.fBottom,
containedInClip,
true,
forceRLE);
}
} else if (!isInverse && path.isConvex()) {
// If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
// aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
// SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
// RunBasedAdditiveBlitter would suffice.
RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path,
clipBounds,
&additiveBlitter,
ir.fTop,
ir.fBottom,
containedInClip,
false,
forceRLE);
} else {
// If the filling area might not be convex, the more involved aaa_walk_edges would
        // be called and we have to clamp the alpha down to 255. The SafeRLEAdditiveBlitter
// does that at a cost of performance.
SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path,
clipBounds,
&additiveBlitter,
ir.fTop,
ir.fBottom,
containedInClip,
false,
forceRLE);
}
}
#endif // defined(SK_DISABLE_AAA)<|fim▁end|> | // We need fSavedDY because the (quad or cubic) edge might be updated
blit_trapezoid_row(
blitter,
y, |
<|file_name|>dos_files.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2002-2010 The DOSBox Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* $Id: dos_files.cpp,v 1.113 2009-08-31 18:03:08 qbix79 Exp $ */
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include "dosbox.h"
#include "bios.h"
#include "mem.h"
#include "regs.h"
#include "dos_inc.h"
#include "drives.h"
#include "cross.h"
#define DOS_FILESTART 4
#define FCB_SUCCESS 0
#define FCB_READ_NODATA 1
#define FCB_READ_PARTIAL 3
#define FCB_ERR_NODATA 1
#define FCB_ERR_EOF 3
#define FCB_ERR_WRITE 1
DOS_File * Files[DOS_FILES];
DOS_Drive * Drives[DOS_DRIVES];
Bit8u DOS_GetDefaultDrive(void) {
// return DOS_SDA(DOS_SDA_SEG,DOS_SDA_OFS).GetDrive();
Bit8u d = DOS_SDA(DOS_SDA_SEG,DOS_SDA_OFS).GetDrive();
if( d != dos.current_drive ) LOG(LOG_DOSMISC,LOG_ERROR)("SDA drive %d not the same as dos.current_drive %d",d,dos.current_drive);
return dos.current_drive;
}
void DOS_SetDefaultDrive(Bit8u drive) {
// if (drive<=DOS_DRIVES && ((drive<2) || Drives[drive])) DOS_SDA(DOS_SDA_SEG,DOS_SDA_OFS).SetDrive(drive);
if (drive<DOS_DRIVES && ((drive<2) || Drives[drive])) {dos.current_drive = drive; DOS_SDA(DOS_SDA_SEG,DOS_SDA_OFS).SetDrive(drive);}
}
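/* DOS_MakeName canonicalizes a (possibly relative, possibly drive-prefixed)
 * path into an upper-cased full path relative to the drive root, resolving
 * "." and ".." against the drive's current directory.
 * Illustrative example (hypothetical drive state with curdir "GAMES"):
 *   "c:save\..\data.sav"  ->  *drive == 2 ('C'), fullname "GAMES\DATA.SAV"
 */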
bool DOS_MakeName(char const * const name,char * const fullname,Bit8u * drive) {
if(!name || *name == 0 || *name == ' ') {
		/* Both \0 and space are separators and
		 * empty filenames report file not found */
DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
const char * name_int = name;
char tempdir[DOS_PATHLENGTH];
char upname[DOS_PATHLENGTH];
Bitu r,w;
*drive = DOS_GetDefaultDrive();
/* First get the drive */
if (name_int[1]==':') {
*drive=(name_int[0] | 0x20)-'a';
name_int+=2;
}
if (*drive>=DOS_DRIVES || !Drives[*drive]) {
DOS_SetError(DOSERR_PATH_NOT_FOUND);
return false;
}
r=0;w=0;
while (name_int[r]!=0 && (r<DOS_PATHLENGTH)) {
Bit8u c=name_int[r++];
if ((c>='a') && (c<='z')) {upname[w++]=c-32;continue;}
if ((c>='A') && (c<='Z')) {upname[w++]=c;continue;}
if ((c>='0') && (c<='9')) {upname[w++]=c;continue;}
switch (c) {
case '/':
upname[w++]='\\';
break;
		case ' ': /* should be separator */
break;
case '\\': case '$': case '#': case '@': case '(': case ')':
case '!': case '%': case '{': case '}': case '`': case '~':
case '_': case '-': case '.': case '*': case '?': case '&':
case '\'': case '+': case '^': case 246: case 255: case 0xa0:
case 0xe5: case 0xbd:
upname[w++]=c;
break;
default:
LOG(LOG_FILES,LOG_NORMAL)("Makename encountered an illegal char %c hex:%X in %s!",c,c,name);
DOS_SetError(DOSERR_PATH_NOT_FOUND);return false;
break;
}
}
if (r>=DOS_PATHLENGTH) { DOS_SetError(DOSERR_PATH_NOT_FOUND);return false; }
upname[w]=0;
/* Now parse the new file name to make the final filename */
if (upname[0]!='\\') strcpy(fullname,Drives[*drive]->curdir);
else fullname[0]=0;
Bit32u lastdir=0;Bit32u t=0;
while (fullname[t]!=0) {
if ((fullname[t]=='\\') && (fullname[t+1]!=0)) lastdir=t;
t++;
};
r=0;w=0;
tempdir[0]=0;
bool stop=false;
while (!stop) {
if (upname[r]==0) stop=true;
if ((upname[r]=='\\') || (upname[r]==0)){
tempdir[w]=0;
if (tempdir[0]==0) { w=0;r++;continue;}
if (strcmp(tempdir,".")==0) {
tempdir[0]=0;
w=0;r++;
continue;
}
Bit32s iDown;
bool dots = true;
Bit32s templen=(Bit32s)strlen(tempdir);
for(iDown=0;(iDown < templen) && dots;iDown++)
if(tempdir[iDown] != '.')
dots = false;
// only dots?
if (dots && (templen > 1)) {
Bit32s cDots = templen - 1;
for(iDown=(Bit32s)strlen(fullname)-1;iDown>=0;iDown--) {
if(fullname[iDown]=='\\' || iDown==0) {
lastdir = iDown;
cDots--;
if(cDots==0)
break;
}
}
fullname[lastdir]=0;
t=0;lastdir=0;
while (fullname[t]!=0) {
if ((fullname[t]=='\\') && (fullname[t+1]!=0)) lastdir=t;
t++;
}
tempdir[0]=0;
w=0;r++;
continue;
}
lastdir=(Bit32u)strlen(fullname);
if (lastdir!=0) strcat(fullname,"\\");
char * ext=strchr(tempdir,'.');
if (ext) {
if(strchr(ext+1,'.')) {
					//another dot in the extension => file not found
					//or path not found depending on whether
					//we are still in the dir check stage or the file stage
if(stop)
DOS_SetError(DOSERR_FILE_NOT_FOUND);
else
DOS_SetError(DOSERR_PATH_NOT_FOUND);
return false;
}
ext[4] = 0;
if((strlen(tempdir) - strlen(ext)) > 8) memmove(tempdir + 8, ext, 5);
} else tempdir[8]=0;
if (strlen(fullname)+strlen(tempdir)>=DOS_PATHLENGTH) {
DOS_SetError(DOSERR_PATH_NOT_FOUND);return false;
}
strcat(fullname,tempdir);
tempdir[0]=0;
w=0;r++;
continue;
}
tempdir[w++]=upname[r++];
}
return true;
}
bool DOS_GetCurrentDir(Bit8u drive,char * const buffer) {
if (drive==0) drive=DOS_GetDefaultDrive();
else drive--;
if ((drive>=DOS_DRIVES) || (!Drives[drive])) {
DOS_SetError(DOSERR_INVALID_DRIVE);
return false;
}
strcpy(buffer,Drives[drive]->curdir);
return true;
}
bool DOS_ChangeDir(char const * const dir) {
Bit8u drive;char fulldir[DOS_PATHLENGTH];
if (!DOS_MakeName(dir,fulldir,&drive)) return false;
if (Drives[drive]->TestDir(fulldir)) {
strcpy(Drives[drive]->curdir,fulldir);
return true;
} else {
DOS_SetError(DOSERR_PATH_NOT_FOUND);
}
return false;
}
bool DOS_MakeDir(char const * const dir) {
Bit8u drive;char fulldir[DOS_PATHLENGTH];
size_t len = strlen(dir);
if(!len || dir[len-1] == '\\') {
DOS_SetError(DOSERR_PATH_NOT_FOUND);
return false;
}
if (!DOS_MakeName(dir,fulldir,&drive)) return false;
if(Drives[drive]->MakeDir(fulldir)) return true;
/* Determine reason for failing */
if(Drives[drive]->TestDir(fulldir))
DOS_SetError(DOSERR_ACCESS_DENIED);
else
DOS_SetError(DOSERR_PATH_NOT_FOUND);
return false;
}
bool DOS_RemoveDir(char const * const dir) {
	/* We need to do the test before the removal as we cannot rely on
* the host to forbid removal of the current directory.
* We never change directory. Everything happens in the drives.
*/
Bit8u drive;char fulldir[DOS_PATHLENGTH];
if (!DOS_MakeName(dir,fulldir,&drive)) return false;
/* Check if exists */
if(!Drives[drive]->TestDir(fulldir)) {
DOS_SetError(DOSERR_PATH_NOT_FOUND);
return false;
}
/* See if it's current directory */
char currdir[DOS_PATHLENGTH]= { 0 };
DOS_GetCurrentDir(drive + 1 ,currdir);
if(strcmp(currdir,fulldir) == 0) {
DOS_SetError(DOSERR_REMOVE_CURRENT_DIRECTORY);
return false;
}
if(Drives[drive]->RemoveDir(fulldir)) return true;
/* Failed. We know it exists and it's not the current dir */
/* Assume non empty */
DOS_SetError(DOSERR_ACCESS_DENIED);
return false;
}
bool DOS_Rename(char const * const oldname,char const * const newname) {
Bit8u driveold;char fullold[DOS_PATHLENGTH];
Bit8u drivenew;char fullnew[DOS_PATHLENGTH];
if (!DOS_MakeName(oldname,fullold,&driveold)) return false;
if (!DOS_MakeName(newname,fullnew,&drivenew)) return false;
/* No tricks with devices */
if ( (DOS_FindDevice(oldname) != DOS_DEVICES) ||
(DOS_FindDevice(newname) != DOS_DEVICES) ) {
DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
/* Must be on the same drive */
if(driveold != drivenew) {
DOS_SetError(DOSERR_NOT_SAME_DEVICE);
return false;
}
/*Test if target exists => no access */
Bit16u attr;
if(Drives[drivenew]->GetFileAttr(fullnew,&attr)) {
DOS_SetError(DOSERR_ACCESS_DENIED);
return false;
}
/* Source must exist, check for path ? */
if (!Drives[driveold]->GetFileAttr( fullold, &attr ) ) {
DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
if (Drives[drivenew]->Rename(fullold,fullnew)) return true;
	/* If it still fails, which error should we give? PATH NOT FOUND or EACCESS */
LOG(LOG_FILES,LOG_NORMAL)("Rename fails for %s to %s, no proper errorcode returned.",oldname,newname);
DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
bool DOS_FindFirst(char * search,Bit16u attr,bool fcb_findfirst) {
DOS_DTA dta(dos.dta());
Bit8u drive;char fullsearch[DOS_PATHLENGTH];
char dir[DOS_PATHLENGTH];char pattern[DOS_PATHLENGTH];
size_t len = strlen(search);
if(len && search[len - 1] == '\\' && !( (len > 2) && (search[len - 2] == ':') && (attr == DOS_ATTR_VOLUME) )) {
		//Dark Forces installer, but c:\ is all right for volume labels (exclusively set)
DOS_SetError(DOSERR_NO_MORE_FILES);
return false;
}
if (!DOS_MakeName(search,fullsearch,&drive)) return false;
//Check for devices. FindDevice checks for leading subdir as well
bool device = (DOS_FindDevice(search) != DOS_DEVICES);
/* Split the search in dir and pattern */
char * find_last;
find_last=strrchr(fullsearch,'\\');
if (!find_last) { /*No dir */
strcpy(pattern,fullsearch);
dir[0]=0;
} else {
*find_last=0;
strcpy(pattern,find_last+1);
strcpy(dir,fullsearch);
}
dta.SetupSearch(drive,(Bit8u)attr,pattern);
if(device) {
find_last = strrchr(pattern,'.');
if(find_last) *find_last = 0;
//TODO use current date and time
dta.SetResult(pattern,0,0,0,DOS_ATTR_DEVICE);
LOG(LOG_DOSMISC,LOG_WARN)("finding device %s",pattern);
return true;
}
if (Drives[drive]->FindFirst(dir,dta,fcb_findfirst)) return true;
return false;
}
bool DOS_FindNext(void) {
DOS_DTA dta(dos.dta());
Bit8u i = dta.GetSearchDrive();
if(i >= DOS_DRIVES || !Drives[i]) {
/* Corrupt search. */
LOG(LOG_FILES,LOG_ERROR)("Corrupt search!!!!");
DOS_SetError(DOSERR_NO_MORE_FILES);
return false;
}
if (Drives[i]->FindNext(dta)) return true;
return false;
}
bool DOS_ReadFile(Bit16u entry,Bit8u * data,Bit16u * amount) {
Bit32u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle] || !Files[handle]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
/*
if ((Files[handle]->flags & 0x0f) == OPEN_WRITE)) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
}
*/
Bit16u toread=*amount;
bool ret=Files[handle]->Read(data,&toread);
*amount=toread;
return ret;
}
bool DOS_WriteFile(Bit16u entry,Bit8u * data,Bit16u * amount) {
Bit32u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle] || !Files[handle]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
/*
if ((Files[handle]->flags & 0x0f) == OPEN_READ)) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
}
*/
Bit16u towrite=*amount;
bool ret=Files[handle]->Write(data,&towrite);
*amount=towrite;
return ret;
}
bool DOS_SeekFile(Bit16u entry,Bit32u * pos,Bit32u type) {
Bit32u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle] || !Files[handle]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
return Files[handle]->Seek(pos,type);
}
bool DOS_CloseFile(Bit16u entry) {
Bit32u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle]) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (Files[handle]->IsOpen()) {
Files[handle]->Close();
}
DOS_PSP psp(dos.psp());
psp.SetFileHandle(entry,0xff);
if (Files[handle]->RemoveRef()<=0) {
delete Files[handle];
Files[handle]=0;
}
return true;
}
bool DOS_FlushFile(Bit16u entry) {
Bit32u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle] || !Files[handle]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
LOG(LOG_DOSMISC,LOG_NORMAL)("FFlush used.");
return true;
}
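/* PathExists returns true when every directory component of 'name' exists;
 * callers use it to pick between PATH_NOT_FOUND and FILE_NOT_FOUND. */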
static bool PathExists(char const * const name) {
const char* leading = strrchr(name,'\\');
if(!leading) return true;
char temp[CROSS_LEN];
strcpy(temp,name);
char * lead = strrchr(temp,'\\');
if (lead == temp) return true;
*lead = 0;
Bit8u drive;char fulldir[DOS_PATHLENGTH];
if (!DOS_MakeName(temp,fulldir,&drive)) return false;
if(!Drives[drive]->TestDir(fulldir)) return false;
return true;
}
bool DOS_CreateFile(char const * name,Bit16u attributes,Bit16u * entry) {
// Creation of a device is the same as opening it
// Tc201 installer
if (DOS_FindDevice(name) != DOS_DEVICES)
return DOS_OpenFile(name, OPEN_READ, entry);
LOG(LOG_FILES,LOG_NORMAL)("file create attributes %X file %s",attributes,name);
char fullname[DOS_PATHLENGTH];Bit8u drive;
DOS_PSP psp(dos.psp());
if (!DOS_MakeName(name,fullname,&drive)) return false;
/* Check for a free file handle */
Bit8u handle=DOS_FILES;Bit8u i;
for (i=0;i<DOS_FILES;i++) {
if (!Files[i]) {
handle=i;
break;
}
}
if (handle==DOS_FILES) {
DOS_SetError(DOSERR_TOO_MANY_OPEN_FILES);
return false;
}
/* We have a position in the main table now find one in the psp table */
*entry = psp.FindFreeFileEntry();
if (*entry==0xff) {
DOS_SetError(DOSERR_TOO_MANY_OPEN_FILES);
return false;
}
/* Don't allow directories to be created */
if (attributes&DOS_ATTR_DIRECTORY) {
DOS_SetError(DOSERR_ACCESS_DENIED);
return false;
}
bool foundit=Drives[drive]->FileCreate(&Files[handle],fullname,attributes);
if (foundit) {
Files[handle]->SetDrive(drive);
Files[handle]->AddRef();
psp.SetFileHandle(*entry,handle);
return true;
} else {
if(!PathExists(name)) DOS_SetError(DOSERR_PATH_NOT_FOUND);
else DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
}
bool DOS_OpenFile(char const * name,Bit8u flags,Bit16u * entry) {
/* First check for devices */
if (flags>2) LOG(LOG_FILES,LOG_ERROR)("Special file open command %X file %s",flags,name);
else LOG(LOG_FILES,LOG_NORMAL)("file open command %X file %s",flags,name);
DOS_PSP psp(dos.psp());
Bit16u attr = 0;
Bit8u devnum = DOS_FindDevice(name);
bool device = (devnum != DOS_DEVICES);
if(!device && DOS_GetFileAttr(name,&attr)) {
		//DON'T ALLOW directories to be opened (skip test if file is device).
if((attr & DOS_ATTR_DIRECTORY) || (attr & DOS_ATTR_VOLUME)){
DOS_SetError(DOSERR_ACCESS_DENIED);
return false;
}
}
char fullname[DOS_PATHLENGTH];Bit8u drive;Bit8u i;
/* First check if the name is correct */
if (!DOS_MakeName(name,fullname,&drive)) return false;
Bit8u handle=255;
/* Check for a free file handle */
for (i=0;i<DOS_FILES;i++) {
if (!Files[i]) {
handle=i;
break;
}
}
if (handle==255) {
DOS_SetError(DOSERR_TOO_MANY_OPEN_FILES);
return false;
}
/* We have a position in the main table now find one in the psp table */
*entry = psp.FindFreeFileEntry();
if (*entry==0xff) {
DOS_SetError(DOSERR_TOO_MANY_OPEN_FILES);
return false;
}
bool exists=false;
if (device) {
Files[handle]=new DOS_Device(*Devices[devnum]);
} else {
exists=Drives[drive]->FileOpen(&Files[handle],fullname,flags);
if (exists) Files[handle]->SetDrive(drive);
}
if (exists || device ) {
Files[handle]->AddRef();
psp.SetFileHandle(*entry,handle);
return true;
} else {
		//Test if file exists, but was opened in read-write mode (and is write-protected)
if(((flags&3) != OPEN_READ) && Drives[drive]->FileExists(fullname))
DOS_SetError(DOSERR_ACCESS_DENIED);
else {
if(!PathExists(name)) DOS_SetError(DOSERR_PATH_NOT_FOUND);
else DOS_SetError(DOSERR_FILE_NOT_FOUND);
}
return false;
}
}
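/* DOS_OpenFileExtended (INT 21h/6Ch): the low nibble of 'action' selects what
 * to do when the file exists (0 = fail, 1 = open, 2 = replace) and the high
 * nibble what to do when it does not (0x00 = fail, 0x10 = create). On success,
 * 'status' reports 1 = opened, 2 = created, 3 = replaced. */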
bool DOS_OpenFileExtended(char const * name, Bit16u flags, Bit16u createAttr, Bit16u action, Bit16u *entry, Bit16u* status) {
// FIXME: Not yet supported : Bit 13 of flags (int 0x24 on critical error)
Bit16u result = 0;
if (action==0) {
// always fail setting
DOS_SetError(DOSERR_FUNCTION_NUMBER_INVALID);
return false;
} else {
if (((action & 0x0f)>2) || ((action & 0xf0)>0x10)) {
// invalid action parameter
DOS_SetError(DOSERR_FUNCTION_NUMBER_INVALID);
return false;
}
}
if (DOS_OpenFile(name, (Bit8u)(flags&0xff), entry)) {
// File already exists
switch (action & 0x0f) {
case 0x00: // failed
DOS_SetError(DOSERR_FILE_ALREADY_EXISTS);
return false;
case 0x01: // file open (already done)
result = 1;
break;
case 0x02: // replace
DOS_CloseFile(*entry);
if (!DOS_CreateFile(name, createAttr, entry)) return false;
result = 3;
break;
default:
DOS_SetError(DOSERR_FUNCTION_NUMBER_INVALID);
E_Exit("DOS: OpenFileExtended: Unknown action.");
break;
}
} else {
// File doesn't exist
if ((action & 0xf0)==0) {
// uses error code from failed open<|fim▁hole|> if (!DOS_CreateFile(name, createAttr, entry)) {
// uses error code from failed create
return false;
}
result = 2;
}
// success
*status = result;
return true;
}
bool DOS_UnlinkFile(char const * const name) {
char fullname[DOS_PATHLENGTH];Bit8u drive;
if (!DOS_MakeName(name,fullname,&drive)) return false;
if(Drives[drive]->FileUnlink(fullname)){
return true;
} else {
DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
}
bool DOS_GetFileAttr(char const * const name,Bit16u * attr) {
char fullname[DOS_PATHLENGTH];Bit8u drive;
if (!DOS_MakeName(name,fullname,&drive)) return false;
if (Drives[drive]->GetFileAttr(fullname,attr)) {
return true;
} else {
DOS_SetError(DOSERR_FILE_NOT_FOUND);
return false;
}
}
bool DOS_SetFileAttr(char const * const name,Bit16u /*attr*/)
// this function does not change the file attributes
// it just tests whether the file is available
// returns false when used on a cdrom (stonekeep)
{
Bit16u attrTemp;
char fullname[DOS_PATHLENGTH];Bit8u drive;
if (!DOS_MakeName(name,fullname,&drive)) return false;
if (strncmp(Drives[drive]->GetInfo(),"CDRom ",6)==0 || strncmp(Drives[drive]->GetInfo(),"isoDrive ",9)==0) {
DOS_SetError(DOSERR_ACCESS_DENIED);
return false;
}
return Drives[drive]->GetFileAttr(fullname,&attrTemp);
}
bool DOS_Canonicalize(char const * const name,char * const big) {
	//TODO Add better support for devices, though it is doubtful it will be needed :)
Bit8u drive;
char fullname[DOS_PATHLENGTH];
if (!DOS_MakeName(name,fullname,&drive)) return false;
big[0]=drive+'A';
big[1]=':';
big[2]='\\';
strcpy(&big[3],fullname);
return true;
}
bool DOS_GetFreeDiskSpace(Bit8u drive,Bit16u * bytes,Bit8u * sectors,Bit16u * clusters,Bit16u * free) {
if (drive==0) drive=DOS_GetDefaultDrive();
else drive--;
if ((drive>=DOS_DRIVES) || (!Drives[drive])) {
DOS_SetError(DOSERR_INVALID_DRIVE);
return false;
}
return Drives[drive]->AllocationInfo(bytes,sectors,clusters,free);
}
bool DOS_DuplicateEntry(Bit16u entry,Bit16u * newentry) {
	// Don't duplicate console handles
/* if (entry<=STDPRN) {
*newentry = entry;
return true;
};
*/
Bit8u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle] || !Files[handle]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
DOS_PSP psp(dos.psp());
*newentry = psp.FindFreeFileEntry();
if (*newentry==0xff) {
DOS_SetError(DOSERR_TOO_MANY_OPEN_FILES);
return false;
}
Files[handle]->AddRef();
psp.SetFileHandle(*newentry,handle);
return true;
}
bool DOS_ForceDuplicateEntry(Bit16u entry,Bit16u newentry) {
if(entry == newentry) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
}
Bit8u orig = RealHandle(entry);
if (orig >= DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[orig] || !Files[orig]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
Bit8u newone = RealHandle(newentry);
if (newone < DOS_FILES && Files[newone]) {
DOS_CloseFile(newentry);
}
DOS_PSP psp(dos.psp());
Files[orig]->AddRef();
psp.SetFileHandle(newentry,orig);
return true;
}
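/* DOS_CreateTempFile appends a backslash (when needed) plus eight random
 * uppercase letters to 'name' in place, retrying until the create no longer
 * fails with FILE_ALREADY_EXISTS. */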
bool DOS_CreateTempFile(char * const name,Bit16u * entry) {
size_t namelen=strlen(name);
char * tempname=name+namelen;
if (namelen==0) {
// temp file created in root directory
tempname[0]='\\';
tempname++;
} else {
if ((name[namelen-1]!='\\') && (name[namelen-1]!='/')) {
tempname[0]='\\';
tempname++;
}
}
dos.errorcode=0;
/* add random crap to the end of the name and try to open */
do {
Bit32u i;
for (i=0;i<8;i++) {
tempname[i]=(rand()%26)+'A';
}
tempname[8]=0;
} while ((!DOS_CreateFile(name,0,entry)) && (dos.errorcode==DOSERR_FILE_ALREADY_EXISTS));
if (dos.errorcode) return false;
return true;
}
#define FCB_SEP ":.;,=+"
#define ILLEGAL ":.;,=+ \t/\"[]<>|"
static bool isvalid(const char in){
const char ill[]=ILLEGAL;
return (Bit8u(in)>0x1F) && (!strchr(ill,in));
}
#define PARSE_SEP_STOP 0x01
#define PARSE_DFLT_DRIVE 0x02
#define PARSE_BLNK_FNAME 0x04
#define PARSE_BLNK_FEXT 0x08
#define PARSE_RET_NOWILD 0
#define PARSE_RET_WILD 1
#define PARSE_RET_BADDRIVE 0xff
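/* FCB_Parsename (INT 21h/29h) fills the FCB at seg:offset from a command-line
 * style string, space-padding the 8-char name and 3-char extension fields.
 * Illustrative examples (assuming drive B is mounted): "B:TEST.TXT" stores
 * drive 2, name "TEST    ", ext "TXT" and returns PARSE_RET_NOWILD, while
 * "*.TXT" stores name "????????" and returns PARSE_RET_WILD. */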
Bit8u FCB_Parsename(Bit16u seg,Bit16u offset,Bit8u parser ,char *string, Bit8u *change) {
char * string_begin=string;
Bit8u ret=0;
if (!(parser & PARSE_DFLT_DRIVE)) {
// default drive forced, this intentionally invalidates an extended FCB
mem_writeb(PhysMake(seg,offset),0);
}
DOS_FCB fcb(seg,offset,false); // always a non-extended FCB
bool hasdrive,hasname,hasext,finished;
hasdrive=hasname=hasext=finished=false;
Bitu index=0;
Bit8u fill=' ';
/* First get the old data from the fcb */
#ifdef _MSC_VER
#pragma pack (1)
#endif
union {
struct {
char drive[2];
char name[9];
char ext[4];
} GCC_ATTRIBUTE (packed) part;
char full[DOS_FCBNAME];
} fcb_name;
#ifdef _MSC_VER
#pragma pack()
#endif
/* Get the old information from the previous fcb */
fcb.GetName(fcb_name.full);
fcb_name.part.drive[0]-='A'-1;fcb_name.part.drive[1]=0;
fcb_name.part.name[8]=0;fcb_name.part.ext[3]=0;
	/* Strip off the leading separator */
	if((parser & PARSE_SEP_STOP) && *string) { //ignore leading separator
char sep[] = FCB_SEP;char a[2];
a[0]= *string;a[1]='\0';
if (strcspn(a,sep)==0) string++;
}
/* strip leading spaces */
while((*string==' ')||(*string=='\t')) string++;
/* Check for a drive */
if (string[1]==':') {
fcb_name.part.drive[0]=0;
hasdrive=true;
if (isalpha(string[0]) && Drives[toupper(string[0])-'A']) {
fcb_name.part.drive[0]=(char)(toupper(string[0])-'A'+1);
} else ret=0xff;
string+=2;
}
/* Special checks for . and .. */
if (string[0]=='.') {
string++;
if (!string[0]) {
hasname=true;
ret=PARSE_RET_NOWILD;
strcpy(fcb_name.part.name,". ");
goto savefcb;
}
		if (string[0]=='.' && !string[1]) {
string++;
hasname=true;
ret=PARSE_RET_NOWILD;
strcpy(fcb_name.part.name,".. ");
goto savefcb;
}
goto checkext;
}
/* Copy the name */
hasname=true;finished=false;fill=' ';index=0;
while (index<8) {
if (!finished) {
if (string[0]=='*') {fill='?';fcb_name.part.name[index]='?';if (!ret) ret=1;finished=true;}
else if (string[0]=='?') {fcb_name.part.name[index]='?';if (!ret) ret=1;}
else if (isvalid(string[0])) {fcb_name.part.name[index]=(char)(toupper(string[0]));}
else { finished=true;continue; }
string++;
} else {
fcb_name.part.name[index]=fill;
}
index++;
}
if (!(string[0]=='.')) goto savefcb;
string++;
checkext:
/* Copy the extension */
hasext=true;finished=false;fill=' ';index=0;
while (index<3) {
if (!finished) {
if (string[0]=='*') {fill='?';fcb_name.part.ext[index]='?';finished=true;}
else if (string[0]=='?') {fcb_name.part.ext[index]='?';if (!ret) ret=1;}
else if (isvalid(string[0])) {fcb_name.part.ext[index]=(char)(toupper(string[0]));}
else { finished=true;continue; }
string++;
} else {
fcb_name.part.ext[index]=fill;
}
index++;
}
savefcb:
	if (!hasdrive && !(parser & PARSE_DFLT_DRIVE)) fcb_name.part.drive[0] = 0;
	if (!hasname && !(parser & PARSE_BLNK_FNAME)) strcpy(fcb_name.part.name,"        ");
	if (!hasext && !(parser & PARSE_BLNK_FEXT)) strcpy(fcb_name.part.ext,"   ");
fcb.SetName(fcb_name.part.drive[0],fcb_name.part.name,fcb_name.part.ext);
*change=(Bit8u)(string-string_begin);
return ret;
}
static void DTAExtendName(char * const name,char * const filename,char * const ext) {
char * find=strchr(name,'.');
if (find) {
strcpy(ext,find+1);
*find=0;
} else ext[0]=0;
strcpy(filename,name);
size_t i;
for (i=strlen(name);i<8;i++) filename[i]=' ';
filename[8]=0;
for (i=strlen(ext);i<3;i++) ext[i]=' ';
ext[3]=0;
}
static void SaveFindResult(DOS_FCB & find_fcb) {
DOS_DTA find_dta(dos.tables.tempdta);
char name[DOS_NAMELENGTH_ASCII];Bit32u size;Bit16u date;Bit16u time;Bit8u attr;Bit8u drive;
char file_name[9];char ext[4];
find_dta.GetResult(name,size,date,time,attr);
drive=find_fcb.GetDrive()+1;
/* Create a correct file and extention */
DTAExtendName(name,file_name,ext);
DOS_FCB fcb(RealSeg(dos.dta()),RealOff(dos.dta()));//TODO
fcb.Create(find_fcb.Extended());
fcb.SetName(drive,file_name,ext);
fcb.SetAttr(attr); /* Only adds attribute if fcb is extended */
fcb.SetSizeDateTime(size,date,time);
}
bool DOS_FCBCreate(Bit16u seg,Bit16u offset) {
DOS_FCB fcb(seg,offset);
char shortname[DOS_FCBNAME];Bit16u handle;
fcb.GetName(shortname);
if (!DOS_CreateFile(shortname,DOS_ATTR_ARCHIVE,&handle)) return false;
fcb.FileOpen((Bit8u)handle);
return true;
}
bool DOS_FCBOpen(Bit16u seg,Bit16u offset) {
DOS_FCB fcb(seg,offset);
char shortname[DOS_FCBNAME];Bit16u handle;
fcb.GetName(shortname);
/* First check if the name is correct */
Bit8u drive;
char fullname[DOS_PATHLENGTH];
if (!DOS_MakeName(shortname,fullname,&drive)) return false;
/* Check, if file is already opened */
for (Bit8u i=0;i<DOS_FILES;i++) {
DOS_PSP psp(dos.psp());
if (Files[i] && Files[i]->IsOpen() && Files[i]->IsName(fullname)) {
handle = psp.FindEntryByHandle(i);
if (handle==0xFF) {
				// This shouldn't happen
LOG(LOG_FILES,LOG_ERROR)("DOS: File %s is opened but has no psp entry.",shortname);
return false;
}
fcb.FileOpen((Bit8u)handle);
return true;
}
}
if (!DOS_OpenFile(shortname,OPEN_READWRITE,&handle)) return false;
fcb.FileOpen((Bit8u)handle);
return true;
}
bool DOS_FCBClose(Bit16u seg,Bit16u offset) {
DOS_FCB fcb(seg,offset);
if(!fcb.Valid()) return false;
Bit8u fhandle;
fcb.FileClose(fhandle);
DOS_CloseFile(fhandle);
return true;
}
bool DOS_FCBFindFirst(Bit16u seg,Bit16u offset) {
DOS_FCB fcb(seg,offset);
RealPt old_dta=dos.dta();dos.dta(dos.tables.tempdta);
char name[DOS_FCBNAME];fcb.GetName(name);
Bit8u attr = DOS_ATTR_ARCHIVE;
fcb.GetAttr(attr); /* Gets search attributes if extended */
bool ret=DOS_FindFirst(name,attr,true);
dos.dta(old_dta);
if (ret) SaveFindResult(fcb);
return ret;
}
bool DOS_FCBFindNext(Bit16u seg,Bit16u offset) {
DOS_FCB fcb(seg,offset);
RealPt old_dta=dos.dta();dos.dta(dos.tables.tempdta);
bool ret=DOS_FindNext();
dos.dta(old_dta);
if (ret) SaveFindResult(fcb);
return ret;
}
Bit8u DOS_FCBRead(Bit16u seg,Bit16u offset,Bit16u recno) {
DOS_FCB fcb(seg,offset);
Bit8u fhandle,cur_rec;Bit16u cur_block,rec_size;
fcb.GetSeqData(fhandle,rec_size);
fcb.GetRecord(cur_block,cur_rec);
Bit32u pos=((cur_block*128)+cur_rec)*rec_size;
if (!DOS_SeekFile(fhandle,&pos,DOS_SEEK_SET)) return FCB_READ_NODATA;
Bit16u toread=rec_size;
if (!DOS_ReadFile(fhandle,dos_copybuf,&toread)) return FCB_READ_NODATA;
if (toread==0) return FCB_READ_NODATA;
if (toread < rec_size) { //Zero pad copybuffer to rec_size
Bitu i = toread;
while(i < rec_size) dos_copybuf[i++] = 0;
}
MEM_BlockWrite(Real2Phys(dos.dta())+recno*rec_size,dos_copybuf,rec_size);
if (++cur_rec>127) { cur_block++;cur_rec=0; }
fcb.SetRecord(cur_block,cur_rec);
if (toread==rec_size) return FCB_SUCCESS;
if (toread==0) return FCB_READ_NODATA;
return FCB_READ_PARTIAL;
}
Bit8u DOS_FCBWrite(Bit16u seg,Bit16u offset,Bit16u recno) {
DOS_FCB fcb(seg,offset);
Bit8u fhandle,cur_rec;Bit16u cur_block,rec_size;
fcb.GetSeqData(fhandle,rec_size);
fcb.GetRecord(cur_block,cur_rec);
Bit32u pos=((cur_block*128)+cur_rec)*rec_size;
if (!DOS_SeekFile(fhandle,&pos,DOS_SEEK_SET)) return FCB_ERR_WRITE;
MEM_BlockRead(Real2Phys(dos.dta())+recno*rec_size,dos_copybuf,rec_size);
Bit16u towrite=rec_size;
if (!DOS_WriteFile(fhandle,dos_copybuf,&towrite)) return FCB_ERR_WRITE;
Bit32u size;Bit16u date,time;
fcb.GetSizeDateTime(size,date,time);
if (pos+towrite>size) size=pos+towrite;
	//time doesn't keep track of end of day
date = DOS_PackDate(dos.date.year,dos.date.month,dos.date.day);
Bit32u ticks = mem_readd(BIOS_TIMER);
Bit32u seconds = (ticks*10)/182;
Bit16u hour = (Bit16u)(seconds/3600);
Bit16u min = (Bit16u)((seconds % 3600)/60);
Bit16u sec = (Bit16u)(seconds % 60);
time = DOS_PackTime(hour,min,sec);
Bit8u temp=RealHandle(fhandle);
Files[temp]->time=time;
Files[temp]->date=date;
fcb.SetSizeDateTime(size,date,time);
if (++cur_rec>127) { cur_block++;cur_rec=0; }
fcb.SetRecord(cur_block,cur_rec);
return FCB_SUCCESS;
}
Bit8u DOS_FCBIncreaseSize(Bit16u seg,Bit16u offset) {
DOS_FCB fcb(seg,offset);
Bit8u fhandle,cur_rec;Bit16u cur_block,rec_size;
fcb.GetSeqData(fhandle,rec_size);
fcb.GetRecord(cur_block,cur_rec);
Bit32u pos=((cur_block*128)+cur_rec)*rec_size;
if (!DOS_SeekFile(fhandle,&pos,DOS_SEEK_SET)) return FCB_ERR_WRITE;
Bit16u towrite=0;
if (!DOS_WriteFile(fhandle,dos_copybuf,&towrite)) return FCB_ERR_WRITE;
Bit32u size;Bit16u date,time;
fcb.GetSizeDateTime(size,date,time);
if (pos+towrite>size) size=pos+towrite;
	//time doesn't keep track of end of day
date = DOS_PackDate(dos.date.year,dos.date.month,dos.date.day);
Bit32u ticks = mem_readd(BIOS_TIMER);
Bit32u seconds = (ticks*10)/182;
Bit16u hour = (Bit16u)(seconds/3600);
Bit16u min = (Bit16u)((seconds % 3600)/60);
Bit16u sec = (Bit16u)(seconds % 60);
time = DOS_PackTime(hour,min,sec);
Bit8u temp=RealHandle(fhandle);
Files[temp]->time=time;
Files[temp]->date=date;
fcb.SetSizeDateTime(size,date,time);
fcb.SetRecord(cur_block,cur_rec);
return FCB_SUCCESS;
}
Bit8u DOS_FCBRandomRead(Bit16u seg,Bit16u offset,Bit16u numRec,bool restore) {
	/* If restore is true: random read, else random block read.
	 * Random read updates old block and old record to reflect the random data
	 * before the read! The random data itself is not updated (the user must do this).
	 * Random block read updates these fields to reflect the state after the read!
*/
/* BUG: numRec should return the amount of records read!
* Not implemented yet as I'm unsure how to count on error states (partial/failed)
*/
DOS_FCB fcb(seg,offset);
Bit32u random;
Bit16u old_block=0;
Bit8u old_rec=0;
Bit8u error=0;
/* Set the correct record from the random data */
fcb.GetRandom(random);
fcb.SetRecord((Bit16u)(random / 128),(Bit8u)(random & 127));
if (restore) fcb.GetRecord(old_block,old_rec);//store this for after the read.
// Read records
for (int i=0; i<numRec; i++) {
error = DOS_FCBRead(seg,offset,(Bit16u)i);
if (error!=0x00) break;
}
Bit16u new_block;Bit8u new_rec;
fcb.GetRecord(new_block,new_rec);
if (restore) fcb.SetRecord(old_block,old_rec);
/* Update the random record pointer with new position only when restore is false*/
if(!restore) fcb.SetRandom(new_block*128+new_rec);
return error;
}
Bit8u DOS_FCBRandomWrite(Bit16u seg,Bit16u offset,Bit16u numRec,bool restore) {
/* see FCB_RandomRead */
DOS_FCB fcb(seg,offset);
Bit32u random;
Bit16u old_block=0;
Bit8u old_rec=0;
Bit8u error=0;
/* Set the correct record from the random data */
fcb.GetRandom(random);
fcb.SetRecord((Bit16u)(random / 128),(Bit8u)(random & 127));
if (restore) fcb.GetRecord(old_block,old_rec);
if (numRec>0) {
/* Write records */
for (int i=0; i<numRec; i++) {
			error = DOS_FCBWrite(seg,offset,(Bit16u)i); // DOS_FCBWrite returns 0 (FCB_SUCCESS) on success
if (error!=0x00) break;
}
} else {
DOS_FCBIncreaseSize(seg,offset);
}
Bit16u new_block;Bit8u new_rec;
fcb.GetRecord(new_block,new_rec);
if (restore) fcb.SetRecord(old_block,old_rec);
/* Update the random record pointer with new position only when restore is false */
if(!restore) fcb.SetRandom(new_block*128+new_rec);
return error;
}
bool DOS_FCBGetFileSize(Bit16u seg,Bit16u offset) {
char shortname[DOS_PATHLENGTH];Bit16u entry;Bit8u handle;Bit16u rec_size;
DOS_FCB fcb(seg,offset);
fcb.GetName(shortname);
if (!DOS_OpenFile(shortname,OPEN_READ,&entry)) return false;
handle = RealHandle(entry);
Bit32u size = 0;
Files[handle]->Seek(&size,DOS_SEEK_END);
DOS_CloseFile(entry);fcb.GetSeqData(handle,rec_size);
Bit32u random=(size/rec_size);
if (size % rec_size) random++;
fcb.SetRandom(random);
return true;
}
bool DOS_FCBDeleteFile(Bit16u seg,Bit16u offset){
	/* FCB DELETE honours wildcards. It will return true if one or more
	 * files get deleted.
	 * To achieve this, the dta is set to a temporary dta in which found files
	 * are stored. This cannot be the tempdta as that one is used by fcbfindfirst.
*/
RealPt old_dta=dos.dta();dos.dta(dos.tables.tempdta_fcbdelete);
DOS_FCB fcb(RealSeg(dos.dta()),RealOff(dos.dta()));
bool nextfile = false;
bool return_value = false;
nextfile = DOS_FCBFindFirst(seg,offset);
while(nextfile) {
char shortname[DOS_FCBNAME] = { 0 };
fcb.GetName(shortname);
bool res=DOS_UnlinkFile(shortname);
if(!return_value && res) return_value = true; //at least one file deleted
nextfile = DOS_FCBFindNext(seg,offset);
}
dos.dta(old_dta); /*Restore dta */
return return_value;
}
bool DOS_FCBRenameFile(Bit16u seg, Bit16u offset){
DOS_FCB fcbold(seg,offset);
DOS_FCB fcbnew(seg,offset+16);
char oldname[DOS_FCBNAME];
char newname[DOS_FCBNAME];
fcbold.GetName(oldname);fcbnew.GetName(newname);
return DOS_Rename(oldname,newname);
}
void DOS_FCBSetRandomRecord(Bit16u seg, Bit16u offset) {
DOS_FCB fcb(seg,offset);
Bit16u block;Bit8u rec;
fcb.GetRecord(block,rec);
fcb.SetRandom(block*128+rec);
}
bool DOS_FileExists(char const * const name) {
char fullname[DOS_PATHLENGTH];Bit8u drive;
if (!DOS_MakeName(name,fullname,&drive)) return false;
return Drives[drive]->FileExists(fullname);
}
bool DOS_GetAllocationInfo(Bit8u drive,Bit16u * _bytes_sector,Bit8u * _sectors_cluster,Bit16u * _total_clusters) {
if (!drive) drive = DOS_GetDefaultDrive();
else drive--;
if (drive >= DOS_DRIVES || !Drives[drive]) return false;
Bit16u _free_clusters;
Drives[drive]->AllocationInfo(_bytes_sector,_sectors_cluster,_total_clusters,&_free_clusters);
SegSet16(ds,RealSeg(dos.tables.mediaid));
reg_bx=RealOff(dos.tables.mediaid+drive*2);
return true;
}
bool DOS_SetDrive(Bit8u drive) {
if (Drives[drive]) {
DOS_SetDefaultDrive(drive);
return true;
} else {
return false;
}
}
bool DOS_GetFileDate(Bit16u entry, Bit16u* otime, Bit16u* odate) {
Bit32u handle=RealHandle(entry);
if (handle>=DOS_FILES) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle] || !Files[handle]->IsOpen()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
};
if (!Files[handle]->UpdateDateTimeFromHost()) {
DOS_SetError(DOSERR_INVALID_HANDLE);
return false;
}
*otime = Files[handle]->time;
*odate = Files[handle]->date;
return true;
}
void DOS_SetupFiles (void) {
/* Setup the File Handles */
Bit32u i;
for (i=0;i<DOS_FILES;i++) {
Files[i]=0;
}
/* Setup the Virtual Disk System */
for (i=0;i<DOS_DRIVES;i++) {
Drives[i]=0;
}
Drives[25]=new Virtual_Drive();
}<|fim▁end|> | return false;
}
// Create File |
<|file_name|>yieldoneBidAdapter_spec.js<|end_file_name|><|fim▁begin|>import { expect } from 'chai';
import { spec } from 'modules/yieldoneBidAdapter.js';
import { newBidder } from 'src/adapters/bidderFactory.js';
import { deepClone } from 'src/utils.js';
const ENDPOINT = 'https://y.one.impact-ad.jp/h_bid';
const USER_SYNC_URL = 'https://y.one.impact-ad.jp/push_sync';
const VIDEO_PLAYER_URL = 'https://img.ak.impact-ad.jp/ic/pone/ivt/firstview/js/dac-video-prebid.min.js';
const DEFAULT_VIDEO_SIZE = {w: 640, h: 360};
describe('yieldoneBidAdapter', function() {
const adapter = newBidder(spec);
describe('isBidRequestValid', function () {
let bid = {
'bidder': 'yieldone',
'params': {
placementId: '36891'
},
'adUnitCode': 'adunit-code',
'sizes': [[300, 250], [336, 280]],
'bidId': '23beaa6af6cdde',
'bidderRequestId': '19c0c1efdf37e7',
'auctionId': '61466567-d482-4a16-96f0-fe5f25ffbdf1',
};
it('should return true when required params found', function () {
expect(spec.isBidRequestValid(bid)).to.equal(true);
});
it('should return false when placementId not passed correctly', function () {
bid.params.placementId = '';
expect(spec.isBidRequestValid(bid)).to.equal(false);
});
    it('should return false when required params are not passed', function () {
      // deepClone avoids the self-referencing `let bid = Object.assign({}, bid)`
      // TDZ bug and keeps the shared fixture untouched.
      const invalidBid = deepClone(bid);
      invalidBid.params = {};
      expect(spec.isBidRequestValid(invalidBid)).to.equal(false);
});
});
describe('buildRequests', function () {
const bidderRequest = {
refererInfo: {
numIframes: 0,
reachedTop: true,
referer: 'http://example.com',
stack: ['http://example.com']
}
};
describe('Basic', function () {
const bidRequests = [
{
'bidder': 'yieldone',
'params': {placementId: '36891'},
'adUnitCode': 'adunit-code1',
'bidId': '23beaa6af6cdde',
'bidderRequestId': '19c0c1efdf37e7',
'auctionId': '61466567-d482-4a16-96f0-fe5f25ffbdf1',
},
{
'bidder': 'yieldone',
'params': {placementId: '47919'},
'adUnitCode': 'adunit-code2',
        'bidId': '382091349b149f',
        'bidderRequestId': '1f9c98192de251',
'auctionId': '61466567-d482-4a16-96f0-fe5f25ffbdf1',
}
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('sends bid request to our endpoint via GET', function () {
expect(request[0].method).to.equal('GET');
expect(request[1].method).to.equal('GET');
});
it('attaches source and version to endpoint URL as query params', function () {
expect(request[0].url).to.equal(ENDPOINT);
expect(request[1].url).to.equal(ENDPOINT);
});
it('adUnitCode should be sent as uc parameters on any requests', function () {
expect(request[0].data.uc).to.equal('adunit-code1');
expect(request[1].data.uc).to.equal('adunit-code2');
});
});
describe('Old Format', function () {
const bidRequests = [
{
params: {placementId: '0'},
mediaType: 'banner',
sizes: [[300, 250], [336, 280]],
},
{
params: {placementId: '1'},
mediaType: 'banner',
sizes: [[336, 280]],
},
{
// It doesn't actually exist.
params: {placementId: '2'},
},
{
params: {placementId: '3'},
mediaType: 'video',
sizes: [[1280, 720], [1920, 1080]],
},
{
params: {placementId: '4'},
mediaType: 'video',
sizes: [[1920, 1080]],
},
{
params: {placementId: '5'},
mediaType: 'video',
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data.sz).to.equal('336x280');
expect(request[2].data.sz).to.equal('');
expect(request[3].data).to.not.have.property('sz');
expect(request[4].data).to.not.have.property('sz');
expect(request[5].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data).to.not.have.property('w');
expect(request[2].data).to.not.have.property('w');
expect(request[3].data.w).to.equal(1280);
expect(request[3].data.h).to.equal(720);
expect(request[4].data.w).to.equal(1920);
expect(request[4].data.h).to.equal(1080);
expect(request[5].data.w).to.equal(DEFAULT_VIDEO_SIZE.w);
expect(request[5].data.h).to.equal(DEFAULT_VIDEO_SIZE.h);
});
});
describe('New Format', function () {
const bidRequests = [
{
params: {placementId: '0'},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
},
},
{
params: {placementId: '1'},
mediaTypes: {
banner: {
sizes: [[336, 280]],
},
},
},
{
// It doesn't actually exist.
params: {placementId: '2'},
mediaTypes: {
banner: {
},
},
},
{
params: {placementId: '3'},
mediaTypes: {
video: {
context: 'outstream',
playerSize: [[1280, 720], [1920, 1080]],
},
},
},
{
params: {placementId: '4'},
mediaTypes: {
video: {
context: 'outstream',
playerSize: [1920, 1080],
},
},
},
{
params: {placementId: '5'},
mediaTypes: {
video: {
context: 'outstream',
},
},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data.sz).to.equal('336x280');
expect(request[2].data.sz).to.equal('');
expect(request[3].data).to.not.have.property('sz');
expect(request[4].data).to.not.have.property('sz');
expect(request[5].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data).to.not.have.property('w');
expect(request[2].data).to.not.have.property('w');
expect(request[3].data.w).to.equal(1280);
expect(request[3].data.h).to.equal(720);
expect(request[4].data.w).to.equal(1920);
expect(request[4].data.h).to.equal(1080);
expect(request[5].data.w).to.equal(DEFAULT_VIDEO_SIZE.w);
expect(request[5].data.h).to.equal(DEFAULT_VIDEO_SIZE.h);
});
});
describe('Multiple Format', function () {
const bidRequests = [
{
// It will be treated as a banner.
params: {
placementId: '0',
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [1920, 1080],
},
},
},
{
// It will be treated as a video.
params: {
placementId: '1',
playerParams: {},
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [1920, 1080],
},
},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data.w).to.equal(1920);
expect(request[1].data.h).to.equal(1080);
});
});
describe('FLUX Format', function () {
const bidRequests = [
{
// It will be treated as a banner.
params: {
placementId: '0',
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [[1, 1]],
},
},
},
{
// It will be treated as a video.
params: {
placementId: '1',
playerParams: {},
playerSize: [1920, 1080],
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [[1, 1]],
},
},
},
{
// It will be treated as a video.
params: {
placementId: '2',
playerParams: {},
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [[1, 1]],
},
},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data).to.not.have.property('sz');
expect(request[2].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data.w).to.equal(1920);
expect(request[1].data.h).to.equal(1080);
expect(request[2].data.w).to.equal(DEFAULT_VIDEO_SIZE.w);
expect(request[2].data.h).to.equal(DEFAULT_VIDEO_SIZE.h);
});
});
describe('LiveRampID', function () {
it('dont send LiveRampID if undefined', function () {
const bidRequests = [
{
params: {placementId: '0'},
},
{
params: {placementId: '1'},
userId: {},
},
{
params: {placementId: '2'},
userId: undefined,
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data).to.not.have.property('lr_env');
expect(request[1].data).to.not.have.property('lr_env');
expect(request[2].data).to.not.have.property('lr_env');
});
it('should send LiveRampID if available', function () {
const bidRequests = [
{
params: {placementId: '0'},
userId: {idl_env: 'idl_env_sample'},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data.lr_env).to.equal('idl_env_sample');
});
});
describe('IMID', function () {
it('dont send IMID if undefined', function () {
const bidRequests = [
{
params: {placementId: '0'},
},
{
params: {placementId: '1'},
userId: {},
},
{
params: {placementId: '2'},<|fim▁hole|> ];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data).to.not.have.property('imuid');
expect(request[1].data).to.not.have.property('imuid');
expect(request[2].data).to.not.have.property('imuid');
});
it('should send IMID if available', function () {
const bidRequests = [
{
params: {placementId: '0'},
userId: {imuid: 'imuid_sample'},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data.imuid).to.equal('imuid_sample');
});
});
});
describe('interpretResponse', function () {
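    // Note: the cpm fixtures below imply the adapter scales the server-side
    // cpm by 1000 (e.g. 0.0536616 -> 53.6616 JPY).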
let bidRequestBanner = [
{
'method': 'GET',
'url': 'https://y.one.impact-ad.jp/h_bid',
'data': {
'v': 'hb1',
'p': '36891',
'sz': '300x250,336x280',
'cb': 12892917383,
'r': 'http%3A%2F%2Flocalhost%3A9876%2F%3Fid%3D74552836',
'uid': '23beaa6af6cdde',
't': 'i'
}
}
];
let serverResponseBanner = {
body: {
'adTag': '<!-- adtag -->',
'uid': '23beaa6af6cdde',
'height': 250,
'width': 300,
'cpm': 0.0536616,
'crid': '2494768',
'currency': 'JPY',
'statusMessage': 'Bid available',
'dealId': 'P1-FIX-7800-DSP-MON',
        'adomain': [
'www.example.com'
]
}
};
it('should get the correct bid response for banner', function () {
let expectedResponse = [{
'requestId': '23beaa6af6cdde',
'cpm': 53.6616,
'width': 300,
'height': 250,
'creativeId': '2494768',
'dealId': 'P1-FIX-7800-DSP-MON',
'currency': 'JPY',
'netRevenue': true,
'ttl': 3000,
'referrer': '',
'meta': {
'advertiserDomains': [
'www.example.com'
]
},
'mediaType': 'banner',
'ad': '<!-- adtag -->'
}];
let result = spec.interpretResponse(serverResponseBanner, bidRequestBanner[0]);
expect(Object.keys(result[0])).to.deep.equal(Object.keys(expectedResponse[0]));
expect(result[0].mediaType).to.equal(expectedResponse[0].mediaType);
});
let serverResponseVideo = {
body: {
'uid': '23beaa6af6cdde',
'height': 360,
'width': 640,
'cpm': 0.0536616,
'dealId': 'P1-FIX-766-DSP-MON',
'crid': '2494768',
'currency': 'JPY',
'statusMessage': 'Bid available',
'adm': '<!-- vast -->'
}
};
let bidRequestVideo = [
{
'method': 'GET',
'url': 'https://y.one.impact-ad.jp/h_bid',
'data': {
'v': 'hb1',
'p': '41993',
'w': '640',
'h': '360',
'cb': 12892917383,
'r': 'http%3A%2F%2Flocalhost%3A9876%2F%3Fid%3D74552836',
'uid': '23beaa6af6cdde',
't': 'i'
}
}
];
it('should get the correct bid response for video', function () {
let expectedResponse = [{
'requestId': '23beaa6af6cdde',
'cpm': 53.6616,
'width': 640,
'height': 360,
'creativeId': '2494768',
        'dealId': 'P1-FIX-766-DSP-MON',
'currency': 'JPY',
'netRevenue': true,
'ttl': 3000,
'referrer': '',
'meta': {
'advertiserDomains': []
},
'mediaType': 'video',
'vastXml': '<!-- vast -->',
'renderer': {
id: '23beaa6af6cdde',
url: VIDEO_PLAYER_URL
}
}];
let result = spec.interpretResponse(serverResponseVideo, bidRequestVideo[0]);
expect(Object.keys(result[0])).to.deep.equal(Object.keys(expectedResponse[0]));
expect(result[0].mediaType).to.equal(expectedResponse[0].mediaType);
expect(result[0].renderer.id).to.equal(expectedResponse[0].renderer.id);
expect(result[0].renderer.url).to.equal(expectedResponse[0].renderer.url);
});
it('handles empty bid response', function () {
let response = {
body: {
'uid': '2c0b634db95a01',
'height': 0,
'crid': '',
'statusMessage': 'Bid returned empty or error response',
'width': 0,
'cpm': 0
}
};
let result = spec.interpretResponse(response, bidRequestBanner[0]);
expect(result.length).to.equal(0);
});
});
describe('getUserSyncs', function () {
it('handles empty sync options', function () {
expect(spec.getUserSyncs({})).to.be.undefined;
});
it('should return a sync url if iframe syncs are enabled', function () {
expect(spec.getUserSyncs({
'iframeEnabled': true
})).to.deep.equal([{
type: 'iframe', url: USER_SYNC_URL
}]);
});
});
});<|fim▁end|> | userId: undefined,
}, |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""<|fim▁hole|>WSGI config for Carkinos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Carkinos.settings")
application = get_wsgi_application()<|fim▁end|> | |
<|file_name|>costMemory.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function<|fim▁hole|>import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
self.cost = cost
self.seen = True
def update(self, cost, memory_weight, new_weight, pessimism):
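        # Weighted-average update: the remembered cost keeps a share
        # proportional to memory_weight. With pessimism > 0, costs that rose
        # relative to memory get extra weight, so congestion is adopted
        # faster than relief.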
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
class CostMemory(handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
        # the interval length (only known for certain if multiple intervals
# have been seen)
self.interval_length = 214748 # SUMOTIME_MAXSTRING
        # the interval currently being parsed
self.current_interval = None
        # the combined weight of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()])
self.pessimism = pessimism
def startElement(self, name, attrs):
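        # SAX callback: dispatches on the <interval> and <edge> elements of
        # the netstats dump file.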
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
        if self.iteration is None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length
if len(self.intervals.keys()) > 1:
sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
        values = list(values)
        if values:
            return sum(values) / len(values)
        else:
            return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
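    # NOTE: despite its name, this method returns the median of the values.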
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed<|fim▁end|> | from __future__ import absolute_import
import os |
<|file_name|>plane.rs<|end_file_name|><|fim▁begin|>extern crate tray_rust;
extern crate rand;
extern crate image;
use std::sync::Arc;
use rand::StdRng;
use tray_rust::linalg::{AnimatedTransform, Transform, Point, Vector};
use tray_rust::film::{Colorf, RenderTarget, Camera, ImageSample};
use tray_rust::film::filter::MitchellNetravali;
use tray_rust::geometry::{Rectangle, Instance};
use tray_rust::material::Matte;
use tray_rust::sampler::{BlockQueue, LowDiscrepancy, Sampler};
use tray_rust::texture;
fn main() {
let width = 800usize;
let height = 600usize;
let filter =
Box::new(MitchellNetravali::new(2.0, 2.0, 0.333333333333333333, 0.333333333333333333));
let rt = RenderTarget::new((width, height), (20, 20), filter);
let transform =
AnimatedTransform::unanimated(&Transform::look_at(&Point::new(0.0, 0.0, -10.0),
&Point::new(0.0, 0.0, 0.0),
&Vector::new(0.0, 1.0, 0.0)));
let camera = Camera::new(transform, 40.0, rt.dimensions(), 0.5, 0);
let plane = Rectangle::new(2.0, 2.0);
let geometry_lock = Arc::new(plane);
// TODO: From a code usage standpoint it might be nice to have a constant version
// of the material ctor exposed which takes the plain types and builds the textures internally
let texture = Arc::new(texture::ConstantColor::new(Colorf::new(0.740063, 0.742313, 0.733934)));
let roughness = Arc::new(texture::ConstantScalar::new(1.0));
let white_wall = Matte::new(texture, roughness);
let material_lock = Arc::new(white_wall);
let position_transform =
AnimatedTransform::unanimated(&Transform::translate(&Vector::new(0.0, 2.0, 0.0)));
let instance = Instance::receiver(geometry_lock,
material_lock,
position_transform,
"single_plane".to_string());
let dim = rt.dimensions();
    // A block queue is how work is distributed among threads: it's a list of tiles
// of the image that have yet to be rendered. Each thread will pull a block from
// this queue and render it.
let block_queue = BlockQueue::new((dim.0 as u32, dim.1 as u32), (8, 8), (0, 0));
let block_dim = block_queue.block_dim();
    // A sampler is responsible for choosing randomly placed locations within a pixel to
    // get a good sampling of the image. Using a poor quality sampler will result in a
    // noisier and more aliased image that converges slower. The LowDiscrepancy sampler
// is a good choice for quality.
let mut sampler = LowDiscrepancy::new(block_dim, 2);
let mut sample_pos = Vec::with_capacity(sampler.max_spp());
let mut block_samples = Vec::with_capacity(sampler.max_spp() *
(block_dim.0 * block_dim.1) as usize);
let mut rng = match StdRng::new() {
Ok(r) => r,
Err(e) => {
println!("Failed to get StdRng, {}", e);
return;
}
};
// Grab a block from the queue and start working on it, submitting samples
// to the render target thread after each pixel
for b in block_queue.iter() {
sampler.select_block(b);
// While the sampler has samples left to take for this pixel, take some samples
while sampler.has_samples() {
// Get samples for a pixel and render them
sampler.get_samples(&mut sample_pos, &mut rng);
for s in &sample_pos[..] {
let mut ray = camera.generate_ray(s, 0.0);
if let Some(_) = instance.intersect(&mut ray) {
block_samples.push(ImageSample::new(s.0, s.1, Colorf::broadcast(1.0)));
} else {
// For correct filtering we also MUST set a background color of some kind
// if we miss, otherwise the pixel weights will be wrong and we'll see object
// fringes and artifacts at object boundaries w/ nothing. Try removing this
// line and rendering again.
block_samples.push(ImageSample::new(s.0, s.1, Colorf::black()));
}
}
}
// We write all samples at once so we don't need to lock the render target tiles as often
rt.write(&block_samples, sampler.get_region());
block_samples.clear();
}
// Get the sRGB8 render buffer from the floating point framebuffer and save it
let img = rt.get_render();
match image::save_buffer("plane.png",
&img[..],
dim.0 as u32,
dim.1 as u32,
image::RGB(8)) {
Ok(_) => {}
Err(e) => println!("Error saving image, {}", e),<|fim▁hole|><|fim▁end|> | };
} |
<|file_name|>models.go<|end_file_name|><|fim▁begin|>//go:build go1.9
// +build go1.9
<|fim▁hole|>// Licensed under the MIT License. See License.txt in the project root for license information.
// This code was auto-generated by:
// github.com/Azure/azure-sdk-for-go/eng/tools/profileBuilder
package eventhubapi
import original "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub/eventhubapi"
type ClustersClientAPI = original.ClustersClientAPI
type ConfigurationClientAPI = original.ConfigurationClientAPI
type ConsumerGroupsClientAPI = original.ConsumerGroupsClientAPI
type DisasterRecoveryConfigsClientAPI = original.DisasterRecoveryConfigsClientAPI
type EventHubsClientAPI = original.EventHubsClientAPI
type NamespacesClientAPI = original.NamespacesClientAPI
type OperationsClientAPI = original.OperationsClientAPI
type PrivateEndpointConnectionsClientAPI = original.PrivateEndpointConnectionsClientAPI
type PrivateLinkResourcesClientAPI = original.PrivateLinkResourcesClientAPI
type RegionsClientAPI = original.RegionsClientAPI<|fim▁end|> | // Copyright (c) Microsoft Corporation. All rights reserved. |
<|file_name|>chain_decay.py<|end_file_name|><|fim▁begin|># coding=utf-8
<|fim▁hole|>#
# This file is part of ReaDDy.
#
# ReaDDy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
"""
Created on 21.06.17
@author: clonker
"""
from __future__ import print_function
from contextlib import closing
import numpy as np
import readdy._internal.readdybinding.api.top as top
import readdy._internal.readdybinding.common.io as io
import readdy._internal.readdybinding.common as common
from readdy._internal.readdybinding.api import KernelProvider
from readdy._internal.readdybinding.api import ParticleTypeFlavor
from readdy._internal.readdybinding.api import Simulation
from readdy.util import platform_utils
class ChainDecay(object):
def __init__(self, kernel, time_step):
self.kernel_provider = KernelProvider.get()
self.kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
self.kernel = kernel
self.time_step = time_step
def _get_split_reaction(self):
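        # Topology reaction that removes one random edge from the chain, so
        # the graph falls apart into two child topologies; its rate grows
        # with chain length (n / 5).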
def reaction_function(topology):
recipe = top.Recipe(topology)
if topology.get_n_particles() > 1:
edge = np.random.randint(0, topology.get_n_particles() - 1)
recipe.remove_edge(edge, edge + 1)
return recipe
def rate_function(topology):
if topology.get_n_particles() > 1:
return float(topology.get_n_particles()) / 5.
else:
return .0
fun1 = top.ReactionFunction(reaction_function)
fun2 = top.RateFunction(rate_function)
reaction = top.TopologyReaction(fun1, fun2)
reaction.roll_back_if_invalid()
reaction.create_child_topologies_after_reaction()
return reaction
def _get_decay_reaction(self, typeidb):
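        # Topology reaction that turns a chain shrunk to a single particle
        # into a plain B particle; the 1/dt rate makes the conversion
        # effectively immediate once the condition holds.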
def reaction_function(topology):
recipe = top.Recipe(topology)
if topology.get_n_particles() == 1:
recipe.change_particle_type(0, typeidb)
return recipe
def rate_function(topology):
return 1./self.time_step if topology.get_n_particles() == 1 else 0
fun1, fun2 = top.ReactionFunction(reaction_function), top.RateFunction(rate_function)
reaction = top.TopologyReaction(fun1, fun2)
reaction.raise_if_invalid()
reaction.create_child_topologies_after_reaction()
return reaction
def run(self, time_steps, out_file):
sim = Simulation()
sim.set_kernel(self.kernel)
sim.box_size = common.Vec(60, 20, 20)
sim.periodic_boundary = [True, True, True]
typeid_b = sim.register_particle_type("B", 1.0, 1.0, ParticleTypeFlavor.NORMAL)
sim.register_particle_type("Topology A", .5, .5, ParticleTypeFlavor.TOPOLOGY)
sim.register_potential_harmonic_repulsion("Topology A", "Topology A", 10)
sim.register_potential_harmonic_repulsion("Topology A", "B", 10)
sim.register_potential_harmonic_repulsion("B", "B", 10)
sim.configure_topology_bond_potential("Topology A", "Topology A", 10, 1.)
sim.configure_topology_angle_potential("Topology A", "Topology A", "Topology A", 10, np.pi)
# sim.configure_topology_dihedral_potential("Topology A", "Topology A", "Topology A", "Topology A", 1, 1, -np.pi)
n_elements = 50.
particles = [sim.create_topology_particle("Topology A", common.Vec(-25. + i, 0, 0))
for i in range(int(n_elements))]
topology = sim.add_topology(particles)
for i in range(int(n_elements - 1)):
topology.get_graph().add_edge(i, i + 1)
topology.add_reaction(self._get_decay_reaction(typeid_b))
topology.add_reaction(self._get_split_reaction())
traj_handle = sim.register_observable_flat_trajectory(1)
with closing(io.File(out_file, io.FileAction.CREATE, io.FileFlag.OVERWRITE)) as f:
traj_handle.enable_write_to_file(f, u"", 50)
sim.run_scheme_readdy(True)\
.evaluate_topology_reactions()\
.write_config_to_file(f)\
.configure_and_run(time_steps, self.time_step)
print("currently %s topologies" % len(sim.current_topologies()))
if __name__ == '__main__':
sim = ChainDecay("SingleCPU", .001)
sim.run(10000, "out.h5")<|fim▁end|> | # Copyright © 2016 Computational Molecular Biology Group,
# Freie Universität Berlin (GER) |
<|file_name|>shape_detection_with_callback.cpp<|end_file_name|><|fim▁begin|>#if defined (_MSC_VER) && !defined (_WIN64)
#pragma warning(disable:4244) // boost::number_distance::distance()
// converts 64 to 32 bits integers
#endif
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/IO/read_xyz_points.h>
#include <CGAL/Point_with_normal_3.h>
#include <CGAL/property_map.h>
#include <CGAL/Shape_detection_3.h>
#include <CGAL/Timer.h>
#include <iostream>
#include <fstream>
// Type declarations
typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
typedef std::pair<Kernel::Point_3, Kernel::Vector_3> Point_with_normal;
typedef std::vector<Point_with_normal> Pwn_vector;
typedef CGAL::First_of_pair_property_map<Point_with_normal> Point_map;
typedef CGAL::Second_of_pair_property_map<Point_with_normal> Normal_map;
// In Shape_detection_traits the basic types, i.e., Point and Vector types
// as well as iterator type and property maps, are defined.
typedef CGAL::Shape_detection_3::Shape_detection_traits<|fim▁hole|>typedef CGAL::Shape_detection_3::Plane<Traits> Plane;
struct Timeout_callback
{
mutable int nb;
mutable CGAL::Timer timer;
const double limit;
Timeout_callback(double limit) : nb(0), limit(limit)
{
timer.start();
}
bool operator()(double advancement) const
{
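    // The detection algorithm invokes this functor periodically with the
    // current advancement in [0, 1]; returning false interrupts the run.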
// Avoid calling time() at every single iteration, which could
// impact performances very badly
++ nb;
if (nb % 1000 != 0)
return true;
    // If the limit is reached, interrupt the algorithm
if (timer.time() > limit)
{
std::cerr << "Algorithm takes too long, exiting ("
<< 100. * advancement << "% done)" << std::endl;
return false;
}
return true;
}
};
// This program both works for RANSAC and Region Growing
template <typename ShapeDetection>
int run(const char* filename)
{
Pwn_vector points;
std::ifstream stream(filename);
if (!stream ||
!CGAL::read_xyz_points(stream,
std::back_inserter(points),
CGAL::parameters::point_map(Point_map()).
normal_map(Normal_map())))
{
std::cerr << "Error: cannot read file cube.pwn" << std::endl;
return EXIT_FAILURE;
}
ShapeDetection shape_detection;
shape_detection.set_input(points);
shape_detection.template add_shape_factory<Plane>();
// Create callback that interrupts the algorithm if it takes more than half a second
Timeout_callback timeout_callback(0.5);
// Detects registered shapes with default parameters.
shape_detection.detect(typename ShapeDetection::Parameters(),
timeout_callback);
return EXIT_SUCCESS;
}
int main (int argc, char** argv)
{
if (argc > 1 && std::string(argv[1]) == "-r")
{
std::cout << "Efficient RANSAC" << std::endl;
return run<Efficient_ransac> ((argc > 2) ? argv[2] : "data/cube.pwn");
}
std::cout << "Region Growing" << std::endl;
return run<Region_growing> ((argc > 1) ? argv[1] : "data/cube.pwn");
}<|fim▁end|> | <Kernel, Pwn_vector, Point_map, Normal_map> Traits;
typedef CGAL::Shape_detection_3::Efficient_RANSAC<Traits> Efficient_ransac;
typedef CGAL::Shape_detection_3::Region_growing<Traits> Region_growing; |
<|file_name|>backend_test.go<|end_file_name|><|fim▁begin|>package cert
import (
"context"
"crypto/rand"
"net/http"
"golang.org/x/net/http2"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"io"
"io/ioutil"
"math/big"
"net"
"os"
"reflect"
"testing"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/vault/builtin/logical/pki"
"github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/mapstructure"
)
const (
serverCertPath = "test-fixtures/cacert.pem"
serverKeyPath = "test-fixtures/cakey.pem"
serverCAPath = serverCertPath
testRootCACertPath1 = "test-fixtures/testcacert1.pem"
testRootCAKeyPath1 = "test-fixtures/testcakey1.pem"
testCertPath1 = "test-fixtures/testissuedcert4.pem"
testKeyPath1 = "test-fixtures/testissuedkey4.pem"
testIssuedCertCRL = "test-fixtures/issuedcertcrl"
testRootCACertPath2 = "test-fixtures/testcacert2.pem"
testRootCAKeyPath2 = "test-fixtures/testcakey2.pem"
testRootCertCRL = "test-fixtures/cacert2crl"
)
// Unlike testConnState, this method does not use the same 'tls.Config' objects for
// both dialing and listening. Instead, it runs the server without specifying its CA.
// But the client, presents the CA cert of the server to trust the server.
// The client can present a cert and key which is completely independent of server's CA.
// The connection state returned will contain the certificate presented by the client.
func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) (tls.ConnectionState, error) {
serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
if err != nil {
return tls.ConnectionState{}, err
}
// Prepare the listener configuration with server's key pair
listenConf := &tls.Config{
Certificates: []tls.Certificate{serverKeyPair},
ClientAuth: tls.RequestClientCert,
}
clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
if err != nil {
return tls.ConnectionState{}, err
}
// Load the CA cert required by the client to authenticate the server.
rootConfig := &rootcerts.Config{
CAFile: serverCAPath,
}
serverCAs, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
return tls.ConnectionState{}, err
}
// Prepare the dial configuration that the client uses to establish the connection.
dialConf := &tls.Config{
Certificates: []tls.Certificate{clientKeyPair},
RootCAs: serverCAs,
}
// Start the server.
list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
if err != nil {
return tls.ConnectionState{}, err
}
defer list.Close()
// Accept connections.
serverErrors := make(chan error, 1)
connState := make(chan tls.ConnectionState)
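    // The server goroutine reports its TLS connection state over this channel
    // once the client's "ping" has been read and the handshake has completed.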
go func() {
defer close(connState)
serverConn, err := list.Accept()
if err != nil {
serverErrors <- err
close(serverErrors)
return
}
defer serverConn.Close()
// Read the ping
buf := make([]byte, 4)
_, err = serverConn.Read(buf)
if (err != nil) && (err != io.EOF) {
serverErrors <- err
close(serverErrors)
return
}
close(serverErrors)
connState <- serverConn.(*tls.Conn).ConnectionState()
}()
// Establish a connection from the client side and write a few bytes.
clientErrors := make(chan error, 1)
go func() {
addr := list.Addr().String()
conn, err := tls.Dial("tcp", addr, dialConf)
if err != nil {
clientErrors <- err
close(clientErrors)
return
}
defer conn.Close()
// Write ping
_, err = conn.Write([]byte("ping"))
if err != nil {
clientErrors <- err
}
close(clientErrors)
}()
for err = range clientErrors {
if err != nil {
return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err)
}
}
for err = range serverErrors {
if err != nil {
return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err)
}
}
// Grab the current state
return <-connState, nil
}
func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) {
// Enable PKI secret engine and Cert auth method
coreConfig := &vault.CoreConfig{
DisableMlock: true,
DisableCache: true,
Logger: log.NewNullLogger(),
CredentialBackends: map[string]logical.Factory{
"cert": Factory,
},
LogicalBackends: map[string]logical.Factory{
"pki": pki.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
vault.TestWaitActive(t, cores[0].Core)
client := cores[0].Client
var err error
// Mount /pki as a root CA
err = client.Sys().Mount("pki", &api.MountInput{
Type: "pki",
Config: api.MountConfigInput{
DefaultLeaseTTL: "16h",
MaxLeaseTTL: "32h",
},
})
if err != nil {
t.Fatal(err)
}
// Set the cluster's certificate as the root CA in /pki
pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM)
_, err = client.Logical().Write("pki/config/ca", map[string]interface{}{
"pem_bundle": pemBundleRootCA,
})
if err != nil {
t.Fatal(err)
}
// Mount /pki2 to operate as an intermediate CA
err = client.Sys().Mount("pki2", &api.MountInput{
Type: "pki",
Config: api.MountConfigInput{
DefaultLeaseTTL: "16h",
MaxLeaseTTL: "32h",
},
})
if err != nil {
t.Fatal(err)
}
// Create a CSR for the intermediate CA
secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil)
if err != nil {
t.Fatal(err)
}
intermediateCSR := secret.Data["csr"].(string)
// Sign the intermediate CSR using /pki
secret, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{
"permitted_dns_domains": ".myvault.com",
"csr": intermediateCSR,
})
if err != nil {
t.Fatal(err)
}
intermediateCertPEM := secret.Data["certificate"].(string)
// Configure the intermediate cert as the CA in /pki2
_, err = client.Logical().Write("pki2/intermediate/set-signed", map[string]interface{}{
"certificate": intermediateCertPEM,
})
if err != nil {
t.Fatal(err)
}
// Create a role on the intermediate CA mount
_, err = client.Logical().Write("pki2/roles/myvault-dot-com", map[string]interface{}{
"allowed_domains": "myvault.com",
"allow_subdomains": "true",
"max_ttl": "5m",
})
if err != nil {
t.Fatal(err)
}
// Issue a leaf cert using the intermediate CA
secret, err = client.Logical().Write("pki2/issue/myvault-dot-com", map[string]interface{}{
"common_name": "cert.myvault.com",
"format": "pem",
"ip_sans": "127.0.0.1",
})
if err != nil {
t.Fatal(err)
}
leafCertPEM := secret.Data["certificate"].(string)
leafCertKeyPEM := secret.Data["private_key"].(string)
// Enable the cert auth method
err = client.Sys().EnableAuthWithOptions("cert", &api.EnableAuthOptions{
Type: "cert",
})
if err != nil {
t.Fatal(err)
}
// Set the intermediate CA cert as a trusted certificate in the backend
_, err = client.Logical().Write("auth/cert/certs/myvault-dot-com", map[string]interface{}{
"display_name": "myvault.com",
"policies": "default",
"certificate": intermediateCertPEM,
})
if err != nil {
t.Fatal(err)
}
// Create temporary files for CA cert, client cert and client cert key.
// This is used to configure TLS in the api client.
caCertFile, err := ioutil.TempFile("", "caCert")
if err != nil {
t.Fatal(err)
}
defer os.Remove(caCertFile.Name())
if _, err := caCertFile.Write([]byte(cluster.CACertPEM)); err != nil {
t.Fatal(err)
}
if err := caCertFile.Close(); err != nil {
t.Fatal(err)
}
leafCertFile, err := ioutil.TempFile("", "leafCert")
if err != nil {
t.Fatal(err)
}
defer os.Remove(leafCertFile.Name())
if _, err := leafCertFile.Write([]byte(leafCertPEM)); err != nil {
t.Fatal(err)
}
if err := leafCertFile.Close(); err != nil {
t.Fatal(err)
}
leafCertKeyFile, err := ioutil.TempFile("", "leafCertKey")
if err != nil {
t.Fatal(err)
}
defer os.Remove(leafCertKeyFile.Name())
if _, err := leafCertKeyFile.Write([]byte(leafCertKeyPEM)); err != nil {
t.Fatal(err)
}
if err := leafCertKeyFile.Close(); err != nil {
t.Fatal(err)
}
    // This function is copied from NewTestCluster, with the modification
    // to reconfigure the TLS on the api client with the leaf certificate
    // generated above.
getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client {
transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = tlsConfig.Clone()
if err := http2.ConfigureTransport(transport); err != nil {
t.Fatal(err)
}
client := &http.Client{
Transport: transport,
CheckRedirect: func(*http.Request, []*http.Request) error {
// This can of course be overridden per-test by using its own client
return fmt.Errorf("redirects not allowed in these tests")
},
}
config := api.DefaultConfig()
if config.Error != nil {
t.Fatal(config.Error)
}
config.Address = fmt.Sprintf("https://127.0.0.1:%d", port)
config.HttpClient = client
// Set the above issued certificates as the client certificates
config.ConfigureTLS(&api.TLSConfig{
CACert: caCertFile.Name(),
ClientCert: leafCertFile.Name(),
ClientKey: leafCertKeyFile.Name(),
})
apiClient, err := api.NewClient(config)
if err != nil {
t.Fatal(err)
}
return apiClient<|fim▁hole|> // Create a new api client with the desired TLS configuration
newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig)
    // Log in over TLS using the leaf certificate issued by the intermediate CA
secret, err = newClient.Logical().Write("auth/cert/login", map[string]interface{}{
"name": "myvault-dot-com",
})
if err != nil {
t.Fatal(err)
}
if secret.Auth == nil || secret.Auth.ClientToken == "" {
t.Fatalf("expected a successful authentication")
}
}
func TestBackend_NonCAExpiry(t *testing.T) {
var resp *logical.Response
var err error
// Create a self-signed certificate and issue a leaf certificate using the
// CA cert
template := &x509.Certificate{
SerialNumber: big.NewInt(1234),
Subject: pkix.Name{
CommonName: "localhost",
Organization: []string{"hashicorp"},
OrganizationalUnit: []string{"vault"},
},
BasicConstraintsValid: true,
NotBefore: time.Now().Add(-30 * time.Second),
NotAfter: time.Now().Add(50 * time.Second),
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
}
// Set IP SAN
parsedIP := net.ParseIP("127.0.0.1")
if parsedIP == nil {
t.Fatalf("failed to create parsed IP")
}
template.IPAddresses = []net.IP{parsedIP}
// Private key for CA cert
caPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
// Marshalling to be able to create PEM file
caPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(caPrivateKey)
caPublicKey := &caPrivateKey.PublicKey
template.IsCA = true
caCertBytes, err := x509.CreateCertificate(rand.Reader, template, template, caPublicKey, caPrivateKey)
if err != nil {
t.Fatal(err)
}
caCert, err := x509.ParseCertificate(caCertBytes)
if err != nil {
t.Fatal(err)
}
parsedCaBundle := &certutil.ParsedCertBundle{
Certificate: caCert,
CertificateBytes: caCertBytes,
PrivateKeyBytes: caPrivateKeyBytes,
PrivateKeyType: certutil.RSAPrivateKey,
}
caCertBundle, err := parsedCaBundle.ToCertBundle()
if err != nil {
t.Fatal(err)
}
caCertFile, err := ioutil.TempFile("", "caCert")
if err != nil {
t.Fatal(err)
}
defer os.Remove(caCertFile.Name())
if _, err := caCertFile.Write([]byte(caCertBundle.Certificate)); err != nil {
t.Fatal(err)
}
if err := caCertFile.Close(); err != nil {
t.Fatal(err)
}
caKeyFile, err := ioutil.TempFile("", "caKey")
if err != nil {
t.Fatal(err)
}
defer os.Remove(caKeyFile.Name())
if _, err := caKeyFile.Write([]byte(caCertBundle.PrivateKey)); err != nil {
t.Fatal(err)
}
if err := caKeyFile.Close(); err != nil {
t.Fatal(err)
}
// Prepare template for non-CA cert
template.IsCA = false
template.SerialNumber = big.NewInt(5678)
template.KeyUsage = x509.KeyUsage(x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign)
issuedPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
issuedPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(issuedPrivateKey)
issuedPublicKey := &issuedPrivateKey.PublicKey
// Keep a short certificate lifetime so logins can be tested both when
    // the cert is valid and after it has expired
template.NotBefore = time.Now().Add(-2 * time.Second)
template.NotAfter = time.Now().Add(3 * time.Second)
issuedCertBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, issuedPublicKey, caPrivateKey)
if err != nil {
t.Fatal(err)
}
issuedCert, err := x509.ParseCertificate(issuedCertBytes)
if err != nil {
t.Fatal(err)
}
parsedIssuedBundle := &certutil.ParsedCertBundle{
Certificate: issuedCert,
CertificateBytes: issuedCertBytes,
PrivateKeyBytes: issuedPrivateKeyBytes,
PrivateKeyType: certutil.RSAPrivateKey,
}
issuedCertBundle, err := parsedIssuedBundle.ToCertBundle()
if err != nil {
t.Fatal(err)
}
issuedCertFile, err := ioutil.TempFile("", "issuedCert")
if err != nil {
t.Fatal(err)
}
defer os.Remove(issuedCertFile.Name())
if _, err := issuedCertFile.Write([]byte(issuedCertBundle.Certificate)); err != nil {
t.Fatal(err)
}
if err := issuedCertFile.Close(); err != nil {
t.Fatal(err)
}
issuedKeyFile, err := ioutil.TempFile("", "issuedKey")
if err != nil {
t.Fatal(err)
}
defer os.Remove(issuedKeyFile.Name())
if _, err := issuedKeyFile.Write([]byte(issuedCertBundle.PrivateKey)); err != nil {
t.Fatal(err)
}
if err := issuedKeyFile.Close(); err != nil {
t.Fatal(err)
}
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
// Register the Non-CA certificate of the client key pair
certData := map[string]interface{}{
"certificate": issuedCertBundle.Certificate,
"policies": "abc",
"display_name": "cert1",
"ttl": 10000,
}
certReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "certs/cert1",
Storage: storage,
Data: certData,
}
resp, err = b.HandleRequest(context.Background(), certReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Create connection state using the certificates generated
connState, err := connectionState(caCertFile.Name(), caCertFile.Name(), caKeyFile.Name(), issuedCertFile.Name(), issuedKeyFile.Name())
if err != nil {
t.Fatalf("error testing connection state:%v", err)
}
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "login",
Connection: &logical.Connection{
ConnState: &connState,
},
}
// Login when the certificate is still valid. Login should succeed.
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Wait until the certificate expires
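    // (NotAfter was set to now+3s when the cert was created, so 5 seconds
    // is comfortably past expiry.)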
time.Sleep(5 * time.Second)
// Login attempt after certificate expiry should fail
resp, err = b.HandleRequest(context.Background(), loginReq)
if err == nil {
t.Fatalf("expected error due to expired certificate")
}
}
func TestBackend_RegisteredNonCA_CRL(t *testing.T) {
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
nonCACert, err := ioutil.ReadFile(testCertPath1)
if err != nil {
t.Fatal(err)
}
// Register the Non-CA certificate of the client key pair
certData := map[string]interface{}{
"certificate": nonCACert,
"policies": "abc",
"display_name": "cert1",
"ttl": 10000,
}
certReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "certs/cert1",
Storage: storage,
Data: certData,
}
resp, err := b.HandleRequest(context.Background(), certReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Connection state is presenting the client Non-CA cert and its key.
// This is exactly what is registered at the backend.
connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
if err != nil {
t.Fatalf("error testing connection state:%v", err)
}
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "login",
Connection: &logical.Connection{
ConnState: &connState,
},
}
// Login should succeed.
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Register a CRL containing the issued client certificate used above.
issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
if err != nil {
t.Fatal(err)
}
crlData := map[string]interface{}{
"crl": issuedCRL,
}
crlReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "crls/issuedcrl",
Data: crlData,
}
resp, err = b.HandleRequest(context.Background(), crlReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Attempt login with the same connection state but with the CRL registered
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.IsError() {
t.Fatalf("expected failure due to revoked certificate")
}
}
func TestBackend_CRLs(t *testing.T) {
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
clientCA1, err := ioutil.ReadFile(testRootCACertPath1)
if err != nil {
t.Fatal(err)
}
// Register the CA certificate of the client key pair
certData := map[string]interface{}{
"certificate": clientCA1,
"policies": "abc",
"display_name": "cert1",
"ttl": 10000,
}
certReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "certs/cert1",
Storage: storage,
Data: certData,
}
resp, err := b.HandleRequest(context.Background(), certReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Connection state is presenting the client CA cert and its key.
// This is exactly what is registered at the backend.
connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1)
if err != nil {
t.Fatalf("error testing connection state:%v", err)
}
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "login",
Connection: &logical.Connection{
ConnState: &connState,
},
}
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Now, without changing the registered client CA cert, present from
// the client side, a cert issued using the registered CA.
connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
loginReq.Connection.ConnState = &connState
// Attempt login with the updated connection
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Register a CRL containing the issued client certificate used above.
issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
if err != nil {
t.Fatal(err)
}
crlData := map[string]interface{}{
"crl": issuedCRL,
}
crlReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "crls/issuedcrl",
Data: crlData,
}
resp, err = b.HandleRequest(context.Background(), crlReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Attempt login with the revoked certificate.
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.IsError() {
t.Fatalf("expected failure due to revoked certificate")
}
// Register a different client CA certificate.
clientCA2, err := ioutil.ReadFile(testRootCACertPath2)
if err != nil {
t.Fatal(err)
}
certData["certificate"] = clientCA2
resp, err = b.HandleRequest(context.Background(), certReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Test login using a different client CA cert pair.
connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2)
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
loginReq.Connection.ConnState = &connState
// Attempt login with the updated connection
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Register a CRL containing the root CA certificate used above.
rootCRL, err := ioutil.ReadFile(testRootCertCRL)
if err != nil {
t.Fatal(err)
}
crlData["crl"] = rootCRL
resp, err = b.HandleRequest(context.Background(), crlReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
// Attempt login with the same connection state but with the CRL registered
resp, err = b.HandleRequest(context.Background(), loginReq)
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.IsError() {
t.Fatalf("expected failure due to revoked certificate")
}
}
func testFactory(t *testing.T) logical.Backend {
b, err := Factory(context.Background(), &logical.BackendConfig{
System: &logical.StaticSystemView{
DefaultLeaseTTLVal: 1000 * time.Second,
MaxLeaseTTLVal: 1800 * time.Second,
},
StorageView: &logical.InmemStorage{},
})
if err != nil {
t.Fatalf("error: %s", err)
}
return b
}
// Test the certificates being registered to the backend
func TestBackend_CertWrites(t *testing.T) {
// CA cert
ca1, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
// Non CA Cert
ca2, err := ioutil.ReadFile("test-fixtures/keys/cert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
// Non CA cert without TLS web client authentication
ca3, err := ioutil.ReadFile("test-fixtures/noclientauthcert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
tc := logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "aaa", ca1, "foo", allowed{}, false),
testAccStepCert(t, "bbb", ca2, "foo", allowed{}, false),
testAccStepCert(t, "ccc", ca3, "foo", allowed{}, true),
},
}
tc.Steps = append(tc.Steps, testAccStepListCerts(t, []string{"aaa", "bbb"})...)
logicaltest.Test(t, tc)
}
// Test a client trusted by a CA
func TestBackend_basic_CA(t *testing.T) {
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{}, false),
testAccStepLogin(t, connState),
testAccStepCertLease(t, "web", ca, "foo"),
testAccStepCertTTL(t, "web", ca, "foo"),
testAccStepLogin(t, connState),
testAccStepCertMaxTTL(t, "web", ca, "foo"),
testAccStepLogin(t, connState),
testAccStepCertNoLease(t, "web", ca, "foo"),
testAccStepLoginDefaultLease(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "*.example.com"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "*.invalid.com"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
// Test CRL behavior
func TestBackend_Basic_CRLs(t *testing.T) {
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
crl, err := ioutil.ReadFile("test-fixtures/root/root.crl")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCertNoLease(t, "web", ca, "foo"),
testAccStepLoginDefaultLease(t, connState),
testAccStepAddCRL(t, crl, connState),
testAccStepReadCRL(t, connState),
testAccStepLoginInvalid(t, connState),
testAccStepDeleteCRL(t, connState),
testAccStepLoginDefaultLease(t, connState),
},
})
}
// Test a self-signed client (root CA) that is trusted
func TestBackend_basic_singleCert(t *testing.T) {
connState, err := testConnState("test-fixtures/root/rootcacert.pem",
"test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.4:invalid"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
func TestBackend_common_name_singleCert(t *testing.T) {
connState, err := testConnState("test-fixtures/root/rootcacert.pem",
"test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{common_names: "example.com"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{common_names: "invalid"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.4:invalid"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
// Test a self-signed client with custom ext (root CA) that is trusted
func TestBackend_ext_singleCert(t *testing.T) {
connState, err := testConnState(
"test-fixtures/root/rootcawextcert.pem",
"test-fixtures/root/rootcawextkey.pem",
"test-fixtures/root/rootcacert.pem",
)
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:A UTF8String Extension"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.45:*"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:,2.1.1.2:*"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:A UTF8String Extension"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "1.2.3.45:*"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:A UTF8String Extension"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "1.2.3.45:*"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
// Test a self-signed client with URI alt names (root CA) that is trusted
func TestBackend_dns_singleCert(t *testing.T) {
connState, err := testConnState(
"test-fixtures/root/rootcawdnscert.pem",
"test-fixtures/root/rootcawdnskey.pem",
"test-fixtures/root/rootcacert.pem",
)
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{dns: "*ample.com"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{dns: "notincert.com"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{dns: "abc"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{dns: "*.example.com"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
// Test a self-signed client with URI alt names (root CA) that is trusted
func TestBackend_email_singleCert(t *testing.T) {
connState, err := testConnState(
"test-fixtures/root/rootcawemailcert.pem",
"test-fixtures/root/rootcawemailkey.pem",
"test-fixtures/root/rootcacert.pem",
)
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{emails: "[email protected]"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{emails: "*@example.com"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{emails: "[email protected]"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{emails: "abc"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{emails: "*.example.com"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
// Test a self-signed client with URI alt names (root CA) that is trusted
func TestBackend_uri_singleCert(t *testing.T) {
connState, err := testConnState(
"test-fixtures/root/rootcawuricert.pem",
"test-fixtures/root/rootcawurikey.pem",
"test-fixtures/root/rootcacert.pem",
)
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/*"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/host"}, false),
testAccStepLogin(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/invalid"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{uris: "abc"}, false),
testAccStepLoginInvalid(t, connState),
testAccStepCert(t, "web", ca, "foo", allowed{uris: "http://www.google.com"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
// Test against a collection of matching and non-matching rules
func TestBackend_mixed_constraints(t *testing.T) {
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepCert(t, "1unconstrained", ca, "foo", allowed{}, false),
testAccStepCert(t, "2matching", ca, "foo", allowed{names: "*.example.com,whatever"}, false),
testAccStepCert(t, "3invalid", ca, "foo", allowed{names: "invalid"}, false),
testAccStepLogin(t, connState),
// Assumes CertEntries are processed in alphabetical order (due to store.List), so we only match 2matching if 1unconstrained doesn't match
testAccStepLoginWithName(t, connState, "2matching"),
testAccStepLoginWithNameInvalid(t, connState, "3invalid"),
},
})
}
// Test an untrusted client
func TestBackend_untrusted(t *testing.T) {
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
testAccStepLoginInvalid(t, connState),
},
})
}
func TestBackend_validCIDR(t *testing.T) {
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
name := "web"
addCertReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
Data: map[string]interface{}{
"certificate": string(ca),
"policies": "foo",
"display_name": name,
"allowed_names": "",
"required_extensions": "",
"lease": 1000,
"bound_cidrs": []string{"127.0.0.1/32", "128.252.0.0/16"},
},
Storage: storage,
Connection: &logical.Connection{ConnState: &connState},
}
_, err = b.HandleRequest(context.Background(), addCertReq)
if err != nil {
t.Fatal(err)
}
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "login",
Unauthenticated: true,
Data: map[string]interface{}{
"name": name,
},
Storage: storage,
Connection: &logical.Connection{ConnState: &connState},
}
    // override the remote address with an IPv4 that is authorized
loginReq.Connection.RemoteAddr = "127.0.0.1/32"
_, err = b.HandleRequest(context.Background(), loginReq)
if err != nil {
t.Fatal(err.Error())
}
}
func TestBackend_invalidCIDR(t *testing.T) {
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
}
name := "web"
addCertReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
Data: map[string]interface{}{
"certificate": string(ca),
"policies": "foo",
"display_name": name,
"allowed_names": "",
"required_extensions": "",
"lease": 1000,
"bound_cidrs": []string{"127.0.0.1/32", "128.252.0.0/16"},
},
Storage: storage,
Connection: &logical.Connection{ConnState: &connState},
}
_, err = b.HandleRequest(context.Background(), addCertReq)
if err != nil {
t.Fatal(err)
}
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "login",
Unauthenticated: true,
Data: map[string]interface{}{
"name": name,
},
Storage: storage,
Connection: &logical.Connection{ConnState: &connState},
}
    // override the remote address with an IPv4 that isn't authorized
loginReq.Connection.RemoteAddr = "127.0.0.1/8"
_, err = b.HandleRequest(context.Background(), loginReq)
if err == nil {
t.Fatal("expected \"ERROR: permission denied\"")
}
}
func testAccStepAddCRL(t *testing.T, crl []byte, connState tls.ConnectionState) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "crls/test",
ConnState: &connState,
Data: map[string]interface{}{
"crl": crl,
},
}
}
func testAccStepReadCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "crls/test",
ConnState: &connState,
Check: func(resp *logical.Response) error {
crlInfo := CRLInfo{}
err := mapstructure.Decode(resp.Data, &crlInfo)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(crlInfo.Serials) != 1 {
t.Fatalf("bad: expected CRL with length 1, got %d", len(crlInfo.Serials))
}
if _, ok := crlInfo.Serials["637101449987587619778072672905061040630001617053"]; !ok {
t.Fatalf("bad: expected serial number not found in CRL")
}
return nil
},
}
}
func testAccStepDeleteCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.DeleteOperation,
Path: "crls/test",
ConnState: &connState,
}
}
func testAccStepLogin(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
return testAccStepLoginWithName(t, connState, "")
}
func testAccStepLoginWithName(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "login",
Unauthenticated: true,
ConnState: &connState,
Check: func(resp *logical.Response) error {
if resp.Auth.TTL != 1000*time.Second {
t.Fatalf("bad lease length: %#v", resp.Auth)
}
if certName != "" && resp.Auth.DisplayName != ("mnt-"+certName) {
t.Fatalf("matched the wrong cert: %#v", resp.Auth.DisplayName)
}
fn := logicaltest.TestCheckAuth([]string{"default", "foo"})
return fn(resp)
},
Data: map[string]interface{}{
"name": certName,
},
}
}
func testAccStepLoginDefaultLease(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "login",
Unauthenticated: true,
ConnState: &connState,
Check: func(resp *logical.Response) error {
if resp.Auth.TTL != 1000*time.Second {
t.Fatalf("bad lease length: %#v", resp.Auth)
}
fn := logicaltest.TestCheckAuth([]string{"default", "foo"})
return fn(resp)
},
}
}
func testAccStepLoginInvalid(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
return testAccStepLoginWithNameInvalid(t, connState, "")
}
func testAccStepLoginWithNameInvalid(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "login",
Unauthenticated: true,
ConnState: &connState,
Check: func(resp *logical.Response) error {
if resp.Auth != nil {
return fmt.Errorf("should not be authorized: %#v", resp)
}
return nil
},
Data: map[string]interface{}{
"name": certName,
},
ErrorOk: true,
}
}
func testAccStepListCerts(
t *testing.T, certs []string) []logicaltest.TestStep {
return []logicaltest.TestStep{
logicaltest.TestStep{
Operation: logical.ListOperation,
Path: "certs",
Check: func(resp *logical.Response) error {
if resp == nil {
return fmt.Errorf("nil response")
}
if resp.Data == nil {
return fmt.Errorf("nil data")
}
if resp.Data["keys"] == interface{}(nil) {
return fmt.Errorf("nil keys")
}
keys := resp.Data["keys"].([]string)
if !reflect.DeepEqual(keys, certs) {
return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs)
}
return nil
},
}, logicaltest.TestStep{
Operation: logical.ListOperation,
Path: "certs/",
Check: func(resp *logical.Response) error {
if resp == nil {
return fmt.Errorf("nil response")
}
if resp.Data == nil {
return fmt.Errorf("nil data")
}
if resp.Data["keys"] == interface{}(nil) {
return fmt.Errorf("nil keys")
}
keys := resp.Data["keys"].([]string)
if !reflect.DeepEqual(keys, certs) {
return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs)
}
return nil
},
},
}
}
type allowed struct {
	names string // allowed names in the certificate, matched against common name, DNS and email SANs [deprecated]
common_names string // allowed common names in the certificate
dns string // allowed dns names in the SAN extension of the certificate
emails string // allowed email names in SAN extension of the certificate
uris string // allowed uris in SAN extension of the certificate
ext string // required extensions in the certificate
}
func testAccStepCert(
t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
ErrorOk: expectError,
Data: map[string]interface{}{
"certificate": string(cert),
"policies": policies,
"display_name": name,
"allowed_names": testData.names,
"allowed_common_names": testData.common_names,
"allowed_dns_sans": testData.dns,
"allowed_email_sans": testData.emails,
"allowed_uri_sans": testData.uris,
"required_extensions": testData.ext,
"lease": 1000,
},
Check: func(resp *logical.Response) error {
if resp == nil && expectError {
return fmt.Errorf("expected error but received nil")
}
return nil
},
}
}
func testAccStepCertLease(
t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
Data: map[string]interface{}{
"certificate": string(cert),
"policies": policies,
"display_name": name,
"lease": 1000,
},
}
}
func testAccStepCertTTL(
t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
Data: map[string]interface{}{
"certificate": string(cert),
"policies": policies,
"display_name": name,
"ttl": "1000s",
},
}
}
func testAccStepCertMaxTTL(
t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
Data: map[string]interface{}{
"certificate": string(cert),
"policies": policies,
"display_name": name,
"ttl": "1000s",
"max_ttl": "1200s",
},
}
}
func testAccStepCertNoLease(
t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
Data: map[string]interface{}{
"certificate": string(cert),
"policies": policies,
"display_name": name,
},
}
}
func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, error) {
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return tls.ConnectionState{}, err
}
rootConfig := &rootcerts.Config{
CAFile: rootCertPath,
}
rootCAs, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
return tls.ConnectionState{}, err
}
listenConf := &tls.Config{
Certificates: []tls.Certificate{cert},
ClientAuth: tls.RequestClientCert,
InsecureSkipVerify: false,
RootCAs: rootCAs,
}
dialConf := listenConf.Clone()
// start a server
list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
if err != nil {
return tls.ConnectionState{}, err
}
defer list.Close()
// Accept connections.
serverErrors := make(chan error, 1)
connState := make(chan tls.ConnectionState)
go func() {
defer close(connState)
serverConn, err := list.Accept()
serverErrors <- err
if err != nil {
close(serverErrors)
return
}
defer serverConn.Close()
// Read the ping
buf := make([]byte, 4)
_, err = serverConn.Read(buf)
if (err != nil) && (err != io.EOF) {
serverErrors <- err
close(serverErrors)
return
} else {
// EOF is a reasonable error condition, so swallow it.
serverErrors <- nil
}
close(serverErrors)
connState <- serverConn.(*tls.Conn).ConnectionState()
}()
// Establish a connection from the client side and write a few bytes.
clientErrors := make(chan error, 1)
go func() {
addr := list.Addr().String()
conn, err := tls.Dial("tcp", addr, dialConf)
clientErrors <- err
if err != nil {
close(clientErrors)
return
}
defer conn.Close()
// Write ping
_, err = conn.Write([]byte("ping"))
clientErrors <- err
close(clientErrors)
}()
for err = range clientErrors {
if err != nil {
			return tls.ConnectionState{}, fmt.Errorf("error in client goroutine: %v", err)
}
}
for err = range serverErrors {
if err != nil {
			return tls.ConnectionState{}, fmt.Errorf("error in server goroutine: %v", err)
}
}
// Grab the current state
return <-connState, nil
}
func Test_Renew(t *testing.T) {
storage := &logical.InmemStorage{}
lb, err := Factory(context.Background(), &logical.BackendConfig{
System: &logical.StaticSystemView{
DefaultLeaseTTLVal: 300 * time.Second,
MaxLeaseTTLVal: 1800 * time.Second,
},
StorageView: storage,
})
if err != nil {
t.Fatalf("error: %s", err)
}
b := lb.(*backend)
connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("error testing connection state: %v", err)
}
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatal(err)
}
req := &logical.Request{
Connection: &logical.Connection{
ConnState: &connState,
},
Storage: storage,
Auth: &logical.Auth{},
}
fd := &framework.FieldData{
Raw: map[string]interface{}{
"name": "test",
"certificate": ca,
"policies": "foo,bar",
},
Schema: pathCerts(b).Fields,
}
resp, err := b.pathCertWrite(context.Background(), req, fd)
if err != nil {
t.Fatal(err)
}
empty_login_fd := &framework.FieldData{
Raw: map[string]interface{}{},
Schema: pathLogin(b).Fields,
}
resp, err = b.pathLogin(context.Background(), req, empty_login_fd)
if err != nil {
t.Fatal(err)
}
if resp.IsError() {
t.Fatalf("got error: %#v", *resp)
}
req.Auth.InternalData = resp.Auth.InternalData
req.Auth.Metadata = resp.Auth.Metadata
req.Auth.LeaseOptions = resp.Auth.LeaseOptions
req.Auth.Policies = resp.Auth.Policies
req.Auth.Period = resp.Auth.Period
// Normal renewal
resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("got nil response from renew")
}
if resp.IsError() {
t.Fatalf("got error: %#v", *resp)
}
// Change the policies -- this should fail
fd.Raw["policies"] = "zip,zap"
resp, err = b.pathCertWrite(context.Background(), req, fd)
if err != nil {
t.Fatal(err)
}
resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd)
if err == nil {
t.Fatal("expected error")
}
// Put the policies back, this should be okay
fd.Raw["policies"] = "bar,foo"
resp, err = b.pathCertWrite(context.Background(), req, fd)
if err != nil {
t.Fatal(err)
}
resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("got nil response from renew")
}
if resp.IsError() {
t.Fatalf("got error: %#v", *resp)
}
// Add period value to cert entry
period := 350 * time.Second
fd.Raw["period"] = period.String()
resp, err = b.pathCertWrite(context.Background(), req, fd)
if err != nil {
t.Fatal(err)
}
resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("got nil response from renew")
}
if resp.IsError() {
t.Fatalf("got error: %#v", *resp)
}
if resp.Auth.Period != period {
t.Fatalf("expected a period value of %s in the response, got: %s", period, resp.Auth.Period)
}
// Delete CA, make sure we can't renew
resp, err = b.pathCertDelete(context.Background(), req, fd)
if err != nil {
t.Fatal(err)
}
resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("got nil response from renew")
}
if !resp.IsError() {
t.Fatal("expected error")
}
}<|fim▁end|> | }
|
<|file_name|>test.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
from . import transform
from ..util.yaml import load_yaml
logger = logging.getLogger(__name__)
class TestTask(transform.TransformTask):
"""
A task implementing a Gecko test.
"""
@classmethod
def get_inputs(cls, kind, path, config, params, loaded_tasks):
# the kind on which this one depends
if len(config.get('kind-dependencies', [])) != 1:
raise Exception("TestTask kinds must have exactly one item in kind-dependencies")
dep_kind = config['kind-dependencies'][0]
# get build tasks, keyed by build platform
builds_by_platform = cls.get_builds_by_platform(dep_kind, loaded_tasks)
# get the test platforms for those build tasks
test_platforms_cfg = load_yaml(path, 'test-platforms.yml')
test_platforms = cls.get_test_platforms(test_platforms_cfg, builds_by_platform)
# expand the test sets for each of those platforms
test_sets_cfg = load_yaml(path, 'test-sets.yml')
test_platforms = cls.expand_tests(test_sets_cfg, test_platforms)
# load the test descriptions
test_descriptions = load_yaml(path, 'tests.yml')
# generate all tests for all test platforms
for test_platform_name, test_platform in test_platforms.iteritems():
for test_name in test_platform['test-names']:
test = copy.deepcopy(test_descriptions[test_name])
test['build-platform'] = test_platform['build-platform']
test['test-platform'] = test_platform_name
test['build-label'] = test_platform['build-label']
test['test-name'] = test_name
if test_platform['nightly']:
test.setdefault('attributes', {})['nightly'] = True
logger.debug("Generating tasks for test {} on platform {}".format(
test_name, test['test-platform']))
yield test
@classmethod
def get_builds_by_platform(cls, dep_kind, loaded_tasks):
"""Find the build tasks on which tests will depend, keyed by
platform/type. Returns a dictionary mapping build platform to task."""
builds_by_platform = {}
for task in loaded_tasks:
if task.kind != dep_kind:
continue
build_platform = task.attributes.get('build_platform')
build_type = task.attributes.get('build_type')
if not build_platform or not build_type:
continue
platform = "{}/{}".format(build_platform, build_type)
if platform in builds_by_platform:
raise Exception("multiple build jobs for " + platform)
builds_by_platform[platform] = task
return builds_by_platform
@classmethod
def get_test_platforms(cls, test_platforms_cfg, builds_by_platform):
"""Get the test platforms for which test tasks should be generated,
based on the available build platforms. Returns a dictionary mapping
test platform to {test-set, build-platform, build-label}."""
test_platforms = {}
for test_platform, cfg in test_platforms_cfg.iteritems():
build_platform = cfg['build-platform']
if build_platform not in builds_by_platform:
logger.warning(
"No build task with platform {}; ignoring test platform {}".format(
build_platform, test_platform))
continue
test_platforms[test_platform] = {
'nightly': builds_by_platform[build_platform].attributes.get('nightly', False),
'build-platform': build_platform,
'build-label': builds_by_platform[build_platform].label,
}
test_platforms[test_platform].update(cfg)
return test_platforms
@classmethod
def expand_tests(cls, test_sets_cfg, test_platforms):
"""Expand the test sets in `test_platforms` out to sets of test names.
Returns a dictionary like `get_test_platforms`, with an additional
`test-names` key for each test platform, containing a set of test
names."""
rv = {}
for test_platform, cfg in test_platforms.iteritems():
test_sets = cfg['test-sets']
            if not set(test_sets) <= set(test_sets_cfg):  # every requested set must be defined
raise Exception(
"Test sets {} for test platform {} are not defined".format(
', '.join(test_sets), test_platform))
test_names = set()
for test_set in test_sets:
test_names.update(test_sets_cfg[test_set])<|fim▁hole|> rv[test_platform]['test-names'] = test_names
return rv<|fim▁end|> | rv[test_platform] = cfg.copy() |
<|file_name|>basic.2.go<|end_file_name|><|fim▁begin|>package main
import "fmt"
import "math"
func main() {
fmt.Println("hello world")
fmt.Printf("%t\n", 1==2)
fmt.Printf("二进制:%b\n", 255)
fmt.Printf("八进制:%o\n", 255)
fmt.Printf("十六进制:%X\n", 255)
fmt.Printf("十进制:%d\n", 255)
fmt.Printf("浮点数:%f\n", math.Pi)
fmt.Printf("字符串:%s\n", "hello world")
{
		// declare and initialize a single variable
		var x int = 100
		var str string = "hello world"
		// declare and initialize several variables at once
		var i, j, k int = 1, 2, 3
		// the type can be omitted and inferred from the initializer
		var b = true // bool type
		y := 100 // short declaration, equivalent to var y int = 100 (x already exists above)
		fmt.Println(x, str, i, j, k, b, y) // use the variables so this block compiles<|fim▁hole|>
	// arrays
{
var a [5]int
fmt.Println("array a:", a)
a[1] = 10
a[3] = 30
fmt.Println("assign:", a)
fmt.Println("len:", len(a))
b := [5]int{1, 2, 3, 4, 5}
fmt.Println("init:", b)
var c [2][3]int
for i := 0; i < 2; i++ {
for j := 0; j < 3; j++ {
c[i][j] = i + j
}
}
fmt.Println("2d: ", c)
		a = [5]int{1, 2, 3, 4, 5} // plain assignment; := would redeclare a in this scope
		s := a[2:4] // a[2] and a[3], but not a[4]; b above is an array, so use a new slice variable
		fmt.Println(s)
		s = a[:4] // from a[0] up to, but not including, a[4]
		fmt.Println(s)
		s = a[2:] // from a[2] through the end, including a[2]
		fmt.Println(s)
}
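	// Note (added for illustration): slices are views onto the array's backing
	// storage, so writes through a slice are visible in the array, e.g.:
	//   t := a[1:3]
	//   t[0] = 99 // a[1] becomes 99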
{
		// if statements take no parentheses, but the braces are required
		x, num := 10, -5 // sample values for the examples below
		// plain if
		if x%2 == 0 {
			//...
		}
		// if - else
		if x%2 == 0 {
			// even ...
		} else {
			// odd ...
		}
		// multiple branches
		if num < 0 {
			// negative
		} else if num == 0 {
			// zero
		} else {
			// positive
		}
}
{
		// switch needs no break; a case can also match several values separated by commas
		i := 4 // sample value
		switch i {
case 1:
fmt.Println("one")
case 2:
fmt.Println("two")
case 3:
fmt.Println("three")
case 4,5,6:
fmt.Println("four, five, six")
default:
fmt.Println("invalid value!")
}
}
	// classic for loop: init; condition; post
	for i := 0; i < 10; i++ {
fmt.Println(i)
}
	// condition-only for loop, like a while loop
	i := 1
	for i < 10 {
fmt.Println(i)
i++
}
	// infinite for loop, equivalent to for(;;)
	i = 1 // plain assignment; := would redeclare i
for {
if i>10 {
break
}
i++
}
}<|fim▁end|> |
const s string = "hello world"
const pi float32 = 3.1415926
} |
<|file_name|>Rectangulo.java<|end_file_name|><|fim▁begin|>public class Rectangulo {
public int Base;
public int Altura;
    // Exercise completed with help from this page: http://diagramas-de-flujo.blogspot.com/2013/02/calcular-perimetro-rectangulo-Java.html
    // Here we take in the two variables we will use.
Rectangulo(int Base, int Altura)
{
this.Base = Base;
this.Altura = Altura;
}
    // As you can see, this getter returns the value of the base.
int getBase ()
{
return Base;
}
    // Here we assign a new value to the base.
void setBase (int Base)
{
this.Base = Base;
}
    // Likewise, this getter returns the value of the height.
int getAltura ()
{
return Altura;
}
    // Here we assign a new value to the height.
void setAltura (int Altura)
{
this.Altura = Altura;
}
    // The perimeter comes from a simple formula, an addition and a multiplication: 2 * (base + height).
int getPerimetro()
{
return 2*(Base+Altura);<|fim▁hole|> //aqui solo se hace un calculo matematico como la multiplicacion
int getArea()
{
return Base*Altura;
}
}<|fim▁end|> | } |
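// A minimal usage sketch for the class above (an illustrative addition, not
// part of the original exercise):
//   Rectangulo r = new Rectangulo(4, 3);
//   System.out.println(r.getPerimetro()); // 2 * (4 + 3) = 14
//   System.out.println(r.getArea());      // 4 * 3 = 12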
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|>// MIT Licensed
exports.GridFS = require('./lib/GridFS');
exports.GridStream = require('./lib/GridStream');<|fim▁end|> | // GridFS
// Copyright(c) 2013 Siddharth Mahendraker <[email protected]> |
<|file_name|>topic_stats_auth_decorator.cc<|end_file_name|><|fim▁begin|><|fim▁hole|>//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated by the Codegen C++ plugin.
// If you make any local changes, they will be lost.
// source: google/cloud/pubsublite/v1/topic_stats.proto
#include "google/cloud/pubsublite/internal/topic_stats_auth_decorator.h"
#include <google/cloud/pubsublite/v1/topic_stats.grpc.pb.h>
#include <memory>
namespace google {
namespace cloud {
namespace pubsublite_internal {
GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_BEGIN
TopicStatsServiceAuth::TopicStatsServiceAuth(
std::shared_ptr<google::cloud::internal::GrpcAuthenticationStrategy> auth,
std::shared_ptr<TopicStatsServiceStub> child)
: auth_(std::move(auth)), child_(std::move(child)) {}
StatusOr<google::cloud::pubsublite::v1::ComputeMessageStatsResponse>
TopicStatsServiceAuth::ComputeMessageStats(
grpc::ClientContext& context,
google::cloud::pubsublite::v1::ComputeMessageStatsRequest const& request) {
auto status = auth_->ConfigureContext(context);
if (!status.ok()) return status;
return child_->ComputeMessageStats(context, request);
}
StatusOr<google::cloud::pubsublite::v1::ComputeHeadCursorResponse>
TopicStatsServiceAuth::ComputeHeadCursor(
grpc::ClientContext& context,
google::cloud::pubsublite::v1::ComputeHeadCursorRequest const& request) {
auto status = auth_->ConfigureContext(context);
if (!status.ok()) return status;
return child_->ComputeHeadCursor(context, request);
}
StatusOr<google::cloud::pubsublite::v1::ComputeTimeCursorResponse>
TopicStatsServiceAuth::ComputeTimeCursor(
grpc::ClientContext& context,
google::cloud::pubsublite::v1::ComputeTimeCursorRequest const& request) {
auto status = auth_->ConfigureContext(context);
if (!status.ok()) return status;
return child_->ComputeTimeCursor(context, request);
}
GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_END
} // namespace pubsublite_internal
} // namespace cloud
} // namespace google<|fim▁end|> | // Copyright 2022 Google LLC |
<|file_name|>modulefs.py<|end_file_name|><|fim▁begin|>import json
import os
import os.path
import types
from django.conf import settings
from models import FSExpirations
if settings.DJFS['type'] == 'osfs':
from fs.osfs import OSFS
elif settings.DJFS['type'] == 's3fs':
from fs.s3fs import S3FS
from boto.s3.connection import S3Connection
from boto.s3.key import Key
s3conn = S3Connection()
else: <|fim▁hole|>def get_filesystem(namespace):
''' Returns a pyfilesystem for static module storage.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
'''
if settings.DJFS['type'] == 'osfs':
return get_osfs( namespace )
elif settings.DJFS['type'] == 's3fs':
return get_s3fs( namespace )
else:
raise AttributeError("Bad filesystem: "+str(settings.DJFS['type']))
def expire_objects():
''' Remove all obsolete objects from the file systems. Untested. '''
objects = sorted(FSExpirations.expired(), key=lambda x:x.module)
fs = None
module = None
for o in objects:
if module != o.module:
module = o.module
fs = get_filesystem(module)
if fs.exists(o.filename):
fs.remove(o.filename)
o.delete()
def patch_fs(fs, namespace, url_method):
''' Patch a filesystem object to add two methods:
get_url returns a URL for a resource stored on that filesystem. It takes two parameters:
filename: Which resource
timeout: How long that resource is available for
expire sets a timeout on how long the system should keep the resource. It takes four parameters:
filename: Which resource
seconds: How long we will keep it
days: (optional) More user-friendly if a while
expires: (optional) boolean; if set to False, we keep the resource forever.
Without calling this method, we provide no guarantees on how long resources will stick around.
'''
    def expire(self, filename, seconds, days=0, expires=True):
''' Set the lifespan of a file on the filesystem.
filename: Name of file
expire: False means the file will never be removed
seconds and days give time to expiration.
'''
        FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires=expires)
fs.expire = types.MethodType(expire, fs)
fs.get_url = types.MethodType(url_method, fs)
return fs
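# A minimal usage sketch for the patched filesystem (illustrative only: the
# namespace and filename are made up, and it assumes settings.DJFS points at a
# configured 'osfs' or 's3fs' backend):
#
#   fs = get_filesystem('my_module')
#   with fs.open('report.txt', 'wb') as f:
#       f.write('hello')
#   url = fs.get_url('report.txt', timeout=60)
#   fs.expire('report.txt', seconds=0, days=7)  # keep the file for a week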
def get_osfs(namespace):
''' Helper method to get_filesystem for a file system on disk '''
full_path = os.path.join(settings.DJFS['directory_root'], namespace)
if not os.path.exists(full_path):
os.makedirs(full_path)
osfs = OSFS(full_path)
osfs = patch_fs(osfs, namespace, lambda self, filename, timeout=0:os.path.join(settings.DJFS['url_root'], namespace, filename))
return osfs
def get_s3fs(namespace):
''' Helper method to get_filesystem for a file system on S3 '''
fullpath = namespace
if 'prefix' in settings.DJFS:
fullpath = os.path.join(settings.DJFS['prefix'], fullpath)
s3fs = S3FS(settings.DJFS['bucket'], fullpath)
def get_s3_url(self, filename, timeout=60):
global s3conn
try:
            return s3conn.generate_url(timeout, 'GET', bucket=settings.DJFS['bucket'], key=filename)
        except Exception:  # The cached connection may have timed out; rebuild it and retry once
            s3conn = S3Connection()
            return s3conn.generate_url(timeout, 'GET', bucket=settings.DJFS['bucket'], key=filename)
s3fs = patch_fs(s3fs, namespace, get_s3_url)
return s3fs<|fim▁end|> | raise AttributeError("Bad filesystem: "+str(settings.DJFS['type']))
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
mod test_cache_fill;
mod test_reader;<|fim▁hole|>mod test_writer;
mod util;<|fim▁end|> | |
<|file_name|>capa_base.py<|end_file_name|><|fim▁begin|>"""Implements basics of Capa, including class CapaModule."""
import cgi
import copy
import datetime
import hashlib
import json
import logging
import os
import traceback
import struct
import sys
import re
# We don't want to force a dependency on datadog, so make the import conditional
try:
import dogstats_wrapper as dog_stats_api
except ImportError:
dog_stats_api = None
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.responsetypes import StudentInputError, \
ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
from .progress import Progress
from xmodule.exceptions import NotFoundError
from xblock.fields import Scope, String, Boolean, Dict, Integer, Float
from .fields import Timedelta, Date
from django.utils.timezone import UTC
from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER
from django.conf import settings
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20
# Never produce more than this many different seeds, no matter what.
MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id):
"""
Pick a randomization bin for the problem given the user's seed and a problem id.
We do this because we only want e.g. 20 randomizations of a problem to make analytics
interesting. To avoid having sets of students that always get the same problems,
we'll combine the system's per-student seed with the problem id in picking the bin.
"""
r_hash = hashlib.sha1()
r_hash.update(str(seed))
r_hash.update(str(problem_id))
# get the first few digits of the hash, convert to an int, then mod.
return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS
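# Illustrative only: the returned bin is always in range(NUM_RANDOMIZATION_BINS),
# so at most 20 distinct problem variants are generated; the arguments below are
# made up:
#
#   randomization_bin(42, 'i4x://org/course/problem/abc')  # some int in 0..19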
class Randomization(String):
"""
Define a field to store how to randomize a problem.
"""
def from_json(self, value):
if value in ("", "true"):
return RANDOMIZATION.ALWAYS
elif value == "false":
return RANDOMIZATION.PER_STUDENT
return value
to_json = from_json
class ComplexEncoder(json.JSONEncoder):
"""
Extend the JSON encoder to correctly handle complex numbers
"""
def default(self, obj):
"""
Print a nicely formatted complex number, or default to the JSON encoder
"""
if isinstance(obj, complex):
return u"{real:.7g}{imag:+.7g}*j".format(real=obj.real, imag=obj.imag)
return json.JSONEncoder.default(self, obj)
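# Illustrative only: encoding a complex value with the encoder above yields a
# compact "a+b*j" string:
#
#   json.dumps(complex(1, 2), cls=ComplexEncoder)  # -> '"1+2*j"'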
class CapaFields(object):
"""
Define the possible fields for a Capa problem
"""
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizontal navigation at the top of the page."),
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default=_("Blank Advanced Problem")
)
attempts = Integer(
help=_("Number of attempts taken by the student on this problem"),
default=0,
scope=Scope.user_state)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("Defines the number of times a student can try to answer this problem. "
"If the value is not set, infinite attempts are allowed."),
values={"min": 0}, scope=Scope.settings
)
due = Date(help=_("Date that this problem is due by"), scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of time after the due date that submissions will be accepted"),
scope=Scope.settings
)
showanswer = String(
display_name=_("Show Answer"),
help=_("Defines when to show the answer to the problem. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default=SHOWANSWER.FINISHED,
values=[
{"display_name": _("Always"), "value": SHOWANSWER.ALWAYS},
{"display_name": _("Answered"), "value": SHOWANSWER.ANSWERED},
{"display_name": _("Attempted"), "value": SHOWANSWER.ATTEMPTED},
{"display_name": _("Closed"), "value": SHOWANSWER.CLOSED},
{"display_name": _("Finished"), "value": SHOWANSWER.FINISHED},
{"display_name": _("Correct or Past Due"), "value": SHOWANSWER.CORRECT_OR_PAST_DUE},
{"display_name": _("Past Due"), "value": SHOWANSWER.PAST_DUE},
{"display_name": _("Never"), "value": SHOWANSWER.NEVER}]
)
force_save_button = Boolean(
help=_("Whether to force the save button to appear on the page"),
scope=Scope.settings,
default=False
)
reset_key = "DEFAULT_SHOW_RESET_BUTTON"
default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
show_reset_button = Boolean(
display_name=_("Show Reset Button"),
help=_("Determines whether a 'Reset' button is shown so the user may reset their answer. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default=default_reset_button
)
rerandomize = Randomization(
display_name=_("Randomization"),
help=_(
'Defines when to randomize the variables specified in the associated Python script. '
'For problems that do not randomize values, specify \"Never\". '
),
default=RANDOMIZATION.NEVER,
scope=Scope.settings,
values=[
{"display_name": _("Always"), "value": RANDOMIZATION.ALWAYS},
{"display_name": _("On Reset"), "value": RANDOMIZATION.ONRESET},
{"display_name": _("Never"), "value": RANDOMIZATION.NEVER},
{"display_name": _("Per Student"), "value": RANDOMIZATION.PER_STUDENT}
]
)
data = String(help=_("XML data for the problem"), scope=Scope.content, default="<problem></problem>")
correct_map = Dict(help=_("Dictionary with the correctness of current student answers"),
scope=Scope.user_state, default={})
input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state)
student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state)
done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state)
seed = Integer(help=_("Random seed for this student"), scope=Scope.user_state)
last_submission_time = Date(help=_("Last submission time"), scope=Scope.user_state)
submission_wait_seconds = Integer(
display_name=_("Timer Between Attempts"),
help=_("Seconds a student must wait between submissions for a problem with multiple attempts."),
scope=Scope.settings,
default=0)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. "
"If the value is not set, each response field in the problem is worth one point."),
values={"min": 0, "step": .1},
scope=Scope.settings
)
markdown = String(help=_("Markdown source of this module"), default=None, scope=Scope.settings)
source_code = String(
help=_("Source code for LaTeX and Word problems. This feature is not well-supported."),
scope=Scope.settings
)
text_customization = Dict(
help=_("String customization substitutions for particular locations"),
scope=Scope.settings
# TODO: someday it should be possible to not duplicate this definition here
# and in inheritance.py
)
use_latex_compiler = Boolean(
help=_("Enable LaTeX templates?"),
default=False,
scope=Scope.settings
)
matlab_api_key = String(
display_name=_("Matlab API key"),
help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
"This key is granted for exclusive use by this course for the specified duration. "
"Please do not share the API key with other courses and notify MathWorks immediately "
"if you believe the key is exposed or compromised. To obtain a key for your course, "
"or to report an issue, please contact [email protected]"),
scope=Scope.settings
)
class CapaMixin(CapaFields):
"""
Core logic for Capa Problem, which can be used by XModules or XBlocks.
"""
def __init__(self, *args, **kwargs):
super(CapaMixin, self).__init__(*args, **kwargs)
due_date = self.due
if self.graceperiod is not None and due_date:
self.close_date = due_date + self.graceperiod
else:
self.close_date = due_date
if self.seed is None:
self.choose_new_seed()
# Need the problem location in openendedresponse to send out. Adding
# it to the system here seems like the least clunky way to get it
# there.
self.runtime.set('location', self.location.to_deprecated_string())
try:
# TODO (vshnayder): move as much as possible of this work and error
# checking to descriptor load time
self.lcp = self.new_lcp(self.get_state_for_lcp())
# At this point, we need to persist the randomization seed
# so that when the problem is re-loaded (to check/view/save)
# it stays the same.
# However, we do not want to write to the database
# every time the module is loaded.
# So we set the seed ONLY when there is not one set already
if self.seed is None:
self.seed = self.lcp.seed
except Exception as err: # pylint: disable=broad-except
msg = u'cannot create LoncapaProblem {loc}: {err}'.format(
loc=self.location.to_deprecated_string(), err=err)
# TODO (vshnayder): do modules need error handlers too?
# We shouldn't be switching on DEBUG.
if self.runtime.DEBUG:
log.warning(msg)
# TODO (vshnayder): This logic should be general, not here--and may
# want to preserve the data instead of replacing it.
# e.g. in the CMS
msg = u'<p>{msg}</p>'.format(msg=cgi.escape(msg))
msg += u'<p><pre>{tb}</pre></p>'.format(
# just the traceback, no message - it is already present above
tb=cgi.escape(
u''.join(<|fim▁hole|> )
# create a dummy problem with error message instead of failing
problem_text = (u'<problem><text><span class="inline-error">'
u'Problem {url} has an error:</span>{msg}</text></problem>'.format(
url=self.location.to_deprecated_string(),
msg=msg)
)
self.lcp = self.new_lcp(self.get_state_for_lcp(), text=problem_text)
else:
# add extra info and raise
raise Exception(msg), None, sys.exc_info()[2]
self.set_state_from_lcp()
assert self.seed is not None
def choose_new_seed(self):
"""
Choose a new seed.
"""
if self.rerandomize == RANDOMIZATION.NEVER:
self.seed = 1
elif self.rerandomize == RANDOMIZATION.PER_STUDENT and hasattr(self.runtime, 'seed'):
# see comment on randomization_bin
self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))
else:
self.seed = struct.unpack('i', os.urandom(4))[0]
# So that sandboxed code execution can be cached, but still have an interesting
# number of possibilities, cap the number of different random seeds.
self.seed %= MAX_RANDOMIZATION_BINS
def new_lcp(self, state, text=None):
"""
Generate a new Loncapa Problem
"""
if text is None:
text = self.data
capa_system = LoncapaSystem(
ajax_url=self.runtime.ajax_url,
anonymous_student_id=self.runtime.anonymous_student_id,
cache=self.runtime.cache,
can_execute_unsafe_code=self.runtime.can_execute_unsafe_code,
get_python_lib_zip=self.runtime.get_python_lib_zip,
DEBUG=self.runtime.DEBUG,
filestore=self.runtime.filestore,
i18n=self.runtime.service(self, "i18n"),
node_path=self.runtime.node_path,
render_template=self.runtime.render_template,
seed=self.runtime.seed, # Why do we do this if we have self.seed?
STATIC_URL=self.runtime.STATIC_URL,
xqueue=self.runtime.xqueue,
matlab_api_key=self.matlab_api_key
)
return LoncapaProblem(
problem_text=text,
id=self.location.html_id(),
state=state,
seed=self.seed,
capa_system=capa_system,
capa_module=self, # njp
)
def get_state_for_lcp(self):
"""
Give a dictionary holding the state of the module
"""
return {
'done': self.done,
'correct_map': self.correct_map,
'student_answers': self.student_answers,
'input_state': self.input_state,
'seed': self.seed,
}
def set_state_from_lcp(self):
"""
Set the module's state from the settings in `self.lcp`
"""
lcp_state = self.lcp.get_state()
self.done = lcp_state['done']
self.correct_map = lcp_state['correct_map']
self.input_state = lcp_state['input_state']
self.student_answers = lcp_state['student_answers']
self.seed = lcp_state['seed']
def set_last_submission_time(self):
"""
Set the module's last submission time (when the problem was checked)
"""
self.last_submission_time = datetime.datetime.now(UTC())
def get_score(self):
"""
Access the problem's score
"""
return self.lcp.get_score()
def max_score(self):
"""
Access the problem's max score
"""
return self.lcp.get_max_score()
def get_progress(self):
"""
For now, just return score / max_score
"""
score_dict = self.get_score()
score = score_dict['score']
total = score_dict['total']
if total > 0:
if self.weight is not None:
# Progress objects expect total > 0
if self.weight == 0:
return None
# scale score and total by weight/total:
score = score * self.weight / total
total = self.weight
try:
return Progress(score, total)
except (TypeError, ValueError):
log.exception("Got bad progress")
return None
return None
def get_html(self):
"""
Return some html with data about the module
"""
progress = self.get_progress()
return self.runtime.render_template('problem_ajax.html', {
'element_id': self.location.html_id(),
'id': self.location.to_deprecated_string(),
'ajax_url': self.runtime.ajax_url,
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'content': self.get_problem_html(encapsulate=False),
})
def check_button_name(self):
"""
Determine the name for the "check" button.
Usually it is just "Check", but if this is the student's
final attempt, change the name to "Final Check".
The text can be customized by the text_customization setting.
"""
# The logic flow is a little odd so that _('xxx') strings can be found for
# translation while also running _() just once for each string.
_ = self.runtime.service(self, "i18n").ugettext
check = _('Check')
final_check = _('Final Check')
# Apply customizations if present
if 'custom_check' in self.text_customization:
check = _(self.text_customization.get('custom_check')) # pylint: disable=translation-of-non-string
if 'custom_final_check' in self.text_customization:
final_check = _(self.text_customization.get('custom_final_check')) # pylint: disable=translation-of-non-string
# TODO: need a way to get the customized words into the list of
# words to be translated
if self.max_attempts is not None and self.attempts >= self.max_attempts - 1:
return final_check
else:
return check
def check_button_checking_name(self):
"""
Return the "checking..." text for the "check" button.
After the user presses the "check" button, the button will briefly
display the value returned by this function until a response is
        received from the server.
The text can be customized by the text_customization setting.
"""
# Apply customizations if present
if 'custom_checking' in self.text_customization:
return self.text_customization.get('custom_checking')
_ = self.runtime.service(self, "i18n").ugettext
return _('Checking...')
def should_show_check_button(self):
"""
Return True/False to indicate whether to show the "Check" button.
"""
submitted_without_reset = (self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS)
# If the problem is closed (past due / too many attempts)
# then we do NOT show the "check" button
# Also, do not show the "check" button if we're waiting
# for the user to reset a randomized problem
if self.closed() or submitted_without_reset:
return False
else:
return True
def should_show_reset_button(self):
"""
Return True/False to indicate whether to show the "Reset" button.
"""
is_survey_question = (self.max_attempts == 0)
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the reset button.
if self.closed() and not is_survey_question:
return False
# Button only shows up for randomized problems if the question has been submitted
if self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET] and self.is_submitted():
return True
else:
# Do NOT show the button if the problem is correct
if self.is_correct():
return False
else:
return self.show_reset_button
def should_show_save_button(self):
"""
Return True/False to indicate whether to show the "Save" button.
"""
# If the user has forced the save button to display,
# then show it as long as the problem is not closed
# (past due / too many attempts)
if self.force_save_button:
return not self.closed()
else:
is_survey_question = (self.max_attempts == 0)
needs_reset = self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS
# If the student has unlimited attempts, and their answers
# are not randomized, then we do not need a save button
# because they can use the "Check" button without consequences.
#
# The consequences we want to avoid are:
# * Using up an attempt (if max_attempts is set)
# * Changing the current problem, and no longer being
# able to view it (if rerandomize is "always")
#
# In those cases. the if statement below is false,
# and the save button can still be displayed.
#
if self.max_attempts is None and self.rerandomize != RANDOMIZATION.ALWAYS:
return False
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the save button
# If we're waiting for the user to reset a randomized problem
# then do NOT show the save button
elif (self.closed() and not is_survey_question) or needs_reset:
return False
else:
return True
def handle_problem_html_error(self, err):
"""
Create a dummy problem to represent any errors.
Change our problem to a dummy problem containing a warning message to
display to users. Returns the HTML to show to users
`err` is the Exception encountered while rendering the problem HTML.
"""
log.exception(err.message)
# TODO (vshnayder): another switch on DEBUG.
if self.runtime.DEBUG:
msg = (
u'[courseware.capa.capa_module] <font size="+1" color="red">'
u'Failed to generate HTML for problem {url}</font>'.format(
url=cgi.escape(self.location.to_deprecated_string()))
)
msg += u'<p>Error:</p><p><pre>{msg}</pre></p>'.format(msg=cgi.escape(err.message))
msg += u'<p><pre>{tb}</pre></p>'.format(tb=cgi.escape(traceback.format_exc()))
html = msg
else:
# We're in non-debug mode, and possibly even in production. We want
# to avoid bricking of problem as much as possible
# Presumably, student submission has corrupted LoncapaProblem HTML.
# First, pull down all student answers
student_answers = self.lcp.student_answers
answer_ids = student_answers.keys()
# Some inputtypes, such as dynamath, have additional "hidden" state that
# is not exposed to the student. Keep those hidden
# TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id
hidden_state_keywords = ['dynamath']
for answer_id in answer_ids:
for hidden_state_keyword in hidden_state_keywords:
if answer_id.find(hidden_state_keyword) >= 0:
student_answers.pop(answer_id)
# Next, generate a fresh LoncapaProblem
self.lcp = self.new_lcp(None)
self.set_state_from_lcp()
# Prepend a scary warning to the student
_ = self.runtime.service(self, "i18n").ugettext
warning_msg = _("Warning: The problem has been reset to its initial state!")
warning = '<div class="capa_reset"> <h2> ' + warning_msg + '</h2>'
# Translators: Following this message, there will be a bulleted list of items.
warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:")
warning += warning_msg + '<ul>'
for student_answer in student_answers.values():
if student_answer != '':
warning += '<li>' + cgi.escape(student_answer) + '</li>'
warning_msg = _('If this error persists, please contact the course staff.')
warning += '</ul>' + warning_msg + '</div>'
html = warning
try:
html += self.lcp.get_html()
except Exception:
# Couldn't do it. Give up.
log.exception("Unable to generate html from LoncapaProblem")
raise
return html
def get_demand_hint(self, hint_index):
"""
        Return the requested demand hint as a dict for the client to render.
        hint_index: (int) index of the demand hint to show; the value wraps
        around if it exceeds the number of hints defined for the problem.
"""
demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint")
hint_index = hint_index % len(demand_hints)
_ = self.runtime.service(self, "i18n").ugettext
hint_element = demand_hints[hint_index]
hint_text = get_inner_html_from_xpath(hint_element)
if len(demand_hints) == 1:
prefix = _('Hint: ')
else:
# Translators: e.g. "Hint 1 of 3" meaning we are showing the first of three hints.
prefix = _('Hint ({hint_num} of {hints_count}): ').format(hint_num=hint_index + 1,
hints_count=len(demand_hints))
# Log this demand-hint request
event_info = dict()
event_info['module_id'] = self.location.to_deprecated_string()
event_info['hint_index'] = hint_index
event_info['hint_len'] = len(demand_hints)
event_info['hint_text'] = hint_text
self.runtime.publish(self, 'edx.problem.hint.demandhint_displayed', event_info)
# We report the index of this hint, the client works out what index to use to get the next hint
return {
'success': True,
'contents': prefix + hint_text,
'hint_index': hint_index
}
def get_problem_html(self, encapsulate=True):
"""
Return html for the problem.
Adds check, reset, save, and hint buttons as necessary based on the problem config
and state.
encapsulate: if True (the default) embed the html in a problem <div>
"""
try:
html = self.lcp.get_html()
# If we cannot construct the problem HTML,
# then generate an error message instead.
except Exception as err: # pylint: disable=broad-except
html = self.handle_problem_html_error(err)
html = self.remove_tags_from_html(html)
# The convention is to pass the name of the check button if we want
# to show a check button, and False otherwise This works because
# non-empty strings evaluate to True. We use the same convention
# for the "checking" state text.
if self.should_show_check_button():
check_button = self.check_button_name()
check_button_checking = self.check_button_checking_name()
else:
check_button = False
check_button_checking = False
content = {
'name': self.display_name_with_default,
'html': html,
'weight': self.weight,
}
# If demand hints are available, emit hint button and div.
demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint")
demand_hint_possible = len(demand_hints) > 0
context = {
'problem': content,
'id': self.location.to_deprecated_string(),
'check_button': check_button,
'check_button_checking': check_button_checking,
'reset_button': self.should_show_reset_button(),
'save_button': self.should_show_save_button(),
'answer_available': self.answer_available(),
'attempts_used': self.attempts,
'attempts_allowed': self.max_attempts,
'demand_hint_possible': demand_hint_possible
}
html = self.runtime.render_template('problem.html', context)
if encapsulate:
html = u'<div id="problem_{id}" class="problem" data-url="{ajax_url}">'.format(
id=self.location.html_id(), ajax_url=self.runtime.ajax_url
) + html + "</div>"
# Now do all the substitutions which the LMS module_render normally does, but
# we need to do here explicitly since we can get called for our HTML via AJAX
html = self.runtime.replace_urls(html)
if self.runtime.replace_course_urls:
html = self.runtime.replace_course_urls(html)
if self.runtime.replace_jump_to_id_urls:
html = self.runtime.replace_jump_to_id_urls(html)
return html
def remove_tags_from_html(self, html):
"""
The capa xml includes many tags such as <additional_answer> or <demandhint> which are not
meant to be part of the client html. We strip them all and return the resulting html.
"""
        tags = ['demandhint', 'choicehint', 'optionhint', 'stringhint', 'numerichint',
                'correcthint', 'regexphint', 'additional_answer', 'stringequalhint', 'compoundhint']
for tag in tags:
html = re.sub(r'<%s.*?>.*?</%s>' % (tag, tag), '', html, flags=re.DOTALL)
# Some of these tags span multiple lines
# Note: could probably speed this up by calling sub() once with a big regex
# vs. simply calling sub() many times as we have here.
return html
def hint_button(self, data):
"""
Hint button handler, returns new html using hint_index from the client.
"""
hint_index = int(data['hint_index'])
return self.get_demand_hint(hint_index)
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
return (self.close_date is not None and
datetime.datetime.now(UTC()) > self.close_date)
def closed(self):
"""
Is the student still allowed to submit answers?
"""
if self.max_attempts is not None and self.attempts >= self.max_attempts:
return True
if self.is_past_due():
return True
return False
def is_submitted(self):
"""
Used to decide to show or hide RESET or CHECK buttons.
Means that student submitted problem and nothing more.
Problem can be completely wrong.
Pressing RESET button makes this function to return False.
"""
# used by conditional module
return self.lcp.done
def is_attempted(self):
"""
Has the problem been attempted?
used by conditional module
"""
return self.attempts > 0
def is_correct(self):
"""
True iff full points
"""
score_dict = self.get_score()
return score_dict['score'] == score_dict['total']
def answer_available(self):
"""
Is the user allowed to see an answer?
"""
if self.showanswer == '':
return False
elif self.showanswer == SHOWANSWER.NEVER:
return False
elif self.runtime.user_is_staff:
# This is after the 'never' check because admins can see the answer
# unless the problem explicitly prevents it
return True
elif self.showanswer == SHOWANSWER.ATTEMPTED:
return self.attempts > 0
elif self.showanswer == SHOWANSWER.ANSWERED:
# NOTE: this is slightly different from 'attempted' -- resetting the problems
# makes lcp.done False, but leaves attempts unchanged.
return self.lcp.done
elif self.showanswer == SHOWANSWER.CLOSED:
return self.closed()
elif self.showanswer == SHOWANSWER.FINISHED:
return self.closed() or self.is_correct()
elif self.showanswer == SHOWANSWER.CORRECT_OR_PAST_DUE:
return self.is_correct() or self.is_past_due()
elif self.showanswer == SHOWANSWER.PAST_DUE:
return self.is_past_due()
elif self.showanswer == SHOWANSWER.ALWAYS:
return True
return False
def update_score(self, data):
"""
Delivers grading response (e.g. from asynchronous code checking) to
the capa problem, so its score can be updated
'data' must have a key 'response' which is a string that contains the
grader's response
No ajax return is needed. Return empty dict.
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
self.lcp.update_score(score_msg, queuekey)
self.set_state_from_lcp()
self.publish_grade()
return dict() # No AJAX return is needed
def handle_ungraded_response(self, data):
"""
Delivers a response from the XQueue to the capa problem
The score of the problem will not be updated
Args:
- data (dict) must contain keys:
queuekey - a key specific to this response
xqueue_body - the body of the response
Returns:
empty dictionary
No ajax return is needed, so an empty dict is returned
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
# pass along the xqueue message to the problem
self.lcp.ungraded_response(score_msg, queuekey)
self.set_state_from_lcp()
return dict()
def handle_input_ajax(self, data):
"""
Handle ajax calls meant for a particular input in the problem
Args:
- data (dict) - data that should be passed to the input
Returns:
- dict containing the response from the input
"""
response = self.lcp.handle_input_ajax(data)
# save any state changes that may occur
self.set_state_from_lcp()
return response
def get_answer(self, _data):
"""
For the "show answer" button.
Returns the answers: {'answers' : answers}
"""
event_info = dict()
event_info['problem_id'] = self.location.to_deprecated_string()
self.track_function_unmask('showanswer', event_info)
if not self.answer_available():
raise NotFoundError('Answer is not available')
else:
answers = self.lcp.get_question_answers()
self.set_state_from_lcp()
# answers (eg <solution>) may have embedded images
# but be careful, some problems are using non-string answer dicts
new_answers = dict()
for answer_id in answers:
try:
answer_content = self.runtime.replace_urls(answers[answer_id])
if self.runtime.replace_jump_to_id_urls:
answer_content = self.runtime.replace_jump_to_id_urls(answer_content)
new_answer = {answer_id: answer_content}
except TypeError:
log.debug(u'Unable to perform URL substitution on answers[%s]: %s',
answer_id, answers[answer_id])
new_answer = {answer_id: answers[answer_id]}
new_answers.update(new_answer)
return {'answers': new_answers}
# Figure out if we should move these to capa_problem?
def get_problem(self, _data):
"""
Return results of get_problem_html, as a simple dict for json-ing.
{ 'html': <the-html> }
Used if we want to reconfirm we have the right thing e.g. after
several AJAX calls.
"""
return {'html': self.get_problem_html(encapsulate=False)}
@staticmethod
def make_dict_of_responses(data):
"""
Make dictionary of student responses (aka "answers")
`data` is POST dictionary (webob.multidict.MultiDict).
The `data` dict has keys of the form 'x_y', which are mapped
to key 'y' in the returned dict. For example,
'input_1_2_3' would be mapped to '1_2_3' in the returned dict.
Some inputs always expect a list in the returned dict
(e.g. checkbox inputs). The convention is that
keys in the `data` dict that end with '[]' will always
have list values in the returned dict.
For example, if the `data` dict contains {'input_1[]': 'test' }
then the output dict would contain {'1': ['test'] }
(the value is a list).
Some other inputs such as ChoiceTextInput expect a dict of values in the returned
dict If the key ends with '{}' then we will assume that the value is a json
encoded dict and deserialize it.
For example, if the `data` dict contains {'input_1{}': '{"1_2_1": 1}'}
then the output dict would contain {'1': {"1_2_1": 1} }
(the value is a dictionary)
Raises an exception if:
-A key in the `data` dictionary does not contain at least one underscore
(e.g. "input" is invalid, but "input_1" is valid)
-Two keys end up with the same name in the returned dict.
(e.g. 'input_1' and 'input_1[]', which both get mapped to 'input_1'
in the returned dict)
"""
answers = dict()
# webob.multidict.MultiDict is a view of a list of tuples,
# so it will return a multi-value key once for each value.
# We only want to consider each key a single time, so we use set(data.keys())
for key in set(data.keys()):
# e.g. input_resistor_1 ==> resistor_1
_, _, name = key.partition('_')
# If key has no underscores, then partition
# will return (key, '', '')
# We detect this and raise an error
if not name:
raise ValueError(u"{key} must contain at least one underscore".format(key=key))
else:
# This allows for answers which require more than one value for
# the same form input (e.g. checkbox inputs). The convention is that
# if the name ends with '[]' (which looks like an array), then the
# answer will be an array.
# if the name ends with '{}' (Which looks like a dict),
# then the answer will be a dict
is_list_key = name.endswith('[]')
is_dict_key = name.endswith('{}')
name = name[:-2] if is_list_key or is_dict_key else name
if is_list_key:
val = data.getall(key)
elif is_dict_key:
try:
val = json.loads(data[key])
# If the submission wasn't deserializable, raise an error.
except(KeyError, ValueError):
raise ValueError(
u"Invalid submission: {val} for {key}".format(val=data[key], key=key)
)
else:
val = data[key]
# If the name already exists, then we don't want
# to override it. Raise an error instead
if name in answers:
raise ValueError(u"Key {name} already exists in answers dict".format(name=name))
else:
answers[name] = val
return answers
def publish_grade(self):
"""
Publishes the student's current grade to the system as an event
"""
score = self.lcp.get_score()
self.runtime.publish(
self,
'grade',
{
'value': score['score'],
'max_value': score['total'],
}
)
return {'grade': score['score'], 'max_grade': score['total']}
# pylint: disable=too-many-statements
def check_problem(self, data, override_time=False):
"""
Checks whether answers to a problem are correct
Returns a map of correct/incorrect answers:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string,
'contents' : html}
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
answers = self.make_dict_of_responses(data)
answers_without_files = convert_files_to_filenames(answers)
event_info['answers'] = answers_without_files
metric_name = u'capa.check_problem.{}'.format
# Can override current time
current_time = datetime.datetime.now(UTC())
if override_time is not False:
current_time = override_time
_ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:closed'])
raise NotFoundError(_("Problem is closed."))
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
event_info['failure'] = 'unreset'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:unreset'])
raise NotFoundError(_("Problem must be reset before it can be checked again."))
# Problem queued. Students must wait a specified waittime before they are allowed to submit
# IDEA: consider stealing code from below: pretty-print of seconds, cueing of time remaining
if self.lcp.is_queued():
prev_submit_time = self.lcp.get_recentmost_queuetime()
waittime_between_requests = self.runtime.xqueue['waittime']
if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
msg = _(u"You must wait at least {wait} seconds between submissions.").format(
wait=waittime_between_requests)
return {'success': msg, 'html': ''}
# Wait time between resets: check if is too soon for submission.
if self.last_submission_time is not None and self.submission_wait_seconds != 0:
if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
remaining_secs = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
msg = _(u'You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.').format(
wait_secs=self.pretty_print_seconds(self.submission_wait_seconds),
remaining_secs=self.pretty_print_seconds(remaining_secs))
return {
'success': msg,
'html': ''
}
try:
correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.set_last_submission_time()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
if self.runtime.DEBUG:
log.warning(
"StudentInputError in capa_module:problem_check",
exc_info=True
)
# Save the user's state before failing
self.set_state_from_lcp()
# If the user is a staff member, include
# the full exception, including traceback,
# in the response
if self.runtime.user_is_staff:
msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))
# Otherwise, display just an error message,
# without a stack trace
else:
# Translators: {msg} will be replaced with a problem's error message.
msg = _(u"Error: {msg}").format(msg=inst.message)
return {'success': msg}
except Exception as err:
# Save the user's state before failing
self.set_state_from_lcp()
if self.runtime.DEBUG:
msg = u"Error checking problem: {}".format(err.message)
msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
return {'success': msg}
raise
published_grade = self.publish_grade()
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['grade'] = published_grade['grade']
event_info['max_grade'] = published_grade['max_grade']
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
self.track_function_unmask('problem_check', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:success'])
if published_grade['max_grade'] != 0:
dog_stats_api.histogram(
metric_name('correct_pct'),
float(published_grade['grade']) / published_grade['max_grade'],
)
dog_stats_api.histogram(
metric_name('attempts'),
self.attempts,
)
# render problem into HTML
html = self.get_problem_html(encapsulate=False)
return {
'success': success,
'contents': html
}
# pylint: enable=too-many-statements
def track_function_unmask(self, title, event_info):
"""
All calls to runtime.track_function route through here so that the
choice names can be unmasked.
"""
        # Do the unmask translation on a copy of event_info,
# avoiding problems where an event_info is unmasked twice.
event_unmasked = copy.deepcopy(event_info)
self.unmask_event(event_unmasked)
self.runtime.publish(self, title, event_unmasked)
def unmask_event(self, event_info):
"""
Translates in-place the event_info to account for masking
and adds information about permutation options in force.
"""
# answers is like: {u'i4x-Stanford-CS99-problem-dada976e76f34c24bc8415039dee1300_2_1': u'mask_0'}
# Each response values has an answer_id which matches the key in answers.
for response in self.lcp.responders.values():
# Un-mask choice names in event_info for masked responses.
if response.has_mask():
# We don't assume much about the structure of event_info,
# but check for the existence of the things we need to un-mask.
# Look for answers/id
answer = event_info.get('answers', {}).get(response.answer_id)
if answer is not None:
event_info['answers'][response.answer_id] = response.unmask_name(answer)
# Look for state/student_answers/id
answer = event_info.get('state', {}).get('student_answers', {}).get(response.answer_id)
if answer is not None:
event_info['state']['student_answers'][response.answer_id] = response.unmask_name(answer)
# Look for old_state/student_answers/id -- parallel to the above case, happens on reset
answer = event_info.get('old_state', {}).get('student_answers', {}).get(response.answer_id)
if answer is not None:
event_info['old_state']['student_answers'][response.answer_id] = response.unmask_name(answer)
# Add 'permutation' to event_info for permuted responses.
permutation_option = None
if response.has_shuffle():
permutation_option = 'shuffle'
elif response.has_answerpool():
permutation_option = 'answerpool'
if permutation_option is not None:
# Add permutation record tuple: (one of:'shuffle'/'answerpool', [as-displayed list])
if 'permutation' not in event_info:
event_info['permutation'] = {}
event_info['permutation'][response.answer_id] = (permutation_option, response.unmask_order())
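        # Illustrative shape of the added record (hypothetical ids/choices, not
        # taken from a real event):
        #   event_info['permutation'] == {
        #       'i4x-..._2_1': ('shuffle', ['choice_3', 'choice_0', 'choice_1'])
        #   }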
def pretty_print_seconds(self, num_seconds):
"""
        Returns time duration nicely formatted, e.g. "3 minutes 4 seconds"
"""
# Here _ is the N variant ungettext that does pluralization with a 3-arg call
ungettext = self.runtime.service(self, "i18n").ungettext
hours = num_seconds // 3600
sub_hour = num_seconds % 3600
minutes = sub_hour // 60
seconds = sub_hour % 60
display = ""
if hours > 0:
display += ungettext("{num_hour} hour", "{num_hour} hours", hours).format(num_hour=hours)
if minutes > 0:
if display != "":
display += " "
            # Translators: "minute" refers to a minute of time
display += ungettext("{num_minute} minute", "{num_minute} minutes", minutes).format(num_minute=minutes)
# Taking care to make "0 seconds" instead of "" for 0 time
if seconds > 0 or (hours == 0 and minutes == 0):
if display != "":
display += " "
            # Translators: "second" refers to a second of time
display += ungettext("{num_second} second", "{num_second} seconds", seconds).format(num_second=seconds)
return display
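    # Illustrative outputs (assuming an identity translation):
    #   pretty_print_seconds(3661) -> "1 hour 1 minute 1 second"
    #   pretty_print_seconds(90)   -> "1 minute 30 seconds"
    #   pretty_print_seconds(0)    -> "0 seconds"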
def get_submission_metadata_safe(self, answers, correct_map):
"""
Ensures that no exceptions are thrown while generating input metadata summaries. Returns the
summary if it is successfully created, otherwise an empty dictionary.
"""
try:
return self.get_submission_metadata(answers, correct_map)
except Exception: # pylint: disable=broad-except
# NOTE: The above process requires deep inspection of capa structures that may break for some
# uncommon problem types. Ensure that it does not prevent answer submission in those
# cases. Any occurrences of errors in this block should be investigated and resolved.
log.exception('Unable to gather submission metadata, it will not be included in the event.')
return {}
def get_submission_metadata(self, answers, correct_map):
"""
Return a map of inputs to their corresponding summarized metadata.
Returns:
A map whose keys are a unique identifier for the input (in this case a capa input_id) and
whose values are:
            question (str): The prompt that was presented to the student. It corresponds to the
                label of the input.
            answer (mixed): The answer the student provided. This may be a rich structure,
                however it must be json serializable.
response_type (str): The XML tag of the capa response type.
input_type (str): The XML tag of the capa input type.
correct (bool): Whether or not the provided answer is correct. Will be an empty
string if correctness could not be determined.
variant (str): In some cases the same question can have several different variants.
This string should uniquely identify the variant of the question that was answered.
In the capa context this corresponds to the `seed`.
This function attempts to be very conservative and make very few assumptions about the structure
of the problem. If problem related metadata cannot be located it should be replaced with empty
strings ''.
"""
input_metadata = {}
for input_id, internal_answer in answers.iteritems():
answer_input = self.lcp.inputs.get(input_id)
if answer_input is None:
log.warning('Input id %s is not mapped to an input type.', input_id)
answer_response = None
for response, responder in self.lcp.responders.iteritems():
if input_id in responder.answer_ids:
answer_response = responder
if answer_response is None:
log.warning('Answer responder could not be found for input_id %s.', input_id)
user_visible_answer = internal_answer
if hasattr(answer_input, 'get_user_visible_answer'):
user_visible_answer = answer_input.get_user_visible_answer(internal_answer)
# If this problem has rerandomize enabled, then it will generate N variants of the
# question, one per unique seed value. In this case we would like to know which
# variant was selected. Ideally it would be nice to have the exact question that
# was presented to the user, with values interpolated etc, but that can be done
# later if necessary.
variant = ''
if self.rerandomize != RANDOMIZATION.NEVER:
variant = self.seed
is_correct = correct_map.is_correct(input_id)
if is_correct is None:
is_correct = ''
input_metadata[input_id] = {
'question': answer_input.response_data.get('label', ''),
'answer': user_visible_answer,
'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),
'input_type': getattr(answer_input, 'tag', ''),
'correct': is_correct,
'variant': variant,
}
# Add group_label in event data only if the responsetype contains multiple inputtypes
if answer_input.response_data.get('group_label'):
input_metadata[input_id]['group_label'] = answer_input.response_data.get('group_label')
return input_metadata
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
_ = self.runtime.service(self, "i18n").ugettext
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.track_function_unmask('problem_rescore_fail', event_info)
# Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
raise NotImplementedError(_("Problem's definition does not support rescoring."))
if not self.done:
event_info['failure'] = 'unanswered'
self.track_function_unmask('problem_rescore_fail', event_info)
raise NotFoundError(_("Problem must be answered before it can be graded again."))
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.track_function_unmask('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.track_function_unmask('problem_rescore_fail', event_info)
if self.runtime.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.track_function_unmask('problem_rescore', event_info)
return {'success': success}
def save_problem(self, data):
"""
Save the passed in answers.
Returns a dict { 'success' : bool, 'msg' : message }
The message is informative on success, and an error message on failure.
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
answers = self.make_dict_of_responses(data)
event_info['answers'] = answers
_ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed() and not self.max_attempts == 0:
event_info['failure'] = 'closed'
self.track_function_unmask('save_problem_fail', event_info)
return {
'success': False,
# Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
'msg': _("Problem is closed.")
}
# Problem submitted. Student should reset before saving
# again.
if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
event_info['failure'] = 'done'
self.track_function_unmask('save_problem_fail', event_info)
return {
'success': False,
'msg': _("Problem needs to be reset prior to save.")
}
self.lcp.student_answers = answers
self.set_state_from_lcp()
self.track_function_unmask('save_problem_success', event_info)
msg = _("Your answers have been saved.")
if not self.max_attempts == 0:
msg = _(
"Your answers have been saved but not graded. Click '{button_name}' to grade them."
).format(button_name=self.check_button_name())
return {
'success': True,
'msg': msg,
'html': self.get_problem_html(encapsulate=False),
}
def reset_problem(self, _data):
"""
        Changes problem state to unfinished -- removes student answers,
        and causes the problem to rerender itself if randomization is enabled.
Returns a dictionary of the form:
{'success': True/False,
'html': Problem HTML string }
If an error occurs, the dictionary will also have an
`error` key containing an error message.
"""
event_info = dict()
event_info['old_state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
_ = self.runtime.service(self, "i18n").ugettext
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('reset_problem_fail', event_info)
return {
'success': False,
# Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
'error': _("Problem is closed."),
}
if not self.is_submitted():
event_info['failure'] = 'not_done'
self.track_function_unmask('reset_problem_fail', event_info)
return {
'success': False,
# Translators: A student must "make an attempt" to solve the problem on the page before they can reset it.
'error': _("Refresh the page and make an attempt before resetting."),
}
if self.is_submitted() and self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET]:
# Reset random number generator seed.
self.choose_new_seed()
# Generate a new problem with either the previous seed or a new seed
self.lcp = self.new_lcp(None)
# Pull in the new problem seed
self.set_state_from_lcp()
# Grade may have changed, so publish new value
self.publish_grade()
event_info['new_state'] = self.lcp.get_state()
self.track_function_unmask('reset_problem', event_info)
return {
'success': True,
'html': self.get_problem_html(encapsulate=False),
}<|fim▁end|> | ['Traceback (most recent call last):\n'] +
traceback.format_tb(sys.exc_info()[2])
)
) |
<|file_name|>uCursor.py<|end_file_name|><|fim▁begin|>"""Module for testing cursor objects."""
import cx_Oracle
class TestCursor(BaseTestCase):
def testExecuteNoArgs(self):
"""test executing a statement without any arguments"""
result = self.cursor.execute(u"begin null; end;")
self.failUnlessEqual(result, None)
def testExecuteNoStatementWithArgs(self):
"""test executing a None statement with bind variables"""
self.failUnlessRaises(cx_Oracle.ProgrammingError, self.cursor.execute,
None, x = 5)
def testExecuteEmptyKeywordArgs(self):
"""test executing a statement with args and empty keyword args"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
args = [simpleVar]
kwArgs = {}
result = self.cursor.execute(u"begin :1 := 25; end;", args, **kwArgs)
self.failUnlessEqual(result, None)
self.failUnlessEqual(simpleVar.getvalue(), 25)
def testExecuteKeywordArgs(self):
"""test executing a statement with keyword arguments"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
result = self.cursor.execute(u"begin :value := 5; end;",
value = simpleVar)
self.failUnlessEqual(result, None)
self.failUnlessEqual(simpleVar.getvalue(), 5)
def testExecuteDictionaryArg(self):
"""test executing a statement with a dictionary argument"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
dictArg = { u"value" : simpleVar }
result = self.cursor.execute(u"begin :value := 10; end;", dictArg)
self.failUnlessEqual(result, None)
self.failUnlessEqual(simpleVar.getvalue(), 10)
def testExecuteMultipleMethod(self):
"""test executing a statement with both a dict arg and keyword args"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
dictArg = { u"value" : simpleVar }
self.failUnlessRaises(cx_Oracle.InterfaceError, self.cursor.execute,
u"begin :value := 15; end;", dictArg, value = simpleVar)
def testExecuteAndModifyArraySize(self):
"""test executing a statement and then changing the array size"""
self.cursor.execute(u"select IntCol from TestNumbers")
self.cursor.arraysize = 20
self.failUnlessEqual(len(self.cursor.fetchall()), 10)
def testCallProc(self):
"""test executing a stored procedure"""
var = self.cursor.var(cx_Oracle.NUMBER)
results = self.cursor.callproc(u"proc_Test", (u"hi", 5, var))
self.failUnlessEqual(results, [u"hi", 10, 2.0])
def testCallProcNoArgs(self):
"""test executing a stored procedure without any arguments"""
results = self.cursor.callproc(u"proc_TestNoArgs")
self.failUnlessEqual(results, [])
def testCallFunc(self):
"""test executing a stored function"""
results = self.cursor.callfunc(u"func_Test", cx_Oracle.NUMBER,
(u"hi", 5))
self.failUnlessEqual(results, 7)
def testCallFuncNoArgs(self):
"""test executing a stored function without any arguments"""
results = self.cursor.callfunc(u"func_TestNoArgs", cx_Oracle.NUMBER)
self.failUnlessEqual(results, 712)
def testExecuteManyByName(self):
"""test executing a statement multiple times (named args)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ { u"value" : n } for n in range(250) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:value)"
self.cursor.executemany(statement, rows)
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyByPosition(self):
"""test executing a statement multiple times (positional args)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ [n] for n in range(230) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:1)"
self.cursor.executemany(statement, rows)
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyWithPrepare(self):
"""test executing a statement multiple times (with prepare)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ [n] for n in range(225) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:1)"
self.cursor.prepare(statement)
self.cursor.executemany(None, rows)
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyWithRebind(self):
"""test executing a statement multiple times (with rebind)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ [n] for n in range(235) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:1)"
self.cursor.executemany(statement, rows[:50])
self.cursor.executemany(statement, rows[50:])
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
    def testExecuteManyWithException(self):
"""test executing a statement multiple times (with exception)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ { u"value" : n } for n in (1, 2, 3, 2, 5) ]
statement = u"insert into TestExecuteMany (IntCol) values (:value)"
self.failUnlessRaises(cx_Oracle.DatabaseError, self.cursor.executemany,
statement, rows)
self.failUnlessEqual(self.cursor.rowcount, 3)
def testPrepare(self):
"""test preparing a statement and executing it multiple times"""
self.failUnlessEqual(self.cursor.statement, None)
statement = u"begin :value := :value + 5; end;"
self.cursor.prepare(statement)
var = self.cursor.var(cx_Oracle.NUMBER)
self.failUnlessEqual(self.cursor.statement, statement)
var.setvalue(0, 2)
self.cursor.execute(None, value = var)
self.failUnlessEqual(var.getvalue(), 7)
self.cursor.execute(None, value = var)
self.failUnlessEqual(var.getvalue(), 12)
self.cursor.execute(u"begin :value2 := 3; end;", value2 = var)
self.failUnlessEqual(var.getvalue(), 3)
def testExceptionOnClose(self):
"confirm an exception is raised after closing a cursor"
self.cursor.close()
self.failUnlessRaises(cx_Oracle.InterfaceError, self.cursor.execute,
u"select 1 from dual")
def testIterators(self):
"""test iterators"""
self.cursor.execute(u"""
select IntCol
from TestNumbers
where IntCol between 1 and 3
order by IntCol""")
rows = []
for row in self.cursor:
rows.append(row[0])
self.failUnlessEqual(rows, [1, 2, 3])
def testIteratorsInterrupted(self):
"""test iterators (with intermediate execute)"""
self.cursor.execute(u"truncate table TestExecuteMany")
self.cursor.execute(u"""
select IntCol
from TestNumbers
where IntCol between 1 and 3
order by IntCol""")
testIter = iter(self.cursor)
value, = testIter.next()
self.cursor.execute(u"insert into TestExecuteMany (IntCol) values (1)")
self.failUnlessRaises(cx_Oracle.InterfaceError, testIter.next)
def testBindNames(self):<|fim▁hole|> self.failUnlessEqual(self.cursor.bindnames(), [])
self.cursor.prepare(u"begin :retval := :inval + 5; end;")
self.failUnlessEqual(self.cursor.bindnames(), ["RETVAL", "INVAL"])
self.cursor.prepare(u"begin :retval := :a * :a + :b * :b; end;")
self.failUnlessEqual(self.cursor.bindnames(), ["RETVAL", "A", "B"])
self.cursor.prepare(u"begin :a := :b + :c + :d + :e + :f + :g + " + \
":h + :i + :j + :k + :l; end;")
self.failUnlessEqual(self.cursor.bindnames(),
[u"A", u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"I", u"J",
u"K", u"L"])
def testBadPrepare(self):
"""test that subsequent executes succeed after bad prepare"""
self.failUnlessRaises(cx_Oracle.DatabaseError,
self.cursor.execute,
u"begin raise_application_error(-20000, 'this); end;")
self.cursor.execute(u"begin null; end;")
def testBadExecute(self):
"""test that subsequent fetches fail after bad execute"""
self.failUnlessRaises(cx_Oracle.DatabaseError,
self.cursor.execute, u"select y from dual")
self.failUnlessRaises(cx_Oracle.InterfaceError,
self.cursor.fetchall)
def testSetInputSizesMultipleMethod(self):
"""test setting input sizes with both positional and keyword args"""
self.failUnlessRaises(cx_Oracle.InterfaceError,
self.cursor.setinputsizes, 5, x = 5)
def testSetInputSizesByPosition(self):
"""test setting input sizes with positional args"""
var = self.cursor.var(cx_Oracle.STRING, 100)
self.cursor.setinputsizes(None, 5, None, 10, None, cx_Oracle.NUMBER)
self.cursor.execute(u"""
begin
:1 := :2 || to_char(:3) || :4 || to_char(:5) || to_char(:6);
end;""", [var, u'test_', 5, u'_second_', 3, 7])
self.failUnlessEqual(var.getvalue(), u"test_5_second_37")<|fim▁end|> | """test that bindnames() works correctly."""
self.failUnlessRaises(cx_Oracle.ProgrammingError,
self.cursor.bindnames)
self.cursor.prepare(u"begin null; end;") |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django.contrib import admin
<|fim▁hole|>admin.site.register(Callout, WidgyAdmin)<|fim▁end|> | from widgy.admin import WidgyAdmin
from widgy.contrib.page_builder.models import Callout
|
<|file_name|>S11.12_A2.1_T3.js<|end_file_name|><|fim▁begin|>// Copyright 2009 the Sputnik authors. All rights reserved.
/**<|fim▁hole|> * @path ch11/11.12/S11.12_A2.1_T3.js
* @description If ToBoolean(x) is true and GetBase(y) is null, throw ReferenceError
*/
//CHECK#1
try {
true ? y : false;
$ERROR('#1.1: true ? y : false throw ReferenceError. Actual: ' + (true ? y : false));
}
catch (e) {
if ((e instanceof ReferenceError) !== true) {
$ERROR('#1.2: true ? y : false throw ReferenceError. Actual: ' + (e));
}
}<|fim▁end|> | * Operator x ? y : z uses GetValue
* |
<|file_name|>terrain.rs<|end_file_name|><|fim▁begin|>//! Read and draw terrain data in 3D.
use gl;
use yaglw::gl_context::GLContext;
use yaglw::shader::Shader;
#[allow(missing_docs)]
pub struct TerrainShader<'a> {
#[allow(missing_docs)]
pub shader: Shader<'a>,
}
impl<'a> TerrainShader<'a> {
#[allow(missing_docs)]
pub fn new<'b:'a>(gl: &'a GLContext) -> TerrainShader<'b> {
let components = vec!(
(gl::VERTEX_SHADER, "
#version 330 core
uniform mat4 projection_matrix;<|fim▁hole|> flat out int face_id;
out vec3 world_position;
out vec3 normal;
void main() {
// Mutiply by 3 because there are 3 components for each normal vector.
int position_id = gl_VertexID * 3;
world_position.x = texelFetch(positions, position_id + 0).r;
world_position.y = texelFetch(positions, position_id + 1).r;
world_position.z = texelFetch(positions, position_id + 2).r;
int normal_id = position_id;
normal.x = texelFetch(normals, normal_id + 0).r;
normal.y = texelFetch(normals, normal_id + 1).r;
normal.z = texelFetch(normals, normal_id + 2).r;
face_id = gl_VertexID / 3;
gl_Position = projection_matrix * vec4(world_position, 1.0);
}".to_string()),
(gl::FRAGMENT_SHADER, format!("
#version 330 core
uniform struct Sun {{
vec3 direction;
vec3 intensity;
}} sun;
uniform vec3 ambient_light;
uniform samplerBuffer positions;
flat in int face_id;
in vec3 world_position;
in vec3 normal;
out vec4 frag_color;
float perlin(const float x, const float y) {{
float amplitude = 1;
float frequency = 1.0 / 64.0;
float persistence = 0.8;
const float lacunarity = 2.4;
const int octaves = 2;
float r = 0.0;
float max_crest = 0.0;
for (int o = 0; o < octaves; ++o) {{
float f = frequency * 2 * 3.14;
r += amplitude * (sin((o+x) * f) + sin((o+y) * f)) / 2;
max_crest += amplitude;
amplitude *= persistence;
frequency *= lacunarity;
}}
// Scale to [-1, 1]. N.B. There is no clamping.
r /= max_crest;
return r;
}}
void main() {{
int color_id = face_id * 3;
vec4 base_color = vec4(0, 0.5, 0, 1);
float p = 0
+ perlin(0, world_position.x)
+ perlin(0, world_position.y)
+ perlin(0, world_position.z)
;
// shift, scale, clamp to [0, 1]
p = (p + 3) / 6;
p = clamp(p, 0, 1);
base_color.r = (1 - p) / 2;
base_color.g = (3/2 + p) / 5;
base_color.b = (1 - p) / 5;
base_color.a = 1.0;
float brightness = dot(normal, sun.direction);
brightness = clamp(brightness, 0, 1);
vec3 lighting = brightness * sun.intensity + ambient_light;
frag_color = vec4(clamp(lighting, 0, 1), 1) * base_color;
}}",
)),
);
TerrainShader {
shader: Shader::new(gl, components.into_iter()),
}
}
}<|fim▁end|> |
uniform samplerBuffer positions;
uniform samplerBuffer normals;
|
<|file_name|>md5.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import hashlib
<|fim▁hole|>#print md5_hex('[email protected]'), "\n";
perl_result = "cbc41284e23c8c7ed98f589b6d6ebfd6"
md5 = hashlib.md5()
md5.update('[email protected]')
hex1 = md5.hexdigest()
if hex1 == perl_result:
print "ok"
else:
print "FAIL perl_result = %s" % str(perl_result)
print "FAIL hex1 = %s" % str(hex1)<|fim▁end|> |
# perl
## http://stackoverflow.com/questions/9991757/sha256-digest-in-perl
#use Digest::MD5 qw(md5_hex); |
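# For reference (illustrative, not part of the original sample), the same
# digest as a one-liner:
#   hashlib.md5('[email protected]').hexdigest()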
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod basic;
pub mod iterator;
pub mod map;
pub mod option;
pub mod result;
pub mod set;
pub mod string;
pub mod vec;
#[cfg(feature = "float")]
pub mod float;
#[cfg(any(test, doc, feature = "testing"))]
pub(crate) mod testing;<|fim▁end|> | // Copyright 2021 Google LLC |
<|file_name|>PopupWindowActivity.java<|end_file_name|><|fim▁begin|>package com.carlisle.incubators.PopupWindow;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import com.carlisle.incubators.R;
<|fim▁hole|>/**
* Created by chengxin on 11/24/16.
*/
public class PopupWindowActivity extends AppCompatActivity {
private PopupView popupView;
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_popup_window);
popupView = new PopupView(this);
}
public void onButtonClick(View view) {
//popupView.showAsDropDown(view);
popupView.showAsDropUp(view);
}
}<|fim▁end|> | |
<|file_name|>active-roadmap-page.tsx<|end_file_name|><|fim▁begin|>import { connect } from "react-redux"
import { RoadmapPage } from "../components/roadmap-page/roadmap-page"
const getActiveRoadmap = (roadmaps: any[], activeRoadmapId: number) => {
return roadmaps[activeRoadmapId]
}
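// Illustrative note (assumption, not from the source): `ownProps.params.id`
// suggests a react-router v3-style route param, e.g.
//   <Route path="/roadmaps/:id" component={ActiveRoadmapPage} />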
const mapStateToProps = (state: any, ownProps: any) => {<|fim▁hole|>}
export const ActiveRoadmapPage = connect(
mapStateToProps
)(RoadmapPage)<|fim▁end|> | return {
roadmap: getActiveRoadmap(state.roadmaps, ownProps.params.id)
} |
<|file_name|>advance.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::{Arc, Mutex};
use std::time::Duration;
use collections::HashMap;
use concurrency_manager::ConcurrencyManager;
use engine_traits::KvEngine;
use futures::compat::Future01CompatExt;
use grpcio::{ChannelBuilder, Environment};
use kvproto::kvrpcpb::{CheckLeaderRequest, LeaderInfo};
use kvproto::metapb::{Peer, PeerRole};
use kvproto::tikvpb::TikvClient;
use pd_client::PdClient;
use protobuf::Message;
use raftstore::store::fsm::StoreMeta;
use raftstore::store::util::RegionReadProgressRegistry;
use security::SecurityManager;
use tikv_util::timer::SteadyTimer;
use tikv_util::worker::Scheduler;
use tokio::runtime::{Builder, Runtime};
use txn_types::TimeStamp;
use crate::endpoint::Task;
use crate::errors::Result;
use crate::metrics::{CHECK_LEADER_REQ_ITEM_COUNT_HISTOGRAM, CHECK_LEADER_REQ_SIZE_HISTOGRAM};
const DEFAULT_CHECK_LEADER_TIMEOUT_MILLISECONDS: u64 = 5_000; // 5s
pub struct AdvanceTsWorker<E: KvEngine> {
store_meta: Arc<Mutex<StoreMeta>>,
region_read_progress: RegionReadProgressRegistry,
pd_client: Arc<dyn PdClient>,
timer: SteadyTimer,
worker: Runtime,
scheduler: Scheduler<Task<E::Snapshot>>,
/// The concurrency manager for transactions. It's needed for CDC to check locks when
/// calculating resolved_ts.
concurrency_manager: ConcurrencyManager,
// store_id -> client
tikv_clients: Arc<Mutex<HashMap<u64, TikvClient>>>,
env: Arc<Environment>,
security_mgr: Arc<SecurityManager>,
}
impl<E: KvEngine> AdvanceTsWorker<E> {
pub fn new(
pd_client: Arc<dyn PdClient>,
scheduler: Scheduler<Task<E::Snapshot>>,
store_meta: Arc<Mutex<StoreMeta>>,
region_read_progress: RegionReadProgressRegistry,
concurrency_manager: ConcurrencyManager,
env: Arc<Environment>,
security_mgr: Arc<SecurityManager>,
) -> Self {
let worker = Builder::new_multi_thread()
.thread_name("advance-ts")
.worker_threads(1)
.enable_time()
.build()
.unwrap();
Self {
env,
security_mgr,
scheduler,
pd_client,
worker,
timer: SteadyTimer::default(),
store_meta,
region_read_progress,
concurrency_manager,
tikv_clients: Arc::new(Mutex::new(HashMap::default())),
}
}
}
impl<E: KvEngine> AdvanceTsWorker<E> {
pub fn advance_ts_for_regions(&self, regions: Vec<u64>) {
let pd_client = self.pd_client.clone();
let scheduler = self.scheduler.clone();
let cm: ConcurrencyManager = self.concurrency_manager.clone();
let env = self.env.clone();
let security_mgr = self.security_mgr.clone();
let store_meta = self.store_meta.clone();
let tikv_clients = self.tikv_clients.clone();
let region_read_progress = self.region_read_progress.clone();
let fut = async move {
// Ignore get tso errors since we will retry every `advance_ts_interval`.
let mut min_ts = pd_client.get_tso().await.unwrap_or_default();
// Sync with concurrency manager so that it can work correctly when optimizations
// like async commit is enabled.
// Note: This step must be done before scheduling `Task::MinTS` task, and the
// resolver must be checked in or after `Task::MinTS`' execution.
cm.update_max_ts(min_ts);
if let Some(min_mem_lock_ts) = cm.global_min_lock_ts() {
if min_mem_lock_ts < min_ts {
min_ts = min_mem_lock_ts;
}
}
let regions = Self::region_resolved_ts_store(
regions,
store_meta,
region_read_progress,
pd_client,
security_mgr,
env,
tikv_clients,
min_ts,
)
.await;
if !regions.is_empty() {
if let Err(e) = scheduler.schedule(Task::AdvanceResolvedTs {
regions,
ts: min_ts,
}) {
info!("failed to schedule advance event"; "err" => ?e);
}
}
};
self.worker.spawn(fut);
}
pub fn register_next_event(&self, advance_ts_interval: Duration, cfg_version: usize) {
let scheduler = self.scheduler.clone();
let timeout = self.timer.delay(advance_ts_interval);
let fut = async move {
let _ = timeout.compat().await;
if let Err(e) = scheduler.schedule(Task::RegisterAdvanceEvent { cfg_version }) {
info!("failed to schedule register advance event"; "err" => ?e);
}
};
self.worker.spawn(fut);
}
// Confirms leadership of region peer before trying to advance resolved ts.
// This function broadcasts a special message to all stores, get the leader id of them to confirm whether
// current peer has a quorum which accept its leadership.
async fn region_resolved_ts_store(
regions: Vec<u64>,
store_meta: Arc<Mutex<StoreMeta>>,
region_read_progress: RegionReadProgressRegistry,
pd_client: Arc<dyn PdClient>,
security_mgr: Arc<SecurityManager>,
env: Arc<Environment>,
cdc_clients: Arc<Mutex<HashMap<u64, TikvClient>>>,
min_ts: TimeStamp,
) -> Vec<u64> {
#[cfg(feature = "failpoint")]
(|| fail_point!("before_sync_replica_read_state", |_| regions))();
let store_id = match store_meta.lock().unwrap().store_id {
Some(id) => id,
None => return vec![],
};
// store_id -> leaders info, record the request to each stores
let mut store_map: HashMap<u64, Vec<LeaderInfo>> = HashMap::default();
// region_id -> region, cache the information of regions
let mut region_map: HashMap<u64, Vec<Peer>> = HashMap::default();
// region_id -> peers id, record the responses
let mut resp_map: HashMap<u64, Vec<u64>> = HashMap::default();
// region_id -> `(Vec<Peer>, LeaderInfo)`
        let info_map = region_read_progress.dump_leader_infos(&regions);
for (region_id, (peer_list, leader_info)) in info_map {
let leader_id = leader_info.get_peer_id();
// Check if the leader in this store
if find_store_id(&peer_list, leader_id) != Some(store_id) {
continue;
}
for peer in &peer_list {
if peer.store_id == store_id && peer.id == leader_id {
resp_map.entry(region_id).or_default().push(store_id);
continue;
}
store_map
.entry(peer.store_id)
.or_default()
.push(leader_info.clone());
}
region_map.insert(region_id, peer_list);
}
// Approximate `LeaderInfo` size
let leader_info_size = store_map
.values()
.next()
.map_or(0, |regions| regions[0].compute_size());
let stores = store_map.into_iter().map(|(store_id, regions)| {
let cdc_clients = cdc_clients.clone();
let env = env.clone();
let pd_client = pd_client.clone();
let security_mgr = security_mgr.clone();
let region_num = regions.len() as u32;
CHECK_LEADER_REQ_SIZE_HISTOGRAM.observe((leader_info_size * region_num) as f64);
CHECK_LEADER_REQ_ITEM_COUNT_HISTOGRAM.observe(region_num as f64);
async move {
if cdc_clients.lock().unwrap().get(&store_id).is_none() {
let store = box_try!(pd_client.get_store_async(store_id).await);
let cb = ChannelBuilder::new(env.clone());
let channel = security_mgr.connect(cb, &store.address);
cdc_clients
.lock()
.unwrap()
.insert(store_id, TikvClient::new(channel));
}
let client = cdc_clients.lock().unwrap().get(&store_id).unwrap().clone();
let mut req = CheckLeaderRequest::default();
req.set_regions(regions.into());
req.set_ts(min_ts.into_inner());
let res = box_try!(
tokio::time::timeout(
Duration::from_millis(DEFAULT_CHECK_LEADER_TIMEOUT_MILLISECONDS),
box_try!(client.check_leader_async(&req))
)
.await
);
let resp = box_try!(res);
Result::Ok((store_id, resp))
}
});
let resps = futures::future::join_all(stores).await;
resps
.into_iter()
.filter_map(|resp| match resp {
Ok(resp) => Some(resp),<|fim▁hole|> }
})
.map(|(store_id, resp)| {
resp.regions
.into_iter()
.map(move |region_id| (store_id, region_id))
})
.flatten()
.for_each(|(store_id, region_id)| {
resp_map.entry(region_id).or_default().push(store_id);
});
resp_map
.into_iter()
.filter_map(|(region_id, stores)| {
                if region_has_quorum(&region_map[&region_id], &stores) {
Some(region_id)
} else {
debug!(
"resolved-ts cannot get quorum for resolved ts";
"region_id" => region_id,
"stores" => ?stores,
"region" => ?®ion_map[®ion_id]
);
None
}
})
.collect()
}
}
fn region_has_quorum(peers: &[Peer], stores: &[u64]) -> bool {
let mut voters = 0;
let mut incoming_voters = 0;
let mut demoting_voters = 0;
let mut resp_voters = 0;
let mut resp_incoming_voters = 0;
let mut resp_demoting_voters = 0;
peers.iter().for_each(|peer| {
let mut in_resp = false;
for store_id in stores {
if *store_id == peer.store_id {
in_resp = true;
break;
}
}
match peer.get_role() {
PeerRole::Voter => {
voters += 1;
if in_resp {
resp_voters += 1;
}
}
PeerRole::IncomingVoter => {
incoming_voters += 1;
if in_resp {
resp_incoming_voters += 1;
}
}
PeerRole::DemotingVoter => {
demoting_voters += 1;
if in_resp {
resp_demoting_voters += 1;
}
}
PeerRole::Learner => (),
}
});
let has_incoming_majority =
(resp_voters + resp_incoming_voters) >= ((voters + incoming_voters) / 2 + 1);
let has_demoting_majority =
(resp_voters + resp_demoting_voters) >= ((voters + demoting_voters) / 2 + 1);
has_incoming_majority && has_demoting_majority
}
fn find_store_id(peer_list: &[Peer], peer_id: u64) -> Option<u64> {
for peer in peer_list {
if peer.id == peer_id {
return Some(peer.store_id);
}
}
None
}<|fim▁end|> | Err(e) => {
debug!("resolved-ts check leader error"; "err" =>?e);
None |
<|file_name|>set_webhook_parameters.go<|end_file_name|><|fim▁begin|>// Code generated by go-swagger; DO NOT EDIT.
package updates
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"time"
"golang.org/x/net/context"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/swag"
strfmt "github.com/go-openapi/strfmt"
)
// NewSetWebhookParams creates a new SetWebhookParams object
// with the default values initialized.
func NewSetWebhookParams() *SetWebhookParams {
var ()
return &SetWebhookParams{
timeout: cr.DefaultTimeout,
}
}
// NewSetWebhookParamsWithTimeout creates a new SetWebhookParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewSetWebhookParamsWithTimeout(timeout time.Duration) *SetWebhookParams {
var ()
return &SetWebhookParams{
timeout: timeout,
}
}
// NewSetWebhookParamsWithContext creates a new SetWebhookParams object
// with the default values initialized, and the ability to set a context for a request
func NewSetWebhookParamsWithContext(ctx context.Context) *SetWebhookParams {
var ()
return &SetWebhookParams{
Context: ctx,
}
}
// NewSetWebhookParamsWithHTTPClient creates a new SetWebhookParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewSetWebhookParamsWithHTTPClient(client *http.Client) *SetWebhookParams {
var ()
return &SetWebhookParams{
HTTPClient: client,
}
}
/*SetWebhookParams contains all the parameters to send to the API endpoint
for the set webhook operation typically these are written to a http.Request
*/
type SetWebhookParams struct {
/*AllowedUpdates*/
AllowedUpdates []string
/*Certificate*/
Certificate runtime.NamedReadCloser
/*MaxConnections*/
MaxConnections *int64
/*Token
bot's token to authorize the request
*/
Token *string
/*URL*/
URL string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the set webhook params
func (o *SetWebhookParams) WithTimeout(timeout time.Duration) *SetWebhookParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the set webhook params
func (o *SetWebhookParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the set webhook params
func (o *SetWebhookParams) WithContext(ctx context.Context) *SetWebhookParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the set webhook params<|fim▁hole|>
// WithHTTPClient adds the HTTPClient to the set webhook params
func (o *SetWebhookParams) WithHTTPClient(client *http.Client) *SetWebhookParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the set webhook params
func (o *SetWebhookParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithAllowedUpdates adds the allowedUpdates to the set webhook params
func (o *SetWebhookParams) WithAllowedUpdates(allowedUpdates []string) *SetWebhookParams {
o.SetAllowedUpdates(allowedUpdates)
return o
}
// SetAllowedUpdates adds the allowedUpdates to the set webhook params
func (o *SetWebhookParams) SetAllowedUpdates(allowedUpdates []string) {
o.AllowedUpdates = allowedUpdates
}
// WithCertificate adds the certificate to the set webhook params
func (o *SetWebhookParams) WithCertificate(certificate runtime.NamedReadCloser) *SetWebhookParams {
o.SetCertificate(certificate)
return o
}
// SetCertificate adds the certificate to the set webhook params
func (o *SetWebhookParams) SetCertificate(certificate runtime.NamedReadCloser) {
o.Certificate = certificate
}
// WithMaxConnections adds the maxConnections to the set webhook params
func (o *SetWebhookParams) WithMaxConnections(maxConnections *int64) *SetWebhookParams {
o.SetMaxConnections(maxConnections)
return o
}
// SetMaxConnections adds the maxConnections to the set webhook params
func (o *SetWebhookParams) SetMaxConnections(maxConnections *int64) {
o.MaxConnections = maxConnections
}
// WithToken adds the token to the set webhook params
func (o *SetWebhookParams) WithToken(token *string) *SetWebhookParams {
o.SetToken(token)
return o
}
// SetToken adds the token to the set webhook params
func (o *SetWebhookParams) SetToken(token *string) {
o.Token = token
}
// WithURL adds the url to the set webhook params
func (o *SetWebhookParams) WithURL(url string) *SetWebhookParams {
o.SetURL(url)
return o
}
// SetURL adds the url to the set webhook params
func (o *SetWebhookParams) SetURL(url string) {
o.URL = url
}
// WriteToRequest writes these params to a swagger request
func (o *SetWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
valuesAllowedUpdates := o.AllowedUpdates
joinedAllowedUpdates := swag.JoinByFormat(valuesAllowedUpdates, "multi")
// form array param allowed_updates
if err := r.SetFormParam("allowed_updates", joinedAllowedUpdates...); err != nil {
return err
}
if o.Certificate != nil {
if o.Certificate != nil {
// form file param certificate
if err := r.SetFileParam("certificate", o.Certificate); err != nil {
return err
}
}
}
if o.MaxConnections != nil {
// form param max_connections
var frMaxConnections int64
if o.MaxConnections != nil {
frMaxConnections = *o.MaxConnections
}
fMaxConnections := swag.FormatInt64(frMaxConnections)
if fMaxConnections != "" {
if err := r.SetFormParam("max_connections", fMaxConnections); err != nil {
return err
}
}
}
if o.Token != nil {
// path param token
if err := r.SetPathParam("token", *o.Token); err != nil {
return err
}
}
// form param url
frURL := o.URL
fURL := frURL
if fURL != "" {
if err := r.SetFormParam("url", fURL); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}<|fim▁end|> | func (o *SetWebhookParams) SetContext(ctx context.Context) {
o.Context = ctx
} |
<|file_name|>defaults.py<|end_file_name|><|fim▁begin|>DEBUG = False
BASEDIR = ''
SUBDIR = ''
PREFIX = ''
QUALITY = 85
CONVERT = '/usr/bin/convert'
WVPS = '/usr/bin/wvPS'
PROCESSORS = (
'populous.thumbnail.processors.colorspace',<|fim▁hole|> 'populous.thumbnail.processors.autocrop',
'populous.thumbnail.processors.scale_and_crop',
'populous.thumbnail.processors.filters',
)<|fim▁end|> | |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn main() {
let mut sum: u32 = 0;<|fim▁hole|> sum = sum + i;
}
}
println!("The result is: {}", sum);
}<|fim▁end|> |
for i in 1..1000 {
if (i % 3 == 0) || (i % 5 == 0){ |
<|file_name|>ReactTestUtils.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2013 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @providesModule ReactTestUtils
*/
var EventConstants = require('EventConstants');
var React = require('React');
var ReactComponent = require('ReactComponent');
var ReactDOM = require('ReactDOM');
var ReactEventEmitter = require('ReactEventEmitter');
var ReactTextComponent = require('ReactTextComponent');
var ReactMount = require('ReactMount');
var mergeInto = require('mergeInto');
var copyProperties = require('copyProperties');
var topLevelTypes = EventConstants.topLevelTypes;
function Event(suffix) {}
/**
* @class ReactTestUtils
*/
/**
* Todo: Support the entire DOM.scry query syntax. For now, these simple
* utilities will suffice for testing purposes.
* @lends ReactTestUtils
*/
var ReactTestUtils = {
renderIntoDocument: function(instance) {
var div = document.createElement('div');
document.documentElement.appendChild(div);
return React.renderComponent(instance, div);
},
isComponentOfType: function(inst, type) {
return !!(
inst &&<|fim▁hole|> },
isDOMComponent: function(inst) {
return !!(inst &&
ReactComponent.isValidComponent(inst) &&
!!inst.tagName);
},
isCompositeComponent: function(inst) {
return !!(
inst &&
ReactComponent.isValidComponent(inst) &&
typeof inst.render === 'function' &&
typeof inst.setState === 'function' &&
typeof inst.updateComponent === 'function'
);
},
isCompositeComponentWithType: function(inst, type) {
return !!(ReactTestUtils.isCompositeComponent(inst) &&
(inst.constructor === type.componentConstructor ||
inst.constructor === type));
},
isTextComponent: function(inst) {
return inst instanceof ReactTextComponent;
},
findAllInRenderedTree: function(inst, test) {
if (!inst) {
return [];
}
var ret = test(inst) ? [inst] : [];
if (ReactTestUtils.isDOMComponent(inst)) {
var renderedChildren = inst._renderedChildren;
var key;
for (key in renderedChildren) {
if (!renderedChildren.hasOwnProperty(key)) {
continue;
}
ret = ret.concat(
ReactTestUtils.findAllInRenderedTree(renderedChildren[key], test)
);
}
} else if (ReactTestUtils.isCompositeComponent(inst)) {
ret = ret.concat(
ReactTestUtils.findAllInRenderedTree(inst._renderedComponent, test)
);
}
return ret;
},
/**
* Finds all instance of components in the rendered tree that are DOM
* components with the class name matching `className`.
* @return an array of all the matches.
*/
scryRenderedDOMComponentsWithClass: function(root, className) {
return ReactTestUtils.findAllInRenderedTree(root, function(inst) {
var instClassName = inst.props.className;
return ReactTestUtils.isDOMComponent(inst) && (
instClassName &&
(' ' + instClassName + ' ').indexOf(' ' + className + ' ') !== -1
);
});
},
/**
* Like scryRenderedDOMComponentsWithClass but expects there to be one result,
* and returns that one result, or throws exception if there is any other
* number of matches besides one.
* @return {!ReactDOMComponent} The one match.
*/
findRenderedDOMComponentWithClass: function(root, className) {
var all =
ReactTestUtils.scryRenderedDOMComponentsWithClass(root, className);
if (all.length !== 1) {
throw new Error('Did not find exactly one match for class:' + className);
}
return all[0];
},
/**
* Finds all instance of components in the rendered tree that are DOM
* components with the tag name matching `tagName`.
* @return an array of all the matches.
*/
scryRenderedDOMComponentsWithTag: function(root, tagName) {
return ReactTestUtils.findAllInRenderedTree(root, function(inst) {
return ReactTestUtils.isDOMComponent(inst) &&
inst.tagName === tagName.toUpperCase();
});
},
/**
* Like scryRenderedDOMComponentsWithTag but expects there to be one result,
* and returns that one result, or throws exception if there is any other
* number of matches besides one.
* @return {!ReactDOMComponent} The one match.
*/
findRenderedDOMComponentWithTag: function(root, tagName) {
var all = ReactTestUtils.scryRenderedDOMComponentsWithTag(root, tagName);
if (all.length !== 1) {
throw new Error('Did not find exactly one match for tag:' + tagName);
}
return all[0];
},
/**
* Finds all instances of components with type equal to `componentType`.
* @return an array of all the matches.
*/
scryRenderedComponentsWithType: function(root, componentType) {
return ReactTestUtils.findAllInRenderedTree(root, function(inst) {
return ReactTestUtils.isCompositeComponentWithType(inst, componentType);
});
},
/**
* Same as `scryRenderedComponentsWithType` but expects there to be one result
* and returns that one result, or throws exception if there is any other
* number of matches besides one.
* @return {!ReactComponent} The one match.
*/
findRenderedComponentWithType: function(root, componentType) {
var all = ReactTestUtils.scryRenderedComponentsWithType(
root,
componentType
);
if (all.length !== 1) {
throw new Error(
'Did not find exactly one match for componentType:' + componentType
);
}
return all[0];
},
/**
* Pass a mocked component module to this method to augment it with
* useful methods that allow it to be used as a dummy React component.
* Instead of rendering as usual, the component will become a simple
* <div> containing any provided children.
*
* @param {object} module the mock function object exported from a
* module that defines the component to be mocked
* @param {?string} mockTagName optional dummy root tag name to return
* from render method (overrides
* module.mockTagName if provided)
* @return {object} the ReactTestUtils object (for chaining)
*/
mockComponent: function(module, mockTagName) {
var ConvenienceConstructor = React.createClass({
render: function() {
        var tagName = mockTagName || module.mockTagName || "div";
        return ReactDOM[tagName](null, this.props.children);
}
});
copyProperties(module, ConvenienceConstructor);
module.mockImplementation(ConvenienceConstructor);
return this;
},
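  // Illustrative usage sketch (assumption: `module.mockImplementation` above
  // is the Jest mocking API, so the argument is an automocked module):
  //
  //   var MyChild = require('MyChild'); // automocked by the test runner
  //   ReactTestUtils.mockComponent(MyChild, 'span');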
/**
   * Simulates a top level event being dispatched from a raw event that occurred
   * on an `Element` node.
* @param topLevelType {Object} A type from `EventConstants.topLevelTypes`
* @param {!Element} node The dom to simulate an event occurring on.
* @param {?Event} fakeNativeEvent Fake native event to use in SyntheticEvent.
*/
simulateEventOnNode: function(topLevelType, node, fakeNativeEvent) {
var virtualHandler =
ReactEventEmitter.TopLevelCallbackCreator.createTopLevelCallback(
topLevelType
);
fakeNativeEvent.target = node;
virtualHandler(fakeNativeEvent);
},
/**
   * Simulates a top level event being dispatched from a raw event that occurred
* on the `ReactDOMComponent` `comp`.
* @param topLevelType {Object} A type from `EventConstants.topLevelTypes`.
* @param comp {!ReactDOMComponent}
* @param {?Event} fakeNativeEvent Fake native event to use in SyntheticEvent.
*/
simulateEventOnDOMComponent: function(topLevelType, comp, fakeNativeEvent) {
var reactRootID = comp._rootNodeID || comp._rootDomId;
if (!reactRootID) {
throw new Error('Simulating event on non-rendered component');
}
var virtualHandler =
ReactEventEmitter.TopLevelCallbackCreator.createTopLevelCallback(
topLevelType
);
var node = ReactMount.getNode(reactRootID);
fakeNativeEvent.target = node;
/* jsdom is returning nodes without id's - fixing that issue here. */
ReactMount.setID(node, reactRootID);
virtualHandler(fakeNativeEvent);
},
nativeTouchData: function(x, y) {
return {
touches: [
{pageX: x, pageY: y}
]
};
},
Simulate: null // Will populate
};
/**
* Exports:
*
* - `ReactTestUtils.Simulate.click(Element/ReactDOMComponent)`
* - `ReactTestUtils.Simulate.mouseMove(Element/ReactDOMComponent)`
 * - `ReactTestUtils.Simulate.mouseIn(Element/ReactDOMComponent)`
* - `ReactTestUtils.Simulate.mouseOut(Element/ReactDOMComponent)`
* - ... (All keys from `EventConstants.topLevelTypes`)
*
* Note: Top level event types are a subset of the entire set of handler types
* (which include a broader set of "synthetic" events). For example, onDragDone
* is a synthetic event. You certainly may write test cases for these event
* types, but it doesn't make sense to simulate them at this low of a level. In
* this case, the way you test an `onDragDone` event is by simulating a series
 * of `mouseMove`/`mouseDown`/`mouseUp` events - Then, a synthetic event of
 * type `onDragDone` will be constructed and dispatched through your system
* automatically.
*/
function makeSimulator(eventType) {
return function(domComponentOrNode, nativeEventData) {
var fakeNativeEvent = new Event(eventType);
mergeInto(fakeNativeEvent, nativeEventData);
if (ReactTestUtils.isDOMComponent(domComponentOrNode)) {
ReactTestUtils.simulateEventOnDOMComponent(
eventType,
domComponentOrNode,
fakeNativeEvent
);
} else if (!!domComponentOrNode.tagName) {
// Will allow on actual dom nodes.
ReactTestUtils.simulateEventOnNode(
eventType,
domComponentOrNode,
fakeNativeEvent
);
}
};
}
ReactTestUtils.Simulate = {};
var eventType;
for (eventType in topLevelTypes) {
// Event type is stored as 'topClick' - we transform that to 'click'
var convenienceName = eventType.indexOf('top') === 0 ?
eventType.charAt(3).toLowerCase() + eventType.substr(4) : eventType;
/**
* @param {!Element || ReactDOMComponent} domComponentOrNode
* @param {?Event} nativeEventData Fake native event to use in SyntheticEvent.
*/
ReactTestUtils.Simulate[convenienceName] = makeSimulator(eventType);
}
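// Illustrative usage sketch (not part of the original file): firing a click
// on a rendered component instance from a test.
//
//   var instance = ReactTestUtils.renderIntoDocument(MyComponent(null));
//   ReactTestUtils.Simulate.click(instance, {shiftKey: true});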
module.exports = ReactTestUtils;<|fim▁end|> | ReactComponent.isValidComponent(inst) &&
inst.constructor === type.componentConstructor
); |
<|file_name|>SubscriptionUpgradePeriod.java<|end_file_name|><|fim▁begin|>package com.iyzipay.model.subscription.enumtype;
public enum SubscriptionUpgradePeriod {
NOW(1),
NEXT_PERIOD(2);
private final Integer value;
SubscriptionUpgradePeriod(Integer value) {
this.value = value;<|fim▁hole|> return value;
}
}<|fim▁end|> | }
public Integer getValue() { |
<|file_name|>drawplugins.gpr.py<|end_file_name|><|fim▁begin|>#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#<|fim▁hole|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.plug._pluginreg import *
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
MODULE_VERSION="5.1"
# this is the default in gen/plug/_pluginreg.py: plg.require_active = True
#------------------------------------------------------------------------
#
# Ancestor Tree
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'ancestor_chart,BKI'
plg.name = _("Ancestor Chart")
plg.description = _("Produces a graphical ancestral chart")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'ancestortree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'AncestorTree'
plg.optionclass = 'AncestorTreeOptions'
plg.report_modes = [REPORT_MODE_BKI]
plg = newplugin()
plg.id = 'ancestor_chart'
plg.name = _("Ancestor Tree")
plg.description = _("Produces a graphical ancestral tree")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'ancestortree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'AncestorTree'
plg.optionclass = 'AncestorTreeOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Calendar
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'calendar'
plg.name = _("Calendar")
plg.description = _("Produces a graphical calendar")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'calendarreport.py'
plg.ptype = REPORT
plg.authors = ["Douglas S. Blank"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'Calendar'
plg.optionclass = 'CalendarOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Descendant Tree
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'descend_chart,BKI'
plg.name = _("Descendant Chart")
plg.description = _("Produces a graphical descendant chart")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_BKI]
plg = newplugin()
plg.id = 'descend_chart'
plg.name = _("Descendant Tree")
plg.description = _("Produces a graphical descendant tree")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Family Descendant Tree
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'family_descend_chart,BKI'
plg.name = _("Family Descendant Chart")
plg.description = _("Produces a graphical descendant chart around a family")
plg.version = '1.0'
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.category = CATEGORY_DRAW
plg.gramps_target_version = MODULE_VERSION
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["[email protected]"]
plg.require_active = True
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_BKI]
plg = newplugin()
plg.id = 'family_descend_chart'
plg.name = _("Family Descendant Tree")
plg.description = _("Produces a graphical descendant tree around a family")
plg.version = '1.0'
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.category = CATEGORY_DRAW
plg.gramps_target_version = MODULE_VERSION
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["[email protected]"]
plg.require_active = True
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Fan Chart
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'fan_chart'
plg.name = _("Fan Chart")
plg.description = _("Produces fan charts")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'fanchart.py'
plg.ptype = REPORT
plg.authors = ["Donald N. Allingham"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'FanChart'
plg.optionclass = 'FanChartOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Statistics Charts
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'statistics_chart'
plg.name = _("Statistics Charts")
plg.description = _("Produces statistical bar and pie charts of the people "
"in the database")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'statisticschart.py'
plg.ptype = REPORT
plg.authors = ["Eero Tamminen"]
plg.authors_email = [""]
plg.category = CATEGORY_DRAW
plg.reportclass = 'StatisticsChart'
plg.optionclass = 'StatisticsChartOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
plg.require_active = False
#------------------------------------------------------------------------
#
# Timeline Chart
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'timeline'
plg.name = _("Timeline Chart")
plg.description = _("Produces a timeline chart.")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'timeline.py'
plg.ptype = REPORT
plg.authors = ["Donald N. Allingham"]
plg.authors_email = ["[email protected]"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'TimeLine'
plg.optionclass = 'TimeLineOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]<|fim▁end|> | |
<|file_name|>ipng.cpp<|end_file_name|><|fim▁begin|>/* Copyright (C) 2003-2013 Runtime Revolution Ltd.
This file is part of LiveCode.
LiveCode is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License v3 as published by the Free
Software Foundation.
LiveCode is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with LiveCode. If not see <http://www.gnu.org/licenses/>. */
#include "prefix.h"
#include "globdefs.h"
#include "png.h"
#include "filedefs.h"
#include "objdefs.h"
#include "parsedef.h"
#include "mcio.h"
#include "uidc.h"
#include "util.h"
#include "image.h"
#include "globals.h"
#include "imageloader.h"
#define NATIVE_ALPHA_BEFORE ((kMCGPixelFormatNative & kMCGPixelAlphaPositionFirst) == kMCGPixelAlphaPositionFirst)
#define NATIVE_ORDER_BGR ((kMCGPixelFormatNative & kMCGPixelOrderRGB) == 0)
#if NATIVE_ALPHA_BEFORE
#define MCPNG_FILLER_POSITION PNG_FILLER_BEFORE
#else
#define MCPNG_FILLER_POSITION PNG_FILLER_AFTER
#endif
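// Informative note (added commentary): with a native BGRA layout (alpha byte
// last), MCPNG_FILLER_POSITION resolves to PNG_FILLER_AFTER, so the
// png_set_add_alpha()/png_set_filler() calls below pad opaque pixels on the
// side of the RGB triple that matches the native pixel format.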
extern "C" void fakeread(png_structp png_ptr, png_bytep data, png_size_t length)
{
uint8_t **t_data_ptr = (uint8_t**)png_get_io_ptr(png_ptr);
memcpy(data, *t_data_ptr, length);
*t_data_ptr += length;
}
extern "C" void fakeflush(png_structp png_ptr)
{}
bool MCImageValidatePng(const void *p_data, uint32_t p_length, uint16_t& r_width, uint16_t& r_height)
{
png_structp png_ptr;
png_ptr = png_create_read_struct((char *)PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr;
info_ptr = png_create_info_struct(png_ptr);
png_infop end_info_ptr;
end_info_ptr = png_create_info_struct(png_ptr);
// If we return to this point, its an error.
if (setjmp(png_jmpbuf(png_ptr)))
{
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info_ptr);
return false;
}
png_set_read_fn(png_ptr, &p_data, fakeread);
png_read_info(png_ptr, info_ptr);
png_uint_32 width, height;
int interlace_type, compression_type, filter_type, bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, &width, &height, &bit_depth, &color_type, &interlace_type, &compression_type, &filter_type);
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info_ptr);
r_width = (uint16_t)width;
r_height = (uint16_t)height;
return true;
}
////////////////////////////////////////////////////////////////////////////////
extern "C" void stream_read(png_structp png_ptr, png_bytep data, png_size_t length)
{
IO_handle t_stream = (IO_handle)png_get_io_ptr(png_ptr);
uint4 t_length;
t_length = length;
if (IO_read(data, length, t_stream) != IO_NORMAL)
png_error(png_ptr, (char *)"pnglib read error");
}
static void MCPNGSetNativePixelFormat(png_structp p_png)
{
#if NATIVE_ORDER_BGR
png_set_bgr(p_png);
#endif
#if NATIVE_ALPHA_BEFORE
png_set_swap_alpha(p_png);
#endif
}
class MCPNGImageLoader : public MCImageLoader
{
public:
MCPNGImageLoader(IO_handle p_stream);
virtual ~MCPNGImageLoader();
virtual MCImageLoaderFormat GetFormat() { return kMCImageFormatPNG; }
protected:
virtual bool LoadHeader(uint32_t &r_width, uint32_t &r_height, uint32_t &r_xhot, uint32_t &r_yhot, MCStringRef &r_name, uint32_t &r_frame_count, MCImageMetadata &r_metadata);
virtual bool LoadFrames(MCBitmapFrame *&r_frames, uint32_t &r_count);
private:
png_structp m_png;
png_infop m_info;
png_infop m_end_info;
int m_bit_depth;
int m_color_type;
};
MCPNGImageLoader::MCPNGImageLoader(IO_handle p_stream) : MCImageLoader(p_stream)
{
m_png = nil;
m_info = nil;
m_end_info = nil;
}
MCPNGImageLoader::~MCPNGImageLoader()
{
if (m_png != nil)
png_destroy_read_struct(&m_png, &m_info, &m_end_info);
}
bool MCPNGImageLoader::LoadHeader(uint32_t &r_width, uint32_t &r_height, uint32_t &r_xhot, uint32_t &r_yhot, MCStringRef &r_name, uint32_t &r_frame_count, MCImageMetadata &r_metadata)
{
bool t_success = true;
t_success = nil != (m_png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL));
if (t_success)
t_success = nil != (m_info = png_create_info_struct(m_png));
if (t_success)
t_success = nil != (m_end_info = png_create_info_struct(m_png));
if (t_success)
{
if (setjmp(png_jmpbuf(m_png)))
{
t_success = false;
}
}
if (t_success)
{
png_set_read_fn(m_png, GetStream(), stream_read);
png_read_info(m_png, m_info);
}
png_uint_32 t_width, t_height;
int t_interlace_method, t_compression_method, t_filter_method;
if (t_success)
{
png_get_IHDR(m_png, m_info, &t_width, &t_height,
&m_bit_depth, &m_color_type,
&t_interlace_method, &t_compression_method, &t_filter_method);
}
// MERG-2014-09-12: [[ ImageMetadata ]] load image metadata
if (t_success)
{
uint32_t t_X;
uint32_t t_Y;
int t_units;
if (png_get_pHYs(m_png, m_info, &t_X, &t_Y, &t_units) && t_units != PNG_RESOLUTION_UNKNOWN)
{
MCImageMetadata t_metadata;
MCMemoryClear(&t_metadata, sizeof(t_metadata));
t_metadata.has_density = true;
t_metadata.density = floor(t_X * 0.0254 + 0.5);
r_metadata = t_metadata;
}
}
if (t_success)
{
r_width = t_width;
r_height = t_height;
r_xhot = r_yhot = 0;
r_name = MCValueRetain(kMCEmptyString);
r_frame_count = 1;
}
return t_success;
}
bool MCPNGImageLoader::LoadFrames(MCBitmapFrame *&r_frames, uint32_t &r_count)
{
bool t_success = true;
MCBitmapFrame *t_frame;
t_frame = nil;
MCColorTransformRef t_color_xform;
t_color_xform = nil;
if (setjmp(png_jmpbuf(m_png)))
{
t_success = false;
}
int t_interlace_passes;
uint32_t t_width, t_height;
<|fim▁hole|> if (t_success)
t_success = MCMemoryNew(t_frame);
if (t_success)
t_success = MCImageBitmapCreate(t_width, t_height, t_frame->image);
if (t_success)
{
bool t_need_alpha = false;
t_interlace_passes = png_set_interlace_handling(m_png);
if (m_color_type == PNG_COLOR_TYPE_PALETTE)
png_set_palette_to_rgb(m_png);
if (m_color_type == PNG_COLOR_TYPE_GRAY || m_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
png_set_gray_to_rgb(m_png);
if (png_get_valid(m_png, m_info, PNG_INFO_tRNS))
{
png_set_tRNS_to_alpha(m_png);
t_need_alpha = true;
/* OVERHAUL - REVISIT - assume image has transparent pixels if tRNS is present */
t_frame->image->has_transparency = true;
}
if (m_color_type & PNG_COLOR_MASK_ALPHA)
{
t_need_alpha = true;
/* OVERHAUL - REVISIT - assume image has alpha if color type allows it */
t_frame->image->has_alpha = t_frame->image->has_transparency = true;
}
else if (!t_need_alpha)
png_set_add_alpha(m_png, 0xFF, MCPNG_FILLER_POSITION);
if (m_bit_depth == 16)
png_set_strip_16(m_png);
MCPNGSetNativePixelFormat(m_png);
}
// MW-2009-12-10: Support for color profiles
// Try to get an embedded ICC profile...
if (t_success && t_color_xform == nil && png_get_valid(m_png, m_info, PNG_INFO_iCCP))
{
png_charp t_ccp_name;
png_bytep t_ccp_profile;
int t_ccp_compression_type;
png_uint_32 t_ccp_profile_length;
png_get_iCCP(m_png, m_info, &t_ccp_name, &t_ccp_compression_type, &t_ccp_profile, &t_ccp_profile_length);
MCColorSpaceInfo t_csinfo;
t_csinfo . type = kMCColorSpaceEmbedded;
t_csinfo . embedded . data = t_ccp_profile;
t_csinfo . embedded . data_size = t_ccp_profile_length;
t_color_xform = MCscreen -> createcolortransform(t_csinfo);
}
// Next try an sRGB style profile...
if (t_success && t_color_xform == nil && png_get_valid(m_png, m_info, PNG_INFO_sRGB))
{
int t_intent;
png_get_sRGB(m_png, m_info, &t_intent);
MCColorSpaceInfo t_csinfo;
t_csinfo . type = kMCColorSpaceStandardRGB;
t_csinfo . standard . intent = (MCColorSpaceIntent)t_intent;
t_color_xform = MCscreen -> createcolortransform(t_csinfo);
}
// Finally try for cHRM + gAMA...
if (t_success && t_color_xform == nil && png_get_valid(m_png, m_info, PNG_INFO_cHRM) &&
png_get_valid(m_png, m_info, PNG_INFO_gAMA))
{
MCColorSpaceInfo t_csinfo;
t_csinfo . type = kMCColorSpaceCalibratedRGB;
png_get_cHRM(m_png, m_info,
&t_csinfo . calibrated . white_x, &t_csinfo . calibrated . white_y,
&t_csinfo . calibrated . red_x, &t_csinfo . calibrated . red_y,
&t_csinfo . calibrated . green_x, &t_csinfo . calibrated . green_y,
&t_csinfo . calibrated . blue_x, &t_csinfo . calibrated . blue_y);
png_get_gAMA(m_png, m_info, &t_csinfo . calibrated . gamma);
t_color_xform = MCscreen -> createcolortransform(t_csinfo);
}
// Could not create any kind, so fallback to gamma transform.
if (t_success && t_color_xform == nil)
{
double image_gamma;
if (png_get_gAMA(m_png, m_info, &image_gamma))
png_set_gamma(m_png, MCgamma, image_gamma);
else
png_set_gamma(m_png, MCgamma, 0.45);
}
if (t_success)
{
for (uindex_t t_pass = 0; t_pass < t_interlace_passes; t_pass++)
{
png_bytep t_data_ptr = (png_bytep)t_frame->image->data;
for (uindex_t i = 0; i < t_height; i++)
{
png_read_row(m_png, t_data_ptr, nil);
t_data_ptr += t_frame->image->stride;
}
}
}
if (t_success)
png_read_end(m_png, m_end_info);
// transform colours using extracted colour profile
if (t_success && t_color_xform != nil)
MCImageBitmapApplyColorTransform(t_frame->image, t_color_xform);
if (t_color_xform != nil)
MCscreen -> destroycolortransform(t_color_xform);
if (t_success)
{
r_frames = t_frame;
r_count = 1;
}
else
MCImageFreeFrames(t_frame, 1);
return t_success;
}
bool MCImageLoaderCreateForPNGStream(IO_handle p_stream, MCImageLoader *&r_loader)
{
MCPNGImageLoader *t_loader;
t_loader = new MCPNGImageLoader(p_stream);
if (t_loader == nil)
return false;
r_loader = t_loader;
return true;
}
////////////////////////////////////////////////////////////////////////////////
// embed stream within wrapper struct containing byte count
// so we can update byte_count as we go
struct MCPNGWriteContext
{
IO_handle stream;
uindex_t byte_count;
};
extern "C" void fakewrite(png_structp png_ptr, png_bytep data, png_size_t length)
{
MCPNGWriteContext *t_context = (MCPNGWriteContext*)png_get_io_ptr(png_ptr);
if (IO_write(data, sizeof(uint1), length, t_context->stream) != IO_NORMAL)
png_error(png_ptr, (char *)"pnglib write error");
t_context->byte_count += length;
}
// MERG-2014-07-16: [[ ImageMetadata ]] Parse the metadata array
static void parsemetadata(png_structp png_ptr, png_infop info_ptr, MCImageMetadata *p_metadata)
{
if (p_metadata == nil)
return;
if (p_metadata -> has_density)
{
real64_t t_ppi = p_metadata -> density;
if (t_ppi > 0)
{
// Convert to pixels per metre from pixels per inch
png_set_pHYs(png_ptr, info_ptr, t_ppi / 0.0254, t_ppi / 0.0254, PNG_RESOLUTION_METER);
}
}
}
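// Worked example (informative): a density of 72 pixels per inch is written to
// the pHYs chunk as 72 / 0.0254 ~= 2835 pixels per metre; the reader above
// recovers 72 via floor(t_X * 0.0254 + 0.5).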
bool MCImageEncodePNG(MCImageIndexedBitmap *p_indexed, MCImageMetadata *p_metadata, IO_handle p_stream, uindex_t &r_bytes_written)
{
bool t_success = true;
MCPNGWriteContext t_context;
t_context.stream = p_stream;
t_context.byte_count = 0;
png_structp t_png_ptr = nil;
png_infop t_info_ptr = nil;
png_color *t_png_palette = nil;
png_byte *t_png_transparency = nil;
png_bytep t_data_ptr = nil;
uindex_t t_stride = 0;
/*init png stuff*/
if (t_success)
{
t_success = nil != (t_png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING,
(png_voidp)NULL, (png_error_ptr)NULL,
(png_error_ptr)NULL));
}
if (t_success)
t_success = nil != (t_info_ptr = png_create_info_struct(t_png_ptr));
/*in case of png error*/
if (setjmp(png_jmpbuf(t_png_ptr)))
t_success = false;
if (t_success)
png_set_write_fn(t_png_ptr,(png_voidp)&t_context,fakewrite,fakeflush);
if (t_success)
{
png_set_IHDR(t_png_ptr, t_info_ptr, p_indexed->width, p_indexed->height, 8,
PNG_COLOR_TYPE_PALETTE, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_gAMA(t_png_ptr, t_info_ptr, 1/MCgamma);
}
// MERG-2014-07-16: [[ ImageMetadata ]] Parse the metadata array
if (t_success)
parsemetadata(t_png_ptr, t_info_ptr, p_metadata);
if (t_success)
t_success = MCMemoryNewArray(p_indexed->palette_size, t_png_palette);
/*create palette for 8 bit*/
if (t_success)
{
for (uindex_t i = 0; i < p_indexed->palette_size ; i++)
{
t_png_palette[i].red = p_indexed->palette[i].red >> 8;
t_png_palette[i].green = p_indexed->palette[i].green >> 8;
t_png_palette[i].blue = p_indexed->palette[i].blue >> 8;
}
png_set_PLTE(t_png_ptr, t_info_ptr, t_png_palette, p_indexed->palette_size);
}
if (MCImageIndexedBitmapHasTransparency(p_indexed))
{
if (t_success)
t_success = MCMemoryAllocate(p_indexed->palette_size, t_png_transparency);
if (t_success)
{
memset(t_png_transparency, 0xFF, p_indexed->palette_size);
t_png_transparency[p_indexed->transparent_index] = 0x00;
png_set_tRNS(t_png_ptr, t_info_ptr, t_png_transparency, p_indexed->palette_size, NULL);
}
}
if (t_success)
png_write_info(t_png_ptr, t_info_ptr);
if (t_success)
{
t_data_ptr = (png_bytep)p_indexed->data;
t_stride = p_indexed->stride;
}
if (t_success)
{
for (uindex_t i = 0; i < p_indexed->height; i++)
{
png_write_row(t_png_ptr, t_data_ptr);
t_data_ptr += t_stride;
}
}
if (t_success)
png_write_end(t_png_ptr, t_info_ptr);
if (t_png_ptr != nil)
png_destroy_write_struct(&t_png_ptr, &t_info_ptr);
if (t_png_palette != nil)
MCMemoryDeleteArray(t_png_palette);
if (t_png_transparency != nil)
MCMemoryDeallocate(t_png_transparency);
if (t_success)
r_bytes_written = t_context.byte_count;
return t_success;
}
bool MCImageEncodePNG(MCImageBitmap *p_bitmap, MCImageMetadata *p_metadata, IO_handle p_stream, uindex_t &r_bytes_written)
{
bool t_success = true;
MCPNGWriteContext t_context;
t_context.stream = p_stream;
t_context.byte_count = 0;
png_structp t_png_ptr = nil;
png_infop t_info_ptr = nil;
png_color *t_png_palette = nil;
png_byte *t_png_transparency = nil;
png_bytep t_data_ptr = nil;
uindex_t t_stride = 0;
MCImageIndexedBitmap *t_indexed = nil;
if (MCImageConvertBitmapToIndexed(p_bitmap, false, t_indexed))
{
t_success = MCImageEncodePNG(t_indexed, p_metadata, p_stream, r_bytes_written);
MCImageFreeIndexedBitmap(t_indexed);
return t_success;
}
/*init png stuff*/
if (t_success)
{
t_success = nil != (t_png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING,
(png_voidp)NULL, (png_error_ptr)NULL,
(png_error_ptr)NULL));
}
if (t_success)
t_success = nil != (t_info_ptr = png_create_info_struct(t_png_ptr));
/*in case of png error*/
if (setjmp(png_jmpbuf(t_png_ptr)))
t_success = false;
if (t_success)
png_set_write_fn(t_png_ptr,(png_voidp)&t_context,fakewrite,fakeflush);
bool t_fully_opaque = true;
if (t_success)
{
t_fully_opaque = !MCImageBitmapHasTransparency(p_bitmap);
png_set_IHDR(t_png_ptr, t_info_ptr, p_bitmap->width, p_bitmap->height, 8,
t_fully_opaque ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_gAMA(t_png_ptr, t_info_ptr, 1/MCgamma);
}
// MERG-2014-07-16: [[ ImageMetadata ]] Parse the metadata array
if (t_success)
parsemetadata(t_png_ptr, t_info_ptr, p_metadata);
if (t_success)
{
png_write_info(t_png_ptr, t_info_ptr);
if (t_fully_opaque)
png_set_filler(t_png_ptr, 0, MCPNG_FILLER_POSITION);
MCPNGSetNativePixelFormat(t_png_ptr);
}
if (t_success)
{
t_data_ptr = (png_bytep)p_bitmap->data;
t_stride = p_bitmap->stride;
}
if (t_success)
{
for (uindex_t i = 0; i < p_bitmap->height; i++)
{
png_write_row(t_png_ptr, t_data_ptr);
t_data_ptr += t_stride;
}
}
if (t_success)
png_write_end(t_png_ptr, t_info_ptr);
if (t_png_ptr != nil)
png_destroy_write_struct(&t_png_ptr, &t_info_ptr);
if (t_png_palette != nil)
MCMemoryDeleteArray(t_png_palette);
if (t_png_transparency != nil)
MCMemoryDeallocate(t_png_transparency);
if (t_success)
r_bytes_written = t_context.byte_count;
return t_success;
}
////////////////////////////////////////////////////////////////////////////////<|fim▁end|> | if (t_success)
t_success = GetGeometry(t_width, t_height);
|
<|file_name|>CWE762_Mismatched_Memory_Management_Routines__delete_int64_t_calloc_52c.cpp<|end_file_name|><|fim▁begin|>/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE762_Mismatched_Memory_Management_Routines__delete_int64_t_calloc_52c.cpp
Label Definition File: CWE762_Mismatched_Memory_Management_Routines__delete.label.xml
Template File: sources-sinks-52c.tmpl.cpp
*/
/*
* @description
* CWE: 762 Mismatched Memory Management Routines
* BadSource: calloc Allocate data using calloc()
* GoodSource: Allocate data using new
* Sinks:
* GoodSink: Deallocate data using free()
* BadSink : Deallocate data using delete
* Flow Variant: 52 Data flow: data passed as an argument from one function to another to another in three different source files
*
* */
#include "std_testcase.h"
namespace CWE762_Mismatched_Memory_Management_Routines__delete_int64_t_calloc_52
{
#ifndef OMITBAD
void badSink_c(int64_t * data)
{
/* POTENTIAL FLAW: Deallocate memory using delete - the source memory allocation function may
* require a call to free() to deallocate the memory */
delete data;
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B uses the GoodSource with the BadSink */
void goodG2BSink_c(int64_t * data)
{
/* POTENTIAL FLAW: Deallocate memory using delete - the source memory allocation function may
* require a call to free() to deallocate the memory */
delete data;
}
<|fim▁hole|>{
/* FIX: Deallocate the memory using free() */
free(data);
}
#endif /* OMITGOOD */
} /* close namespace */<|fim▁end|> |
/* goodB2G uses the BadSource with the GoodSink */
void goodB2GSink_c(int64_t * data)
|
<|file_name|>filterField.js<|end_file_name|><|fim▁begin|>'use strict';
var inheritance = require('./../../helpers/inheritance'),
Field = require('./field');
var FilterField = function(){};
inheritance.inherits(Field,FilterField);
FilterField.prototype.isOpen = function(){
return this.world.helper.elementGetter(this._root,this._data.elements.body).isDisplayed();
};
FilterField.prototype.accordionSelf = function(status){
var _this=this;<|fim▁hole|> switch(status){
case 'open':
return _this.isOpen()
.then(function(is){
if(!is){
return _this._root.scrollIntoView()
.then(function(){
return _this._root.element(by.css('span.filter__sub-title')).click();
})
.then(function(){
return _this.world.helper.elementGetter(_this._root,_this._data.elements.body).waitToBeCompletelyVisibleAndStable();
});
}
});
case 'close':
return _this.isOpen()
.then(function(is){
if(is){
return _this._root.scrollIntoView()
.then(function(){
return _this._root.element(by.css('span.filter__sub-title')).click();
})
.then(function(){
return _this.world.helper.elementGetter(_this._root,_this._data.elements.body).waitToBeHidden();
});
}
});
default:
throw new Error('Wrong status of slider: '+status);
}
};
module.exports = FilterField;<|fim▁end|> | |
<|file_name|>ftx.go<|end_file_name|><|fim▁begin|>package ftx
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/common/crypto"
"github.com/thrasher-corp/gocryptotrader/currency"
exchange "github.com/thrasher-corp/gocryptotrader/exchanges"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/order"
"github.com/thrasher-corp/gocryptotrader/exchanges/request"
)
// FTX is the overarching type across this package
type FTX struct {
exchange.Base
}
const (
ftxAPIURL = "https://ftx.com/api"
// Public endpoints
getMarkets = "/markets"
getMarket = "/markets/"
getOrderbook = "/markets/%s/orderbook?depth=%s"
getTrades = "/markets/%s/trades"
getHistoricalData = "/markets/%s/candles"
getFutures = "/futures"
getFuture = "/futures/"
getFutureStats = "/futures/%s/stats"
getFundingRates = "/funding_rates"
getIndexWeights = "/indexes/%s/weights"
getAllWalletBalances = "/wallet/all_balances"
getIndexCandles = "/indexes/%s/candles"
// Authenticated endpoints
getAccountInfo = "/account"
getPositions = "/positions"
setLeverage = "/account/leverage"
getCoins = "/wallet/coins"
getBalances = "/wallet/balances"
getDepositAddress = "/wallet/deposit_address/"
getDepositHistory = "/wallet/deposits"
getWithdrawalHistory = "/wallet/withdrawals"
withdrawRequest = "/wallet/withdrawals"
getOpenOrders = "/orders"
getOrderHistory = "/orders/history"
getOpenTriggerOrders = "/conditional_orders"
getTriggerOrderTriggers = "/conditional_orders/%s/triggers"
getTriggerOrderHistory = "/conditional_orders/history"
placeOrder = "/orders"
placeTriggerOrder = "/conditional_orders"
modifyOrder = "/orders/%s/modify"
modifyOrderByClientID = "/orders/by_client_id/%s/modify"
modifyTriggerOrder = "/conditional_orders/%s/modify"
getOrderStatus = "/orders/"
getOrderStatusByClientID = "/orders/by_client_id/"
deleteOrder = "/orders/"
deleteOrderByClientID = "/orders/by_client_id/"
cancelTriggerOrder = "/conditional_orders/"
getFills = "/fills"
getFundingPayments = "/funding_payments"
getLeveragedTokens = "/lt/tokens"
getTokenInfo = "/lt/"
getLTBalances = "/lt/balances"
getLTCreations = "/lt/creations"
requestLTCreation = "/lt/%s/create"
getLTRedemptions = "/lt/redemptions"
requestLTRedemption = "/lt/%s/redeem"
getListQuotes = "/options/requests"
getMyQuotesRequests = "/options/my_requests"
createQuoteRequest = "/options/requests"
deleteQuote = "/options/requests/"
endpointQuote = "/options/requests/%s/quotes"
getMyQuotes = "/options/my_quotes"
deleteMyQuote = "/options/quotes/"
acceptQuote = "/options/quotes/%s/accept"
getOptionsInfo = "/options/account_info"
getOptionsPositions = "/options/positions"
getPublicOptionsTrades = "/options/trades"
getOptionsFills = "/options/fills"
requestOTCQuote = "/otc/quotes"
getOTCQuoteStatus = "/otc/quotes/"
acceptOTCQuote = "/otc/quotes/%s/accept"
subaccounts = "/subaccounts"
subaccountsUpdateName = "/subaccounts/update_name"
subaccountsBalance = "/subaccounts/%s/balances"
subaccountsTransfer = "/subaccounts/transfer"
// Margin Endpoints
marginBorrowRates = "/spot_margin/borrow_rates"
marginLendingRates = "/spot_margin/lending_rates"
marginLendingHistory = "/spot_margin/history"
dailyBorrowedAmounts = "/spot_margin/borrow_summary"
marginMarketInfo = "/spot_margin/market_info?market=%s"
marginBorrowHistory = "/spot_margin/borrow_history"
marginLendHistory = "/spot_margin/lending_history"
marginLendingOffers = "/spot_margin/offers"
marginLendingInfo = "/spot_margin/lending_info"
submitLendingOrder = "/spot_margin/offers"
// Staking endpoints
stakes = "/staking/stakes"
unstakeRequests = "/staking/unstake_requests"
stakeBalances = "/staking/balances"
stakingRewards = "/staking/staking_rewards"
serumStakes = "/srm_stakes/stakes"
// Other Consts
trailingStopOrderType = "trailingStop"
takeProfitOrderType = "takeProfit"
closedStatus = "closed"
spotString = "spot"
futuresString = "future"
ratePeriod = time.Second
rateLimit = 30
)
var (
errInvalidOrderID = errors.New("invalid order ID")
errStartTimeCannotBeAfterEndTime = errors.New("start timestamp cannot be after end timestamp")
errSubaccountNameMustBeSpecified = errors.New("a subaccount name must be specified")
errSubaccountUpdateNameInvalid = errors.New("invalid subaccount old/new name")
errCoinMustBeSpecified = errors.New("a coin must be specified")
errSubaccountTransferSizeGreaterThanZero = errors.New("transfer size must be greater than 0")
errSubaccountTransferSourceDestinationMustNotBeEqual = errors.New("subaccount transfer source and destination must not be the same value")
errUnrecognisedOrderStatus = errors.New("unrecognised order status received")
errInvalidOrderAmounts = errors.New("filled amount should not exceed order amount")
validResolutionData = []int64{15, 60, 300, 900, 3600, 14400, 86400}
)
// GetHistoricalIndex gets historical index data
func (f *FTX) GetHistoricalIndex(ctx context.Context, indexName string, resolution int64, startTime, endTime time.Time) ([]OHLCVData, error) {
params := url.Values{}
if indexName == "" {
return nil, errors.New("indexName is a mandatory field")
}
params.Set("index_name", indexName)<|fim▁hole|> err := checkResolution(resolution)
if err != nil {
return nil, err
}
params.Set("resolution", strconv.FormatInt(resolution, 10))
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
resp := struct {
Data []OHLCVData `json:"result"`
}{}
endpoint := common.EncodeURLValues(fmt.Sprintf(getIndexCandles, indexName), params)
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, endpoint, &resp)
}
func checkResolution(res int64) error {
for x := range validResolutionData {
if validResolutionData[x] == res {
return nil
}
}
return errors.New("resolution data is a mandatory field and the data provided is invalid")
}
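// Informative note: the resolutions accepted above are in seconds and
// correspond to 15s, 1m, 5m, 15m, 1h, 4h and 1d candles. For example:
//	checkResolution(300) // nil: 5 minute candles are supported
//	checkResolution(120) // error: 2 minute candles are not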
// GetMarkets gets market data
func (f *FTX) GetMarkets(ctx context.Context) ([]MarketData, error) {
resp := struct {
Data []MarketData `json:"result"`
}{}
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, getMarkets, &resp)
}
// GetMarket gets market data for a provided asset type
func (f *FTX) GetMarket(ctx context.Context, marketName string) (MarketData, error) {
resp := struct {
Data MarketData `json:"result"`
}{}
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, getMarket+marketName,
&resp)
}
// GetOrderbook gets orderbook for a given market with a given depth (default depth 20)
func (f *FTX) GetOrderbook(ctx context.Context, marketName string, depth int64) (OrderbookData, error) {
result := struct {
Data TempOBData `json:"result"`
}{}
	// The endpoint returns zero asks if we send an explicit depth of 0, so
	// fall back to the documented default of 20.
	strDepth := "20"
if depth != 0 {
strDepth = strconv.FormatInt(depth, 10)
}
var resp OrderbookData
err := f.SendHTTPRequest(ctx, exchange.RestSpot, fmt.Sprintf(getOrderbook, marketName, strDepth), &result)
if err != nil {
return resp, err
}
resp.MarketName = marketName
for x := range result.Data.Asks {
resp.Asks = append(resp.Asks, OData{
Price: result.Data.Asks[x][0],
Size: result.Data.Asks[x][1],
})
}
for y := range result.Data.Bids {
resp.Bids = append(resp.Bids, OData{
Price: result.Data.Bids[y][0],
Size: result.Data.Bids[y][1],
})
}
return resp, nil
}
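// Usage sketch (illustrative, not part of the original source; assumes an
// initialised *FTX named f and a listed market):
//	ob, err := f.GetOrderbook(context.Background(), "BTC/USD", 5)
//	if err == nil && len(ob.Asks) > 0 {
//		fmt.Printf("best ask: %f\n", ob.Asks[0].Price)
//	}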
// GetTrades gets trades based on the conditions specified
func (f *FTX) GetTrades(ctx context.Context, marketName string, startTime, endTime, limit int64) ([]TradeData, error) {
if marketName == "" {
return nil, errors.New("a market pair must be specified")
}
params := url.Values{}
if limit != 0 {
params.Set("limit", strconv.FormatInt(limit, 10))
}
if startTime > 0 && endTime > 0 {
if startTime >= (endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime, 10))
params.Set("end_time", strconv.FormatInt(endTime, 10))
}
resp := struct {
Data []TradeData `json:"result"`
}{}
endpoint := common.EncodeURLValues(fmt.Sprintf(getTrades, marketName), params)
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, endpoint, &resp)
}
// GetHistoricalData gets historical OHLCV data for a given market pair
func (f *FTX) GetHistoricalData(ctx context.Context, marketName string, timeInterval, limit int64, startTime, endTime time.Time) ([]OHLCVData, error) {
if marketName == "" {
return nil, errors.New("a market pair must be specified")
}
err := checkResolution(timeInterval)
if err != nil {
return nil, err
}
params := url.Values{}
params.Set("resolution", strconv.FormatInt(timeInterval, 10))
if limit != 0 {
params.Set("limit", strconv.FormatInt(limit, 10))
}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
resp := struct {
Data []OHLCVData `json:"result"`
}{}
endpoint := common.EncodeURLValues(fmt.Sprintf(getHistoricalData, marketName), params)
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, endpoint, &resp)
}
// GetFutures gets data on futures
func (f *FTX) GetFutures(ctx context.Context) ([]FuturesData, error) {
resp := struct {
Data []FuturesData `json:"result"`
}{}
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, getFutures, &resp)
}
// GetFuture gets data on a given future
func (f *FTX) GetFuture(ctx context.Context, futureName string) (FuturesData, error) {
resp := struct {
Data FuturesData `json:"result"`
}{}
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, getFuture+futureName, &resp)
}
// GetFutureStats gets data on a given future's stats
func (f *FTX) GetFutureStats(ctx context.Context, futureName string) (FutureStatsData, error) {
resp := struct {
Data FutureStatsData `json:"result"`
}{}
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, fmt.Sprintf(getFutureStats, futureName), &resp)
}
// GetFundingRates gets data on funding rates
func (f *FTX) GetFundingRates(ctx context.Context, startTime, endTime time.Time, future string) ([]FundingRatesData, error) {
resp := struct {
Data []FundingRatesData `json:"result"`
}{}
params := url.Values{}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return resp.Data, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
if future != "" {
params.Set("future", future)
}
endpoint := common.EncodeURLValues(getFundingRates, params)
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, endpoint, &resp)
}
// GetIndexWeights gets index weights
func (f *FTX) GetIndexWeights(ctx context.Context, index string) (IndexWeights, error) {
var resp IndexWeights
return resp, f.SendHTTPRequest(ctx, exchange.RestSpot, fmt.Sprintf(getIndexWeights, index), &resp)
}
// SendHTTPRequest sends an unauthenticated HTTP request
func (f *FTX) SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error {
endpoint, err := f.API.Endpoints.GetURL(ep)
if err != nil {
return err
}
item := &request.Item{
Method: http.MethodGet,
Path: endpoint + path,
Result: result,
Verbose: f.Verbose,
HTTPDebugging: f.HTTPDebugging,
HTTPRecording: f.HTTPRecording,
}
return f.SendPayload(ctx, request.Unset, func() (*request.Item, error) {
return item, nil
})
}
// GetMarginBorrowRates gets borrowing rates for margin trading
func (f *FTX) GetMarginBorrowRates(ctx context.Context) ([]MarginFundingData, error) {
r := struct {
Data []MarginFundingData `json:"result"`
}{}
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, marginBorrowRates, nil, &r)
}
// GetMarginLendingRates gets lending rates for margin trading
func (f *FTX) GetMarginLendingRates(ctx context.Context) ([]MarginFundingData, error) {
r := struct {
Data []MarginFundingData `json:"result"`
}{}
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, marginLendingRates, nil, &r)
}
// MarginDailyBorrowedAmounts gets daily borrowed amounts for margin
func (f *FTX) MarginDailyBorrowedAmounts(ctx context.Context) ([]MarginDailyBorrowStats, error) {
r := struct {
Data []MarginDailyBorrowStats `json:"result"`
}{}
return r.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, dailyBorrowedAmounts, &r)
}
// GetMarginMarketInfo gets margin market data
func (f *FTX) GetMarginMarketInfo(ctx context.Context, market string) ([]MarginMarketInfo, error) {
r := struct {
Data []MarginMarketInfo `json:"result"`
}{}
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, fmt.Sprintf(marginMarketInfo, market), nil, &r)
}
// GetMarginBorrowHistory gets the margin borrow history data
func (f *FTX) GetMarginBorrowHistory(ctx context.Context, startTime, endTime time.Time) ([]MarginTransactionHistoryData, error) {
r := struct {
Data []MarginTransactionHistoryData `json:"result"`
}{}
params := url.Values{}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
endpoint := common.EncodeURLValues(marginBorrowHistory, params)
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &r)
}
// GetMarginMarketLendingHistory gets the markets margin lending rate history
func (f *FTX) GetMarginMarketLendingHistory(ctx context.Context, coin currency.Code, startTime, endTime time.Time) ([]MarginTransactionHistoryData, error) {
r := struct {
Data []MarginTransactionHistoryData `json:"result"`
}{}
params := url.Values{}
if !coin.IsEmpty() {
params.Set("coin", coin.Upper().String())
}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
endpoint := common.EncodeURLValues(marginLendingHistory, params)
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, params, &r)
}
// GetMarginLendingHistory gets margin lending history
func (f *FTX) GetMarginLendingHistory(ctx context.Context, coin currency.Code, startTime, endTime time.Time) ([]MarginTransactionHistoryData, error) {
r := struct {
Data []MarginTransactionHistoryData `json:"result"`
}{}
params := url.Values{}
if !coin.IsEmpty() {
params.Set("coin", coin.Upper().String())
}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
endpoint := common.EncodeURLValues(marginLendHistory, params)
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, marginLendHistory, endpoint, &r)
}
// GetMarginLendingOffers gets margin lending offers
func (f *FTX) GetMarginLendingOffers(ctx context.Context) ([]LendingOffersData, error) {
r := struct {
Data []LendingOffersData `json:"result"`
}{}
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, marginLendingOffers, nil, &r)
}
// GetLendingInfo gets margin lending info
func (f *FTX) GetLendingInfo(ctx context.Context) ([]LendingInfoData, error) {
r := struct {
Data []LendingInfoData `json:"result"`
}{}
return r.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, marginLendingInfo, nil, &r)
}
// SubmitLendingOffer submits an offer for margin lending
func (f *FTX) SubmitLendingOffer(ctx context.Context, coin currency.Code, size, rate float64) error {
resp := struct {
Result string `json:"result"`
Success bool `json:"success"`
}{}
req := make(map[string]interface{})
req["coin"] = coin.Upper().String()
req["size"] = size
req["rate"] = rate
if err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, marginLendingOffers, req, &resp); err != nil {
return err
}
if !resp.Success {
return errors.New(resp.Result)
}
return nil
}
// GetAccountInfo gets account info
func (f *FTX) GetAccountInfo(ctx context.Context) (AccountInfoData, error) {
resp := struct {
Data AccountInfoData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getAccountInfo, nil, &resp)
}
// GetPositions gets the users positions
func (f *FTX) GetPositions(ctx context.Context) ([]PositionData, error) {
resp := struct {
Data []PositionData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getPositions, nil, &resp)
}
// ChangeAccountLeverage changes default leverage used by account
func (f *FTX) ChangeAccountLeverage(ctx context.Context, leverage float64) error {
req := make(map[string]interface{})
req["leverage"] = leverage
return f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, setLeverage, req, nil)
}
// GetCoins gets coins' data in the account wallet
func (f *FTX) GetCoins(ctx context.Context) ([]WalletCoinsData, error) {
resp := struct {
Data []WalletCoinsData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getCoins, nil, &resp)
}
// GetBalances gets balances of the account
func (f *FTX) GetBalances(ctx context.Context) ([]WalletBalance, error) {
resp := struct {
Data []WalletBalance `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getBalances, nil, &resp)
}
// GetAllWalletBalances gets all wallets' balances
func (f *FTX) GetAllWalletBalances(ctx context.Context) (AllWalletBalances, error) {
resp := struct {
Data AllWalletBalances `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getAllWalletBalances, nil, &resp)
}
// FetchDepositAddress gets deposit address for a given coin
func (f *FTX) FetchDepositAddress(ctx context.Context, coin currency.Code, chain string) (*DepositData, error) {
resp := struct {
Data DepositData `json:"result"`
}{}
vals := url.Values{}
if chain != "" {
vals.Set("method", strings.ToLower(chain))
}
path := common.EncodeURLValues(getDepositAddress+coin.Upper().String(), vals)
return &resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, path, nil, &resp)
}
// FetchDepositHistory gets deposit history
func (f *FTX) FetchDepositHistory(ctx context.Context) ([]DepositItem, error) {
resp := struct {
Data []DepositItem `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getDepositHistory, nil, &resp)
}
// FetchWithdrawalHistory gets withdrawal history
func (f *FTX) FetchWithdrawalHistory(ctx context.Context) ([]WithdrawItem, error) {
resp := struct {
Data []WithdrawItem `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getWithdrawalHistory, nil, &resp)
}
// Withdraw sends a withdrawal request
func (f *FTX) Withdraw(ctx context.Context, coin currency.Code, address, tag, password, chain, code string, size float64) (*WithdrawItem, error) {
if coin.IsEmpty() || address == "" || size == 0 {
return nil, errors.New("coin, address and size must be specified")
}
req := make(map[string]interface{})
req["coin"] = coin.Upper().String()
req["size"] = size
req["address"] = address
if code != "" {
req["code"] = code
}
if tag != "" {
req["tag"] = tag
}
if password != "" {
req["password"] = password
}
if chain != "" {
req["method"] = chain
}
resp := struct {
Data WithdrawItem `json:"result"`
}{}
return &resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, withdrawRequest, req, &resp)
}
// GetOpenOrders gets open orders
func (f *FTX) GetOpenOrders(ctx context.Context, marketName string) ([]OrderData, error) {
params := url.Values{}
if marketName != "" {
params.Set("market", marketName)
}
resp := struct {
Data []OrderData `json:"result"`
}{}
endpoint := common.EncodeURLValues(getOpenOrders, params)
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// FetchOrderHistory gets order history
func (f *FTX) FetchOrderHistory(ctx context.Context, marketName string, startTime, endTime time.Time, limit string) ([]OrderData, error) {
resp := struct {
Data []OrderData `json:"result"`
}{}
params := url.Values{}
if marketName != "" {
params.Set("market", marketName)
}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return resp.Data, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
if limit != "" {
params.Set("limit", limit)
}
endpoint := common.EncodeURLValues(getOrderHistory, params)
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// GetOpenTriggerOrders gets trigger orders that are currently open
func (f *FTX) GetOpenTriggerOrders(ctx context.Context, marketName, orderType string) ([]TriggerOrderData, error) {
params := url.Values{}
if marketName != "" {
params.Set("market", marketName)
}
if orderType != "" {
params.Set("type", orderType)
}
resp := struct {
Data []TriggerOrderData `json:"result"`
}{}
endpoint := common.EncodeURLValues(getOpenTriggerOrders, params)
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// GetTriggerOrderTriggers gets the triggers for a given trigger order
func (f *FTX) GetTriggerOrderTriggers(ctx context.Context, orderID string) ([]TriggerData, error) {
resp := struct {
Data []TriggerData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, fmt.Sprintf(getTriggerOrderTriggers, orderID), nil, &resp)
}
// GetTriggerOrderHistory gets the trigger order history
func (f *FTX) GetTriggerOrderHistory(ctx context.Context, marketName string, startTime, endTime time.Time, side, orderType, limit string) ([]TriggerOrderData, error) {
params := url.Values{}
if marketName != "" {
params.Set("market", marketName)
}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
if side != "" {
params.Set("side", side)
}
if orderType != "" {
params.Set("type", orderType)
}
if limit != "" {
params.Set("limit", limit)
}
resp := struct {
Data []TriggerOrderData `json:"result"`
}{}
endpoint := common.EncodeURLValues(getTriggerOrderHistory, params)
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// Order places an order
func (f *FTX) Order(
ctx context.Context,
marketName, side, orderType string,
reduceOnly, ioc, postOnly bool,
clientID string,
price, size float64,
) (OrderData, error) {
req := make(map[string]interface{})
req["market"] = marketName
req["side"] = side
req["price"] = price
req["type"] = orderType
req["size"] = size
if reduceOnly {
req["reduceOnly"] = reduceOnly
}
if ioc {
req["ioc"] = ioc
}
if postOnly {
req["postOnly"] = postOnly
}
if clientID != "" {
req["clientId"] = clientID
}
resp := struct {
Data OrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, placeOrder, req, &resp)
}
// TriggerOrder places a trigger order
func (f *FTX) TriggerOrder(ctx context.Context, marketName, side, orderType, reduceOnly, retryUntilFilled string, size, triggerPrice, orderPrice, trailValue float64) (TriggerOrderData, error) {
req := make(map[string]interface{})
req["market"] = marketName
req["side"] = side
req["type"] = orderType
req["size"] = size
if reduceOnly != "" {
req["reduceOnly"] = reduceOnly
}
if retryUntilFilled != "" {
req["retryUntilFilled"] = retryUntilFilled
}
if orderType == order.Stop.Lower() || orderType == "" {
req["triggerPrice"] = triggerPrice
req["orderPrice"] = orderPrice
}
if orderType == trailingStopOrderType {
req["trailValue"] = trailValue
}
if orderType == takeProfitOrderType {
req["triggerPrice"] = triggerPrice
req["orderPrice"] = orderPrice
}
resp := struct {
Data TriggerOrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, placeTriggerOrder, req, &resp)
}
// ModifyPlacedOrder modifies a placed order
func (f *FTX) ModifyPlacedOrder(ctx context.Context, orderID, clientID string, price, size float64) (OrderData, error) {
req := make(map[string]interface{})
req["price"] = price
req["size"] = size
if clientID != "" {
req["clientID"] = clientID
}
resp := struct {
Data OrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(modifyOrder, orderID), req, &resp)
}
// ModifyOrderByClientID modifies a placed order via clientOrderID
func (f *FTX) ModifyOrderByClientID(ctx context.Context, clientOrderID, clientID string, price, size float64) (OrderData, error) {
req := make(map[string]interface{})
req["price"] = price
req["size"] = size
if clientID != "" {
req["clientID"] = clientID
}
resp := struct {
Data OrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(modifyOrderByClientID, clientOrderID), req, &resp)
}
// ModifyTriggerOrder modifies an existing trigger order
// Choices for ordertype include stop, trailingStop, takeProfit
func (f *FTX) ModifyTriggerOrder(ctx context.Context, orderID, orderType string, size, triggerPrice, orderPrice, trailValue float64) (TriggerOrderData, error) {
req := make(map[string]interface{})
req["size"] = size
if orderType == order.Stop.Lower() || orderType == "" {
req["triggerPrice"] = triggerPrice
req["orderPrice"] = orderPrice
}
if orderType == trailingStopOrderType {
req["trailValue"] = trailValue
}
if orderType == takeProfitOrderType {
req["triggerPrice"] = triggerPrice
req["orderPrice"] = orderPrice
}
resp := struct {
Data TriggerOrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(modifyTriggerOrder, orderID), req, &resp)
}
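// Usage sketch (illustrative; the order ID and values are assumptions):
// resizing a trailing stop to 2 units with a new trail value of -0.05:
//	_, err := f.ModifyTriggerOrder(context.Background(), "12345", "trailingStop", 2, 0, 0, -0.05)
// The trigger/order price arguments are ignored for this order type.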
// GetOrderStatus gets the order status of a given orderID
func (f *FTX) GetOrderStatus(ctx context.Context, orderID string) (OrderData, error) {
resp := struct {
Data OrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getOrderStatus+orderID, nil, &resp)
}
// GetOrderStatusByClientID gets the order status of a given clientOrderID
func (f *FTX) GetOrderStatusByClientID(ctx context.Context, clientOrderID string) (OrderData, error) {
resp := struct {
Data OrderData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getOrderStatusByClientID+clientOrderID, nil, &resp)
}
func (f *FTX) deleteOrderByPath(ctx context.Context, path string) (string, error) {
resp := struct {
Result string `json:"result"`
Success bool `json:"success"`
Error string `json:"error"`
}{}
err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodDelete, path, nil, &resp)
// If there is an error reported, but the resp struct reports one of a very few
// specific error causes, we still consider this a successful cancellation.
if err != nil && !resp.Success && (resp.Error == "Order already closed" || resp.Error == "Order already queued for cancellation") {
return resp.Error, nil
}
return resp.Result, err
}
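// Design note (informative): the two error strings matched above indicate the
// order is already closed or already queued for cancellation; in both cases
// the desired end state (no live order) has been reached, so the Delete*
// wrappers below report those responses as successful cancellations.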
// DeleteOrder deletes an order
func (f *FTX) DeleteOrder(ctx context.Context, orderID string) (string, error) {
if orderID == "" {
return "", errInvalidOrderID
}
return f.deleteOrderByPath(ctx, deleteOrder+orderID)
}
// DeleteOrderByClientID deletes an order by its client order ID
func (f *FTX) DeleteOrderByClientID(ctx context.Context, clientID string) (string, error) {
if clientID == "" {
return "", errInvalidOrderID
}
return f.deleteOrderByPath(ctx, deleteOrderByClientID+clientID)
}
// DeleteTriggerOrder deletes a trigger order
func (f *FTX) DeleteTriggerOrder(ctx context.Context, orderID string) (string, error) {
if orderID == "" {
return "", errInvalidOrderID
}
return f.deleteOrderByPath(ctx, cancelTriggerOrder+orderID)
}
// GetFills gets fills' data
func (f *FTX) GetFills(ctx context.Context, market, limit string, startTime, endTime time.Time) ([]FillsData, error) {
resp := struct {
Data []FillsData `json:"result"`
}{}
params := url.Values{}
if market != "" {
params.Set("market", market)
}
if limit != "" {
params.Set("limit", limit)
}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return resp.Data, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
endpoint := common.EncodeURLValues(getFills, params)
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// GetFundingPayments gets funding payments
func (f *FTX) GetFundingPayments(ctx context.Context, startTime, endTime time.Time, future string) ([]FundingPaymentsData, error) {
resp := struct {
Data []FundingPaymentsData `json:"result"`
}{}
params := url.Values{}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return resp.Data, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
if future != "" {
params.Set("future", future)
}
endpoint := common.EncodeURLValues(getFundingPayments, params)
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// ListLeveragedTokens lists leveraged tokens
func (f *FTX) ListLeveragedTokens(ctx context.Context) ([]LeveragedTokensData, error) {
resp := struct {
Data []LeveragedTokensData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getLeveragedTokens, nil, &resp)
}
// GetTokenInfo gets token info
func (f *FTX) GetTokenInfo(ctx context.Context, tokenName string) ([]LeveragedTokensData, error) {
resp := struct {
Data []LeveragedTokensData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getTokenInfo+tokenName, nil, &resp)
}
// ListLTBalances gets leveraged tokens' balances
func (f *FTX) ListLTBalances(ctx context.Context) ([]LTBalanceData, error) {
resp := struct {
Data []LTBalanceData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getLTBalances, nil, &resp)
}
// ListLTCreations lists the leveraged tokens' creation requests
func (f *FTX) ListLTCreations(ctx context.Context) ([]LTCreationData, error) {
resp := struct {
Data []LTCreationData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getLTCreations, nil, &resp)
}
// RequestLTCreation sends a request to create a leveraged token
func (f *FTX) RequestLTCreation(ctx context.Context, tokenName string, size float64) (RequestTokenCreationData, error) {
req := make(map[string]interface{})
req["size"] = size
resp := struct {
Data RequestTokenCreationData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(requestLTCreation, tokenName), req, &resp)
}
// ListLTRedemptions lists the leveraged tokens' redemption requests
func (f *FTX) ListLTRedemptions(ctx context.Context) ([]LTRedemptionData, error) {
resp := struct {
Data []LTRedemptionData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getLTRedemptions, nil, &resp)
}
// RequestLTRedemption sends a request to redeem a leveraged token
func (f *FTX) RequestLTRedemption(ctx context.Context, tokenName string, size float64) (LTRedemptionRequestData, error) {
req := make(map[string]interface{})
req["size"] = size
resp := struct {
Data LTRedemptionRequestData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(requestLTRedemption, tokenName), req, &resp)
}
// GetQuoteRequests gets a list of quote requests
func (f *FTX) GetQuoteRequests(ctx context.Context) ([]QuoteRequestData, error) {
resp := struct {
Data []QuoteRequestData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getListQuotes, nil, &resp)
}
// GetYourQuoteRequests gets a list of your quote requests
func (f *FTX) GetYourQuoteRequests(ctx context.Context) ([]PersonalQuotesData, error) {
resp := struct {
Data []PersonalQuotesData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getMyQuotesRequests, nil, &resp)
}
// CreateQuoteRequest sends a request to create a quote
func (f *FTX) CreateQuoteRequest(ctx context.Context, underlying currency.Code, optionType, side string, expiry int64, requestExpiry string, strike, size, limitPrice, counterPartyID float64, hideLimitPrice bool) (CreateQuoteRequestData, error) {
req := make(map[string]interface{})
req["underlying"] = underlying.Upper().String()
req["type"] = optionType
req["side"] = side
req["strike"] = strike
req["expiry"] = expiry
req["size"] = size
if limitPrice != 0 {
req["limitPrice"] = limitPrice
}
if requestExpiry != "" {
req["requestExpiry"] = requestExpiry
}
if counterPartyID != 0 {
req["counterpartyId"] = counterPartyID
}
req["hideLimitPrice"] = hideLimitPrice
resp := struct {
Data CreateQuoteRequestData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, createQuoteRequest, req, &resp)
}
// DeleteQuote sends a request to cancel a quote
func (f *FTX) DeleteQuote(ctx context.Context, requestID string) (CancelQuoteRequestData, error) {
resp := struct {
Data CancelQuoteRequestData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodDelete, deleteQuote+requestID, nil, &resp)
}
// GetQuotesForYourQuote gets a list of quotes for your quote
func (f *FTX) GetQuotesForYourQuote(ctx context.Context, requestID string) (QuoteForQuoteData, error) {
var resp QuoteForQuoteData
return resp, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, fmt.Sprintf(endpointQuote, requestID), nil, &resp)
}
// MakeQuote makes a quote for a quote
func (f *FTX) MakeQuote(ctx context.Context, requestID, price string) ([]QuoteForQuoteData, error) {
	// The quoted price is submitted in the JSON request body, matching the
	// other authenticated POST endpoints.
	req := make(map[string]interface{})
	req["price"] = price
	resp := struct {
		Data []QuoteForQuoteData `json:"result"`
	}{}
	return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(endpointQuote, requestID), req, &resp)
}
// MyQuotes gets a list of my quotes for quotes
func (f *FTX) MyQuotes(ctx context.Context) ([]QuoteForQuoteData, error) {
resp := struct {
Data []QuoteForQuoteData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getMyQuotes, nil, &resp)
}
// DeleteMyQuote deletes my quote for quotes
func (f *FTX) DeleteMyQuote(ctx context.Context, quoteID string) ([]QuoteForQuoteData, error) {
resp := struct {
Data []QuoteForQuoteData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodDelete, deleteMyQuote+quoteID, nil, &resp)
}
// AcceptQuote accepts the quote for quote
func (f *FTX) AcceptQuote(ctx context.Context, quoteID string) ([]QuoteForQuoteData, error) {
resp := struct {
Data []QuoteForQuoteData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(acceptQuote, quoteID), nil, &resp)
}
// GetAccountOptionsInfo gets account's options' info
func (f *FTX) GetAccountOptionsInfo(ctx context.Context) (AccountOptionsInfoData, error) {
resp := struct {
Data AccountOptionsInfoData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getOptionsInfo, nil, &resp)
}
// GetOptionsPositions gets options' positions
func (f *FTX) GetOptionsPositions(ctx context.Context) ([]OptionsPositionsData, error) {
resp := struct {
Data []OptionsPositionsData `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getOptionsPositions, nil, &resp)
}
// GetPublicOptionsTrades gets public options trades
func (f *FTX) GetPublicOptionsTrades(ctx context.Context, startTime, endTime time.Time, limit string) ([]OptionsTradesData, error) {
params := url.Values{}
if !startTime.IsZero() && !endTime.IsZero() {
if startTime.After(endTime) {
return nil, errStartTimeCannotBeAfterEndTime
}
params.Set("start_time", strconv.FormatInt(startTime.Unix(), 10))
params.Set("end_time", strconv.FormatInt(endTime.Unix(), 10))
}
if limit != "" {
params.Set("limit", limit)
}
resp := struct {
Data []OptionsTradesData `json:"result"`
}{}
endpoint := common.EncodeURLValues(getPublicOptionsTrades, params)
return resp.Data, f.SendHTTPRequest(ctx, exchange.RestSpot, endpoint, &resp)
}
// GetOptionsFills gets fills data for options
func (f *FTX) GetOptionsFills(ctx context.Context, startTime, endTime time.Time, limit string) ([]OptionFillsData, error) {
resp := struct {
Data []OptionFillsData `json:"result"`
}{}
req := make(map[string]interface{})
	if !startTime.IsZero() && !endTime.IsZero() {
		if startTime.After(endTime) {
			return resp.Data, errStartTimeCannotBeAfterEndTime
		}
		req["start_time"] = strconv.FormatInt(startTime.Unix(), 10)
		req["end_time"] = strconv.FormatInt(endTime.Unix(), 10)
	}
if limit != "" {
req["limit"] = limit
}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, getOptionsFills, req, &resp)
}
// GetStakes returns a list of staked assets
func (f *FTX) GetStakes(ctx context.Context) ([]Stake, error) {
resp := struct {
Data []Stake `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, stakes, nil, &resp)
}
// GetUnstakeRequests returns a collection of unstake requests
func (f *FTX) GetUnstakeRequests(ctx context.Context) ([]UnstakeRequest, error) {
resp := struct {
Data []UnstakeRequest `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, unstakeRequests, nil, &resp)
}
// GetStakeBalances returns a collection of staked coin balances
func (f *FTX) GetStakeBalances(ctx context.Context) ([]StakeBalance, error) {
resp := struct {
Data []StakeBalance `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, stakeBalances, nil, &resp)
}
// UnstakeRequest unstakes an existing staked coin
func (f *FTX) UnstakeRequest(ctx context.Context, coin currency.Code, size float64) (*UnstakeRequest, error) {
resp := struct {
Data UnstakeRequest `json:"result"`
}{}
req := make(map[string]interface{})
req["coin"] = coin.Upper().String()
req["size"] = strconv.FormatFloat(size, 'f', -1, 64)
return &resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, unstakeRequests, req, &resp)
}
// CancelUnstakeRequest cancels a pending unstake request
func (f *FTX) CancelUnstakeRequest(ctx context.Context, requestID int64) (bool, error) {
resp := struct {
Result string
}{}
path := unstakeRequests + "/" + strconv.FormatInt(requestID, 10)
if err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodDelete, path, nil, &resp); err != nil {
return false, err
}
if resp.Result != "Cancelled" {
return false, errors.New("failed to cancel unstake request")
}
return true, nil
}
// GetStakingRewards returns a collection of staking rewards
func (f *FTX) GetStakingRewards(ctx context.Context) ([]StakeReward, error) {
resp := struct {
Data []StakeReward `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, stakingRewards, nil, &resp)
}
// StakeRequest submits a stake request based on the specified currency and size
func (f *FTX) StakeRequest(ctx context.Context, coin currency.Code, size float64) (*Stake, error) {
resp := struct {
Data Stake `json:"result"`
}{}
req := make(map[string]interface{})
req["coin"] = coin.Upper().String()
req["size"] = strconv.FormatFloat(size, 'f', -1, 64)
return &resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, serumStakes, req, &resp)
}
// SendAuthHTTPRequest sends an authenticated request
func (f *FTX) SendAuthHTTPRequest(ctx context.Context, ep exchange.URL, method, path string, data, result interface{}) error {
if !f.AllowAuthenticatedRequest() {
return fmt.Errorf("%s %w", f.Name, exchange.ErrAuthenticatedRequestWithoutCredentialsSet)
}
endpoint, err := f.API.Endpoints.GetURL(ep)
if err != nil {
return err
}
newRequest := func() (*request.Item, error) {
ts := strconv.FormatInt(time.Now().UnixMilli(), 10)
var body io.Reader
var hmac, payload []byte
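		// FTX request signing: the HMAC-SHA256 payload is the millisecond
		// timestamp, the HTTP method, the "/api"-prefixed request path and,
		// when a body is present, the JSON payload appended verbatim.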
sigPayload := ts + method + "/api" + path
if data != nil {
payload, err = json.Marshal(data)
if err != nil {
return nil, err
}
body = bytes.NewBuffer(payload)
sigPayload += string(payload)
}
hmac, err = crypto.GetHMAC(crypto.HashSHA256,
[]byte(sigPayload),
[]byte(f.API.Credentials.Secret))
if err != nil {
return nil, err
}
headers := make(map[string]string)
headers["FTX-KEY"] = f.API.Credentials.Key
headers["FTX-SIGN"] = crypto.HexEncodeToString(hmac)
headers["FTX-TS"] = ts
if f.API.Credentials.Subaccount != "" {
headers["FTX-SUBACCOUNT"] = url.QueryEscape(f.API.Credentials.Subaccount)
}
headers["Content-Type"] = "application/json"
return &request.Item{
Method: method,
Path: endpoint + path,
Headers: headers,
Body: body,
Result: result,
AuthRequest: true,
Verbose: f.Verbose,
HTTPDebugging: f.HTTPDebugging,
HTTPRecording: f.HTTPRecording,
}, nil
}
return f.SendPayload(ctx, request.Unset, newRequest)
}
// GetFee returns an estimate of fee based on type of transaction
func (f *FTX) GetFee(ctx context.Context, feeBuilder *exchange.FeeBuilder) (float64, error) {
var fee float64
if !f.GetAuthenticatedAPISupport(exchange.RestAuthentication) {
feeBuilder.FeeType = exchange.OfflineTradeFee
}
switch feeBuilder.FeeType {
case exchange.OfflineTradeFee:
fee = getOfflineTradeFee(feeBuilder)
default:
feeData, err := f.GetAccountInfo(ctx)
if err != nil {
return 0, err
}
switch feeBuilder.IsMaker {
case true:
fee = feeData.MakerFee * feeBuilder.Amount * feeBuilder.PurchasePrice
case false:
fee = feeData.TakerFee * feeBuilder.Amount * feeBuilder.PurchasePrice
}
if fee < 0 {
fee = 0
}
}
return fee, nil
}
// getOfflineTradeFee calculates the worst case-scenario trading fee
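// The 0.0002 and 0.0007 multipliers correspond to 0.02% maker and 0.07%
// taker fees on the notional value.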
func getOfflineTradeFee(feeBuilder *exchange.FeeBuilder) float64 {
if feeBuilder.IsMaker {
return 0.0002 * feeBuilder.PurchasePrice * feeBuilder.Amount
}
return 0.0007 * feeBuilder.PurchasePrice * feeBuilder.Amount
}
func (f *FTX) compatibleOrderVars(ctx context.Context, orderSide, orderStatus, orderType string, amount, filledAmount, avgFillPrice float64) (OrderVars, error) {
if filledAmount > amount {
return OrderVars{}, fmt.Errorf("%w, amount: %f filled: %f", errInvalidOrderAmounts, amount, filledAmount)
}
var resp OrderVars
switch orderSide {
case order.Buy.Lower():
resp.Side = order.Buy
case order.Sell.Lower():
resp.Side = order.Sell
}
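	// Any other side string leaves resp.Side at its zero value.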
switch orderStatus {
case strings.ToLower(order.New.String()):
resp.Status = order.New
case strings.ToLower(order.Open.String()):
resp.Status = order.Open
case closedStatus:
switch {
case filledAmount <= 0:
// Order is closed with a filled amount of 0, which means it's
// cancelled.
resp.Status = order.Cancelled
case math.Abs(filledAmount-amount) > 1e-6:
// Order is closed with filledAmount above 0, but not equal to the
// full amount, which means it's partially executed and then
// cancelled.
resp.Status = order.PartiallyCancelled
default:
// Order is closed and filledAmount == amount, which means it's
// fully executed.
resp.Status = order.Filled
}
default:
return resp, fmt.Errorf("%w %s", errUnrecognisedOrderStatus, orderStatus)
}
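	// Estimate the fee from the average fill price; limit orders are costed
	// at the maker rate, everything else as a taker market order.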
var feeBuilder exchange.FeeBuilder
feeBuilder.PurchasePrice = avgFillPrice
feeBuilder.Amount = amount
resp.OrderType = order.Market
if strings.EqualFold(orderType, order.Limit.String()) {
resp.OrderType = order.Limit
feeBuilder.IsMaker = true
}
fee, err := f.GetFee(ctx, &feeBuilder)
if err != nil {
return resp, err
}
resp.Fee = fee
return resp, nil
}
// RequestForQuotes requests for otc quotes
func (f *FTX) RequestForQuotes(ctx context.Context, base, quote currency.Code, amount float64) (RequestQuoteData, error) {
resp := struct {
Data RequestQuoteData `json:"result"`
}{}
req := make(map[string]interface{})
req["fromCoin"] = base.Upper().String()
req["toCoin"] = quote.Upper().String()
req["size"] = amount
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, requestOTCQuote, req, &resp)
}
// GetOTCQuoteStatus gets quote status of a quote
func (f *FTX) GetOTCQuoteStatus(ctx context.Context, marketName, quoteID string) (*QuoteStatusData, error) {
resp := struct {
Data QuoteStatusData `json:"result"`
}{}
	params := url.Values{}
	params.Set("market", marketName)
	endpoint := common.EncodeURLValues(getOTCQuoteStatus+quoteID, params)
	return &resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, endpoint, nil, &resp)
}
// AcceptOTCQuote accepts an OTC quote
func (f *FTX) AcceptOTCQuote(ctx context.Context, quoteID string) error {
return f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, fmt.Sprintf(acceptOTCQuote, quoteID), nil, nil)
}
// GetSubaccounts returns the users subaccounts
func (f *FTX) GetSubaccounts(ctx context.Context) ([]Subaccount, error) {
resp := struct {
Data []Subaccount `json:"result"`
}{}
return resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, subaccounts, nil, &resp)
}
// CreateSubaccount creates a new subaccount
func (f *FTX) CreateSubaccount(ctx context.Context, name string) (*Subaccount, error) {
if name == "" {
return nil, errSubaccountNameMustBeSpecified
}
d := make(map[string]string)
d["nickname"] = name
resp := struct {
Data Subaccount `json:"result"`
}{}
if err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, subaccounts, d, &resp); err != nil {
return nil, err
}
return &resp.Data, nil
}
// UpdateSubaccountName updates an existing subaccount name
func (f *FTX) UpdateSubaccountName(ctx context.Context, oldName, newName string) (*Subaccount, error) {
if oldName == "" || newName == "" || oldName == newName {
return nil, errSubaccountUpdateNameInvalid
}
d := make(map[string]string)
d["nickname"] = oldName
d["newNickname"] = newName
resp := struct {
Data Subaccount `json:"result"`
}{}
if err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, subaccountsUpdateName, d, &resp); err != nil {
return nil, err
}
return &resp.Data, nil
}
// DeleteSubaccount deletes the specified subaccount name
func (f *FTX) DeleteSubaccount(ctx context.Context, name string) error {
if name == "" {
return errSubaccountNameMustBeSpecified
}
d := make(map[string]string)
d["nickname"] = name
resp := struct {
Data Subaccount `json:"result"`
}{}
return f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodDelete, subaccounts, d, &resp)
}
// SubaccountBalances returns the user's subaccount balances
func (f *FTX) SubaccountBalances(ctx context.Context, name string) ([]SubaccountBalance, error) {
if name == "" {
return nil, errSubaccountNameMustBeSpecified
}
resp := struct {
Data []SubaccountBalance `json:"result"`
}{}
if err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, fmt.Sprintf(subaccountsBalance, name), nil, &resp); err != nil {
return nil, err
}
return resp.Data, nil
}
// SubaccountTransfer transfers a desired coin to the specified subaccount
func (f *FTX) SubaccountTransfer(ctx context.Context, coin currency.Code, source, destination string, size float64) (*SubaccountTransferStatus, error) {
if coin.IsEmpty() {
return nil, errCoinMustBeSpecified
}
if size <= 0 {
return nil, errSubaccountTransferSizeGreaterThanZero
}
if source == destination {
return nil, errSubaccountTransferSourceDestinationMustNotBeEqual
}
d := make(map[string]interface{})
d["coin"] = coin.Upper().String()
d["size"] = size
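	// An empty source or destination is interpreted as the main account.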
if source == "" {
source = "main"
}
d["source"] = source
if destination == "" {
destination = "main"
}
d["destination"] = destination
resp := struct {
Data SubaccountTransferStatus `json:"result"`
}{}
if err := f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, subaccountsTransfer, d, &resp); err != nil {
return nil, err
}
return &resp.Data, nil
}
// FetchExchangeLimits fetches spot order execution limits
func (f *FTX) FetchExchangeLimits(ctx context.Context) ([]order.MinMaxLevel, error) {
data, err := f.GetMarkets(ctx)
if err != nil {
return nil, err
}
var limits []order.MinMaxLevel
for x := range data {
if !data[x].Enabled {
continue
}
var cp currency.Pair
var a asset.Item
switch data[x].MarketType {
case "future":
a = asset.Futures
cp, err = currency.NewPairFromString(data[x].Name)
if err != nil {
return nil, err
}
case "spot":
a = asset.Spot
cp, err = currency.NewPairFromStrings(data[x].BaseCurrency, data[x].QuoteCurrency)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unhandled data type %s, cannot process exchange limit",
data[x].MarketType)
}
limits = append(limits, order.MinMaxLevel{
Pair: cp,
Asset: a,
StepPrice: data[x].PriceIncrement,
StepAmount: data[x].SizeIncrement,
MinAmount: data[x].MinProvideSize,
})
}
return limits, nil
}<|fim▁end|> | |
<|file_name|>test_iterables.py<|end_file_name|><|fim▁begin|>from textwrap import dedent
from sympy import (
symbols, Integral, Tuple, Dummy, Basic, default_sort_key, Matrix,
factorial, true)
from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation
from sympy.utilities.iterables import (
_partition, _set_partitions, binary_partitions, bracelets, capture,
cartes, common_prefix, common_suffix, dict_merge, flatten,
generate_bell, generate_derangements, generate_involutions,
generate_oriented_forest, group, has_dups, kbins, minlex, multiset,
multiset_combinations, multiset_partitions, multiset_permutations,
necklaces, numbered_symbols, ordered, partitions, permutations,
postfixes, postorder_traversal, prefixes, reshape, rotate_left,
rotate_right, runs, sift, subsets, take, topological_sort, unflatten,
uniq, variations)
from sympy.core.singleton import S
from sympy.functions.elementary.piecewise import Piecewise, ExprCondPair
from sympy.utilities.pytest import raises
w, x, y, z = symbols('w,x,y,z')
def test_postorder_traversal():
expr = z + w*(x + y)
expected = [z, w, x, y, x + y, w*(x + y), w*(x + y) + z]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(expr, keys=True)) == expected
expr = Piecewise((x, x < 1), (x**2, True))
expected = [
x, 1, x, x < 1, ExprCondPair(x, x < 1),
2, x, x**2, true,
ExprCondPair(x**2, True), Piecewise((x, x < 1), (x**2, True))
]
assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
assert list(postorder_traversal(
[expr], keys=default_sort_key)) == expected + [[expr]]
assert list(postorder_traversal(Integral(x**2, (x, 0, 1)),
keys=default_sort_key)) == [
2, x, x**2, 0, 1, x, Tuple(x, 0, 1),
Integral(x**2, Tuple(x, 0, 1))
]
assert list(postorder_traversal(('abc', ('d', 'ef')))) == [
'abc', 'd', 'ef', ('d', 'ef'), ('abc', ('d', 'ef'))]
def test_flatten():
assert flatten((1, (1,))) == [1, 1]
assert flatten((x, (x,))) == [x, x]
ls = [[(-2, -1), (1, 2)], [(0, 0)]]
assert flatten(ls, levels=0) == ls<|fim▁hole|> raises(ValueError, lambda: flatten(ls, levels=-1))
class MyOp(Basic):
pass
assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z]
assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z]
assert flatten(set([1, 11, 2])) == list(set([1, 11, 2]))
def test_group():
assert group([]) == []
assert group([], multiple=False) == []
assert group([1]) == [[1]]
assert group([1], multiple=False) == [(1, 1)]
assert group([1, 1]) == [[1, 1]]
assert group([1, 1], multiple=False) == [(1, 2)]
assert group([1, 1, 1]) == [[1, 1, 1]]
assert group([1, 1, 1], multiple=False) == [(1, 3)]
assert group([1, 2, 1]) == [[1], [2], [1]]
assert group([1, 2, 1], multiple=False) == [(1, 1), (2, 1), (1, 1)]
assert group([1, 1, 2, 2, 2, 1, 3, 3]) == [[1, 1], [2, 2, 2], [1], [3, 3]]
assert group([1, 1, 2, 2, 2, 1, 3, 3], multiple=False) == [(1, 2),
(2, 3), (1, 1), (3, 2)]
def test_subsets():
# combinations
assert list(subsets([1, 2, 3], 0)) == [()]
assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)]
assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)]
l = list(range(4))
assert list(subsets(l, 0, repetition=True)) == [()]
assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 1), (1, 2),
(1, 3), (2, 2), (2, 3),
(3, 3)]
assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1),
(0, 0, 2), (0, 0, 3),
(0, 1, 1), (0, 1, 2),
(0, 1, 3), (0, 2, 2),
(0, 2, 3), (0, 3, 3),
(1, 1, 1), (1, 1, 2),
(1, 1, 3), (1, 2, 2),
(1, 2, 3), (1, 3, 3),
(2, 2, 2), (2, 2, 3),
(2, 3, 3), (3, 3, 3)]
assert len(list(subsets(l, 4, repetition=True))) == 35
assert list(subsets(l[:2], 3, repetition=False)) == []
assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0),
(0, 0, 1),
(0, 1, 1),
(1, 1, 1)]
assert list(subsets([1, 2], repetition=True)) == \
[(), (1,), (2,), (1, 1), (1, 2), (2, 2)]
assert list(subsets([1, 2], repetition=False)) == \
[(), (1,), (2,), (1, 2)]
assert list(subsets([1, 2, 3], 2)) == \
[(1, 2), (1, 3), (2, 3)]
assert list(subsets([1, 2, 3], 2, repetition=True)) == \
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
def test_variations():
# permutations
l = list(range(4))
assert list(variations(l, 0, repetition=False)) == [()]
assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)]
assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)]
assert list(variations(l, 0, repetition=True)) == [()]
assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
(0, 3), (1, 0), (1, 1),
(1, 2), (1, 3), (2, 0),
(2, 1), (2, 2), (2, 3),
(3, 0), (3, 1), (3, 2),
(3, 3)]
assert len(list(variations(l, 3, repetition=True))) == 64
assert len(list(variations(l, 4, repetition=True))) == 256
assert list(variations(l[:2], 3, repetition=False)) == []
assert list(variations(l[:2], 3, repetition=True)) == [
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)
]
def test_cartes():
assert list(cartes([1, 2], [3, 4, 5])) == \
[(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)]
assert list(cartes()) == [()]
assert list(cartes('a')) == [('a',)]
assert list(cartes('a', repeat=2)) == [('a', 'a')]
assert list(cartes(list(range(2)))) == [(0,), (1,)]
def test_numbered_symbols():
s = numbered_symbols(cls=Dummy)
assert isinstance(next(s), Dummy)
def test_sift():
assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]}
assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]}
assert sift([S.One], lambda _: _.has(x)) == {False: [1]}
def test_take():
X = numbered_symbols()
assert take(X, 5) == list(symbols('x0:5'))
assert take(X, 5) == list(symbols('x5:10'))
assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5]
def test_dict_merge():
assert dict_merge({}, {1: x, y: z}) == {1: x, y: z}
assert dict_merge({1: x, y: z}, {}) == {1: x, y: z}
assert dict_merge({2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: x, y: z}, {2: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: y, 2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
assert dict_merge({1: x, y: z}, {1: y, 2: z}) == {1: y, 2: z, y: z}
def test_prefixes():
assert list(prefixes([])) == []
assert list(prefixes([1])) == [[1]]
assert list(prefixes([1, 2])) == [[1], [1, 2]]
assert list(prefixes([1, 2, 3, 4, 5])) == \
[[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]]
def test_postfixes():
assert list(postfixes([])) == []
assert list(postfixes([1])) == [[1]]
assert list(postfixes([1, 2])) == [[2], [1, 2]]
assert list(postfixes([1, 2, 3, 4, 5])) == \
[[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]]
def test_topological_sort():
V = [2, 3, 5, 7, 8, 9, 10, 11]
E = [(7, 11), (7, 8), (5, 11),
(3, 8), (3, 10), (11, 2),
(11, 9), (11, 10), (8, 9)]
assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10]
assert topological_sort((V, E), key=lambda v: -v) == \
[7, 5, 11, 3, 10, 8, 9, 2]
raises(ValueError, lambda: topological_sort((V, E + [(10, 7)])))
def test_rotate():
A = [0, 1, 2, 3, 4]
assert rotate_left(A, 2) == [2, 3, 4, 0, 1]
assert rotate_right(A, 1) == [4, 0, 1, 2, 3]
A = []
B = rotate_right(A, 1)
assert B == []
B.append(1)
assert A == []
B = rotate_left(A, 1)
assert B == []
B.append(1)
assert A == []
def test_multiset_partitions():
A = [0, 1, 2, 3, 4]
assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]]
assert len(list(multiset_partitions(A, 4))) == 10
assert len(list(multiset_partitions(A, 3))) == 25
assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [
[[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]],
[[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]]
assert list(multiset_partitions([1, 1, 2, 2], 2)) == [
[[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]],
[[1, 2], [1, 2]]]
assert list(multiset_partitions([1, 2, 3, 4], 2)) == [
[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
[[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
[[1], [2, 3, 4]]]
assert list(multiset_partitions([1, 2, 2], 2)) == [
[[1, 2], [2]], [[1], [2, 2]]]
assert list(multiset_partitions(3)) == [
[[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]],
[[0], [1], [2]]]
assert list(multiset_partitions(3, 2)) == [
[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]]
assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]]
assert list(multiset_partitions([1] * 3)) == [
[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
a = [3, 2, 1]
assert list(multiset_partitions(a)) == \
list(multiset_partitions(sorted(a)))
assert list(multiset_partitions(a, 5)) == []
assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]]
assert list(multiset_partitions(a + [4], 5)) == []
assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]]
assert list(multiset_partitions(2, 5)) == []
assert list(multiset_partitions(2, 1)) == [[[0, 1]]]
assert list(multiset_partitions('a')) == [[['a']]]
assert list(multiset_partitions('a', 2)) == []
assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]]
assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]]
assert list(multiset_partitions('aaa', 1)) == [['aaa']]
assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]]
def test_multiset_combinations():
ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips',
'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss']
assert [''.join(i) for i in
list(multiset_combinations('mississippi', 3))] == ans
M = multiset('mississippi')
assert [''.join(i) for i in
list(multiset_combinations(M, 3))] == ans
assert [''.join(i) for i in multiset_combinations(M, 30)] == []
assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]]
assert len(list(multiset_combinations('a', 3))) == 0
assert len(list(multiset_combinations('a', 0))) == 1
assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']]
def test_multiset_permutations():
ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
'byba', 'yabb', 'ybab', 'ybba']
assert [''.join(i) for i in multiset_permutations('baby')] == ans
assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
assert list(multiset_permutations([0, 2, 1], 2)) == [
[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
assert len(list(multiset_permutations('a', 0))) == 1
assert len(list(multiset_permutations('a', 3))) == 0
def test():
for i in range(1, 7):
print(i)
for p in multiset_permutations([0, 0, 1, 0, 1], i):
print(p)
assert capture(lambda: test()) == dedent('''\
1
[0]
[1]
2
[0, 0]
[0, 1]
[1, 0]
[1, 1]
3
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
4
[0, 0, 0, 1]
[0, 0, 1, 0]
[0, 0, 1, 1]
[0, 1, 0, 0]
[0, 1, 0, 1]
[0, 1, 1, 0]
[1, 0, 0, 0]
[1, 0, 0, 1]
[1, 0, 1, 0]
[1, 1, 0, 0]
5
[0, 0, 0, 1, 1]
[0, 0, 1, 0, 1]
[0, 0, 1, 1, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 1, 0]
[0, 1, 1, 0, 0]
[1, 0, 0, 0, 1]
[1, 0, 0, 1, 0]
[1, 0, 1, 0, 0]
[1, 1, 0, 0, 0]
6\n''')
def test_partitions():
assert [p.copy() for p in partitions(6, k=2)] == [
{2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=3)] == [
{3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},
{1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=2, m=2)] == []
assert [p.copy() for p in partitions(8, k=4, m=3)] == [
{4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [
i.copy() for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)
        and sum(i.values()) <= 3]
assert [p.copy() for p in partitions(S(3), m=2)] == [
{3: 1}, {1: 1, 2: 1}]
assert [i.copy() for i in partitions(4, k=3)] == [
{1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [
i.copy() for i in partitions(4) if all(k <= 3 for k in i)]
raises(ValueError, lambda: list(partitions(3, 0)))
# Consistency check on output of _partitions and RGS_unrank.
# This provides a sanity test on both routines. Also verifies that
# the total number of partitions is the same in each case.
# (from pkrathmann2)
for n in range(2, 6):
i = 0
for m, q in _set_partitions(n):
assert q == RGS_unrank(i, n)
i = i+1
assert i == RGS_enum(n)
def test_binary_partitions():
assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1],
[4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1],
[4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2],
[2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1],
[2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
assert len([j[:] for j in binary_partitions(16)]) == 36
def test_bell_perm():
assert [len(list(generate_bell(i))) for i in range(1, 7)] == [
factorial(i) for i in range(1, 7)]
assert list(generate_bell(3)) == [
(0, 1, 2), (1, 0, 2), (1, 2, 0), (2, 1, 0), (2, 0, 1), (0, 2, 1)]
def test_involutions():
lengths = [1, 2, 4, 10, 26, 76]
for n, N in enumerate(lengths):
i = list(generate_involutions(n + 1))
assert len(i) == N
assert len(set([Permutation(j)**2 for j in i])) == 1
def test_derangements():
assert len(list(generate_derangements(list(range(6))))) == 265
assert ''.join(''.join(i) for i in generate_derangements('abcde')) == (
'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd'
'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab'
'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb'
'edbacedbca')
assert list(generate_derangements([0, 1, 2, 3])) == [
[1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1],
[2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]]
assert list(generate_derangements([0, 1, 2, 2])) == [
[2, 2, 0, 1], [2, 2, 1, 0]]
def test_necklaces():
def count(n, k, f):
return len(list(necklaces(n, k, f)))
m = []
for i in range(1, 8):
m.append((
i, count(i, 2, 0), count(i, 2, 1), count(i, 3, 1)))
assert Matrix(m) == Matrix([
[1, 2, 2, 3],
[2, 3, 3, 6],
[3, 4, 4, 10],
[4, 6, 6, 21],
[5, 8, 8, 39],
[6, 14, 13, 92],
[7, 20, 18, 198]])
def test_generate_oriented_forest():
assert list(generate_oriented_forest(5)) == [[0, 1, 2, 3, 4],
[0, 1, 2, 3, 3], [0, 1, 2, 3, 2], [0, 1, 2, 3, 1], [0, 1, 2, 3, 0],
[0, 1, 2, 2, 2], [0, 1, 2, 2, 1], [0, 1, 2, 2, 0], [0, 1, 2, 1, 2],
[0, 1, 2, 1, 1], [0, 1, 2, 1, 0], [0, 1, 2, 0, 1], [0, 1, 2, 0, 0],
[0, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 0, 1], [0, 1, 1, 0, 0],
[0, 1, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]]
assert len(list(generate_oriented_forest(10))) == 1842
def test_unflatten():
r = list(range(10))
assert unflatten(r) == list(zip(r[::2], r[1::2]))
assert unflatten(r, 5) == [tuple(r[:5]), tuple(r[5:])]
raises(ValueError, lambda: unflatten(list(range(10)), 3))
raises(ValueError, lambda: unflatten(list(range(10)), -2))
def test_common_prefix_suffix():
assert common_prefix([], [1]) == []
assert common_prefix(list(range(3))) == [0, 1, 2]
assert common_prefix(list(range(3)), list(range(4))) == [0, 1, 2]
assert common_prefix([1, 2, 3], [1, 2, 5]) == [1, 2]
assert common_prefix([1, 2, 3], [1, 3, 5]) == [1]
assert common_suffix([], [1]) == []
assert common_suffix(list(range(3))) == [0, 1, 2]
assert common_suffix(list(range(3)), list(range(3))) == [0, 1, 2]
assert common_suffix(list(range(3)), list(range(4))) == []
assert common_suffix([1, 2, 3], [9, 2, 3]) == [2, 3]
assert common_suffix([1, 2, 3], [9, 7, 3]) == [3]
def test_minlex():
assert minlex([1, 2, 0]) == (0, 1, 2)
assert minlex((1, 2, 0)) == (0, 1, 2)
assert minlex((1, 0, 2)) == (0, 2, 1)
assert minlex((1, 0, 2), directed=False) == (0, 1, 2)
assert minlex('aba') == 'aab'
def test_ordered():
assert list(ordered((x, y), hash, default=False)) in [[x, y], [y, x]]
assert list(ordered((x, y), hash, default=False)) == \
list(ordered((y, x), hash, default=False))
assert list(ordered((x, y))) == [x, y]
seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]],
(lambda x: len(x), lambda x: sum(x))]
assert list(ordered(seq, keys, default=False, warn=False)) == \
[[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
raises(ValueError, lambda:
list(ordered(seq, keys, default=False, warn=True)))
def test_runs():
assert runs([]) == []
assert runs([1]) == [[1]]
assert runs([1, 1]) == [[1], [1]]
assert runs([1, 1, 2]) == [[1], [1, 2]]
assert runs([1, 2, 1]) == [[1, 2], [1]]
assert runs([2, 1, 1]) == [[2], [1], [1]]
from operator import lt
assert runs([2, 1, 1], lt) == [[2, 1], [1]]
def test_reshape():
seq = list(range(1, 9))
assert reshape(seq, [4]) == \
[[1, 2, 3, 4], [5, 6, 7, 8]]
assert reshape(seq, (4,)) == \
[(1, 2, 3, 4), (5, 6, 7, 8)]
assert reshape(seq, (2, 2)) == \
[(1, 2, 3, 4), (5, 6, 7, 8)]
assert reshape(seq, (2, [2])) == \
[(1, 2, [3, 4]), (5, 6, [7, 8])]
assert reshape(seq, ((2,), [2])) == \
[((1, 2), [3, 4]), ((5, 6), [7, 8])]
assert reshape(seq, (1, [2], 1)) == \
[(1, [2, 3], 4), (5, [6, 7], 8)]
assert reshape(tuple(seq), ([[1], 1, (2,)],)) == \
(([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
assert reshape(tuple(seq), ([1], 1, (2,))) == \
(([1], 2, (3, 4)), ([5], 6, (7, 8)))
assert reshape(list(range(12)), [2, [3], set([2]), (1, (3,), 1)]) == \
[[0, 1, [2, 3, 4], set([5, 6]), (7, (8, 9, 10), 11)]]
def test_uniq():
assert list(uniq(p.copy() for p in partitions(4))) == \
[{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}]
assert list(uniq(x % 2 for x in range(5))) == [0, 1]
assert list(uniq('a')) == ['a']
assert list(uniq('ababc')) == list('abc')
assert list(uniq([[1], [2, 1], [1]])) == [[1], [2, 1]]
assert list(uniq(permutations(i for i in [[1], 2, 2]))) == \
[([1], 2, 2), (2, [1], 2), (2, 2, [1])]
assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \
[2, 3, 4, [2], [1], [3]]
def test_kbins():
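    # Observed from the captured output below: ordered=None splits the
    # sequence into contiguous bins; ordered=0 yields plain multiset
    # partitions; in the two-digit codes the tens digit permutes the bins
    # and the units digit permutes the contents within each bin.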
assert len(list(kbins('1123', 2, ordered=1))) == 24
assert len(list(kbins('1123', 2, ordered=11))) == 36
assert len(list(kbins('1123', 2, ordered=10))) == 10
assert len(list(kbins('1123', 2, ordered=0))) == 5
assert len(list(kbins('1123', 2, ordered=None))) == 3
def test():
for ordered in [None, 0, 1, 10, 11]:
print('ordered =', ordered)
for p in kbins([0, 0, 1], 2, ordered=ordered):
print(' ', p)
assert capture(lambda : test()) == dedent('''\
ordered = None
[[0], [0, 1]]
[[0, 0], [1]]
ordered = 0
[[0, 0], [1]]
[[0, 1], [0]]
ordered = 1
[[0], [0, 1]]
[[0], [1, 0]]
[[1], [0, 0]]
ordered = 10
[[0, 0], [1]]
[[1], [0, 0]]
[[0, 1], [0]]
[[0], [0, 1]]
ordered = 11
[[0], [0, 1]]
[[0, 0], [1]]
[[0], [1, 0]]
[[0, 1], [0]]
[[1], [0, 0]]
[[1, 0], [0]]\n''')
def test():
for ordered in [None, 0, 1, 10, 11]:
print('ordered =', ordered)
for p in kbins(list(range(3)), 2, ordered=ordered):
print(' ', p)
assert capture(lambda : test()) == dedent('''\
ordered = None
[[0], [1, 2]]
[[0, 1], [2]]
ordered = 0
[[0, 1], [2]]
[[0, 2], [1]]
[[0], [1, 2]]
ordered = 1
[[0], [1, 2]]
[[0], [2, 1]]
[[1], [0, 2]]
[[1], [2, 0]]
[[2], [0, 1]]
[[2], [1, 0]]
ordered = 10
[[0, 1], [2]]
[[2], [0, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[0], [1, 2]]
[[1, 2], [0]]
ordered = 11
[[0], [1, 2]]
[[0, 1], [2]]
[[0], [2, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[1, 0], [2]]
[[1], [2, 0]]
[[1, 2], [0]]
[[2], [0, 1]]
[[2, 0], [1]]
[[2], [1, 0]]
[[2, 1], [0]]\n''')
def test_has_dups():
assert has_dups(set()) is False
assert has_dups(list(range(3))) is False
assert has_dups([1, 2, 1]) is True
def test__partition():
assert _partition('abcde', [1, 0, 1, 2, 0]) == [
['b', 'e'], ['a', 'c'], ['d']]
assert _partition('abcde', [1, 0, 1, 2, 0], 3) == [
['b', 'e'], ['a', 'c'], ['d']]
output = (3, [1, 0, 1, 2, 0])
assert _partition('abcde', *output) == [['b', 'e'], ['a', 'c'], ['d']]<|fim▁end|> | assert flatten(ls, levels=1) == [(-2, -1), (1, 2), (0, 0)]
assert flatten(ls, levels=2) == [-2, -1, 1, 2, 0, 0]
assert flatten(ls, levels=3) == [-2, -1, 1, 2, 0, 0]
|
<|file_name|>browsing_context.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Liberally derived from the [Firefox JS implementation]
//! (http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/webbrowser.js).
//! Connection point for remote devtools that wish to investigate a particular Browsing Context's contents.
//! Supports dynamic attaching and detaching which control notifications of navigation, etc.
use crate::actor::{Actor, ActorMessageStatus, ActorRegistry};
use crate::actors::console::ConsoleActor;
use crate::protocol::JsonPacketStream;
use devtools_traits::DevtoolScriptControlMsg::{self, WantsLiveNotifications};
use serde_json::{Map, Value};
use std::net::TcpStream;
#[derive(Serialize)]
struct BrowsingContextTraits;
#[derive(Serialize)]
struct BrowsingContextAttachedReply {
from: String,
#[serde(rename = "type")]
type_: String,
threadActor: String,
cacheDisabled: bool,
javascriptEnabled: bool,
traits: BrowsingContextTraits,
}
#[derive(Serialize)]
struct BrowsingContextDetachedReply {
from: String,
#[serde(rename = "type")]
type_: String,
}
#[derive(Serialize)]
struct ReconfigureReply {
from: String,
}
#[derive(Serialize)]
struct ListFramesReply {
from: String,
frames: Vec<FrameMsg>,
}
#[derive(Serialize)]
struct FrameMsg {
id: u32,
url: String,
title: String,
parentID: u32,
}
#[derive(Serialize)]
struct ListWorkersReply {
from: String,
workers: Vec<WorkerMsg>,
}
#[derive(Serialize)]
struct WorkerMsg {
id: u32,
}
#[derive(Serialize)]
pub struct BrowsingContextActorMsg {
actor: String,
title: String,
url: String,
outerWindowID: u32,
consoleActor: String,
emulationActor: String,
inspectorActor: String,
timelineActor: String,
profilerActor: String,
performanceActor: String,
styleSheetsActor: String,
}
pub struct BrowsingContextActor {
pub name: String,
pub title: String,
pub url: String,
pub console: String,
pub emulation: String,
pub inspector: String,
pub timeline: String,
pub profiler: String,
pub performance: String,
pub styleSheets: String,
pub thread: String,
}
impl Actor for BrowsingContextActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(
&self,
registry: &ActorRegistry,
msg_type: &str,
msg: &Map<String, Value>,
stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> {
Ok(match msg_type {
"reconfigure" => {
if let Some(options) = msg.get("options").and_then(|o| o.as_object()) {
if let Some(val) = options.get("performReload") {
if val.as_bool().unwrap_or(false) {
let console_actor = registry.find::<ConsoleActor>(&self.console);
let _ = console_actor
.script_chan
.send(DevtoolScriptControlMsg::Reload(console_actor.pipeline));
}
}
}
stream.write_json_packet(&ReconfigureReply { from: self.name() });
ActorMessageStatus::Processed
},
// https://docs.firefox-dev.tools/backend/protocol.html#listing-browser-tabs
// (see "To attach to a _targetActor_")
"attach" => {
let msg = BrowsingContextAttachedReply {
from: self.name(),
type_: "targetAttached".to_owned(),
threadActor: self.thread.clone(),
cacheDisabled: false,
javascriptEnabled: true,
traits: BrowsingContextTraits,
};
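                // Keep a clone of the TCP stream so the console actor can
                // push live notifications to this client until it detaches.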
let console_actor = registry.find::<ConsoleActor>(&self.console);
console_actor
.streams
.borrow_mut()
.push(stream.try_clone().unwrap());
stream.write_json_packet(&msg);
console_actor
.script_chan
.send(WantsLiveNotifications(console_actor.pipeline, true))
.unwrap();
ActorMessageStatus::Processed
},
            //FIXME: The current implementation won't work for multiple connections. Need to ensure
// that the correct stream is removed.
"detach" => {
let msg = BrowsingContextDetachedReply {
from: self.name(),
type_: "detached".to_owned(),
};
let console_actor = registry.find::<ConsoleActor>(&self.console);
console_actor.streams.borrow_mut().pop();
stream.write_json_packet(&msg);
console_actor
.script_chan
.send(WantsLiveNotifications(console_actor.pipeline, false))
.unwrap();
ActorMessageStatus::Processed
},
"listFrames" => {
let msg = ListFramesReply {
from: self.name(),
frames: vec![],
};
stream.write_json_packet(&msg);
ActorMessageStatus::Processed
},
"listWorkers" => {
let msg = ListWorkersReply {
from: self.name(),
workers: vec![],
};
stream.write_json_packet(&msg);
ActorMessageStatus::Processed
},
_ => ActorMessageStatus::Ignored,
})
}
}
impl BrowsingContextActor {<|fim▁hole|> pub fn encodable(&self) -> BrowsingContextActorMsg {
BrowsingContextActorMsg {
actor: self.name(),
title: self.title.clone(),
url: self.url.clone(),
outerWindowID: 0, //FIXME: this should probably be the pipeline id
consoleActor: self.console.clone(),
emulationActor: self.emulation.clone(),
inspectorActor: self.inspector.clone(),
timelineActor: self.timeline.clone(),
profilerActor: self.profiler.clone(),
performanceActor: self.performance.clone(),
styleSheetsActor: self.styleSheets.clone(),
}
}
}<|fim▁end|> | |
<|file_name|>hostReportError.js<|end_file_name|><|fim▁begin|>/** PURE_IMPORTS_START PURE_IMPORTS_END */
export function hostReportError(err) {<|fim▁hole|>}
//# sourceMappingURL=hostReportError.js.map<|fim▁end|> | setTimeout(function () { throw err; }, 0); |
<|file_name|>PMT.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from acq4.devices.OptomechDevice import OptomechDevice
from acq4.devices.DAQGeneric import DAQGeneric
class PMT(DAQGeneric, OptomechDevice):
def __init__(self, dm, config, name):
self.omConf = {}
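        # Split the optomechanical keys out of the config so that DAQGeneric
        # does not receive them.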
for k in ['parentDevice', 'transform']:
if k in config:
self.omConf[k] = config.pop(k)
DAQGeneric.__init__(self, dm, config, name)<|fim▁hole|>
def getFilterDevice(self):
# return parent filter device or None
if 'Filter' in self.omConf.get('parentDevice', {}):
return self.omConf['parentDevice']
else:
return None<|fim▁end|> | OptomechDevice.__init__(self, dm, config, name) |
<|file_name|>db.py<|end_file_name|><|fim▁begin|>##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for DB, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil<|fim▁hole|>from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_DB(ConfigureMake):
"""Support for building and installing DB."""
def configure_step(self):
"""Configure build: change to build dir and call configure script."""
try:
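            # DB ships its configure script in dist/, but it must be run from
            # within build_unix/, hence the chdir plus the '../dist/' prefix.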
os.chdir('build_unix')
except OSError as err:
raise EasyBuildError("Failed to move to build dir: %s", err)
super(EB_DB, self).configure_step(cmd_prefix='../dist/')<|fim▁end|> | |
<|file_name|>CloudClusterPrinterStatus.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import List, Union, Dict, Optional, Any
from cura.PrinterOutput.PrinterOutputController import PrinterOutputController
from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel
from .CloudClusterBuildPlate import CloudClusterBuildPlate
from .CloudClusterPrintCoreConfiguration import CloudClusterPrintCoreConfiguration
from .BaseCloudModel import BaseCloudModel
## Class representing a cluster printer
# Spec: https://api-staging.ultimaker.com/connect/v1/spec
class CloudClusterPrinterStatus(BaseCloudModel):
## Creates a new cluster printer status
# \param enabled: A printer can be disabled if it should not receive new jobs. By default every printer is enabled.
# \param firmware_version: Firmware version installed on the printer. Can differ for each printer in a cluster.
# \param friendly_name: Human readable name of the printer. Can be used for identification purposes.
# \param ip_address: The IP address of the printer in the local network.
# \param machine_variant: The type of printer. Can be 'Ultimaker 3' or 'Ultimaker 3ext'.
# \param status: The status of the printer.<|fim▁hole|> # \param uuid: The unique ID of the printer, also known as GUID.
# \param configuration: The active print core configurations of this printer.
# \param reserved_by: A printer can be claimed by a specific print job.
# \param maintenance_required: Indicates if maintenance is necessary
# \param firmware_update_status: Whether the printer's firmware is up-to-date, value is one of: "up_to_date",
# "pending_update", "update_available", "update_in_progress", "update_failed", "update_impossible"
# \param latest_available_firmware: The version of the latest firmware that is available
# \param build_plate: The build plate that is on the printer
def __init__(self, enabled: bool, firmware_version: str, friendly_name: str, ip_address: str, machine_variant: str,
status: str, unique_name: str, uuid: str,
configuration: List[Union[Dict[str, Any], CloudClusterPrintCoreConfiguration]],
reserved_by: Optional[str] = None, maintenance_required: Optional[bool] = None,
firmware_update_status: Optional[str] = None, latest_available_firmware: Optional[str] = None,
build_plate: Union[Dict[str, Any], CloudClusterBuildPlate] = None, **kwargs) -> None:
self.configuration = self.parseModels(CloudClusterPrintCoreConfiguration, configuration)
self.enabled = enabled
self.firmware_version = firmware_version
self.friendly_name = friendly_name
self.ip_address = ip_address
self.machine_variant = machine_variant
self.status = status
self.unique_name = unique_name
self.uuid = uuid
self.reserved_by = reserved_by
self.maintenance_required = maintenance_required
self.firmware_update_status = firmware_update_status
self.latest_available_firmware = latest_available_firmware
self.build_plate = self.parseModel(CloudClusterBuildPlate, build_plate) if build_plate else None
super().__init__(**kwargs)
## Creates a new output model.
# \param controller - The controller of the model.
def createOutputModel(self, controller: PrinterOutputController) -> PrinterOutputModel:
model = PrinterOutputModel(controller, len(self.configuration), firmware_version = self.firmware_version)
self.updateOutputModel(model)
return model
## Updates the given output model.
# \param model - The output model to update.
def updateOutputModel(self, model: PrinterOutputModel) -> None:
model.updateKey(self.uuid)
model.updateName(self.friendly_name)
model.updateType(self.machine_variant)
model.updateState(self.status if self.enabled else "disabled")
model.updateBuildplate(self.build_plate.type if self.build_plate else "glass")
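        # zip() stops at the shortest sequence, so any surplus configurations
        # or extruder models are silently skipped.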
for configuration, extruder_output, extruder_config in \
zip(self.configuration, model.extruders, model.printerConfiguration.extruderConfigurations):
configuration.updateOutputModel(extruder_output)
configuration.updateConfigurationModel(extruder_config)<|fim▁end|> | # \param unique_name: The unique name of the printer in the network. |
<|file_name|>chunk.py<|end_file_name|><|fim▁begin|>"""
Chunk (N number of bytes at M offset to a source's beginning) provider.
Primarily for file sources but usable by any iterator that has both
seek and read( N ).
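
A minimal usage sketch ('data.bin' is a hypothetical file): fetch the third
64KiB chunk of a binary source:

    with open( 'data.bin', 'rb' ) as source:
        provider = ChunkDataProvider( source, chunk_index=2, chunk_size=2 ** 16 )
        chunk = next( iter( provider ) )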
"""
import os
import base64
import base
import exceptions
import logging
log = logging.getLogger( __name__ )
# -----------------------------------------------------------------------------
class ChunkDataProvider( base.DataProvider ):
"""
Data provider that yields chunks of data from its file.
Note: this version does not account for lines and works with Binary datatypes.
"""
MAX_CHUNK_SIZE = 2 ** 16
DEFAULT_CHUNK_SIZE = MAX_CHUNK_SIZE
settings = {
'chunk_index' : 'int',
'chunk_size' : 'int'
}
# TODO: subclass from LimitedOffsetDataProvider?
# see web/framework/base.iterate_file, util/__init__.file_reader, and datatypes.tabular
def __init__( self, source, chunk_index=0, chunk_size=DEFAULT_CHUNK_SIZE, **kwargs ):
"""
:param chunk_index: if a source can be divided into N number of
`chunk_size` sections, this is the index of which section to
return.
:param chunk_size: how large are the desired chunks to return
(gen. in bytes).
"""
super( ChunkDataProvider, self ).__init__( source, **kwargs )
self.chunk_size = int( chunk_size )
self.chunk_pos = int( chunk_index ) * self.chunk_size
def validate_source( self, source ):
"""
Does the given source have both the methods `seek` and `read`?
:raises InvalidDataProviderSource: if not.
"""
source = super( ChunkDataProvider, self ).validate_source( source )
if( ( not hasattr( source, 'seek' ) ) or ( not hasattr( source, 'read' ) ) ):
raise exceptions.InvalidDataProviderSource( source )
return source
def __iter__( self ):
# not reeeally an iterator per se
self.__enter__()
self.source.seek( self.chunk_pos, os.SEEK_SET )
chunk = self.encode( self.source.read( self.chunk_size ) )
yield chunk
self.__exit__()
def encode( self, chunk ):
"""
Called on the chunk before returning.
        Override to modify, encode, or decode chunks.
"""
return chunk<|fim▁hole|>
class Base64ChunkDataProvider( ChunkDataProvider ):
"""
Data provider that yields chunks of base64 encoded data from its file.
"""
def encode( self, chunk ):
"""
Return chunks encoded in base 64.
"""
return base64.b64encode( chunk )<|fim▁end|> | |
<|file_name|>errors.rs<|end_file_name|><|fim▁begin|>//! The rpeek library.
//!
//! # License
//!
//! Copyright (c) 2015 by Stacy Prowell. All rights reserved.
//!
//! Licensed under the BSD 2-Clause license. See the file LICENSE
//! that is part of this distribution. This file may not be copied,
//! modified, or distributed except according to those terms.
use std;
use std::error;
/// Define the kinds of errors that can occur when parsing.
#[derive(Debug)]
pub enum ParseError {
/// Lookahead value specified is too large. This occurs when the user asks to peek ahead
/// too far.
LookaheadTooLarge,
/// Reading seems to have stalled at the end of file.
StalledAtEof,
/// Reading has stalled with no forward progress.
Stalled,
/// A general IO error happened.
IOError(std::io::Error),
/// Some other error happened.<|fim▁hole|> OtherError(Box<std::error::Error>),
}
/// A convenience result type for operations that can fail with a ParseError.
pub type ParseResult<T> = std::result::Result<T, ParseError>;<|fim▁end|> | |
<|file_name|>bitrpc.py<|end_file_name|><|fim▁begin|>from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a StacyCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":<|fim▁hole|> try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a StacyCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"<|fim▁end|> | try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ") |
<|file_name|>MultipleResourcePackageIntegrationTest.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2014-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|> * License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.android;
import static org.junit.Assert.assertFalse;
import com.facebook.buck.core.model.BuildTargetFactory;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.io.filesystem.TestProjectFilesystems;
import com.facebook.buck.testutil.TemporaryPaths;
import com.facebook.buck.testutil.integration.ProjectWorkspace;
import com.facebook.buck.testutil.integration.TestDataHelper;
import java.io.IOException;
import java.nio.file.Path;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
public class MultipleResourcePackageIntegrationTest {
@Rule public TemporaryPaths tmpFolder = new TemporaryPaths();
private ProjectWorkspace workspace;
private ProjectFilesystem filesystem;
@Before
public void setUp() throws InterruptedException, IOException {
workspace =
TestDataHelper.createProjectWorkspaceForScenario(this, "android_project", tmpFolder);
workspace.setUp();
filesystem = TestProjectFilesystems.createProjectFilesystem(workspace.getDestPath());
}
@Test
public void testRDotJavaFilesPerPackage() throws InterruptedException, IOException {
AssumeAndroidPlatform.assumeSdkIsAvailable();
workspace.runBuckBuild("//apps/sample:app_with_multiple_rdot_java_packages").assertSuccess();
Path uberRDotJavaDir =
GenerateRDotJava.getPathToGeneratedRDotJavaSrcFiles(
BuildTargetFactory.newInstance("//apps/sample:app_with_multiple_rdot_java_packages")
.withFlavors(AndroidBinaryResourcesGraphEnhancer.GENERATE_RDOT_JAVA_FLAVOR),
filesystem);
String sampleRJava =
workspace.getFileContents(uberRDotJavaDir.resolve("com/sample/R.java").toString());
String sample2RJava =
workspace.getFileContents(uberRDotJavaDir.resolve("com/sample2/R.java").toString());
assertFalse(sampleRJava.contains("sample2_string"));
assertFalse(sample2RJava.contains("app_icon"));
assertFalse(sample2RJava.contains("tiny_black"));
assertFalse(sample2RJava.contains("tiny_something"));
assertFalse(sample2RJava.contains("tiny_white"));
assertFalse(sample2RJava.contains("top_layout"));
assertFalse(sample2RJava.contains("app_name"));
assertFalse(sample2RJava.contains("base_button"));
}
}<|fim▁end|> | *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
<|file_name|>test_detector.rs<|end_file_name|><|fim▁begin|>use sourcemap::{is_sourcemap_slice, locate_sourcemap_reference, SourceMapRef};
#[test]
fn test_basic_locate() {
let input: &[_] = b"foo();\nbar();\n//# sourceMappingURL=foo.js";
assert_eq!(
locate_sourcemap_reference(input).unwrap(),
SourceMapRef::Ref("foo.js".into())
);<|fim▁hole|> locate_sourcemap_reference(input).unwrap().get_url(),
Some("foo.js")
);
}
#[test]
fn test_legacy_locate() {
let input: &[_] = b"foo();\nbar();\n//@ sourceMappingURL=foo.js";
assert_eq!(
locate_sourcemap_reference(input).unwrap(),
SourceMapRef::LegacyRef("foo.js".into())
);
assert_eq!(
locate_sourcemap_reference(input).unwrap().get_url(),
Some("foo.js")
);
}
#[test]
fn test_no_ref() {
let input: &[_] = b"foo();\nbar();\n// whatever";
assert_eq!(
locate_sourcemap_reference(input).unwrap(),
SourceMapRef::Missing
);
}
#[test]
fn test_detect_basic_sourcemap() {
let input: &[_] = b"{
\"version\":3,
\"sources\":[\"coolstuff.js\"],
\"names\":[\"x\",\"alert\"],
\"mappings\":\"AAAA,GAAIA,GAAI,EACR,IAAIA,GAAK,EAAG,CACVC,MAAM\"
}";
assert!(is_sourcemap_slice(input));
}
#[test]
fn test_detect_bad_sourcemap() {
let input: &[_] = b"{
\"sources\":[\"coolstuff.js\"],
\"names\":[\"x\",\"alert\"]
}";
assert!(!is_sourcemap_slice(input));
}
#[test]
fn test_detect_basic_sourcemap_with_junk_header() {
let input: &[_] = b")]}garbage\n
{
\"version\":3,
\"sources\":[\"coolstuff.js\"],
\"names\":[\"x\",\"alert\"],
\"mappings\":\"AAAA,GAAIA,GAAI,EACR,IAAIA,GAAK,EAAG,CACVC,MAAM\"
}";
assert!(is_sourcemap_slice(input));
}<|fim▁end|> | assert_eq!( |
<|file_name|>app.po.ts<|end_file_name|><|fim▁begin|>import { browser, element, by } from 'protractor';
<|fim▁hole|>export class ContactsAppClientPage {
navigateTo() {
return browser.get('/');
}
getParagraphText() {
return element(by.css('app-root h1')).getText();
}
}<|fim▁end|> | |
<|file_name|>dialogue.py<|end_file_name|><|fim▁begin|># This stores all the dialogue related stuff
import screen
class Dialogue(object):
"""Stores the dialogue tree for an individual NPC"""
def __init__(self, npc):
super(Dialogue, self).__init__()
self.npc = npc
self.game = npc.game
self.root = None
self.currentNode = None
def setRootNode(self, node):
self.root = node
def resetCurrentNode(self):
self.currentNode = self.root
def beginConversation(self):
self.resetCurrentNode()
self.runNextNode()
def runNextNode(self):
if self.currentNode is None:
return
# Grab all the DialogueChoices that should be shown
availableChoices = []
for (choice, predicate, child) in self.currentNode.choices:
if predicate is not None:
if predicate():
availableChoices.append((choice, child))
else:
availableChoices.append((choice, child))<|fim▁hole|>
choiceTexts = [choice.choiceText for (choice, child) in availableChoices]
screen.printDialogueChoices(self.game.screen, self.game.player,
choiceTexts, npcName)
choiceIdx = self.game.getDialogueChoice(len(choiceTexts)) - 1
self.game.draw()
(choice, nextNode) = availableChoices[choiceIdx]
response = ""
response += choice.response
if choice.responseFunction is not None:
response = choice.responseFunction(self.npc, response)
self.game.printDescription(response, npcName)
self.currentNode = nextNode
self.runNextNode()
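# Minimal wiring sketch (illustrative only; `npc` is assumed to be an NPC
# object carrying a `game` reference, and the classes used are defined below):
#
#   root = DialogueNode()
#   root.addChoice(DialogueChoice("Hello", "Hi there."))  # no child node ends the talk
#   talk = Dialogue(npc)
#   talk.setRootNode(root)
#   talk.beginConversation()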
class DialogueNode(object):
"""A single node of the dialogue tree"""
def __init__(self):
super(DialogueNode, self).__init__()
self.choices = []
def addChoice(self, choice, choicePredicate=None, childNode=None):
self.choices.append((choice, choicePredicate, childNode))
class DialogueChoice(object):
"""Stores the choice/function pair"""
def __init__(self, choiceText, response, responseFunction=None):
super(DialogueChoice, self).__init__()
self.choiceText = choiceText
self.response = response
self.responseFunction = responseFunction
def callResponseFunction(self, npcArgument, response):
if self.responseFunction is not None:
self.responseFunction(npcArgument, response)<|fim▁end|> |
npcName = None
if self.game.player.notebook.isNpcKnown(self.npc):
npcName = self.npc.firstName + " " + self.npc.lastName |
<|file_name|>costcalculator.js<|end_file_name|><|fim▁begin|>process.env.NODE_ENV = 'test';<|fim▁hole|>
const CostCalculator = require("../libs/costcalculator");
describe('Calculate Cost', () => {
describe("Book Meeting Room", () => {
it("it should calcuate cost for meeting room for 30m", async (done) => {
try {
let result = await CostCalculator("5bd7283ebfc02163c7b4d5d7", new Date("2020-01-01T09:00:00"), new Date("2020-01-01T09:30:00"));
result.should.equal(2.8);
done();
} catch(err) {
done(err);
}
});
});
});<|fim▁end|> |
const chai = require('chai');
const chaiHttp = require('chai-http');
const should = chai.should(); |
<|file_name|>NodeSelection.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2000 - 2013 Silverpeas
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version 3
* of the License, or (at your option) any later version.
*
* As a special exception to the terms and conditions of version 3.0 of the GPL, you may
* redistribute this Program in connection with Free/Libre Open Source Software ("FLOSS")
* applications as described in Silverpeas's FLOSS exception. You should have received a copy of the
* text describing the FLOSS exception, and it is also available here:
* "http://www.silverpeas.org/docs/core/legal/floss_exception.html"
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*/
package com.stratelia.webactiv.util.node.model;
import java.awt.datatransfer.DataFlavor;
import java.awt.datatransfer.UnsupportedFlavorException;
import java.io.Serializable;
import java.text.ParseException;
import org.silverpeas.search.indexEngine.model.IndexEntry;
import com.silverpeas.util.clipboard.ClipboardSelection;
import com.silverpeas.util.clipboard.SilverpeasKeyData;<|fim▁hole|>import com.stratelia.webactiv.util.DateUtil;
public class NodeSelection extends ClipboardSelection implements Serializable {
private static final long serialVersionUID = -6462797069972573255L;
public static DataFlavor NodeDetailFlavor;
static {
NodeDetailFlavor = new DataFlavor(NodeDetail.class, "Node");
}
private NodeDetail nodeDetail;
public NodeSelection(NodeDetail node) {
super();
nodeDetail = node;
super.addFlavor(NodeDetailFlavor);
}
@Override
public synchronized Object getTransferData(DataFlavor parFlavor)
throws UnsupportedFlavorException {
Object transferedData;
try {
transferedData = super.getTransferData(parFlavor);
} catch (UnsupportedFlavorException e) {
if (parFlavor.equals(NodeDetailFlavor)) {
transferedData = nodeDetail;
} else {
throw e;
}
}
return transferedData;
}
@Override
public IndexEntry getIndexEntry() {
NodePK pk = nodeDetail.getNodePK();
IndexEntry indexEntry = new IndexEntry(pk.getInstanceId(), "Node", pk.getId());
indexEntry.setTitle(nodeDetail.getName());
return indexEntry;
}
@Override
public SilverpeasKeyData getKeyData() {
SilverpeasKeyData keyData = new SilverpeasKeyData();
keyData.setTitle(nodeDetail.getName());
keyData.setAuthor(nodeDetail.getCreatorId());
try {
keyData.setCreationDate(DateUtil.parse(nodeDetail.getCreationDate()));
} catch (ParseException e) {
SilverTrace.error("node", "NodeSelection.getKeyData()", "root.EX_NO_MESSAGE", e);
}
keyData.setDesc(nodeDetail.getDescription());
return keyData;
}
}<|fim▁end|> |
import com.stratelia.silverpeas.silvertrace.SilverTrace; |
<|file_name|>package-info.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|> * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
* Inspection that reports unresolved and unused references.
* You can inject logic to mark some unused imports as used. See extension points in this package.
* @author Ilya.Kazakevich
*/
package vgrechka.phizdetsidea.phizdets.inspections.unresolvedReference;<|fim▁end|> | |
<|file_name|>code.js<|end_file_name|><|fim▁begin|><|fim▁hole|>module.exports = function CodeType(config) {
var self = {
selector: '.field-type-code[for="' + config.fieldName + '"]',
elements: {
label: '.FormLabel',
lineNumber: '.CodeMirror-linenumber',
codeMirror: '.CodeMirror-container',
},
commands: [{
assertUI: function() {
this
.expect.element('@label').to.be.visible;
this
.expect.element('@label').text.to.equal(utils.titlecase(config.fieldName));
this
.expect.element('@lineNumber').to.be.visible;
this
.expect.element('@lineNumber').text.to.equal('1');
this
.expect.element('@codeMirror').to.be.visible;
return this;
},
fillInput: function(input) {
this.api
.execute(function (selector, input) {
var x = document.querySelector(selector);
var y = x.getElementsByClassName('CodeMirror')[0];
y.CodeMirror.setValue(input.value);
}, [self.selector, input]);
return this;
},
assertInput: function(input) {
this.api
.execute(function (selector) {
var x = document.querySelector(selector);
var y = x.getElementsByClassName('CodeMirror')[0];
return y.CodeMirror.getValue();
}, [self.selector], function (result) {
this.assert.equal(result.value, input.value);
});
return this;
},
}],
};
return self;
};<|fim▁end|> | var utils = require('../../../utils');
|
<|file_name|>test_cubegen.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright 2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import tempfile
from pyscf import lib, gto, scf
from pyscf.tools import cubegen
mol = gto.Mole()
mol.atom = '''
O 0.00000000, 0.000000, 0.119748
H 0.00000000, 0.761561, -0.478993
H 0.00000000, -0.761561, -0.478993 '''
mol.verbose = 0
mol.build()
mf = scf.RHF(mol).run()
def tearDownModule():
global mol, mf<|fim▁hole|> del mol, mf
class KnownValues(unittest.TestCase):
def test_mep(self):
ftmp = tempfile.NamedTemporaryFile()
mep = cubegen.mep(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10)
self.assertEqual(mep.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(mep), -0.3198103636180436, 9)
mep = cubegen.mep(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(mep.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(mep), -4.653995909548524, 9)
def test_orb(self):
ftmp = tempfile.NamedTemporaryFile()
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=10, nz=10)
self.assertEqual(orb.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(orb), -0.11804191128016768, 9)
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(orb.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(orb), -0.8591778390706646, 9)
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=1, nz=1)
self.assertEqual(orb.shape, (10,1,1))
self.assertAlmostEqual(lib.finger(orb), 6.921008881822988e-09, 9)
def test_rho(self):
ftmp = tempfile.NamedTemporaryFile()
rho = cubegen.density(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10)
self.assertEqual(rho.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(rho), -0.3740462814001553, 9)
rho = cubegen.density(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(rho.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(rho), -1.007950007160415, 9)
if __name__ == "__main__":
print("Full Tests for molden")
unittest.main()<|fim▁end|> | |
<|file_name|>Channel.js<|end_file_name|><|fim▁begin|>import _ from 'lodash';
/**
* Represents a channel with which commands can be invoked.
*
* Channels are one-per-origin (protocol/domain/port).
*/
class Channel {
constructor(config, $rootScope, $timeout, contentWindow) {
this.config = config;
this.$rootScope = $rootScope;
this.$timeout = $timeout;
this._contentWindow = contentWindow;
this.messageCounter = 0;
}
ab2str(buffer) {
let result = "";
let bytes = new Uint8Array(buffer);
let len = bytes.byteLength;
for (let i = 0; i < len; i++) {
result += String.fromCharCode(bytes[i]);
}
return result;
};
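// Note: the byte-wise conversion above only round-trips single-byte
// (ASCII/Latin-1) payloads; multi-byte UTF-8 content would need a TextDecoder.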
/**
* Fire and forget pattern that sends the command to the target without waiting for a response.
*/
invokeDirect(command, data, targetOrigin, transferrablePropertyPath) {
if (!data) {
data = {};
}
if (!targetOrigin) {
targetOrigin = this.config.siteUrl;
}
if (!targetOrigin) {
targetOrigin = "*";
}
data.command = command;
data.postMessageId = `SP.RequestExecutor_${this.messageCounter++}`;
let transferrableProperty = undefined;
if (transferrablePropertyPath) {
transferrableProperty = _.get(data, transferrablePropertyPath);
}
if (transferrableProperty)
this._contentWindow.postMessage(data, targetOrigin, [transferrableProperty]);
else
this._contentWindow.postMessage(data, targetOrigin);
}
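// Illustrative fire-and-forget call (command name, payload and origin are
// assumptions for the sketch, not part of this module's contract):
//   channel.invokeDirect("Ping", { when: Date.now() }, "https://tenant.example.com");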
/**
* Invokes the specified command on the channel with the specified data, constrained to the specified origin, waiting at most the given timeout (in ms) for a response.
*/
async invoke(command, data, targetOrigin, timeout, transferrablePropertyPath) {
if (!data) {
data = {};
}
if (!targetOrigin) {
targetOrigin = this.config.siteUrl;
}
if (!targetOrigin) {
targetOrigin = "*";
}
if (!timeout) {
timeout = 0;<|fim▁hole|> }
data.command = command;
data.postMessageId = `SP.RequestExecutor_${this.messageCounter++}`;
let resolve, reject;
let promise = new Promise((innerResolve, innerReject) => {
resolve = innerResolve;
reject = innerReject;
});
let timeoutPromise;
if (timeout > 0) {
timeoutPromise = this.$timeout(() => {
reject(new Error(`invoke() timed out while waiting for a response while executing ${data.command}`));
}, timeout);
}
let removeMonitor = this.$rootScope.$on(this.config.crossDomainMessageSink.incomingMessageName, (event, response) => {
if (response.postMessageId !== data.postMessageId)
return;
if (response.result === "error") {
reject(response);
}
else {
if (response.data) {
let contentType = response.headers["content-type"] || response.headers["Content-Type"];
if (contentType.startsWith("application/json")) {
let str = this.ab2str(response.data);
if (str.length > 0) {
try
{
response.data = JSON.parse(str);
}
catch(ex) {
}
}
} else if (contentType.startsWith("text")) {
response.data = this.ab2str(response.data);
}
}
resolve(response);
}
removeMonitor();
if (timeoutPromise)
this.$timeout.cancel(timeoutPromise);
});
let transferrableProperty = undefined;
if (transferrablePropertyPath) {
transferrableProperty = _.get(data, transferrablePropertyPath);
}
if (transferrableProperty)
this._contentWindow.postMessage(data, targetOrigin, [transferrableProperty]);
else
this._contentWindow.postMessage(data, targetOrigin);
return promise;
}
}
module.exports = Channel;<|fim▁end|> | |
<|file_name|>gen.go<|end_file_name|><|fim▁begin|><|fim▁hole|>//go:generate go get github.com/golang/mock/mockgen
//go:generate sh -c "mockgen -package=mockapi github.com/citwild/wfe/api AccountsServer > accounts.go"<|fim▁end|> | package mockapi
|
<|file_name|>test_spec_parser.py<|end_file_name|><|fim▁begin|>import unittest
import pythran
import os.path
#pythran export a((float,(int,uintp),str list) list list)
#pythran export a(str)
#pythran export a( (str,str), int, intp list list)
#pythran export a( float set )
#pythran export a( bool:str dict )
#pythran export a( float )
#pythran export a( int8[] )
#pythran export a( int8[][] order (F))
#pythran export a( byte )
#pythran export a0( uint8 )
#pythran export a1( int16 )
#pythran export a2( uint16 )
#pythran export a3( int32 )
#pythran export a4( uint32 )
#pythran export a5( int64 )
#pythran export a6( uint64 )
#pythran export a7( float32 )
#pythran export a8( float64 )
#pythran export a9( complex64 )
#pythran export a10( complex128 )
#pythran export a( int8 set )
#pythran export b( int8 set? )
#pythran export a( uint8 list)
#pythran export a( int16 [], slice)
#pythran export a( uint16 [][] order(C))
#pythran export a( uint16 [::][])
#pythran export a( uint16 [:,:,:])
#pythran export a( uint16 [:,::,:])
#pythran export a( uint16 [,,,,])
#pythran export a( (int32, ( uint32 , int64 ) ) )
#pythran export a( uint64:float32 dict )
#pythran export a( float64, complex64, complex128 )
class TestSpecParser(unittest.TestCase):
def test_parser(self):
real_path = os.path.splitext(os.path.realpath(__file__))[0]+".py"
with open(real_path) as fd:
print(pythran.spec_parser(fd.read()))
def test_invalid_specs0(self):
code = '#pythran export foo()\ndef foo(n): return n'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_invalid_specs1(self):
code = '#pythran export boo(int)\ndef boo(): pass'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_invalid_specs2(self):
code = '#pythran export bar(int)\ndef foo(): pass'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_invalid_specs3(self):
code = '#pythran export bar(int, int?, int)\ndef bar(x, y=1, z=1): pass'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_multiline_spec0(self):
code = '''
#pythran export foo(
# )
def foo(): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_multiline_spec1(self):
code = '''
#pythran export foo(int
#, int
# )<|fim▁hole|> self.assertTrue(pythran.spec_parser(code))
def test_multiline_spec2(self):
code = '''
# pythran export foo(int,
# float
#, int
# )
def foo(i,j,k): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_crappy_spec0(self):
code = '''
# pythran export soo(int) this is an int test
def soo(i): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_crappy_spec1(self):
code = '''
# pythran export poo(int)
#this is a pythran export test
def poo(i): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_middle_spec0(self):
code = '''
def too(i): return
# pythran export too(int)
#this is a pythran export test
def bar(i): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_middle_spec1(self):
code = '''
def zoo(i): return
#this is a pythran export test
# pythran export zoo(int)
#this is an export test
# pythran export zoo(str)
def bar(i): return
'''
self.assertEquals(len(pythran.spec_parser(code).functions), 1)
self.assertEquals(len(pythran.spec_parser(code).functions['zoo']), 2)
def test_var_export0(self):
code = '''
# pythran export coo
coo = 1
'''
self.assertTrue(pythran.spec_parser(code))<|fim▁end|> | def foo(i,j): return
''' |
<|file_name|>utilities_spec.js<|end_file_name|><|fim▁begin|>/*global waitsFor:true expect:true describe:true beforeEach:true it:true spyOn:true */
describe("Discourse.Utilities", function() {
describe("emailValid", function() {
it("allows upper case in first part of emails", function() {
expect(Discourse.Utilities.emailValid('[email protected]')).toBe(true);
});
it("allows upper case in domain of emails", function() {
expect(Discourse.Utilities.emailValid('[email protected]')).toBe(true);
});
});
describe("validateFilesForUpload", function() {
it("returns false when file is undefined", function() {
expect(Discourse.Utilities.validateFilesForUpload(null)).toBe(false);
expect(Discourse.Utilities.validateFilesForUpload(undefined)).toBe(false);
});
it("returns false when file there is no file", function() {
expect(Discourse.Utilities.validateFilesForUpload([])).toBe(false);
});
it("supports only one file", function() {
spyOn(bootbox, 'alert');
spyOn(Em.String, 'i18n');
expect(Discourse.Utilities.validateFilesForUpload([1, 2])).toBe(false);
expect(bootbox.alert).toHaveBeenCalled();
expect(Em.String.i18n).toHaveBeenCalledWith('post.errors.upload_too_many_images');
});
it("supports only an image", function() {
var html = { type: "text/html" };
spyOn(bootbox, 'alert');
spyOn(Em.String, 'i18n');
expect(Discourse.Utilities.validateFilesForUpload([html])).toBe(false);
expect(bootbox.alert).toHaveBeenCalled();
expect(Em.String.i18n).toHaveBeenCalledWith('post.errors.only_images_are_supported');
});<|fim▁hole|>
it("prevents the upload of a too large image", function() {
var image = { type: "image/png", size: 10 * 1024 };
Discourse.SiteSettings.max_upload_size_kb = 5;
spyOn(bootbox, 'alert');
spyOn(Em.String, 'i18n');
expect(Discourse.Utilities.validateFilesForUpload([image])).toBe(false);
expect(bootbox.alert).toHaveBeenCalled();
expect(Em.String.i18n).toHaveBeenCalledWith('post.errors.upload_too_large', { max_size_kb: 5 });
});
it("works", function() {
var image = { type: "image/png", size: 10 * 1024 };
Discourse.SiteSettings.max_upload_size_kb = 15;
expect(Discourse.Utilities.validateFilesForUpload([image])).toBe(true);
});
});
});<|fim▁end|> | |
<|file_name|>pfop.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from qiniu import config
from qiniu import http
class PersistentFop(object):
"""持久化处理类
该类用于主动触发异步持久化操作,具体规格参考:
http://developer.qiniu.com/docs/v6/api/reference/fop/pfop/pfop.html
Attributes:
auth: 账号管理密钥对,Auth对象
bucket: 操作资源所在空间
pipeline: 多媒体处理队列,详见 https://portal.qiniu.com/mps/pipeline
notify_url: 持久化处理结果通知URL
"""
def __init__(self, auth, bucket, pipeline=None, notify_url=None):
"""初始化持久化处理类"""
self.auth = auth
self.bucket = bucket
self.pipeline = pipeline
self.notify_url = notify_url
def execute(self, key, fops, force=None):
"""执行持久化处理:
Args:
key: 待处理的源文件
fops: 处理详细操作,规格详见 http://developer.qiniu.com/docs/v6/api/reference/fop/
force: 强制执行持久化处理开关
Returns:
一个dict变量,返回持久化处理的persistentId,类似{"persistentId": 5476bedf7823de4068253bae};
一个ResponseInfo对象
"""
ops = ';'.join(fops)
data = {'bucket': self.bucket, 'key': key, 'fops': ops}
if self.pipeline:
data['pipeline'] = self.pipeline<|fim▁hole|> data['force'] = 1
url = 'http://{0}/pfop'.format(config.get_default('default_api_host'))
return http._post_with_auth(url, data, self.auth)<|fim▁end|> | if self.notify_url:
data['notifyURL'] = self.notify_url
if force == 1: |
<|file_name|>OGLSurfaceData.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.java2d.opengl;
import java.awt.AlphaComposite;
import java.awt.GraphicsEnvironment;
import java.awt.Rectangle;
import java.awt.Transparency;
import java.awt.image.ColorModel;
import java.awt.image.Raster;
import sun.awt.SunHints;
import sun.awt.image.PixelConverter;
import sun.java2d.pipe.hw.AccelSurface;
import sun.java2d.SunGraphics2D;
import sun.java2d.SurfaceData;
import sun.java2d.SurfaceDataProxy;
import sun.java2d.loops.CompositeType;
import sun.java2d.loops.GraphicsPrimitive;
import sun.java2d.loops.MaskFill;
import sun.java2d.loops.SurfaceType;
import sun.java2d.pipe.ParallelogramPipe;
import sun.java2d.pipe.PixelToParallelogramConverter;
import sun.java2d.pipe.RenderBuffer;
import sun.java2d.pipe.TextPipe;
import static sun.java2d.pipe.BufferedOpCodes.*;
import static sun.java2d.opengl.OGLContext.OGLContextCaps.*;
/**
* This class describes an OpenGL "surface", that is, a region of pixels
* managed via OpenGL. An OGLSurfaceData can be tagged with one of three
* different SurfaceType objects for the purpose of registering loops, etc.
* This diagram shows the hierarchy of OGL SurfaceTypes:
*
* Any
* / \
* OpenGLSurface OpenGLTexture
* |
* OpenGLSurfaceRTT
*
* OpenGLSurface
* This kind of surface can be rendered to using OpenGL APIs. It is also
* possible to copy an OpenGLSurface to another OpenGLSurface (or to itself).
* This is typically accomplished by calling MakeContextCurrent(dstSD, srcSD)
* and then calling glCopyPixels() (although there are other techniques to
* achieve the same goal).
*
* OpenGLTexture
* This kind of surface cannot be rendered to using OpenGL (in the same sense
* as in OpenGLSurface). However, it is possible to upload a region of pixels
* to an OpenGLTexture object via glTexSubImage2D(). One can also copy a
* surface of type OpenGLTexture to an OpenGLSurface by binding the texture
* to a quad and then rendering it to the destination surface (this process
* is known as "texture mapping").
*
* OpenGLSurfaceRTT
* This kind of surface can be thought of as a sort of hybrid between
* OpenGLSurface and OpenGLTexture, in that one can render to this kind of
* surface as if it were of type OpenGLSurface, but the process of copying
* this kind of surface to another is more like an OpenGLTexture. (Note that
* "RTT" stands for "render-to-texture".)
*
* In addition to these SurfaceType variants, we have also defined some
* constants that describe in more detail the type of underlying OpenGL
* surface. This table helps explain the relationships between those
* "type" constants and their corresponding SurfaceType:
*
* OGL Type Corresponding SurfaceType
* -------- -------------------------
* WINDOW OpenGLSurface
* PBUFFER OpenGLSurface
* TEXTURE OpenGLTexture
* FLIP_BACKBUFFER OpenGLSurface
* FBOBJECT OpenGLSurfaceRTT
*/
public abstract class OGLSurfaceData extends SurfaceData
implements AccelSurface {
/**
* OGL-specific surface types
*
* @see sun.java2d.pipe.hw.AccelSurface
*/
public static final int PBUFFER = RT_PLAIN;
public static final int FBOBJECT = RT_TEXTURE;
/**
* Pixel formats
*/
public static final int PF_INT_ARGB = 0;
public static final int PF_INT_ARGB_PRE = 1;
public static final int PF_INT_RGB = 2;
public static final int PF_INT_RGBX = 3;
public static final int PF_INT_BGR = 4;
public static final int PF_INT_BGRX = 5;
public static final int PF_USHORT_565_RGB = 6;
public static final int PF_USHORT_555_RGB = 7;
public static final int PF_USHORT_555_RGBX = 8;
public static final int PF_BYTE_GRAY = 9;
public static final int PF_USHORT_GRAY = 10;
public static final int PF_3BYTE_BGR = 11;
/**
* SurfaceTypes
*/
private static final String DESC_OPENGL_SURFACE = "OpenGL Surface";
private static final String DESC_OPENGL_SURFACE_RTT =
"OpenGL Surface (render-to-texture)";
private static final String DESC_OPENGL_TEXTURE = "OpenGL Texture";
static final SurfaceType OpenGLSurface =
SurfaceType.Any.deriveSubType(DESC_OPENGL_SURFACE,
PixelConverter.ArgbPre.instance);
static final SurfaceType OpenGLSurfaceRTT =
OpenGLSurface.deriveSubType(DESC_OPENGL_SURFACE_RTT);
static final SurfaceType OpenGLTexture =
SurfaceType.Any.deriveSubType(DESC_OPENGL_TEXTURE);
/** This will be true if the fbobject system property has been enabled. */
private static boolean isFBObjectEnabled;
/** This will be true if the lcdshader system property has been enabled.*/
private static boolean isLCDShaderEnabled;
/** This will be true if the biopshader system property has been enabled.*/
private static boolean isBIOpShaderEnabled;
/** This will be true if the gradshader system property has been enabled.*/
private static boolean isGradShaderEnabled;
private OGLGraphicsConfig graphicsConfig;
protected int type;
// these fields are set from the native code when the surface is
// initialized
private int nativeWidth, nativeHeight;
protected static OGLRenderer oglRenderPipe;
protected static PixelToParallelogramConverter oglTxRenderPipe;
protected static ParallelogramPipe oglAAPgramPipe;
protected static OGLTextRenderer oglTextPipe;
protected static OGLDrawImage oglImagePipe;
protected native boolean initTexture(long pData,
boolean isOpaque, boolean texNonPow2,
boolean texRect,
int width, int height);
protected native boolean initFBObject(long pData,
boolean isOpaque, boolean texNonPow2,
boolean texRect,
int width, int height);
protected native boolean initFlipBackbuffer(long pData);
protected abstract boolean initPbuffer(long pData, long pConfigInfo,
boolean isOpaque,
int width, int height);
private native int getTextureTarget(long pData);
private native int getTextureID(long pData);
static {
if (!GraphicsEnvironment.isHeadless()) {
// fbobject currently enabled by default; use "false" to disable
String fbo = (String)java.security.AccessController.doPrivileged(
new sun.security.action.GetPropertyAction(
"sun.java2d.opengl.fbobject"));
isFBObjectEnabled = !"false".equals(fbo);
// lcdshader currently enabled by default; use "false" to disable
String lcd = (String)java.security.AccessController.doPrivileged(
new sun.security.action.GetPropertyAction(
"sun.java2d.opengl.lcdshader"));
isLCDShaderEnabled = !"false".equals(lcd);
// biopshader currently enabled by default; use "false" to disable
String biop = (String)java.security.AccessController.doPrivileged(
new sun.security.action.GetPropertyAction(
"sun.java2d.opengl.biopshader"));
isBIOpShaderEnabled = !"false".equals(biop);
// gradshader currently enabled by default; use "false" to disable
String grad = (String)java.security.AccessController.doPrivileged(
new sun.security.action.GetPropertyAction(
"sun.java2d.opengl.gradshader"));
isGradShaderEnabled = !"false".equals(grad);
OGLRenderQueue rq = OGLRenderQueue.getInstance();
oglImagePipe = new OGLDrawImage();
oglTextPipe = new OGLTextRenderer(rq);
oglRenderPipe = new OGLRenderer(rq);
if (GraphicsPrimitive.tracingEnabled()) {
oglTextPipe = oglTextPipe.traceWrap();
//The wrapped oglRenderPipe will wrap the AA pipe as well...
//oglAAPgramPipe = oglRenderPipe.traceWrap();
}
oglAAPgramPipe = oglRenderPipe.getAAParallelogramPipe();
oglTxRenderPipe =
new PixelToParallelogramConverter(oglRenderPipe,
oglRenderPipe,
1.0, 0.25, true);
OGLBlitLoops.register();
OGLMaskFill.register();
OGLMaskBlit.register();
}
}
protected OGLSurfaceData(OGLGraphicsConfig gc,
ColorModel cm, int type)
{
super(getCustomSurfaceType(type), cm);
this.graphicsConfig = gc;
this.type = type;
setBlitProxyKey(gc.getProxyKey());
}
@Override
public SurfaceDataProxy makeProxyFor(SurfaceData srcData) {
return OGLSurfaceDataProxy.createProxy(srcData, graphicsConfig);
}
/**
* Returns the appropriate SurfaceType corresponding to the given OpenGL
* surface type constant (e.g. TEXTURE -> OpenGLTexture).
*/
private static SurfaceType getCustomSurfaceType(int oglType) {
switch (oglType) {
case TEXTURE:
return OpenGLTexture;
case FBOBJECT:
return OpenGLSurfaceRTT;
case PBUFFER:
default:
return OpenGLSurface;
}
}
/**
* Note: This should only be called from the QFT under the AWT lock.
* This method is kept separate from the initSurface() method below just
* to keep the code a bit cleaner.
*/
private void initSurfaceNow(int width, int height) {
boolean isOpaque = (getTransparency() == Transparency.OPAQUE);
boolean success = false;
switch (type) {
case PBUFFER:
success = initPbuffer(getNativeOps(),
graphicsConfig.getNativeConfigInfo(),
isOpaque,
width, height);
break;
case TEXTURE:
success = initTexture(getNativeOps(),
isOpaque, isTexNonPow2Available(),
isTexRectAvailable(),
width, height);
break;
case FBOBJECT:
success = initFBObject(getNativeOps(),
isOpaque, isTexNonPow2Available(),
isTexRectAvailable(),
width, height);
break;
case FLIP_BACKBUFFER:
success = initFlipBackbuffer(getNativeOps());
break;
default:
break;
}
if (!success) {
throw new OutOfMemoryError("can't create offscreen surface");
}
}
/**
* Initializes the appropriate OpenGL offscreen surface based on the value
* of the type parameter. If the surface creation fails for any reason,
* an OutOfMemoryError will be thrown.
*/
protected void initSurface(final int width, final int height) {
OGLRenderQueue rq = OGLRenderQueue.getInstance();
rq.lock();
try {
switch (type) {
case TEXTURE:
case PBUFFER:
case FBOBJECT:
// need to make sure the context is current before
// creating the texture (or pbuffer, or fbobject)
OGLContext.setScratchSurface(graphicsConfig);
break;
default:
break;
}
rq.flushAndInvokeNow(new Runnable() {
public void run() {
initSurfaceNow(width, height);
}
});
} finally {
rq.unlock();
}
}
/**
* Returns the OGLContext for the GraphicsConfig associated with this
* surface.
*/
public final OGLContext getContext() {
return graphicsConfig.getContext();
}
/**
* Returns the OGLGraphicsConfig associated with this surface.
*/
final OGLGraphicsConfig getOGLGraphicsConfig() {
return graphicsConfig;
}
/**
* Returns one of the surface type constants defined above.
*/
public final int getType() {
return type;
}
/**
* If this surface is backed by a texture object, returns the target
* for that texture (either GL_TEXTURE_2D or GL_TEXTURE_RECTANGLE_ARB).
* Otherwise, this method will return zero.
*/
public final int getTextureTarget() {
return getTextureTarget(getNativeOps());
}
/**
* If this surface is backed by a texture object, returns the texture ID
* for that texture.
* Otherwise, this method will return zero.
*/
public final int getTextureID() {
return getTextureID(getNativeOps());
}
/**
* Returns native resource of specified {@code resType} associated with
* this surface.
*
* Specifically, for {@code OGLSurfaceData} this method returns the
* the following:
* <pre>
* TEXTURE - texture id
* </pre>
*
* Note: the resource returned by this method is only valid on the rendering
* thread.
*
* @return native resource of specified type or 0L if
* such resource doesn't exist or can not be retrieved.
* @see sun.java2d.pipe.hw.AccelSurface#getNativeResource
*/
public long getNativeResource(int resType) {
if (resType == TEXTURE) {
return getTextureID();
}
return 0L;
}
public Raster getRaster(int x, int y, int w, int h) {
throw new InternalError("not implemented yet");
}
/**
* For now, we can only render LCD text if:
* - the fragment shader extension is available, and
* - blending is disabled, and
* - the source color is opaque
* - and the destination is opaque
*
* Eventually, we could enhance the native OGL text rendering code
* and remove the above restrictions, but that would require significantly
* more code just to support a few uncommon cases.
*/
public boolean canRenderLCDText(SunGraphics2D sg2d) {
return
graphicsConfig.isCapPresent(CAPS_EXT_LCD_SHADER) &&
sg2d.compositeState <= SunGraphics2D.COMP_ISCOPY &&
sg2d.paintState <= SunGraphics2D.PAINT_OPAQUECOLOR &&
sg2d.surfaceData.getTransparency() == Transparency.OPAQUE;
}
public void validatePipe(SunGraphics2D sg2d) {
TextPipe textpipe;
boolean validated = false;
// OGLTextRenderer handles both AA and non-AA text, but
// only works with the following modes:
// (Note: For LCD text we only enter this code path if
// canRenderLCDText() has already validated that the mode is
// CompositeType.SrcNoEa (opaque color), which will be subsumed
// by the CompositeType.SrcNoEa (any color) test below.)
if (/* CompositeType.SrcNoEa (any color) */
(sg2d.compositeState <= sg2d.COMP_ISCOPY &&
sg2d.paintState <= sg2d.PAINT_ALPHACOLOR) ||
/* CompositeType.SrcOver (any color) */
(sg2d.compositeState == sg2d.COMP_ALPHA &&
sg2d.paintState <= sg2d.PAINT_ALPHACOLOR &&
(((AlphaComposite)sg2d.composite).getRule() ==
AlphaComposite.SRC_OVER)) ||
/* CompositeType.Xor (any color) */
(sg2d.compositeState == sg2d.COMP_XOR &&
sg2d.paintState <= sg2d.PAINT_ALPHACOLOR))
{
textpipe = oglTextPipe;
} else {
// do this to initialize textpipe correctly; we will attempt
// to override the non-text pipes below
super.validatePipe(sg2d);
textpipe = sg2d.textpipe;
validated = true;
}
PixelToParallelogramConverter txPipe = null;
OGLRenderer nonTxPipe = null;
if (sg2d.antialiasHint != SunHints.INTVAL_ANTIALIAS_ON) {
if (sg2d.paintState <= sg2d.PAINT_ALPHACOLOR) {
if (sg2d.compositeState <= sg2d.COMP_XOR) {
txPipe = oglTxRenderPipe;
nonTxPipe = oglRenderPipe;
}
} else if (sg2d.compositeState <= sg2d.COMP_ALPHA) {
if (OGLPaints.isValid(sg2d)) {
txPipe = oglTxRenderPipe;
nonTxPipe = oglRenderPipe;
}
// custom paints handled by super.validatePipe() below
}
} else {
if (sg2d.paintState <= sg2d.PAINT_ALPHACOLOR) {
if (graphicsConfig.isCapPresent(CAPS_PS30) &&
(sg2d.imageComp == CompositeType.SrcOverNoEa ||
sg2d.imageComp == CompositeType.SrcOver))
{
if (!validated) {
super.validatePipe(sg2d);
validated = true;
}
PixelToParallelogramConverter aaConverter =<|fim▁hole|> 1.0/8.0, 0.499,
false);
sg2d.drawpipe = aaConverter;
sg2d.fillpipe = aaConverter;
sg2d.shapepipe = aaConverter;
} else if (sg2d.compositeState == sg2d.COMP_XOR) {
// install the solid pipes when AA and XOR are both enabled
txPipe = oglTxRenderPipe;
nonTxPipe = oglRenderPipe;
}
}
// other cases handled by super.validatePipe() below
}
if (txPipe != null) {
if (sg2d.transformState >= sg2d.TRANSFORM_TRANSLATESCALE) {
sg2d.drawpipe = txPipe;
sg2d.fillpipe = txPipe;
} else if (sg2d.strokeState != sg2d.STROKE_THIN) {
sg2d.drawpipe = txPipe;
sg2d.fillpipe = nonTxPipe;
} else {
sg2d.drawpipe = nonTxPipe;
sg2d.fillpipe = nonTxPipe;
}
// Note that we use the transforming pipe here because it
// will examine the shape and possibly perform an optimized
// operation if it can be simplified. The simplifications
// will be valid for all STROKE and TRANSFORM types.
sg2d.shapepipe = txPipe;
} else {
if (!validated) {
super.validatePipe(sg2d);
}
}
// install the text pipe based on our earlier decision
sg2d.textpipe = textpipe;
// always override the image pipe with the specialized OGL pipe
sg2d.imagepipe = oglImagePipe;
}
@Override
protected MaskFill getMaskFill(SunGraphics2D sg2d) {
if (sg2d.paintState > sg2d.PAINT_ALPHACOLOR) {
/*
* We can only accelerate non-Color MaskFill operations if
* all of the following conditions hold true:
* - there is an implementation for the given paintState
* - the current Paint can be accelerated for this destination
* - multitexturing is available (since we need to modulate
* the alpha mask texture with the paint texture)
*
* In all other cases, we return null, in which case the
* validation code will choose a more general software-based loop.
*/
if (!OGLPaints.isValid(sg2d) ||
!graphicsConfig.isCapPresent(CAPS_MULTITEXTURE))
{
return null;
}
}
return super.getMaskFill(sg2d);
}
public boolean copyArea(SunGraphics2D sg2d,
int x, int y, int w, int h, int dx, int dy)
{
if (sg2d.transformState < sg2d.TRANSFORM_TRANSLATESCALE &&
sg2d.compositeState < sg2d.COMP_XOR)
{
x += sg2d.transX;
y += sg2d.transY;
oglRenderPipe.copyArea(sg2d, x, y, w, h, dx, dy);
return true;
}
return false;
}
public void flush() {
invalidate();
OGLRenderQueue rq = OGLRenderQueue.getInstance();
rq.lock();
try {
// make sure we have a current context before
// disposing the native resources (e.g. texture object)
OGLContext.setScratchSurface(graphicsConfig);
RenderBuffer buf = rq.getBuffer();
rq.ensureCapacityAndAlignment(12, 4);
buf.putInt(FLUSH_SURFACE);
buf.putLong(getNativeOps());
// this call is expected to complete synchronously, so flush now
rq.flushNow();
} finally {
rq.unlock();
}
}
/**
* Disposes the native resources associated with the given OGLSurfaceData
* (referenced by the pData parameter). This method is invoked from
* the native Dispose() method from the Disposer thread when the
* Java-level OGLSurfaceData object is about to go away. Note that we
* also pass a reference to the native GLX/WGLGraphicsConfigInfo
* (pConfigInfo) for the purposes of making a context current.
*/
static void dispose(long pData, long pConfigInfo) {
OGLRenderQueue rq = OGLRenderQueue.getInstance();
rq.lock();
try {
// make sure we have a current context before
// disposing the native resources (e.g. texture object)
OGLContext.setScratchSurface(pConfigInfo);
RenderBuffer buf = rq.getBuffer();
rq.ensureCapacityAndAlignment(12, 4);
buf.putInt(DISPOSE_SURFACE);
buf.putLong(pData);
// this call is expected to complete synchronously, so flush now
rq.flushNow();
} finally {
rq.unlock();
}
}
static void swapBuffers(long window) {
OGLRenderQueue rq = OGLRenderQueue.getInstance();
rq.lock();
try {
RenderBuffer buf = rq.getBuffer();
rq.ensureCapacityAndAlignment(12, 4);
buf.putInt(SWAP_BUFFERS);
buf.putLong(window);
rq.flushNow();
} finally {
rq.unlock();
}
}
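// Note: the ensureCapacityAndAlignment(12, 4) calls above reserve room for
// one 4-byte opcode plus one 8-byte pointer/handle, 4-byte aligned (this
// reading of the packing is inferred from the buffer writes, not documented).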
/**
* Returns true if OpenGL textures can have non-power-of-two dimensions
* when using the basic GL_TEXTURE_2D target.
*/
boolean isTexNonPow2Available() {
return graphicsConfig.isCapPresent(CAPS_TEXNONPOW2);
}
/**
* Returns true if OpenGL textures can have non-power-of-two dimensions
* when using the GL_TEXTURE_RECTANGLE_ARB target (only available when the
* GL_ARB_texture_rectangle extension is present).
*/
boolean isTexRectAvailable() {
return graphicsConfig.isCapPresent(CAPS_EXT_TEXRECT);
}
public Rectangle getNativeBounds() {
OGLRenderQueue rq = OGLRenderQueue.getInstance();
rq.lock();
try {
return new Rectangle(nativeWidth, nativeHeight);
} finally {
rq.unlock();
}
}
}<|fim▁end|> | new PixelToParallelogramConverter(sg2d.shapepipe,
oglAAPgramPipe, |
<|file_name|>__manifest__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2015 Antiun Ingeniería S.L. - Antonio Espinosa
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': "Manage model export profiles",
'category': 'Personalization',
'version': '10.0.1.0.0',
'depends': [
'web',
],<|fim▁hole|> 'views/assets.xml',
'views/ir_exports.xml',
'views/ir_model.xml',
'views/ir_model_access.xml',
'views/res_groups.xml',
],
'qweb': [
"static/src/xml/base.xml",
],
'author': 'Tecnativa, '
'LasLabs, '
'Ursa Information Systems, '
'Odoo Community Association (OCA)',
'website': 'https://www.tecnativa.com',
'license': 'AGPL-3',
'installable': True,
'application': False,
}<|fim▁end|> | 'data': [ |
<|file_name|>layoutmodel.cpp<|end_file_name|><|fim▁begin|>/*
Copyright (C) 2013 by Claudio Zopfi, Zurich, Suisse, [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <QDebug>
#include "layoutmodel.h"
LayoutModel::LayoutModel()
{
width=200;
height=200;
nrows=2;
nsegs=12;
nseg = new int[nrows];
nseg[0] = 5;
nseg[1] = 7;
rowheight = new int[nrows];
rowheightpx = new int[nrows];
rowheight[0] = 4;
rowheight[1] = 4;
rowheightmax=8;
segwidth = new int[nsegs];
segwidthpx = new int[nsegs];
setAll(nsegs,segwidth,1);
segwidthmax=new int[nrows];
segwidthmax[0]= 5;
segwidthmax[1]= 7;
note = new int[nsegs];
note[0]=50;
note[1]=51;
note[2]=52;
note[3]=53;
note[4]=54;
note[5]=55;
note[6]=56;
note[7]=57;
note[8]=58;
note[9]=59;
note[10]=60;
note[11]=61;
ctlx=new int[nsegs];
setAll(nsegs,ctlx,0);
ctly=new int[nsegs];
setAll(nsegs,ctly,0);
chan=new int[nsegs];
setAll(nsegs,chan,0);
pressed=new int[nsegs];
setAll(nsegs,pressed,0);
calcGeo(200,200);
}
void LayoutModel::calcGeo(int w, int h)
{
// qDebug() << "Cacl geo " << w << " " << h;
width=w;
height=h;
int i=0;
int rowheightsum=0;
for(int y=0;y<nrows;y++) {
rowheightpx[y]=height*rowheight[y]/rowheightmax;
rowheightsum+=rowheightpx[y];
// additional pixels may occur due to rounding differences
// -> add additional pixels to last row
if(y==nrows-1 && rowheightsum<height) {
rowheightpx[y]+=height-rowheightsum;
}
int segwidthsum=0;
for(int x=0;x<nseg[y];x++) {
segwidthpx[i]=width*segwidth[i]/segwidthmax[y];
segwidthsum+=segwidthpx[i];
// -> add additional pixels to last segment
if(x==nseg[y]-1 && segwidthsum<width) {
segwidthpx[i]+=width-segwidthsum;
}
i++;
}
}
}
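// Worked example with the constructor defaults above (assumed unchanged):
// height=200, rowheightmax=8, rowheight[0]=4 -> rowheightpx[0] = 200*4/8 = 100 px;
// width=200, segwidthmax[0]=5, segwidth[i]=1 -> segwidthpx[i] = 200*1/5 = 40 px,
// so row 0 holds five 40 px segments; any integer-division remainder is folded
// into the last row/segment by the branches above.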
int LayoutModel::getHeight() const
{
return height;
}
int LayoutModel::getWidth() const
{
return width;
}
int LayoutModel::getNrows() const
{
return nrows;
}
int LayoutModel::getRowheightpx(int i) const
{
return rowheightpx[i];
}
int LayoutModel::getNseg(int i) const
{
return nseg[i];
}
void LayoutModel::setAll(int n, int *d, int v)
{
for(int i=0;i<n;i++) {
d[i]=v;
}
}
int LayoutModel::getSegwidth(int i) const
{
return segwidth[i];
}
int LayoutModel::getCtly(int i) const
{
return ctly[i];
}
int LayoutModel::getChan(int i) const
{
return chan[i];
}
int LayoutModel::getCtlx(int i) const<|fim▁hole|>
int LayoutModel::getNote(int i) const
{
return note[i];
}
int LayoutModel::getSegwidthpx(int i) const
{
return segwidthpx[i];
}
int LayoutModel::getSegwidthmax(int i) const
{
return segwidthmax[i];
}
int LayoutModel::getPressed(int i) const
{
return pressed[i];
}
void LayoutModel::incPressed(int i)
{
pressed[i]++;
}
void LayoutModel::decPressed(int i)
{
if(pressed[i]>0) {
pressed[i]--;
}
}<|fim▁end|> | {
return ctlx[i];
} |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import { A, O } from 'b-o-a';
import { State } from '../types/state';
import currentPage$ from '../props/current-page';<|fim▁hole|>import stampRallies$ from '../props/stamp-rallies';
import stampRally$ from '../props/stamp-rally';
import stampRallyForm$ from '../props/stamp-rally-form';
import token$ from '../props/token';
const getDefaultState = (): State => {
return {
googleApiKey: process.env.GOOGLE_API_KEY,
currentPage: 'sign_in#index',
signIn: {
email: null,
password: null
},
spots: [],
spotForm: {
name: null
},
stampRallies: [],
stampRally: null,
stampRallyForm: {
name: null
},
token: {
token: null,
userId: null
}
};
};
const $ = (action$: O<A<any>>, state: State): O<State> => {
const s = (state ? state : getDefaultState());
return O
.combineLatest(
currentPage$(s.currentPage, action$),
signIn$(s.signIn, action$),
token$(s.token, action$),
spots$(s.spots, action$),
spotForm$(s.spotForm, action$),
stampRallies$(s.stampRallies, action$),
stampRally$(s.stampRally, action$),
stampRallyForm$(s.stampRallyForm, action$),
(
currentPage,
signIn,
token,
spots,
spotForm,
stampRallies,
stampRally,
stampRallyForm
): State => {
return Object.assign({}, s, {
currentPage,
signIn,
token,
spots,
spotForm,
stampRallies,
stampRally,
stampRallyForm
});
}
)
.do(console.log.bind(console)) // logger for state
.share();
};
export { $ };<|fim▁end|> | import signIn$ from '../props/sign-in';
import spots$ from '../props/spots';
import spotForm$ from '../props/spot-form'; |
<|file_name|>64a7d6477aae_fix_description_field_in_connection_to_.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""fix description field in connection to be text
Revision ID: 64a7d6477aae
Revises: f5b5ec089444
Create Date: 2020-11-25 08:56:11.866607
"""
import sqlalchemy as sa # noqa
from alembic import op # noqa
# revision identifiers, used by Alembic.
revision = '64a7d6477aae'
down_revision = '61ec73d9401f'
branch_labels = None
depends_on = None
def upgrade():
"""Apply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.String(length=5000),
type_=sa.Text(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():<|fim▁hole|> conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.Text(5000),
type_=sa.String(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column(
'connection',
'description',
existing_type=sa.Text(),
type_=sa.String(length=5000),
existing_nullable=True,
)<|fim▁end|> | """Unapply fix description field in connection to be text""" |
<|file_name|>argv_experiment.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from sys import argv
from sys import stdout
stdout.write('argv ')
print(argv)
stdout.write('argv[1:] ')
print(argv[1:])
stdout.write('argv[:] ')
print(argv[:])
stdout.write('len(argv) ')
print(len(argv))
stdout.write('len(argv[1:]) ')
print(len(argv[1:]))<|fim▁hole|>
testvar = argv[1]
stdout.write('type(argv) ')
print(type(argv))
testvar = argv[1]
stdout.write('type(testvar) ')
print(type(testvar))
testvar = argv[1]
stdout.write('len(testvar) ')
print(len(testvar))
for i in range(len(argv)):
stdout.write(argv[i] + ' ')
print # -%<|fim▁end|> |
stdout.write('len(argv[:]) ')
print(len(argv[:])) |
<|file_name|>Glyphs.cpp<|end_file_name|><|fim▁begin|>//---------------------------------------------------------------------------
#include <vcl.h>
#pragma hdrstop
#include "Glyphs.h"<|fim▁hole|>#pragma link "PngImageList"
#ifndef NO_RESOURCES
#pragma resource "*.dfm"
#endif
//---------------------------------------------------------------------------
TGlyphsModule * GlyphsModule;
//---------------------------------------------------------------------------
__fastcall TGlyphsModule::TGlyphsModule(TComponent* Owner)
: TDataModule(Owner)
{
}
//---------------------------------------------------------------------------<|fim▁end|> | //---------------------------------------------------------------------------
#pragma package(smart_init) |
<|file_name|>cmdLineUtils.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env @python@
# ROOT command line tools module: cmdLineUtils
# Author: Julien Ripoche
# Mail: [email protected]
# Date: 20/08/15
"""Contain utils for ROOT command line tools"""
##########
# Stream redirect functions
# The original code of the these functions can be found here :
# http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
# Thanks J.F. Sebastian !!
from contextlib import contextmanager
import os
import sys
def fileno(file_or_fd):
"""
Look for 'fileno' attribute.
"""
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextmanager
def streamRedirected(source=sys.stdout, destination=os.devnull):
"""
Redirect the output from source to destination.
"""
stdout_fd = fileno(source)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
source.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(destination), stdout_fd) # $ exec >&destination
except ValueError: # filename
with open(destination, 'wb') as destination_file:
os.dup2(destination_file.fileno(), stdout_fd) # $ exec > destination
try:
yield source # allow code to be run with the redirected stream
finally:
# restore source to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
source.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
def stdoutRedirected():
"""
Redirect the output from sys.stdout to os.devnull.
"""
return streamRedirected(sys.stdout, os.devnull)
def stderrRedirected():
"""
Redirect the output from sys.stderr to os.devnull.
"""
return streamRedirected(sys.stderr, os.devnull)
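
# Usage sketch (illustrative only): silence anything written to the C-level
# stdout, e.g. chatter emitted while ROOT loads; `noisy_call` is a
# hypothetical function writing to fd 1.
#
#     with stdoutRedirected():
#         noisy_call()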
# The end of streamRedirected functions
##########
##########
# Imports
##
# redirect output (escape characters during ROOT importation...)
# The gymnastic with sys argv is necessary to workaround for ROOT-7577
argvTmp = sys.argv[:]
sys.argv = []
with stdoutRedirected():
import ROOT
ROOT.gROOT.GetVersion()
sys.argv = argvTmp
import argparse
import glob
import fnmatch
import logging
LOG_FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=LOG_FORMAT)
# The end of imports
##########
##########
# Different functions to get a parser of arguments and options
def _getParser(theHelp, theEpilog):
"""
Get a commandline parser with the defaults of the commandline utils.
"""
return argparse.ArgumentParser(description=theHelp,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = theEpilog)
def getParserSingleFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
source file or not.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='?', help="Input file")
return parser
def getParserFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
list of source files.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='+', help="Input file")
return parser
def getParserSourceDest(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils,
a list of source files and a destination file.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("SOURCE", nargs='+', help="Source file")
parser.add_argument("DEST", help="Destination file")
return parser
# The end of get parser functions
##########
##########
# Several utils
@contextmanager
def _setIgnoreLevel(level):
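    # Temporarily raise ROOT's error-ignore threshold to `level`, restoring
    # the previous value when the with-block exits.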
originalLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = level
yield
ROOT.gErrorIgnoreLevel = originalLevel
def changeDirectory(rootFile,pathSplit):
"""
Change the current directory (ROOT.gDirectory) by the corresponding (rootFile,pathSplit)
"""
rootFile.cd()
for directoryName in pathSplit:
theDir = ROOT.gDirectory.Get(directoryName)
if not theDir:
logging.warning("Directory %s does not exist." %directoryName)
return 1
else:
theDir.cd()
return 0
def createDirectory(rootFile,pathSplit):
"""
Add a directory named 'pathSplit[-1]' in (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0: ROOT.gDirectory.mkdir(pathSplit[-1])
return retcode
def getFromDirectory(objName):
"""
Get the object objName from the current directory
"""
return ROOT.gDirectory.Get(objName)
def isExisting(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), exits
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetListOfKeys().Contains(pathSplit[-1])
def isDirectoryKey(key):
"""
Return True if the object, corresponding to the key, inherits from TDirectory
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TDirectory.Class())
def isTreeKey(key):
"""
Return True if the object, corresponding to the key, inherits from TTree
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TTree.Class())
def getKey(rootFile,pathSplit):
"""
Get the key of the corresponding object (rootFile,pathSplit)
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetKey(pathSplit[-1])
def isDirectory(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TDirectory
"""
if pathSplit == []: return True # the object is the rootFile itself
else: return isDirectoryKey(getKey(rootFile,pathSplit))
def isTree(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TTree
"""
if pathSplit == []: return False # the object is the rootFile itself
else: return isTreeKey(getKey(rootFile,pathSplit))
def getKeyList(rootFile,pathSplit):
"""
Get the list of keys of the directory (rootFile,pathSplit),
if (rootFile,pathSplit) is not a directory then get the key in a list
"""
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
return ROOT.gDirectory.GetListOfKeys()
else: return [getKey(rootFile,pathSplit)]
def keyListSort(keyList):
"""
Sort list of keys by their names ignoring the case
"""
keyList.sort(key=lambda x: x.GetName().lower())
def tupleListSort(tupleList):
"""
Sort list of tuples by their first elements ignoring the case
"""
tupleList.sort(key=lambda x: x[0].lower())
def dirListSort(dirList):
"""
Sort list of directories by their names ignoring the case
"""
dirList.sort(key=lambda x: [n.lower() for n in x])
def keyClassSpliter(rootFile,pathSplitList):
"""
Return a list of directories and a list of keys corresponding
to the other objects, for rootLs and rooprint use
"""
keyList = []
dirList = []
for pathSplit in pathSplitList:
if pathSplit == []: dirList.append(pathSplit)
elif isDirectory(rootFile,pathSplit): dirList.append(pathSplit)
else: keyList.append(getKey(rootFile,pathSplit))
keyListSort(keyList)
dirListSort(dirList)
return keyList,dirList
def openROOTFile(fileName, mode="read"):
"""
Open the ROOT file corresponding to fileName in the corresponding mode,
    redirecting the output to hide warnings about missing dictionaries
"""
#with stderrRedirected():
with _setIgnoreLevel(ROOT.kError):
theFile = ROOT.TFile.Open(fileName, mode)
if not theFile:
logging.warning("File %s does not exist", fileName)
return theFile
def openROOTFileCompress(fileName, compress, recreate):
"""
Open a ROOT file (like openROOTFile) with the possibility
to change compression settings
"""
if compress != None and os.path.isfile(fileName):
logging.warning("can't change compression settings on existing file")
return None
mode = "recreate" if recreate else "update"
theFile = openROOTFile(fileName, mode)
if compress != None: theFile.SetCompressionSettings(compress)
return theFile
def joinPathSplit(pathSplit):
"""
Join the pathSplit with '/'
"""
return "/".join(pathSplit)
MANY_OCCURENCE_WARNING = "Same name objects aren't supported: '{0}' of '{1}' won't be processed"
def manyOccurenceRemove(pathSplitList,fileName):
"""
    Search for duplicate occurrences of the same pathSplit and remove them
"""
if len(pathSplitList) > 1:
for n in pathSplitList:
if pathSplitList.count(n) != 1:
logging.warning(MANY_OCCURENCE_WARNING.format(joinPathSplit(n),fileName))
while n in pathSplitList: pathSplitList.remove(n)
def patternToPathSplitList(fileName,pattern):
"""
Get the list of pathSplit of objects in the ROOT file
    corresponding to fileName that match the pattern
"""
# Open ROOT file
rootFile = openROOTFile(fileName)
if not rootFile: return []
# Split pattern avoiding multiple slash problem
patternSplit = [n for n in pattern.split("/") if n != ""]
# Main loop
pathSplitList = [[]]
for patternPiece in patternSplit:
newPathSplitList = []
for pathSplit in pathSplitList:
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
newPathSplitList.extend( \
[pathSplit + [key.GetName()] \
for key in ROOT.gDirectory.GetListOfKeys() \
if fnmatch.fnmatch(key.GetName(),patternPiece)])
pathSplitList = newPathSplitList
# No match
if pathSplitList == []:
logging.warning("can't find {0} in {1}".format(pattern,fileName))
    # Remove duplicate occurrences of the same match from the list
manyOccurenceRemove(pathSplitList,fileName)
return pathSplitList
def fileNameListMatch(filePattern,wildcards):
"""
    Get the list of file names that match filePattern
"""
if wildcards: return [os.path.expandvars(os.path.expanduser(i)) for i in glob.iglob(filePattern)]
else: return [os.path.expandvars(os.path.expanduser(filePattern))]
def pathSplitListMatch(fileName,objPattern,wildcards):
"""
    Get the list of pathSplit that match objPattern
"""
if wildcards: return patternToPathSplitList(fileName,objPattern)
else: return [[n for n in objPattern.split("/") if n != ""]]
def patternToFileNameAndPathSplitList(pattern,wildcards = True):
"""
    Get the list of tuples, each containing:
    - a ROOT file name
    - the list of split paths (in that file) of the matching objects
Use unix wildcards by default
"""
rootFilePattern = "*.root"
rootObjPattern = rootFilePattern+":*"
httpRootFilePattern = "htt*://*.root"
httpRootObjPattern = httpRootFilePattern+":*"
xrootdRootFilePattern = "root://*.root"
xrootdRootObjPattern = xrootdRootFilePattern+":*"
s3RootFilePattern = "s3://*.root"
s3RootObjPattern = s3RootFilePattern+":*"
gsRootFilePattern = "gs://*.root"
gsRootObjPattern = gsRootFilePattern+":*"
rfioRootFilePattern = "rfio://*.root"
rfioRootObjPattern = rfioRootFilePattern+":*"
pcmFilePattern = "*.pcm"
pcmObjPattern = pcmFilePattern+":*"
if fnmatch.fnmatch(pattern,httpRootObjPattern) or \
fnmatch.fnmatch(pattern,xrootdRootObjPattern) or \
fnmatch.fnmatch(pattern,s3RootObjPattern) or \
fnmatch.fnmatch(pattern,gsRootObjPattern) or \
fnmatch.fnmatch(pattern,rfioRootObjPattern):
patternSplit = pattern.rsplit(":", 1)
fileName = patternSplit[0]
objPattern = patternSplit[1]
pathSplitList = pathSplitListMatch(fileName,objPattern,wildcards)
return [(fileName,pathSplitList)]
if fnmatch.fnmatch(pattern,httpRootFilePattern) or \
fnmatch.fnmatch(pattern,xrootdRootFilePattern) or \
fnmatch.fnmatch(pattern,s3RootFilePattern) or \
fnmatch.fnmatch(pattern,gsRootFilePattern) or \
fnmatch.fnmatch(pattern,rfioRootFilePattern):
fileName = pattern
pathSplitList = [[]]
return [(fileName,pathSplitList)]
if fnmatch.fnmatch(pattern,rootObjPattern) or \
fnmatch.fnmatch(pattern,pcmObjPattern):
patternSplit = pattern.split(":")
filePattern = patternSplit[0]
objPattern = patternSplit[1]
fileNameList = fileNameListMatch(filePattern,wildcards)
return [(fileName,pathSplitListMatch(fileName,objPattern,wildcards)) for fileName in fileNameList]
if fnmatch.fnmatch(pattern,rootFilePattern) or \
fnmatch.fnmatch(pattern,pcmFilePattern):
filePattern = pattern
fileNameList = fileNameListMatch(filePattern,wildcards)
pathSplitList = [[]]
return [(fileName,pathSplitList) for fileName in fileNameList]
logging.warning("{0}: No such file (or extension not supported)".format(pattern))
return []
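
# Illustrative expansion (hypothetical file names): a pattern such as
# "histo*.root:dir/h*" may expand to
#     [("histo1.root", [["dir", "h1"], ["dir", "h2"]])]
# i.e. one tuple per file, each carrying the matching object paths split on '/'.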
# End of utils
##########
##########
# Set of functions to put the arguments in shape
def getArgs(parser):
"""
Get arguments corresponding to parser.
"""
return parser.parse_args()
def getSourceListArgs(parser, wildcards = True):
"""
Create a list of tuples that contain source ROOT file names
and lists of path in these files as well as the original arguments
"""
args = getArgs(parser)
inputFiles = []
try:
inputFiles = args.FILE
except:
inputFiles = args.SOURCE
sourceList = \
[tup for pattern in inputFiles \
for tup in patternToFileNameAndPathSplitList(pattern,wildcards)]
return sourceList, args
def getSourceListOptDict(parser, wildcards = True):
"""
Get the list of tuples and the dictionary with options
"""
sourceList, args = getSourceListArgs(parser, wildcards)
if sourceList == []:
logging.error("Input file(s) not found!")
return sourceList, vars(args)
def getSourceDestListOptDict(parser, wildcards = True):
"""
Get the list of tuples of sources, create destination name, destination pathSplit
and the dictionary with options
"""
sourceList, args = getSourceListArgs(parser, wildcards)
destList = \
patternToFileNameAndPathSplitList( \
args.DEST,wildcards=False)
if destList != []:
destFileName,destPathSplitList = destList[0]
destPathSplit = destPathSplitList[0]
else:
destFileName = ""
destPathSplit = []
return sourceList, destFileName, destPathSplit, vars(args)
# The end of the set of functions to put the arguments in shape
##########
##########
# Several functions shared by roocp, roomv and roorm
TARGET_ERROR = "target '{0}' is not a directory"
OMITTING_FILE_ERROR = "omitting file '{0}'"
OMITTING_DIRECTORY_ERROR = "omitting directory '{0}'"
OVERWRITE_ERROR = "cannot overwrite non-directory '{0}' with directory '{1}'"
def copyRootObject(sourceFile,sourcePathSplit,destFile,destPathSplit,oneSource,recursive,replace):
"""
Initialize the recursive function 'copyRootObjectRecursive', written to be as unix-like as possible
"""
retcode = 0
isMultipleInput = not (oneSource and sourcePathSplit != [])
recursiveOption = recursive
# Multiple input and unexisting or non-directory destination
# TARGET_ERROR
if isMultipleInput and destPathSplit != [] \
and not (isExisting(destFile,destPathSplit) \
and isDirectory(destFile,destPathSplit)):
logging.warning(TARGET_ERROR.format(destPathSplit[-1]))
retcode += 1
# Entire ROOT file or directory in input omitting "-r" option
# OMITTING_FILE_ERROR or OMITTING_DIRECTORY_ERROR
if not recursiveOption:
if sourcePathSplit == []:
logging.warning(OMITTING_FILE_ERROR.format( \
sourceFile.GetName()))
retcode += 1
elif isDirectory(sourceFile,sourcePathSplit):
logging.warning(OMITTING_DIRECTORY_ERROR.format( \
sourcePathSplit[-1]))
retcode += 1
# Run copyRootObjectRecursive function with the wish
# to follow the unix copy behaviour
if sourcePathSplit == []:
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = ""
if not isMultipleInput and (destPathSplit != [] \
and not isExisting(destFile,destPathSplit)):
setName = destPathSplit[-1]
objectName = sourcePathSplit[-1]
if isDirectory(sourceFile,sourcePathSplit):
if setName != "":
createDirectory(destFile,destPathSplit[:-1]+[setName])
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1]+[setName],replace)
elif isDirectory(destFile,destPathSplit):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
else:
logging.warning(OVERWRITE_ERROR.format( \
destPathSplit[-1],objectName))
retcode += 1
else:
if setName != "":
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
elif isDirectory(destFile,destPathSplit):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = destPathSplit[-1]
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
return retcode
DELETE_ERROR = "object {0} was not existing, so it is not deleted"
def deleteObject(rootFile,pathSplit):
"""
Delete the object 'pathSplit[-1]' from (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0:
fileName = pathSplit[-1]
if isExisting(rootFile,pathSplit):
ROOT.gDirectory.Delete(fileName+";*")
else:
logging.warning(DELETE_ERROR.format(fileName))
retcode += 1
return retcode
def copyRootObjectRecursive(sourceFile,sourcePathSplit,destFile,destPathSplit,replace,setName=""):
"""
Copy objects from a file or directory (sourceFile,sourcePathSplit)
    to another file or directory (destFile,destPathSplit)
    - aims to follow unix copy behaviour
    - recursive function
- Python adaptation of a root input/output tutorial :
$ROOTSYS/tutorials/io/copyFiles.C
"""
retcode = 0
replaceOption = replace
for key in getKeyList(sourceFile,sourcePathSplit):
objectName = key.GetName()
if isDirectoryKey(key):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode +=copyRootObjectRecursive(sourceFile, \
sourcePathSplit+[objectName], \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
elif isTreeKey(key):
T = key.GetMotherDir().Get(objectName+";"+str(key.GetCycle()))
if replaceOption and isExisting(destFile,destPathSplit+[T.GetName()]):
retcodeTemp = deleteObject(destFile,destPathSplit+[T.GetName()])
if retcodeTemp:
retcode += retcodeTemp
continue
changeDirectory(destFile,destPathSplit)
newT = T.CloneTree(-1,"fast")
if setName != "":
newT.SetName(setName)
newT.Write()
else:
obj = key.ReadObj()
if replaceOption and isExisting(destFile,destPathSplit+[setName]):
changeDirectory(destFile,destPathSplit)
otherObj = getFromDirectory(setName)
if not otherObj == obj:
retcodeTemp = deleteObject(destFile,destPathSplit+[setName])
if retcodeTemp:
retcode += retcodeTemp
continue
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
else:
if setName != "":
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
obj.Delete()
changeDirectory(destFile,destPathSplit)
ROOT.gDirectory.SaveSelf(ROOT.kTRUE)
return retcode
FILE_REMOVE_ERROR = "cannot remove '{0}': Is a ROOT file"
DIRECTORY_REMOVE_ERROR = "cannot remove '{0}': Is a directory"
ASK_FILE_REMOVE = "remove '{0}' ? (y/n) : "
ASK_OBJECT_REMOVE = "remove '{0}' from '{1}' ? (y/n) : "
def deleteRootObject(rootFile, pathSplit, interactive, recursive):
"""
Remove the object (rootFile,pathSplit)
-interactive : prompt before every removal
-recursive : allow directory, and ROOT file, removal
"""
retcode = 0
if not recursive and isDirectory(rootFile,pathSplit):
if pathSplit == []:
logging.warning(FILE_REMOVE_ERROR.format(rootFile.GetName()))
retcode += 1
else:
logging.warning(DIRECTORY_REMOVE_ERROR.format(pathSplit[-1]))
retcode += 1
else:
if interactive:
if pathSplit != []:
answer = raw_input(ASK_OBJECT_REMOVE \
.format("/".join(pathSplit),rootFile.GetName()))
else:
answer = raw_input(ASK_FILE_REMOVE \
.format(rootFile.GetName()))
remove = answer.lower() == 'y'
else:
remove = True
if remove:
if pathSplit != []:
retcode += deleteObject(rootFile,pathSplit)
else:
rootFile.Close()
os.remove(rootFile.GetName())
return retcode
# End of functions shared by roocp, roomv and roorm
##########
##########
# Help strings for ROOT command line tools
# Arguments
SOURCE_HELP = "path of the source."
SOURCES_HELP = "path of the source(s)."
DEST_HELP = "path of the destination."
# Options
COMPRESS_HELP = \
"""change the compression settings of the
destination file (if not already existing)."""
INTERACTIVE_HELP = "prompt before every removal."
RECREATE_HELP = "recreate the destination file."
RECURSIVE_HELP = "recurse inside directories"
REPLACE_HELP = "replace object if already existing"
# End of help strings
##########
##########
# ROOTBROWSE
def _openBrowser(rootFile=None):
browser = ROOT.TBrowser()
if rootFile: rootFile.Browse(browser)
ROOT.PyROOT.TPyROOTApplication.Run(ROOT.gApplication)
def rootBrowse(fileName=None):
if fileName:
rootFile = openROOTFile(fileName)
if not rootFile: return 1
_openBrowser(rootFile)
rootFile.Close()
else:
_openBrowser()
return 0
# End of ROOTBROWSE
##########
##########
# ROOTCP
def _copyObjects(fileName, pathSplitList, destFile, destPathSplit, oneFile, \
recursive, replace):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcode += copyRootObject(rootFile, pathSplit, destFile, destPathSplit, \
oneSource, recursive, replace)
if fileName != destFileName: rootFile.Close()
return retcode
def rootCp(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, recursive=False, replace=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in [n[0] for n in sourceList]:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, recursive, replace)
destFile.Close()
return retcode
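
# Usage sketch (assumed file names): copy every object matching "h*" from
# source.root into a freshly recreated dest.root:
#
#     src = patternToFileNameAndPathSplitList("source.root:h*")
#     rootCp(src, "dest.root", [], recreate=True)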
# End of ROOTCP
##########
##########
# ROOTEVENTSELECTOR
def _copyTreeSubset(sourceFile,sourcePathSplit,destFile,destPathSplit,firstEvent,lastEvent):
"""Copy a subset of the tree from (sourceFile,sourcePathSplit)
    to (destFile,destPathSplit) according to firstEvent and lastEvent"""
retcode = changeDirectory(sourceFile,sourcePathSplit[:-1])
if retcode != 0: return retcode
bigTree = getFromDirectory(sourcePathSplit[-1])
nbrEntries = bigTree.GetEntries()
# changeDirectory for the small tree not to be memory-resident
retcode = changeDirectory(destFile,destPathSplit)
if retcode != 0: return retcode
smallTree = bigTree.CloneTree(0)
if lastEvent == -1:
lastEvent = nbrEntries-1
isNtuple = bigTree.InheritsFrom(ROOT.TNtuple.Class())
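    # For a TNtuple, Fill() expects the column values as arguments, so the
    # loop below routes through TTree.Fill via super() to copy the entry that
    # GetEntry just loaded into the branch buffers.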
for i in range(firstEvent, lastEvent+1):
bigTree.GetEntry(i)
if isNtuple:
super(ROOT.TNtuple,smallTree).Fill()
else:
smallTree.Fill()
smallTree.Write()
return retcode
def _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, first, last):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
for pathSplit in pathSplitList:
if isTree(rootFile,pathSplit):
retcode += _copyTreeSubset(rootFile,pathSplit, \
destFile,destPathSplit,first,last)
if fileName != destFileName: rootFile.Close()
return retcode
def rootEventselector(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, first=0, last=-1):
# Check arguments
if sourceList == [] or destFileName == "": return 1
    if recreate and destFileName in [n[0] for n in sourceList]:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
# Loop on the root file
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, \
first, last)
destFile.Close()
return retcode
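
# Usage sketch (assumed file names): keep only the first 100 events of every
# tree matching "t*" from data.root in a new skim.root:
#
#     src = patternToFileNameAndPathSplitList("data.root:t*")
#     rootEventselector(src, "skim.root", [], recreate=True, first=0, last=99)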
# End of ROOTEVENTSELECTOR
##########
##########
# ROOTLS
# Ansi characters
ANSI_BOLD = "\x1B[1m"
ANSI_BLUE = "\x1B[34m"
ANSI_GREEN = "\x1B[32m"
ANSI_END = "\x1B[0m"
# Needed for column width calculation
ANSI_BOLD_LENGTH = len(ANSI_BOLD+ANSI_END)
ANSI_BLUE_LENGTH = len(ANSI_BLUE+ANSI_END)
ANSI_GREEN_LENGTH = len(ANSI_GREEN+ANSI_END)
# Terminal and platform booleans
IS_TERMINAL = sys.stdout.isatty()
IS_WIN32 = sys.platform == 'win32'
def isSpecial(ansiCode,string):
"""Use ansi code on 'string' if the output is the
    terminal on a non-Windows platform"""
if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END
else: return string
def write(string,indent=0,end=""):
"""Use sys.stdout.write to write the string with an indentation
equal to indent and specifying the end character"""
sys.stdout.write(" "*indent+string+end)
TREE_TEMPLATE = "{0:{nameWidth}}"+"{1:{titleWidth}}{2:{memoryWidth}}"
def _recursifTreePrinter(tree,indent):
"""Print recursively tree informations"""
listOfBranches = tree.GetListOfBranches()
if len(listOfBranches) > 0: # Width informations
maxCharName = max([len(branch.GetName()) \
for branch in listOfBranches])
maxCharTitle = max([len(branch.GetTitle()) \
for branch in listOfBranches])
dic = { \
"nameWidth":maxCharName+2, \
"titleWidth":maxCharTitle+4, \
"memoryWidth":1}
for branch in listOfBranches: # Print loop
rec = \
[branch.GetName(), \
"\""+branch.GetTitle()+"\"", \
str(branch.GetTotBytes())]
write(TREE_TEMPLATE.format(*rec,**dic),indent,end="\n")
_recursifTreePrinter(branch,indent+2)
def _prepareTime(time):
"""Get time in the proper shape
ex : 174512 for 17h 45m 12s
ex : 094023 for 09h 40m 23s"""
time = str(time)
time = '000000'+time
time = time[len(time)-6:]
return time
MONTH = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun', \
7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}
LONG_TEMPLATE = \
isSpecial(ANSI_BOLD,"{0:{classWidth}}")+"{1:{timeWidth}}" + \
"{2:{nameWidth}}{3:{titleWidth}}"
def _rootLsPrintLongLs(keyList,indent,treeListing):
"""Print a list of Tkey in columns
pattern : classname, datetime, name and title"""
if len(keyList) > 0: # Width informations
maxCharClass = max([len(key.GetClassName()) for key in keyList])
maxCharTime = 12
maxCharName = max([len(key.GetName()) for key in keyList])
dic = { \
"classWidth":maxCharClass+2, \
"timeWidth":maxCharTime+2, \
"nameWidth":maxCharName+2, \
"titleWidth":1}
date = ROOT.Long(0)
for key in keyList:
datime = key.GetDatime()
time = datime.GetTime()
date = datime.GetDate()
time = _prepareTime(time)
rec = \
[key.GetClassName(), \
MONTH[int(str(date)[4:6])]+" " +str(date)[6:]+ \
" "+time[:2]+":"+time[2:4], \
key.GetName(), \
"\""+key.GetTitle()+"\""]
write(LONG_TEMPLATE.format(*rec,**dic),indent,end="\n")
if treeListing and isTreeKey(key):
tree = key.ReadObj()
_recursifTreePrinter(tree,indent+2)
##
# The code of the getTerminalSize function can be found here :
# https://gist.github.com/jtriley/1108174
# Thanks jtriley !!
import os
import shlex
import struct
import platform
import subprocess
def getTerminalSize():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
#print "default"
#_get_terminal_size_windows() or _get_terminal_size_tput don't work
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_output (not check_call) is required to capture tput's output
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
# End of getTerminalSize code
##
def _rootLsPrintSimpleLs(keyList,indent,oneColumn):
"""Print list of strings in columns
- blue for directories
- green for trees"""
    # This code is adapted from the pprint_list function here :
# http://stackoverflow.com/questions/25026556/output-list-like-ls
# Thanks hawkjo !!
if len(keyList) == 0: return
(term_width, term_height) = getTerminalSize()
term_width = term_width - indent
min_chars_between = 2
min_element_width = min( len(key.GetName()) for key in keyList ) \
+ min_chars_between
max_element_width = max( len(key.GetName()) for key in keyList ) \
+ min_chars_between
if max_element_width >= term_width: ncol,col_widths = 1,[1]
else:
# Start with max possible number of columns and reduce until it fits
ncol = 1 if oneColumn else min( len(keyList), term_width / min_element_width )
while True:
col_widths = \
[ max( len(key.GetName()) + min_chars_between \
for j, key in enumerate(keyList) if j % ncol == i ) \
for i in range(ncol) ]
if sum( col_widths ) <= term_width: break
else: ncol -= 1
for i, key in enumerate(keyList):
if i%ncol == 0: write("",indent) # indentation
# Don't add spaces after the last element of the line or of the list
if (i+1)%ncol != 0 and i != len(keyList)-1:
if not IS_TERMINAL: write( \
key.GetName().ljust(col_widths[i%ncol]))
elif isDirectoryKey(keyList[i]): write( \
isSpecial(ANSI_BLUE,key.GetName()).ljust( \
col_widths[i%ncol] + ANSI_BLUE_LENGTH))
elif isTreeKey(keyList[i]): write( \
isSpecial(ANSI_GREEN,key.GetName()).ljust( \
col_widths[i%ncol] + ANSI_GREEN_LENGTH))
else: write(key.GetName().ljust(col_widths[i%ncol]))
else: # No spaces after the last element of the line or of the list
if not IS_TERMINAL: write(key.GetName())
elif isDirectoryKey(keyList[i]):
write(isSpecial(ANSI_BLUE, key.GetName()))
elif isTreeKey(keyList[i]):
write(isSpecial(ANSI_GREEN, key.GetName()))
else: write(key.GetName())
write('\n')
def _rootLsPrint(keyList, indent, oneColumn, \
longListing, treeListing):
"""Print informations given by keyList with a rootLs
style choosen with the options"""
    if longListing or treeListing:
        _rootLsPrintLongLs(keyList, indent, treeListing)
else:
_rootLsPrintSimpleLs(keyList, indent, oneColumn)
def _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
oneColumn, longListing, treeListing):
retcode = 0
rootFile = openROOTFile(fileName)
if not rootFile: return 1
keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
if manySources: write("{0} :".format(fileName)+"\n")
_rootLsPrint(keyList, indent, oneColumn, longListing, treeListing)
# Loop on the directories
manyPathSplits = len(pathSplitList) > 1
indentDir = 2 if manyPathSplits else 0
for pathSplit in dirList:
keyList = getKeyList(rootFile,pathSplit)
keyListSort(keyList)
if manyPathSplits: write("{0} :".format("/".join(pathSplit)),indent,end="\n")
_rootLsPrint(keyList, indent+indentDir, oneColumn, longListing, treeListing)
rootFile.Close()
return retcode
def rootLs(sourceList, oneColumn=False, longListing=False, treeListing=False):
# Check arguments
if sourceList == []: return 1
tupleListSort(sourceList)
# Loop on the ROOT files
retcode = 0
manySources = len(sourceList) > 1
indent = 2 if manySources else 0
for fileName, pathSplitList in sourceList:
retcode += _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
oneColumn, longListing, treeListing)
return retcode
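
# Usage sketch (assumed file name): long-format listing of the top level of a
# file, with the branches of every tree detailed:
#
#     rootLs(patternToFileNameAndPathSplitList("data.root"), treeListing=True)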
# End of ROOTLS
##########
##########
# ROOTMKDIR
MKDIR_ERROR = "cannot create directory '{0}'"
def _createDirectories(rootFile,pathSplit,parents):
"""Same behaviour as createDirectory but allows the possibility
to build an whole path recursively with the option \"parents\" """
retcode = 0
lenPathSplit = len(pathSplit)
if lenPathSplit == 0:
pass
elif parents:
for i in range(lenPathSplit):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
retcode += createDirectory(rootFile,currentPathSplit)
else:
doMkdir = True
for i in range(lenPathSplit-1):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
doMkdir = False
break
if doMkdir:
retcode += createDirectory(rootFile,pathSplit)
else:
logging.warning(MKDIR_ERROR.format("/".join(pathSplit)))
retcode += 1
return retcode
def _rootMkdirProcessFile(fileName, pathSplitList, parents):
retcode = 0
rootFile = openROOTFile(fileName,"update")
if not rootFile: return 1
for pathSplit in pathSplitList:
retcode+=_createDirectories(rootFile,pathSplit,parents)
rootFile.Close()
return retcode
def rootMkdir(sourceList, parents=False):
# Check arguments
if sourceList == []: return 1
# Loop on the ROOT files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _rootMkdirProcessFile(fileName, pathSplitList, parents)
return retcode
# End of ROOTMKDIR
##########
##########
# ROOTMV
MOVE_ERROR = "error during copy of {0}, it is not removed from {1}"
def _moveObjects(fileName, pathSplitList, destFile, destPathSplit, \
oneFile, interactive):
retcode = 0
recursive = True
replace = True
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName,"update") \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcodeTemp = copyRootObject(rootFile,pathSplit, \
destFile,destPathSplit,oneSource,recursive,replace)
if not retcodeTemp:
retcode += deleteRootObject(rootFile, pathSplit, interactive, recursive)
else:
logging.warning(MOVE_ERROR.format("/".join(pathSplit),rootFile.GetName()))
retcode += retcodeTemp
if fileName != destFileName: rootFile.Close()
return retcode
def rootMv(sourceList, destFileName, destPathSplit, compress=None, \
interactive=False, recreate=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
    if recreate and destFileName in [n[0] for n in sourceList]:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName,compress,recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _moveObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, interactive)
destFile.Close()
return retcode
# End of ROOTMV
##########
##########
# ROOTPRINT
def _keyListExtended(rootFile,pathSplitList):
keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
for pathSplit in dirList: keyList.extend(getKeyList(rootFile,pathSplit))
keyList = [key for key in keyList if not isDirectoryKey(key)]
keyListSort(keyList)
return keyList
def rootPrint(sourceList, directoryOption = None, divideOption = None, drawOption = "", formatOption = None, \
outputOption = None, sizeOption = None, styleOption = None, verboseOption = False):
# Check arguments
if sourceList == []: return 1
tupleListSort(sourceList)
# Don't open windows
ROOT.gROOT.SetBatch()
# (Style option)
if styleOption: ROOT.gInterpreter.ProcessLine(".x {0}".format(styleOption))
# (Verbose option)
if not verboseOption: ROOT.gErrorIgnoreLevel = 9999
# Initialize the canvas (Size option)
if sizeOption:
try:
width,height = sizeOption.split("x")
width = int(width)
height = int(height)
except ValueError:
logging.warning("canvas size is on a wrong format")
return 1
canvas = ROOT.TCanvas("canvas","canvas",width,height)
else:
canvas = ROOT.TCanvas("canvas")
# Divide the canvas (Divide option)
if divideOption:
try:
x,y = divideOption.split(",")
x = int(x)
y = int(y)
except ValueError:
logging.warning("divide is on a wrong format")
return 1
canvas.Divide(x,y)
caseNumber = x*y
# Take the format of the output file (formatOutput option)
if not formatOption and outputOption:
fileName = outputOption
fileFormat = fileName.split(".")[-1]
formatOption = fileFormat
# Use pdf as default format
if not formatOption: formatOption = "pdf"
# Create the output directory (directory option)
if directoryOption:
if not os.path.isdir(os.path.join(os.getcwd(),directoryOption)):
os.mkdir(directoryOption)
# Make the output name, begin to print (output option)
if outputOption:
if formatOption in ['ps','pdf']:
outputFileName = outputOption
if directoryOption: outputFileName = \
directoryOption + "/" + outputFileName
canvas.Print(outputFileName+"[",formatOption)
else:
logging.warning("can't merge pictures, only postscript or pdf files")
return 1
# Loop on the root files
retcode = 0
objDrawnNumber = 0
openRootFiles = []
for fileName, pathSplitList in sourceList:
rootFile = openROOTFile(fileName)<|fim▁hole|> # Fill the key list (almost the same as in rools)
keyList = _keyListExtended(rootFile,pathSplitList)
for key in keyList:
if isTreeKey(key):
pass
else:
if divideOption:
canvas.cd(objDrawnNumber%caseNumber + 1)
objDrawnNumber += 1
obj = key.ReadObj()
obj.Draw(drawOption)
if divideOption:
if objDrawnNumber%caseNumber == 0:
if not outputOption:
outputFileName = str(objDrawnNumber//caseNumber)+"."+formatOption
if directoryOption:
outputFileName = os.path.join( \
directoryOption,outputFileName)
canvas.Print(outputFileName,formatOption)
canvas.Clear()
canvas.Divide(x,y)
else:
if not outputOption:
outputFileName = key.GetName() + "." +formatOption
if directoryOption:
outputFileName = os.path.join( \
directoryOption,outputFileName)
if outputOption or formatOption == 'pdf':
objTitle = "Title:"+key.GetClassName()+" : "+key.GetTitle()
canvas.Print(outputFileName,objTitle)
else:
canvas.Print(outputFileName,formatOption)
# Last page (divideOption)
if divideOption:
if objDrawnNumber%caseNumber != 0:
if not outputOption:
outputFileName = str(objDrawnNumber//caseNumber + 1)+"."+formatOption
if directoryOption:
outputFileName = os.path.join(directoryOption,outputFileName)
canvas.Print(outputFileName,formatOption)
# End to print (output option)
if outputOption:
if not divideOption:
canvas.Print(outputFileName+"]",objTitle)
else:
canvas.Print(outputFileName+"]")
# Close ROOT files
map(lambda rootFile: rootFile.Close(),openRootFiles)
return retcode
# End of ROOTPRINT
##########
##########
# ROOTRM
def _removeObjects(fileName, pathSplitList, interactive=False, recursive=False):
retcode = 0
rootFile = openROOTFile(fileName,"update")
if not rootFile: return 1
for pathSplit in pathSplitList:
retcode += deleteRootObject(rootFile, pathSplit, interactive, recursive)
rootFile.Close()
return retcode
def rootRm(sourceList, interactive=False, recursive=False):
# Check arguments
if sourceList == []: return 1
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _removeObjects(fileName, pathSplitList, interactive, recursive)
return retcode
# End of ROOTRM
##########<|fim▁end|> | if not rootFile:
retcode += 1
continue
openRootFiles.append(rootFile) |
<|file_name|>shader.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be<|fim▁hole|>#include "flutter/lib/ui/ui_dart_state.h"
namespace flutter {
IMPLEMENT_WRAPPERTYPEINFO(ui, Shader);
Shader::Shader(flutter::SkiaGPUObject<SkShader> shader)
: shader_(std::move(shader)) {}
Shader::~Shader() = default;
} // namespace flutter<|fim▁end|> | // found in the LICENSE file.
#include "flutter/lib/ui/painting/shader.h"
|
<|file_name|>index.web.tsx<|end_file_name|><|fim▁begin|>/* tslint:disable:jsx-no-multiline-js */
import React from 'react';
import classNames from 'classnames';
import Flex from '../flex';
import Carousel from '../carousel/index.web';
import { DataItem, GridProps } from './PropsType';
export default class Grid extends React.Component<GridProps, any> {
static defaultProps = {
data: [],
hasLine: true,
isCarousel: false,
columnNum: 4,
carouselMaxRow: 2,
prefixCls: 'am-grid',
};
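  // Viewport width in px, captured once at construction; the item widths
  // below are derived from it so that `columnNum` items exactly fill a row.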
clientWidth = document.documentElement.clientWidth;
render() {
const {
prefixCls, className,
data, hasLine, columnNum, isCarousel, carouselMaxRow, onClick = () => {},
} = this.props;
const dataLength = data && data.length || 0;
const rowCount = Math.ceil(dataLength / columnNum);
const renderItem = this.props.renderItem || ((dataItem: DataItem) => (
<div
className={`${prefixCls}-item-contain column-num-${columnNum}`}
style={{ height: `${this.clientWidth / columnNum}px` }}
>
{
React.isValidElement(dataItem.icon) ? dataItem.icon : (
<img className={`${prefixCls}-icon`} src={dataItem.icon} />
)
}
<div className={`${prefixCls}-text`}>{dataItem.text}</div>
</div><|fim▁hole|>
const rowsArr: any[] = [];
for (let i = 0; i < rowCount; i++) {
const rowArr: any[] = [];
for (let j = 0; j < columnNum; j++) {
const dataIndex = i * columnNum + j;
if (dataIndex < dataLength) {
const el = data && data[dataIndex];
rowArr.push(<Flex.Item
key={`griditem-${dataIndex}`}
className={`${prefixCls}-item`}
onClick={() => onClick(el, dataIndex)}
style={{ width: `${this.clientWidth / columnNum}px` }}
>
{renderItem(el, dataIndex)}
</Flex.Item>);
} else {
rowArr.push(<Flex.Item
key={`griditem-${dataIndex}`}
style={{ width: `${this.clientWidth / columnNum}px` }}
/>);
}
}
rowsArr.push(<Flex justify="center" align="stretch" key={`gridline-${i}`}>{rowArr}</Flex>);
}
const pageCount = Math.ceil(rowCount / carouselMaxRow);
const pagesArr: any[] = [];
if (isCarousel && pageCount > 1) {
for (let pageIndex = 0; pageIndex < pageCount; pageIndex++) {
const pageRows: any[] = [];
for (let ii = 0; ii < carouselMaxRow; ii++) {
const rowIndex = pageIndex * carouselMaxRow + ii;
if (rowIndex < rowCount) {
pageRows.push(rowsArr[rowIndex]);
} else {
            // Empty placeholder row so the last page keeps a bottom border on
            // its final row (last-child rows lose it in the styles)
pageRows.push(<div key={`gridline-${rowIndex}`} />);
}
}
pagesArr.push(<div key={`pageitem-${pageIndex}`} className={`${prefixCls}-carousel-page`}>{pageRows}</div>);
}
}
return (
<div
className={classNames({
[prefixCls as string]: true,
[`${prefixCls}-line`]: hasLine,
[className as string]: className,
})}
>
{isCarousel && pageCount > 1 ? <Carousel initialSlideWidth={this.clientWidth}>{pagesArr}</Carousel> : rowsArr}
</div>
);
}
}<|fim▁end|> | )); |
<|file_name|>config.py<|end_file_name|><|fim▁begin|># Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import six
if six.PY2:
    from ConfigParser import SafeConfigParser
else:
    from configparser import SafeConfigParser
class AggregateMicroPathConfig:
config_file = ""
table_name = ""
table_schema_id = ""
table_schema_dt = ""
table_schema_lat = ""
table_schema_lon = ""
time_filter = 0<|fim▁hole|> tripLat1 = 0
tripLon1 = 0
tripLat2 = 0
tripLon2 = 0
tripname = ""
resolutionLat = 0
resolutionLon = 0
tripLatMin = 0
tripLatMax = 0
tripLonMin = 0
tripLonMax = 0
triplineBlankets = []
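    # Illustrative config snippet (section and keys as read in __init__ below;
    # the values are made-up examples):
    #
    #   [AggregateMicroPath]
    #   database_name = micro_path_db
    #   table_name = tracks
    #   table_schema_id = id
    #   table_schema_dt = dt
    #   table_schema_lat = lat
    #   table_schema_lon = lon
    #   time_filter = 3600
    #   distance_filter = 50000
    #   lower_left_lat = 29.5
    #   lower_left_lon = -98.0
    #   upper_right_lat = 30.5
    #   upper_right_lon = -97.0
    #   trip_name = demo_trip
    #   resolution_lat = 0.001
    #   resolution_lon = 0.001
    #   temporal_split = day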
def __init__(self, config, basePath = "./"):
configParser = SafeConfigParser()
configParser.read(basePath + config)
self.config_file = config
self.database_name = configParser.get("AggregateMicroPath", "database_name")
self.table_name = configParser.get("AggregateMicroPath", "table_name")
self.table_schema_id = configParser.get("AggregateMicroPath", "table_schema_id")
self.table_schema_dt = configParser.get("AggregateMicroPath", "table_schema_dt")
self.table_schema_lat = configParser.get("AggregateMicroPath", "table_schema_lat")
self.table_schema_lon = configParser.get("AggregateMicroPath", "table_schema_lon")
        # int() works on both Python 2 and 3 (long was removed in Python 3)
        self.time_filter = int(configParser.get("AggregateMicroPath", "time_filter"))
        self.distance_filter = int(configParser.get("AggregateMicroPath", "distance_filter"))
self.tripLat1 = float(configParser.get("AggregateMicroPath", "lower_left_lat"))
self.tripLon1 = float(configParser.get("AggregateMicroPath", "lower_left_lon"))
self.tripLat2 = float(configParser.get("AggregateMicroPath", "upper_right_lat"))
self.tripLon2 = float(configParser.get("AggregateMicroPath", "upper_right_lon"))
self.tripname = configParser.get("AggregateMicroPath", "trip_name")
self.resolutionLat = float(configParser.get("AggregateMicroPath", "resolution_lat"))
self.resolutionLon = float(configParser.get("AggregateMicroPath", "resolution_lon"))
        self.tripLatMin = int(math.floor(self.tripLat1 / self.resolutionLat))
        self.tripLatMax = int(math.ceil(self.tripLat2 / self.resolutionLat))
        self.tripLonMin = int(math.floor(self.tripLon1 / self.resolutionLon))
        self.tripLonMax = int(math.ceil(self.tripLon2 / self.resolutionLon))
self.triplineBlankets.append([self.tripLat1,self.tripLon1,self.tripLat2,self.tripLon2,self.tripname,self.resolutionLat,self.resolutionLon,self.tripLatMin,self.tripLatMax,self.tripLonMin,self.tripLonMax])
self.temporal_split = configParser.get("AggregateMicroPath", "temporal_split")<|fim▁end|> | distance_filter = 0 |
<|file_name|>9F3DE783494F7FF30679A17B0C5B912834121095.js<|end_file_name|><|fim▁begin|>this.NesDb = this.NesDb || {};
NesDb[ '9F3DE783494F7FF30679A17B0C5B912834121095' ] = {
"$": {
"name": "Nekketsu Kouha Kunio-kun",
"altname": "熱血硬派くにおくん",
"class": "Licensed",
"catalog": "TJC-KN",
"publisher": "Technos",
"developer": "Technos",
"region": "Japan",
"players": "2",
"date": "1987-04-17"
},
"cartridge": [
{
"$": {
"system": "Famicom",
"crc": "A7D3635E",
"sha1": "9F3DE783494F7FF30679A17B0C5B912834121095",
"dump": "ok",
"dumper": "bootgod",
"datedumped": "2007-06-24"
},
"board": [
{
"$": {
"type": "HVC-UNROM",
"pcb": "HVC-UNROM-02",
"mapper": "2"
},
"prg": [
{
"$": {
"name": "TJC-KN-0 PRG",
"size": "128k",
"crc": "A7D3635E",
"sha1": "9F3DE783494F7FF30679A17B0C5B912834121095"
}
}
],
"vram": [<|fim▁hole|> "size": "8k"
}
}
],
"chip": [
{
"$": {
"type": "74xx161"
}
},
{
"$": {
"type": "74xx32"
}
}
],
"pad": [
{
"$": {
"h": "1",
"v": "0"
}
}
]
}
]
}
]
};<|fim▁end|> | {
"$": { |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cryptocurrency implementation example using [exonum](http://exonum.com/).
#![deny(unsafe_code, bare_trait_objects)]
#![warn(missing_docs, missing_debug_implementations)]
#[macro_use]
extern crate serde_derive; // Required for Protobuf.
pub use crate::{schema::Schema, transactions::CryptocurrencyInterface};
pub mod api;
pub mod migrations;
pub mod proto;
pub mod schema;
pub mod transactions;
pub mod wallet;
use exonum::runtime::{ExecutionContext, ExecutionError, InstanceId};
use exonum_derive::{ServiceDispatcher, ServiceFactory};
use exonum_rust_runtime::{api::ServiceApiBuilder, DefaultInstance, Service};
use crate::{api::PublicApi as CryptocurrencyApi, schema::SchemaImpl};
/// Initial balance of the wallet.
pub const INITIAL_BALANCE: u64 = 100;<|fim▁hole|>/// Cryptocurrency service implementation.
#[derive(Debug, ServiceDispatcher, ServiceFactory)]
#[service_dispatcher(implements("CryptocurrencyInterface"))]
#[service_factory(artifact_name = "exonum-cryptocurrency", proto_sources = "proto")]
pub struct CryptocurrencyService;
impl Service for CryptocurrencyService {
fn initialize(
&self,
context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
// Initialize indexes. Not doing this may lead to errors in HTTP API, since it relies on
// `wallets` indexes being initialized for returning corresponding proofs.
SchemaImpl::new(context.service_data());
Ok(())
}
fn wire_api(&self, builder: &mut ServiceApiBuilder) {
CryptocurrencyApi::wire(builder);
}
}
/// Use predefined instance name and id for frontend.
impl DefaultInstance for CryptocurrencyService {
const INSTANCE_ID: InstanceId = 3;
const INSTANCE_NAME: &'static str = "crypto";
}<|fim▁end|> | |
<|file_name|>case.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import codecs
import json
import unittest
from pkg_resources import resource_filename
from calmjs.parse import es5
class ExamplesTestCase(unittest.TestCase):
"""
    A test case that automatically loads the examples JS module into the
    data attribute of the test instance and provides a shortcut for
doing assertions.
"""<|fim▁hole|> test_module_name = 'nunja.stock.tests'
test_examples = NotImplemented
@classmethod
def setUpClass(cls):
if cls.test_examples is NotImplemented:
raise ValueError(
'the class must define the test_examples attribute for data')
with codecs.open(
resource_filename(cls.test_module_name, cls.test_examples),
encoding='utf8') as fd:
        cls.data = json.loads(str(
            es5(fd.read()).children()[0].children()[0].initializer))
# TODO also sanity check the resulting object?
def assertDataEqual(self, key, result):
answer = self.data[key][0]
self.assertEqual(answer, result)<|fim▁end|> |
data = None |
<|file_name|>keypage_prepare_fields.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013,2014 Rodolphe Quiédeville <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import imp
from django.core.management.base import BaseCommand
from optparse import make_option
from django.core.paginator import Paginator
from foo.offset.models import Log
from foo.july.models import BigBook
import logging
from datetime import datetime
from django.db import connection
class Command(BaseCommand):
help = 'Import datas'
def handle(self, *args, **options):
"""
Use prepared query on july_bigbook
"""
key = 'keypage_prepare_fields'
log = Log.objects.create(name=key,
start=datetime.now(),
stop=datetime.now())
nb = 0
keyid = 0
cursor = connection.cursor()
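        # Drop any prepared statement left over from a previous run; the bare
        # except below just means none existed yet.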
try:
cursor.execute('DEALLOCATE preptwo')
except:
pass
qry = " ".join(["PREPARE preptwo (integer) AS ",
"SELECT keyid,nbpages FROM july_bigbook",
"WHERE serie= 3 AND keyid > $1",
"ORDER BY keyid ASC LIMIT 250"])
try:
cursor.execute(qry)
except:
pass
while True:
cursor.execute('EXECUTE preptwo (%s)' % (keyid))
books = cursor.fetchall()
for book in books:
keyid = book[0]
# do want you want here
if book[1] > 500:
nb = nb + 1
if len(books) < 250:
break<|fim▁hole|> print key, log.stop - log.start, nb<|fim▁end|> |
log.stop = datetime.now()
log.save() |
<|file_name|>test_asia.py<|end_file_name|><|fim▁begin|>from datetime import date
from workalendar.tests import GenericCalendarTest
from workalendar.asia import HongKong, Japan, Qatar, Singapore
from workalendar.asia import SouthKorea, Taiwan, Malaysia
class HongKongTest(GenericCalendarTest):
cal_class = HongKong
def test_year_2010(self):
""" Interesting because Christmas fell on a Saturday and CNY fell
on a Sunday, so didn't roll, and Ching Ming was on the same day
as Easter Monday """
holidays = self.cal.holidays_set(2010)
self.assertIn(date(2010, 1, 1), holidays) # New Year
self.assertIn(date(2010, 2, 13), holidays) # Chinese new year (shift)
self.assertIn(date(2010, 2, 15), holidays) # Chinese new year
self.assertIn(date(2010, 2, 16), holidays) # Chinese new year
self.assertNotIn(date(2010, 2, 17), holidays) # Not Chinese new year
self.assertIn(date(2010, 4, 2), holidays) # Good Friday
self.assertIn(date(2010, 4, 3), holidays) # Day after Good Friday
self.assertIn(date(2010, 4, 5), holidays) # Easter Monday
self.assertIn(date(2010, 4, 6), holidays) # Ching Ming (shifted)
self.assertIn(date(2010, 5, 1), holidays) # Labour Day
self.assertIn(date(2010, 5, 21), holidays) # Buddha's Birthday
self.assertIn(date(2010, 6, 16), holidays) # Tuen Ng Festival
self.assertIn(date(2010, 7, 1), holidays) # HK SAR Establishment Day
self.assertIn(date(2010, 9, 23), holidays) # Day after Mid-Autumn
self.assertIn(date(2010, 10, 1), holidays) # National Day
self.assertIn(date(2010, 10, 16), holidays) # Chung Yeung Festival
self.assertIn(date(2010, 12, 25), holidays) # Christmas Day
self.assertIn(date(2010, 12, 27), holidays) # Boxing Day (shifted)
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # New Year
self.assertIn(date(2013, 2, 11), holidays) # Chinese new year
self.assertIn(date(2013, 2, 12), holidays) # Chinese new year
self.assertIn(date(2013, 2, 13), holidays) # Chinese new year
self.assertIn(date(2013, 3, 29), holidays) # Good Friday
self.assertIn(date(2013, 3, 30), holidays) # Day after Good Friday
self.assertIn(date(2013, 4, 1), holidays) # Easter Monday
self.assertIn(date(2013, 4, 4), holidays) # Ching Ming
self.assertIn(date(2013, 5, 1), holidays) # Labour Day
self.assertIn(date(2013, 5, 17), holidays) # Buddha's Birthday
self.assertIn(date(2013, 6, 12), holidays) # Tuen Ng Festival
self.assertIn(date(2013, 7, 1), holidays) # HK SAR Establishment Day
self.assertIn(date(2013, 9, 20), holidays) # Day after Mid-Autumn
self.assertIn(date(2013, 10, 1), holidays) # National Day
self.assertIn(date(2013, 10, 14), holidays) # Chung Yeung Festival
self.assertIn(date(2013, 12, 25), holidays) # Christmas Day
self.assertIn(date(2013, 12, 26), holidays) # Boxing Day
def test_year_2016(self):
holidays = self.cal.holidays_set(2016)
self.assertIn(date(2016, 1, 1), holidays) # New Year
self.assertIn(date(2016, 2, 8), holidays) # Chinese new year
self.assertIn(date(2016, 2, 9), holidays) # Chinese new year
self.assertIn(date(2016, 2, 10), holidays) # Chinese new year
self.assertIn(date(2016, 3, 25), holidays) # Good Friday
self.assertIn(date(2016, 3, 26), holidays) # Day after Good Friday
self.assertIn(date(2016, 3, 28), holidays) # Easter Monday
self.assertIn(date(2016, 4, 4), holidays) # Ching Ming
self.assertIn(date(2016, 5, 2), holidays) # Labour Day (shifted)
self.assertIn(date(2016, 5, 14), holidays) # Buddha's Birthday
self.assertIn(date(2016, 6, 9), holidays) # Tuen Ng Festival
self.assertIn(date(2016, 7, 1), holidays) # HK SAR Establishment Day
self.assertIn(date(2016, 9, 16), holidays) # Day after Mid-Autumn
self.assertIn(date(2016, 10, 1), holidays) # National Day
self.assertIn(date(2016, 10, 10), holidays) # Chung Yeung Festival
self.assertIn(date(2016, 12, 26), holidays) # Christmas Day (shifted)
self.assertIn(date(2016, 12, 27), holidays) # Boxing Day (shifted)
def test_year_2017(self):
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 1, 2), holidays) # New Year (shifted)
self.assertIn(date(2017, 1, 28), holidays) # Chinese new year
self.assertIn(date(2017, 1, 30), holidays) # Chinese new year
self.assertIn(date(2017, 1, 31), holidays) # Chinese new year
self.assertIn(date(2017, 4, 4), holidays) # Ching Ming
self.assertIn(date(2017, 4, 14), holidays) # Good Friday
self.assertIn(date(2017, 4, 15), holidays) # Day after Good Friday
self.assertIn(date(2017, 4, 17), holidays) # Easter Monday
self.assertIn(date(2017, 5, 1), holidays) # Labour Day
self.assertIn(date(2017, 5, 3), holidays) # Buddha's Birthday
self.assertIn(date(2017, 5, 30), holidays) # Tuen Ng Festival
self.assertIn(date(2017, 7, 1), holidays) # HK SAR Establishment Day
self.assertIn(date(2017, 10, 2), holidays) # National Day (shifted)
self.assertIn(date(2017, 10, 5), holidays) # Day after Mid-Autumn
self.assertIn(date(2017, 10, 28), holidays) # Chung Yeung Festival
self.assertIn(date(2017, 12, 25), holidays) # Christmas Day
self.assertIn(date(2017, 12, 26), holidays) # Boxing Day
def test_chingming_festival(self):
# This is the same as the Taiwan test, just different spelling
# Could move this into a Core test
self.assertIn(date(2005, 4, 5), self.cal.holidays_set(2005))
self.assertIn(date(2006, 4, 5), self.cal.holidays_set(2006))
self.assertIn(date(2007, 4, 5), self.cal.holidays_set(2007))
self.assertIn(date(2008, 4, 4), self.cal.holidays_set(2008))
self.assertIn(date(2010, 4, 5), self.cal.holidays_set(2010))
self.assertIn(date(2011, 4, 5), self.cal.holidays_set(2011))
self.assertIn(date(2012, 4, 4), self.cal.holidays_set(2012))
self.assertIn(date(2013, 4, 4), self.cal.holidays_set(2013))
self.assertIn(date(2014, 4, 5), self.cal.holidays_set(2014))
self.assertIn(date(2015, 4, 4), self.cal.holidays_set(2015))
self.assertIn(date(2016, 4, 4), self.cal.holidays_set(2016))
self.assertIn(date(2017, 4, 4), self.cal.holidays_set(2017))
self.assertIn(date(2018, 4, 5), self.cal.holidays_set(2018))
class JapanTest(GenericCalendarTest):
cal_class = Japan
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # new year
self.assertIn(date(2013, 2, 11), holidays) # Foundation Day
self.assertIn(date(2013, 3, 20), holidays) # Vernal Equinox Day
self.assertIn(date(2013, 4, 29), holidays) # Showa Day
self.assertIn(date(2013, 5, 3), holidays) # Constitution Memorial Day
self.assertIn(date(2013, 5, 4), holidays) # Greenery Day
self.assertIn(date(2013, 5, 5), holidays) # Children's Day
self.assertIn(date(2013, 9, 23), holidays) # Autumnal Equinox Day
self.assertIn(date(2013, 11, 3), holidays) # Culture Day
self.assertIn(date(2013, 11, 23), holidays) # Labour Thanksgiving Day
self.assertIn(date(2013, 12, 23), holidays) # The Emperor's Birthday
# Variable days
self.assertIn(date(2013, 1, 14), holidays) # Coming of Age Day
self.assertIn(date(2013, 7, 15), holidays) # Marine Day
self.assertIn(date(2013, 9, 16), holidays) # Respect-for-the-Aged Day
self.assertIn(date(2013, 10, 14), holidays) # Health and Sports Day
def test_year_2016(self):
# Before 2016, no Mountain Day
holidays = self.cal.holidays_set(2014)
self.assertNotIn(date(2014, 8, 11), holidays) # Mountain Day
holidays = self.cal.holidays_set(2015)
self.assertNotIn(date(2015, 8, 11), holidays) # Mountain Day
# After 2016, yes
holidays = self.cal.holidays_set(2016)
self.assertIn(date(2016, 8, 11), holidays) # Mountain Day
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 8, 11), holidays) # Mountain Day
class MalaysiaTest(GenericCalendarTest):
cal_class = Malaysia
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # New Year's Day
self.assertIn(date(2013, 1, 28), holidays) # Thaipusam
self.assertIn(date(2013, 2, 1), holidays) # Federal Territory Day
self.assertIn(date(2013, 2, 11), holidays) # 2nd day of Lunar NY
self.assertIn(date(2013, 2, 12), holidays) # 1st day (Sun lieu)
self.assertIn(date(2013, 5, 1), holidays) # Workers' Day
self.assertIn(date(2013, 5, 24), holidays) # Vesak Day
self.assertIn(date(2013, 8, 8), holidays) # 1st day eid-al-fitr
self.assertIn(date(2013, 8, 9), holidays) # 2nd day eid-al-fitr
self.assertIn(date(2013, 8, 31), holidays) # National Day
self.assertIn(date(2013, 9, 16), holidays) # Malaysia Day
self.assertIn(date(2013, 10, 15), holidays) # Hari Raya Haji
self.assertIn(date(2013, 11, 2), holidays) # Deepavali
self.assertIn(date(2013, 11, 5), holidays) # Islamic New Year
self.assertIn(date(2013, 12, 25), holidays) # Xmas
def test_year_2012(self):
holidays = self.cal.holidays_set(2012)
self.assertIn(date(2012, 1, 1), holidays) # New Year's Day
self.assertIn(date(2012, 1, 24), holidays) # Federal Territory Day
self.assertIn(date(2012, 2, 1), holidays) # 2nd day of Lunar NY<|fim▁hole|> self.assertIn(date(2012, 5, 5), holidays) # Workers' Day
self.assertIn(date(2012, 8, 19), holidays) # 1st day eid-al-fitr
self.assertIn(date(2012, 8, 20), holidays) # 2nd day eid-al-fitr
self.assertIn(date(2012, 8, 31), holidays) # National Day
self.assertIn(date(2012, 9, 16), holidays) # Malaysia Day
self.assertIn(date(2012, 10, 26), holidays) # Hari Raya Haji
self.assertIn(date(2012, 11, 13), holidays) # Islamic New Year
self.assertIn(date(2012, 11, 15), holidays) # Deepavali
self.assertIn(date(2012, 12, 25), holidays) # Xmas
def test_nuzul_al_quran(self):
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 6, 12), holidays)
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 6, 1), holidays)
class QatarTest(GenericCalendarTest):
cal_class = Qatar
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 7, 9), holidays) # start ramadan
# warning, the official date was (2013, 8, 10)
self.assertIn(date(2013, 8, 8), holidays) # eid al fitr
# The official date was (2013, 10, 14)
self.assertIn(date(2013, 10, 15), holidays) # eid al adha
self.assertIn(date(2013, 10, 16), holidays) # eid al adha
self.assertIn(date(2013, 10, 17), holidays) # eid al adha
self.assertIn(date(2013, 10, 18), holidays) # eid al adha
self.assertIn(date(2013, 12, 18), holidays) # National Day
def test_weekend(self):
# In Qatar, Week-end days are Friday / Sunday.
weekend_day = date(2017, 5, 12) # This is a Friday
non_weekend_day = date(2017, 5, 14) # This is a Sunday
self.assertFalse(self.cal.is_working_day(weekend_day))
self.assertTrue(self.cal.is_working_day(non_weekend_day))
class SingaporeTest(GenericCalendarTest):
cal_class = Singapore
def test_CNY_2010(self):
holidays = self.cal.holidays_set(2010)
self.assertIn(date(2010, 2, 14), holidays) # CNY1
self.assertIn(date(2010, 2, 15), holidays) # CNY2
self.assertIn(date(2010, 2, 16), holidays) # Rolled day for CNY
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # New Year
self.assertIn(date(2013, 2, 10), holidays) # CNY1
self.assertIn(date(2013, 2, 11), holidays) # CNY2
self.assertIn(date(2013, 2, 12), holidays) # Rolled day for CNY
self.assertIn(date(2013, 3, 29), holidays) # Good Friday
self.assertIn(date(2013, 5, 1), holidays) # Labour Day
self.assertIn(date(2013, 5, 24), holidays) # Vesak Day
self.assertIn(date(2013, 8, 8), holidays) # Hari Raya Puasa
self.assertIn(date(2013, 8, 9), holidays) # National Day
self.assertIn(date(2013, 10, 15), holidays) # Hari Raya Haji
self.assertIn(date(2013, 11, 3), holidays) # Deepavali
self.assertIn(date(2013, 11, 4), holidays) # Deepavali shift
self.assertIn(date(2013, 12, 25), holidays) # Christmas Day
def test_year_2018(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 1, 1), holidays) # New Year
self.assertIn(date(2018, 2, 16), holidays) # CNY
self.assertIn(date(2018, 2, 17), holidays) # CNY
self.assertIn(date(2018, 3, 30), holidays) # Good Friday
self.assertIn(date(2018, 5, 1), holidays) # Labour Day
self.assertIn(date(2018, 5, 29), holidays) # Vesak Day
self.assertIn(date(2018, 6, 15), holidays) # Hari Raya Puasa
self.assertIn(date(2018, 8, 9), holidays) # National Day
self.assertIn(date(2018, 8, 22), holidays) # Hari Raya Haji
self.assertIn(date(2018, 11, 6), holidays) # Deepavali
self.assertIn(date(2018, 12, 25), holidays) # Christmas Day
def test_fixed_holiday_shift(self):
# Labour Day was on a Sunday in 2016
holidays = self.cal.holidays_set(2016)
# Labour Day (sunday)
self.assertIn(date(2016, 5, 1), holidays)
# Shifted day (Monday)
self.assertIn(date(2016, 5, 2), holidays)
class SouthKoreaTest(GenericCalendarTest):
cal_class = SouthKorea
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # new year
self.assertIn(date(2013, 3, 1), holidays) # Independence day
self.assertIn(date(2013, 5, 5), holidays) # children's day
self.assertIn(date(2013, 6, 6), holidays) # Memorial day
self.assertIn(date(2013, 8, 15), holidays) # Liberation day
self.assertIn(date(2013, 10, 3), holidays) # National Foundation Day
self.assertIn(date(2013, 10, 9), holidays) # Hangul Day
self.assertIn(date(2013, 12, 25), holidays) # Christmas
# Variable days
self.assertIn(date(2013, 2, 9), holidays)
self.assertIn(date(2013, 2, 10), holidays)
self.assertIn(date(2013, 2, 11), holidays)
self.assertIn(date(2013, 5, 17), holidays)
self.assertIn(date(2013, 9, 18), holidays)
self.assertIn(date(2013, 9, 19), holidays)
self.assertIn(date(2013, 9, 20), holidays)
class TaiwanTest(GenericCalendarTest):
cal_class = Taiwan
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # New Year
self.assertIn(date(2013, 2, 9), holidays) # Chinese new year's eve
self.assertIn(date(2013, 2, 10), holidays) # Chinese new year
self.assertIn(date(2013, 2, 11), holidays) # Spring Festival
self.assertIn(date(2013, 2, 12), holidays) # Spring Festival
self.assertIn(date(2013, 2, 28), holidays) # 228 Peace Memorial Day
self.assertIn(date(2013, 4, 4), holidays) # Children's Day
self.assertIn(date(2013, 6, 12), holidays) # Dragon Boat Festival
self.assertIn(date(2013, 9, 19), holidays) # Mid-Autumn Festival
self.assertIn(date(2013, 10, 10), holidays) # National Day
def test_qingming_festival(self):
self.assertIn(date(2001, 4, 5), self.cal.holidays_set(2001))
self.assertIn(date(2002, 4, 5), self.cal.holidays_set(2002))
self.assertIn(date(2005, 4, 5), self.cal.holidays_set(2005))
self.assertIn(date(2006, 4, 5), self.cal.holidays_set(2006))
self.assertIn(date(2007, 4, 5), self.cal.holidays_set(2007))
self.assertIn(date(2008, 4, 4), self.cal.holidays_set(2008))
self.assertIn(date(2010, 4, 5), self.cal.holidays_set(2010))
self.assertIn(date(2011, 4, 5), self.cal.holidays_set(2011))
self.assertIn(date(2012, 4, 4), self.cal.holidays_set(2012))
self.assertIn(date(2013, 4, 4), self.cal.holidays_set(2013))
self.assertIn(date(2014, 4, 4), self.cal.holidays_set(2014))<|fim▁end|> | self.assertIn(date(2012, 5, 1), holidays) # 1st day (Sun lieu) |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from django.http import HttpRequest
import mock
import pytest
from nose.tools import assert_false
from olympia import amo
from olympia.amo.tests import TestCase, req_factory_factory
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon, AddonUser
from olympia.users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_addons_reviewer, check_personas_reviewer,
check_unlisted_addons_reviewer, is_editor, match_rules)
pytestmark = pytest.mark.django_db
def test_match_rules():
"""
Unit tests for the match_rules method.
"""
rules = (
'*:*',
'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
'Admin:EditAnyCollection',
'Tests:*,Admin:serverstatus,Admin:users',
'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
'Admin:EditAnyAddon',
'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
'Admin:ViewAnyStats',
'Editors:*,Admin:features',
'Admin:Statistics',
'Admin:Features,Editors:*',
'Admin:%',
'Admin:*',
'Admin:Foo',
'Admin:Bar',
)
for rule in rules:
assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule
rules = (
'Doctors:*',
'Stats:View',
'CollectionStats:View',
'Addons:Review',
'Personas:Review',
'Locales:Edit',
'Locale.de:Edit',
'Reviews:Edit',
'None:None',
)
for rule in rules:
assert not match_rules(rule, 'Admin', '%'), \
"%s == Admin:%% and shouldn't" % rule
def test_anonymous_user():
# Fake request must not have .groups, just like an anonymous user.
fake_request = HttpRequest()
assert_false(action_allowed(fake_request, amo.FIREFOX, 'Admin:%'))
class ACLTestCase(TestCase):
"""Test some basic ACLs by going to various locked pages on AMO."""
fixtures = ['access/login.json']
def test_admin_login_anon(self):
# Login form for anonymous user on the admin page.
url = '/en-US/admin/models/'
r = self.client.get(url)
self.assert3xx(r, '%s?to=%s' % (reverse('users.login'), url))
class TestHasPerm(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestHasPerm, self).setUp()
assert self.client.login(username='[email protected]', password='password')
self.user = UserProfile.objects.get(email='[email protected]')
self.addon = Addon.objects.get(id=3615)
self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
assert self.au.role == amo.AUTHOR_ROLE_OWNER
self.request = self.fake_request_with_user(self.user)
def fake_request_with_user(self, user):<|fim▁hole|> request.groups = user.groups.all()
request.user = user
request.user.is_authenticated = mock.Mock(return_value=True)
return request
def login_admin(self):
assert self.client.login(username='[email protected]',
password='password')
return UserProfile.objects.get(email='[email protected]')
def test_anonymous(self):
self.request.user.is_authenticated.return_value = False
self.client.logout()
assert not check_addon_ownership(self.request, self.addon)
def test_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
assert check_addon_ownership(self.request, self.addon)
assert check_addon_ownership(self.request, self.addon, admin=True)
assert not check_addon_ownership(self.request, self.addon, admin=False)
def test_require_author(self):
assert check_ownership(self.request, self.addon, require_author=True)
def test_require_author_when_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
self.request.groups = self.request.user.groups.all()
assert check_ownership(self.request, self.addon, require_author=False)
assert not check_ownership(self.request, self.addon,
require_author=True)
def test_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert not check_addon_ownership(self.request, self.addon)
self.test_admin()
def test_deleted(self):
self.addon.update(status=amo.STATUS_DELETED)
assert not check_addon_ownership(self.request, self.addon)
self.request.user = self.login_admin()
self.request.groups = self.request.user.groups.all()
assert not check_addon_ownership(self.request, self.addon)
def test_ignore_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert check_addon_ownership(self.request, self.addon,
ignore_disabled=True)
def test_owner(self):
assert check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
def test_dev(self):
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
def test_viewer(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
def test_support(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, support=True)
class TestCheckReviewer(TestCase):
fixtures = ['base/addon_3615', 'addons/persona']
def setUp(self):
super(TestCheckReviewer, self).setUp()
self.user = UserProfile.objects.get()
self.persona = Addon.objects.get(pk=15663)
self.addon = Addon.objects.get(pk=3615)
def test_no_perm(self):
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_addons(self):
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_themes(self):
self.grant_permission(self.user, 'Personas:Review')
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert check_personas_reviewer(req)
def test_perm_unlisted_addons(self):
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_is_editor_for_addon_reviewer(self):
"""An addon editor is also a persona editor."""
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert is_editor(req, self.persona)
assert is_editor(req, self.addon)
def test_is_editor_for_persona_reviewer(self):
self.grant_permission(self.user, 'Personas:Review')
req = req_factory_factory('noop', user=self.user)
assert is_editor(req, self.persona)
assert not is_editor(req, self.addon)<|fim▁end|> | request = mock.Mock() |
<|file_name|>util.js<|end_file_name|><|fim▁begin|>// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* The global object.
* @type {!Object}
*/
const global = this;
// TODO(estade): This should be removed and calls replaced with cr.isMac
const IS_MAC = /^Mac/.test(navigator.platform);
/**
* Alias for document.getElementById.
* @param {string} id The ID of the element to find.
* @return {HTMLElement} The found element or null if not found.
*/
function $(id) {
return document.getElementById(id);
}
/**
* Calls chrome.send with a callback and restores the original afterwards.
* @param {string} name The name of the message to send.
* @param {!Array} params The parameters to send.
* @param {string} callbackName The name of the function that the backend calls.
* @param {!Function} The function to call.
*/
function chromeSend(name, params, callbackName, callback) {
var old = global[callbackName];
global[callbackName] = function() {
// restore
global[callbackName] = old;
var args = Array.prototype.slice.call(arguments);
return callback.apply(global, args);
};
chrome.send(name, params);
}
/**
* Generates a CSS url string.
* @param {string} s The URL to generate the CSS url for.
* @return {string} The CSS url string.
*/
function url(s) {
// http://www.w3.org/TR/css3-values/#uris
// Parentheses, commas, whitespace characters, single quotes (') and double
// quotes (") appearing in a URI must be escaped with a backslash
var s2 = s.replace(/(\(|\)|\,|\s|\'|\"|\\)/g, '\\$1');
// WebKit has a bug when it comes to URLs that end with \
// https://bugs.webkit.org/show_bug.cgi?id=28885
if (/\\\\$/.test(s2)) {
// Add a space to work around the WebKit bug.
s2 += ' ';
}
return 'url("' + s2 + '")';
}
/**
* Parses query parameters from Location.
* @param {string} s The URL to generate the CSS url for.
* @return {object} Dictionary containing name value pairs for URL
*/
function parseQueryParams(location) {
var params = {};
var query = unescape(location.search.substring(1));
var vars = query.split("&");
for (var i=0; i < vars.length; i++) {
var pair = vars[i].split("=");
params[pair[0]] = pair[1];
}
return params;
}
function findAncestorByClass(el, className) {
return findAncestor(el, function(el) {
if (el.classList)
return el.classList.contains(className);
return null;
});
}
/**
* Return the first ancestor for which the {@code predicate} returns true.
* @param {Node} node The node to check.
* @param {function(Node) : boolean} predicate The function that tests the
* nodes.
* @return {Node} The found ancestor or null if not found.
*/
function findAncestor(node, predicate) {
var last = false;
while (node != null && !(last = predicate(node))) {
node = node.parentNode;
}
return last ? node : null;
}
function swapDomNodes(a, b) {
var afterA = a.nextSibling;
if (afterA == b) {
swapDomNodes(b, a);
return;
}
var aParent = a.parentNode;
b.parentNode.replaceChild(a, b);
aParent.insertBefore(b, afterA);
}
// Handle click on a link. If the link points to a chrome: or file: url, then
// call into the browser to do the navigation.<|fim▁hole|> if (!e.returnValue)
return;
var el = e.target;
if (el.nodeType == Node.ELEMENT_NODE &&
el.webkitMatchesSelector('A, A *')) {
while (el.tagName != 'A') {
el = el.parentElement;
}
if ((el.protocol == 'file:' || el.protocol == 'about:') &&
(e.button == 0 || e.button == 1)) {
chrome.send('navigateToUrl', [
el.href,
el.target,
e.button,
e.altKey,
e.ctrlKey,
e.metaKey,
e.shiftKey
]);
e.preventDefault();
}
}
});<|fim▁end|> | document.addEventListener('click', function(e) {
// Allow preventDefault to work. |
<|file_name|>datasync.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Create, get, update, execute and delete an AWS DataSync Task."""
import logging
import random
from typing import List, Optional
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.datasync import AWSDataSyncHook
class AWSDataSyncOperator(BaseOperator):
r"""Find, Create, Update, Execute and Delete AWS DataSync Tasks.
If ``do_xcom_push`` is True, then the DataSync TaskArn and TaskExecutionArn
which were executed will be pushed to an XCom.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AWSDataSyncOperator`
.. note:: There may be 0, 1, or many existing DataSync Tasks defined in your AWS
environment. The default behavior is to create a new Task if there are 0, or
execute the Task if there was 1 Task, or fail if there were many Tasks.
:param aws_conn_id: AWS connection to use.
:type aws_conn_id: str
:param wait_interval_seconds: Time to wait between two
consecutive calls to check TaskExecution status.
:type wait_interval_seconds: int
:param max_iterations: Maximum number of
consecutive calls to check TaskExecution status.
:type max_iterations: int
:param task_arn: AWS DataSync TaskArn to use. If None, then this operator will
attempt to either search for an existing Task or attempt to create a new Task.
:type task_arn: str
:param source_location_uri: Source location URI to search for. All DataSync
Tasks with a LocationArn with this URI will be considered.
Example: ``smb://server/subdir``
:type source_location_uri: str
:param destination_location_uri: Destination location URI to search for.
All DataSync Tasks with a LocationArn with this URI will be considered.
Example: ``s3://airflow_bucket/stuff``
:type destination_location_uri: str
:param allow_random_task_choice: If multiple Tasks match, one must be chosen to
execute. If allow_random_task_choice is True then a random one is chosen.
:type allow_random_task_choice: bool
:param allow_random_location_choice: If multiple Locations match, one must be chosen
when creating a task. If allow_random_location_choice is True then a random one is chosen.
:type allow_random_location_choice: bool
:param create_task_kwargs: If no suitable TaskArn is identified,
it will be created if ``create_task_kwargs`` is defined.
``create_task_kwargs`` is then used internally like this:
``boto3.create_task(**create_task_kwargs)``
Example: ``{'Name': 'xyz', 'Options': ..., 'Excludes': ..., 'Tags': ...}``
:type create_task_kwargs: dict
:param create_source_location_kwargs: If no suitable LocationArn is found,
a Location will be created if ``create_source_location_kwargs`` is defined.
``create_source_location_kwargs`` is then used internally like this:
``boto3.create_location_xyz(**create_source_location_kwargs)``
The xyz is determined from the prefix of source_location_uri, eg ``smb:/...`` or ``s3:/...``
Example: ``{'Subdirectory': ..., 'ServerHostname': ..., ...}``
:type create_source_location_kwargs: dict
:param create_destination_location_kwargs: If no suitable LocationArn is found,
a Location will be created if ``create_destination_location_kwargs`` is defined.
``create_destination_location_kwargs`` is used internally like this:
``boto3.create_location_xyz(**create_destination_location_kwargs)``
The xyz is determined from the prefix of destination_location_uri, eg ``smb:/...` or ``s3:/...``
Example: ``{'S3BucketArn': ..., 'S3Config': {'BucketAccessRoleArn': ...}, ...}``
:type create_destination_location_kwargs: dict
:param update_task_kwargs: If a suitable TaskArn is found or created,
it will be updated if ``update_task_kwargs`` is defined.
``update_task_kwargs`` is used internally like this:
``boto3.update_task(TaskArn=task_arn, **update_task_kwargs)``
Example: ``{'Name': 'xyz', 'Options': ..., 'Excludes': ...}``
:type update_task_kwargs: dict
:param task_execution_kwargs: Additional kwargs passed directly when starting the
Task execution, used internally like this:
``boto3.start_task_execution(TaskArn=task_arn, **task_execution_kwargs)``
:type task_execution_kwargs: dict
:param delete_task_after_execution: If True then the TaskArn which was executed
will be deleted from AWS DataSync on successful completion.
:type delete_task_after_execution: bool
:raises AirflowException: If ``task_arn`` was not specified, or if
either ``source_location_uri`` or ``destination_location_uri`` were
not specified.
:raises AirflowException: If source or destination Location were not found
and could not be created.
:raises AirflowException: If ``choose_task`` or ``choose_location`` fails.
:raises AirflowException: If Task creation, update, execution or delete fails.
"""
template_fields = (
"task_arn",
"source_location_uri",
"destination_location_uri",
"create_task_kwargs",
"create_source_location_kwargs",
"create_destination_location_kwargs",
"update_task_kwargs",
"task_execution_kwargs",
)
template_fields_renderers = {
"create_task_kwargs": "json",
"create_source_location_kwargs": "json",
"create_destination_location_kwargs": "json",
"update_task_kwargs": "json",
"task_execution_kwargs": "json",
}
ui_color = "#44b5e2"
def __init__(
self,
*,
aws_conn_id: str = "aws_default",
wait_interval_seconds: int = 30,
max_iterations: int = 60,
task_arn: Optional[str] = None,
source_location_uri: Optional[str] = None,
destination_location_uri: Optional[str] = None,
allow_random_task_choice: bool = False,
allow_random_location_choice: bool = False,
create_task_kwargs: Optional[dict] = None,
create_source_location_kwargs: Optional[dict] = None,
create_destination_location_kwargs: Optional[dict] = None,
update_task_kwargs: Optional[dict] = None,
task_execution_kwargs: Optional[dict] = None,
delete_task_after_execution: bool = False,
**kwargs,
):
super().__init__(**kwargs)
# Assignments
self.aws_conn_id = aws_conn_id
self.wait_interval_seconds = wait_interval_seconds
self.max_iterations = max_iterations
self.task_arn = task_arn
self.source_location_uri = source_location_uri
self.destination_location_uri = destination_location_uri
self.allow_random_task_choice = allow_random_task_choice
self.allow_random_location_choice = allow_random_location_choice
self.create_task_kwargs = create_task_kwargs if create_task_kwargs else {}
self.create_source_location_kwargs = {}
if create_source_location_kwargs:
self.create_source_location_kwargs = create_source_location_kwargs
self.create_destination_location_kwargs = {}
if create_destination_location_kwargs:<|fim▁hole|> self.update_task_kwargs = update_task_kwargs if update_task_kwargs else {}
self.task_execution_kwargs = task_execution_kwargs if task_execution_kwargs else {}
self.delete_task_after_execution = delete_task_after_execution
# Validations
valid = False
if self.task_arn:
valid = True
if self.source_location_uri and self.destination_location_uri:
valid = True
if not valid:
raise AirflowException(
"Either specify task_arn or both source_location_uri and destination_location_uri. "
"task_arn={} source_location_uri={} destination_location_uri={}".format(
task_arn, source_location_uri, destination_location_uri
)
)
# Others
self.hook: Optional[AWSDataSyncHook] = None
# Candidates - these are found in AWS as possible things
# for us to use
self.candidate_source_location_arns: Optional[List[str]] = None
self.candidate_destination_location_arns: Optional[List[str]] = None
self.candidate_task_arns: Optional[List[str]] = None
# Actuals
self.source_location_arn: Optional[str] = None
self.destination_location_arn: Optional[str] = None
self.task_execution_arn: Optional[str] = None
def get_hook(self) -> AWSDataSyncHook:
"""Create and return AWSDataSyncHook.
:return AWSDataSyncHook: An AWSDataSyncHook instance.
"""
if self.hook:
return self.hook
self.hook = AWSDataSyncHook(
aws_conn_id=self.aws_conn_id,
wait_interval_seconds=self.wait_interval_seconds,
)
return self.hook
def execute(self, context):
# If task_arn was not specified then try to
# find 0, 1 or many candidate DataSync Tasks to run
if not self.task_arn:
self._get_tasks_and_locations()
# If some were found, identify which one to run
if self.candidate_task_arns:
self.task_arn = self.choose_task(self.candidate_task_arns)
# If we could not find one then try to create one
if not self.task_arn and self.create_task_kwargs:
self._create_datasync_task()
if not self.task_arn:
raise AirflowException("DataSync TaskArn could not be identified or created.")
self.log.info("Using DataSync TaskArn %s", self.task_arn)
# Update the DataSync Task
if self.update_task_kwargs:
self._update_datasync_task()
# Execute the DataSync Task
self._execute_datasync_task()
if not self.task_execution_arn:
raise AirflowException("Nothing was executed")
# Delete the DataSyncTask
if self.delete_task_after_execution:
self._delete_datasync_task()
return {"TaskArn": self.task_arn, "TaskExecutionArn": self.task_execution_arn}
def _get_tasks_and_locations(self) -> None:
"""Find existing DataSync Task based on source and dest Locations."""
hook = self.get_hook()
self.candidate_source_location_arns = self._get_location_arns(self.source_location_uri)
self.candidate_destination_location_arns = self._get_location_arns(self.destination_location_uri)
if not self.candidate_source_location_arns:
self.log.info("No matching source Locations")
return
if not self.candidate_destination_location_arns:
self.log.info("No matching destination Locations")
return
self.log.info("Finding DataSync TaskArns that have these LocationArns")
self.candidate_task_arns = hook.get_task_arns_for_location_arns(
self.candidate_source_location_arns,
self.candidate_destination_location_arns,
)
self.log.info("Found candidate DataSync TaskArns %s", self.candidate_task_arns)
def choose_task(self, task_arn_list: list) -> Optional[str]:
"""Select 1 DataSync TaskArn from a list"""
if not task_arn_list:
return None
if len(task_arn_list) == 1:
return task_arn_list[0]
if self.allow_random_task_choice:
# Items are unordered so we don't want to just take
# the [0] one as it implies ordered items were received
# from AWS and might lead to confusion. Rather explicitly
# choose a random one
return random.choice(task_arn_list)
raise AirflowException(f"Unable to choose a Task from {task_arn_list}")
def choose_location(self, location_arn_list: Optional[List[str]]) -> Optional[str]:
"""Select 1 DataSync LocationArn from a list"""
if not location_arn_list:
return None
if len(location_arn_list) == 1:
return location_arn_list[0]
if self.allow_random_location_choice:
# Items are unordered so we don't want to just take
# the [0] one as it implies ordered items were received
# from AWS and might lead to confusion. Rather explicitly
# choose a random one
return random.choice(location_arn_list)
raise AirflowException(f"Unable to choose a Location from {location_arn_list}")
def _create_datasync_task(self) -> None:
"""Create a AWS DataSyncTask."""
hook = self.get_hook()
self.source_location_arn = self.choose_location(self.candidate_source_location_arns)
if not self.source_location_arn and self.source_location_uri and self.create_source_location_kwargs:
self.log.info('Attempting to create source Location')
self.source_location_arn = hook.create_location(
self.source_location_uri, **self.create_source_location_kwargs
)
if not self.source_location_arn:
raise AirflowException(
"Unable to determine source LocationArn. Does a suitable DataSync Location exist?"
)
self.destination_location_arn = self.choose_location(self.candidate_destination_location_arns)
if (
not self.destination_location_arn
and self.destination_location_uri
and self.create_destination_location_kwargs
):
self.log.info('Attempting to create destination Location')
self.destination_location_arn = hook.create_location(
self.destination_location_uri, **self.create_destination_location_kwargs
)
if not self.destination_location_arn:
raise AirflowException(
"Unable to determine destination LocationArn. Does a suitable DataSync Location exist?"
)
self.log.info("Creating a Task.")
self.task_arn = hook.create_task(
self.source_location_arn, self.destination_location_arn, **self.create_task_kwargs
)
if not self.task_arn:
raise AirflowException("Task could not be created")
self.log.info("Created a Task with TaskArn %s", self.task_arn)
def _update_datasync_task(self) -> None:
"""Update a AWS DataSyncTask."""
if not self.task_arn:
return
hook = self.get_hook()
self.log.info("Updating TaskArn %s", self.task_arn)
hook.update_task(self.task_arn, **self.update_task_kwargs)
self.log.info("Updated TaskArn %s", self.task_arn)
def _execute_datasync_task(self) -> None:
"""Create and monitor an AWSDataSync TaskExecution for a Task."""
if not self.task_arn:
raise AirflowException("Missing TaskArn")
hook = self.get_hook()
# Create a task execution:
self.log.info("Starting execution for TaskArn %s", self.task_arn)
self.task_execution_arn = hook.start_task_execution(self.task_arn, **self.task_execution_kwargs)
self.log.info("Started TaskExecutionArn %s", self.task_execution_arn)
# Wait for task execution to complete
self.log.info("Waiting for TaskExecutionArn %s", self.task_execution_arn)
try:
result = hook.wait_for_task_execution(self.task_execution_arn, max_iterations=self.max_iterations)
except (AirflowTaskTimeout, AirflowException) as e:
self.log.error('Cancelling TaskExecution after Exception: %s', e)
self._cancel_datasync_task_execution()
raise
self.log.info("Completed TaskExecutionArn %s", self.task_execution_arn)
task_execution_description = hook.describe_task_execution(task_execution_arn=self.task_execution_arn)
self.log.info("task_execution_description=%s", task_execution_description)
# Log some meaningful statuses
level = logging.ERROR if not result else logging.INFO
self.log.log(level, 'Status=%s', task_execution_description['Status'])
if 'Result' in task_execution_description:
for k, v in task_execution_description['Result'].items():
if 'Status' in k or 'Error' in k:
self.log.log(level, '%s=%s', k, v)
if not result:
raise AirflowException(f"Failed TaskExecutionArn {self.task_execution_arn}")
def _cancel_datasync_task_execution(self):
"""Cancel the submitted DataSync task."""
hook = self.get_hook()
if self.task_execution_arn:
self.log.info("Cancelling TaskExecutionArn %s", self.task_execution_arn)
hook.cancel_task_execution(task_execution_arn=self.task_execution_arn)
self.log.info("Cancelled TaskExecutionArn %s", self.task_execution_arn)
def on_kill(self):
self.log.error('Cancelling TaskExecution after task was killed')
self._cancel_datasync_task_execution()
def _delete_datasync_task(self) -> None:
"""Deletes an AWS DataSync Task."""
if not self.task_arn:
return
hook = self.get_hook()
# Delete task:
self.log.info("Deleting Task with TaskArn %s", self.task_arn)
hook.delete_task(self.task_arn)
self.log.info("Task Deleted")
def _get_location_arns(self, location_uri) -> List[str]:
location_arns = self.get_hook().get_location_arns(location_uri)
self.log.info("Found LocationArns %s for LocationUri %s", location_arns, location_uri)
return location_arns<|fim▁end|> | self.create_destination_location_kwargs = create_destination_location_kwargs
|
<|file_name|>StandardCompiler.cpp<|end_file_name|><|fim▁begin|>/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @date 2017
* Unit tests for interface/StandardCompiler.h.
*/
#include <string>
#include <boost/test/unit_test.hpp>
#include <libsolidity/interface/StandardCompiler.h>
#include <libsolidity/interface/Version.h>
#include <libsolutil/JSON.h>
#include <test/Metadata.h>
using namespace std;
using namespace solidity::evmasm;
namespace solidity::frontend::test
{
namespace
{
/// Helper to match a specific error type and message
bool containsError(Json::Value const& _compilerResult, string const& _type, string const& _message)
{
if (!_compilerResult.isMember("errors"))
return false;
for (auto const& error: _compilerResult["errors"])
{
BOOST_REQUIRE(error.isObject());
BOOST_REQUIRE(error["type"].isString());
BOOST_REQUIRE(error["message"].isString());
if ((error["type"].asString() == _type) && (error["message"].asString() == _message))
return true;
}
return false;
}
bool containsAtMostWarnings(Json::Value const& _compilerResult)
{
if (!_compilerResult.isMember("errors"))
return true;
for (auto const& error: _compilerResult["errors"])
{
BOOST_REQUIRE(error.isObject());
BOOST_REQUIRE(error["severity"].isString());
if (error["severity"].asString() != "warning")
return false;
}
return true;
}
Json::Value getContractResult(Json::Value const& _compilerResult, string const& _file, string const& _name)
{
if (
!_compilerResult["contracts"].isObject() ||
!_compilerResult["contracts"][_file].isObject() ||
!_compilerResult["contracts"][_file][_name].isObject()
)
return Json::Value();
return _compilerResult["contracts"][_file][_name];
}
Json::Value compile(string _input)
{
StandardCompiler compiler;
string output = compiler.compile(std::move(_input));
Json::Value ret;
BOOST_REQUIRE(util::jsonParseStrict(output, ret));
return ret;
}
} // end anonymous namespace
BOOST_AUTO_TEST_SUITE(StandardCompiler)
BOOST_AUTO_TEST_CASE(assume_object_input)
{
Json::Value result;
/// Use the native JSON interface of StandardCompiler to trigger these
frontend::StandardCompiler compiler;
result = compiler.compile(Json::Value());
BOOST_CHECK(containsError(result, "JSONError", "Input is not a JSON object."));
result = compiler.compile(Json::Value("INVALID"));
BOOST_CHECK(containsError(result, "JSONError", "Input is not a JSON object."));
/// Use the string interface of StandardCompiler to trigger these
result = compile("");
BOOST_CHECK(containsError(result, "JSONError", "* Line 1, Column 1\n Syntax error: value, object or array expected.\n* Line 1, Column 1\n A valid JSON document must be either an array or an object value.\n"));
result = compile("invalid");
BOOST_CHECK(containsError(result, "JSONError", "* Line 1, Column 1\n Syntax error: value, object or array expected.\n* Line 1, Column 2\n Extra non-whitespace after JSON value.\n"));
result = compile("\"invalid\"");
BOOST_CHECK(containsError(result, "JSONError", "* Line 1, Column 1\n A valid JSON document must be either an array or an object value.\n"));
BOOST_CHECK(!containsError(result, "JSONError", "* Line 1, Column 1\n Syntax error: value, object or array expected.\n"));
result = compile("{}");
BOOST_CHECK(!containsError(result, "JSONError", "* Line 1, Column 1\n Syntax error: value, object or array expected.\n"));
BOOST_CHECK(!containsAtMostWarnings(result));
}
BOOST_AUTO_TEST_CASE(invalid_language)
{
char const* input = R"(
{
"language": "INVALID",
"sources": { "name": { "content": "abc" } }
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "Only \"Solidity\" or \"Yul\" is supported as a language."));
}
BOOST_AUTO_TEST_CASE(valid_language)
{
char const* input = R"(
{
"language": "Solidity"
}
)";
Json::Value result = compile(input);
BOOST_CHECK(!containsError(result, "JSONError", "Only \"Solidity\" or \"Yul\" is supported as a language."));
}
BOOST_AUTO_TEST_CASE(no_sources)
{
char const* input = R"(
{
"language": "Solidity"
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "No input sources specified."));
}
BOOST_AUTO_TEST_CASE(no_sources_empty_object)
{
char const* input = R"(
{
"language": "Solidity",
"sources": {}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "No input sources specified."));
}
BOOST_AUTO_TEST_CASE(no_sources_empty_array)
{
char const* input = R"(
{
"language": "Solidity",
"sources": []
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "\"sources\" is not a JSON object."));
}
BOOST_AUTO_TEST_CASE(sources_is_array)
{
char const* input = R"(
{
"language": "Solidity",
"sources": ["aa", "bb"]
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "\"sources\" is not a JSON object."));
}
BOOST_AUTO_TEST_CASE(unexpected_trailing_test)
{
char const* input = R"(
{
"language": "Solidity",
"sources": {
"A": {
"content": "contract A { function f() {} }"
}
}
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "* Line 10, Column 2\n Extra non-whitespace after JSON value.\n"));
}
BOOST_AUTO_TEST_CASE(smoke_test)
{
char const* input = R"(
{
"language": "Solidity",
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
}
BOOST_AUTO_TEST_CASE(error_recovery_field)
{
auto input = R"(
{
"language": "Solidity",
"settings": {
"parserErrorRecovery": "1"
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "\"settings.parserErrorRecovery\" must be a Boolean."));
input = R"(
{
"language": "Solidity",
"settings": {
"parserErrorRecovery": true
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
}
BOOST_AUTO_TEST_CASE(optimizer_enabled_not_boolean)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"optimizer": {
"enabled": "wrong"
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "The \"enabled\" setting must be a Boolean."));
}
BOOST_AUTO_TEST_CASE(optimizer_runs_not_a_number)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"optimizer": {
"enabled": true,
"runs": "not a number"
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "The \"runs\" setting must be an unsigned number."));
}
BOOST_AUTO_TEST_CASE(optimizer_runs_not_an_unsigned_number)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"optimizer": {
"enabled": true,
"runs": -1
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "The \"runs\" setting must be an unsigned number."));
}
BOOST_AUTO_TEST_CASE(basic_compilation)
{
char const* input = R"(
{
"language": "Solidity",
"sources": {
"fileA": {
"content": "contract A { }"
}
},
"settings": {
"outputSelection": {
"fileA": {
"A": [ "abi", "devdoc", "userdoc", "evm.bytecode", "evm.assembly", "evm.gasEstimates", "evm.legacyAssembly", "metadata" ],
"": [ "legacyAST" ]
}
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[]");
BOOST_CHECK(contract["devdoc"].isObject());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["devdoc"]), "{\"methods\":{}}");
BOOST_CHECK(contract["userdoc"].isObject());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["userdoc"]), "{\"methods\":{}}");
BOOST_CHECK(contract["evm"].isObject());
/// @TODO check evm.methodIdentifiers, legacyAssembly, bytecode, deployedBytecode
BOOST_CHECK(contract["evm"]["bytecode"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["object"].isString());
BOOST_CHECK_EQUAL(
solidity::test::bytecodeSansMetadata(contract["evm"]["bytecode"]["object"].asString()),
string("6080604052348015600f57600080fd5b5060") +
(VersionIsRelease ? "3f" : util::toHex(bytes{uint8_t(61 + VersionStringStrict.size())})) +
"80601d6000396000f3fe6080604052600080fdfe"
);
BOOST_CHECK(contract["evm"]["assembly"].isString());
BOOST_CHECK(contract["evm"]["assembly"].asString().find(
" /* \"fileA\":0:14 contract A { } */\n mstore(0x40, 0x80)\n "
"callvalue\n /* \"--CODEGEN--\":8:17 */\n dup1\n "
"/* \"--CODEGEN--\":5:7 */\n iszero\n tag_1\n jumpi\n "
"/* \"--CODEGEN--\":30:31 */\n 0x00\n /* \"--CODEGEN--\":27:28 */\n "
"dup1\n /* \"--CODEGEN--\":20:32 */\n revert\n /* \"--CODEGEN--\":5:7 */\n"
"tag_1:\n /* \"fileA\":0:14 contract A { } */\n pop\n dataSize(sub_0)\n dup1\n "
"dataOffset(sub_0)\n 0x00\n codecopy\n 0x00\n return\nstop\n\nsub_0: assembly {\n "
"/* \"fileA\":0:14 contract A { } */\n mstore(0x40, 0x80)\n 0x00\n "
"dup1\n revert\n\n auxdata: 0xa26469706673582212"
) == 0);
BOOST_CHECK(contract["evm"]["gasEstimates"].isObject());
BOOST_CHECK_EQUAL(contract["evm"]["gasEstimates"].size(), 1);
BOOST_CHECK(contract["evm"]["gasEstimates"]["creation"].isObject());
BOOST_CHECK_EQUAL(contract["evm"]["gasEstimates"]["creation"].size(), 3);
BOOST_CHECK(contract["evm"]["gasEstimates"]["creation"]["codeDepositCost"].isString());
BOOST_CHECK(contract["evm"]["gasEstimates"]["creation"]["executionCost"].isString());
BOOST_CHECK(contract["evm"]["gasEstimates"]["creation"]["totalCost"].isString());
BOOST_CHECK_EQUAL(
u256(contract["evm"]["gasEstimates"]["creation"]["codeDepositCost"].asString()) +
u256(contract["evm"]["gasEstimates"]["creation"]["executionCost"].asString()),
u256(contract["evm"]["gasEstimates"]["creation"]["totalCost"].asString())
);
// Lets take the top level `.code` section (the "deployer code"), that should expose most of the features of
// the assembly JSON. What we want to check here is Operation, Push, PushTag, PushSub, PushSubSize and Tag.
BOOST_CHECK(contract["evm"]["legacyAssembly"].isObject());
BOOST_CHECK(contract["evm"]["legacyAssembly"][".code"].isArray());
BOOST_CHECK_EQUAL(
util::jsonCompactPrint(contract["evm"]["legacyAssembly"][".code"]),
"[{\"begin\":0,\"end\":14,\"name\":\"PUSH\",\"value\":\"80\"},"
"{\"begin\":0,\"end\":14,\"name\":\"PUSH\",\"value\":\"40\"},"
"{\"begin\":0,\"end\":14,\"name\":\"MSTORE\"},"
"{\"begin\":0,\"end\":14,\"name\":\"CALLVALUE\"},"
"{\"begin\":8,\"end\":17,\"name\":\"DUP1\"},"
"{\"begin\":5,\"end\":7,\"name\":\"ISZERO\"},"
"{\"begin\":5,\"end\":7,\"name\":\"PUSH [tag]\",\"value\":\"1\"},"
"{\"begin\":5,\"end\":7,\"name\":\"JUMPI\"},"
"{\"begin\":30,\"end\":31,\"name\":\"PUSH\",\"value\":\"0\"},"
"{\"begin\":27,\"end\":28,\"name\":\"DUP1\"},"
"{\"begin\":20,\"end\":32,\"name\":\"REVERT\"},"
"{\"begin\":5,\"end\":7,\"name\":\"tag\",\"value\":\"1\"},"
"{\"begin\":5,\"end\":7,\"name\":\"JUMPDEST\"},"
"{\"begin\":0,\"end\":14,\"name\":\"POP\"},"
"{\"begin\":0,\"end\":14,\"name\":\"PUSH #[$]\",\"value\":\"0000000000000000000000000000000000000000000000000000000000000000\"},"
"{\"begin\":0,\"end\":14,\"name\":\"DUP1\"},"
"{\"begin\":0,\"end\":14,\"name\":\"PUSH [$]\",\"value\":\"0000000000000000000000000000000000000000000000000000000000000000\"},"
"{\"begin\":0,\"end\":14,\"name\":\"PUSH\",\"value\":\"0\"},"
"{\"begin\":0,\"end\":14,\"name\":\"CODECOPY\"},"
"{\"begin\":0,\"end\":14,\"name\":\"PUSH\",\"value\":\"0\"},"
"{\"begin\":0,\"end\":14,\"name\":\"RETURN\"}]"
);
BOOST_CHECK(contract["metadata"].isString());
BOOST_CHECK(solidity::test::isValidMetadata(contract["metadata"].asString()));
BOOST_CHECK(result["sources"].isObject());
BOOST_CHECK(result["sources"]["fileA"].isObject());
BOOST_CHECK(result["sources"]["fileA"]["legacyAST"].isObject());
BOOST_CHECK_EQUAL(
util::jsonCompactPrint(result["sources"]["fileA"]["legacyAST"]),
"{\"attributes\":{\"absolutePath\":\"fileA\",\"exportedSymbols\":{\"A\":[1]}},\"children\":"
"[{\"attributes\":{\"abstract\":false,\"baseContracts\":[null],\"contractDependencies\":[null],\"contractKind\":\"contract\","
"\"documentation\":null,\"fullyImplemented\":true,\"linearizedBaseContracts\":[1],\"name\":\"A\",\"nodes\":[null],\"scope\":2},"
"\"id\":1,\"name\":\"ContractDefinition\",\"src\":\"0:14:0\"}],\"id\":2,\"name\":\"SourceUnit\",\"src\":\"0:14:0\"}"
);
}
BOOST_AUTO_TEST_CASE(compilation_error)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": {
"A": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "contract A { function }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(result.isMember("errors"));
BOOST_CHECK(result["errors"].size() >= 1);
for (auto const& error: result["errors"])
{
BOOST_REQUIRE(error.isObject());
BOOST_REQUIRE(error["message"].isString());
if (error["message"].asString().find("pre-release compiler") == string::npos)
{
BOOST_CHECK_EQUAL(
util::jsonCompactPrint(error),
"{\"component\":\"general\",\"formattedMessage\":\"fileA:1:23: ParserError: Expected identifier but got '}'\\n"
"contract A { function }\\n ^\\n\",\"message\":\"Expected identifier but got '}'\","
"\"severity\":\"error\",\"sourceLocation\":{\"end\":23,\"file\":\"fileA\",\"start\":22},\"type\":\"ParserError\"}"
);
}
}
}
BOOST_AUTO_TEST_CASE(output_selection_explicit)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": {
"A": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[]");
}
BOOST_AUTO_TEST_CASE(output_selection_all_contracts)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": {
"*": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[]");
}
BOOST_AUTO_TEST_CASE(output_selection_all_files_single_contract)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"*": {
"A": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[]");
}
BOOST_AUTO_TEST_CASE(output_selection_all_files_all_contracts)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"*": {
"*": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[]");
}
BOOST_AUTO_TEST_CASE(output_selection_dependent_contract)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"*": {
"A": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "contract B { } contract A { function f() public { new B(); } }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[{\"inputs\":[],\"name\":\"f\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]");
}
BOOST_AUTO_TEST_CASE(output_selection_dependent_contract_with_import)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"*": {
"A": [
"abi"
]
}
}
},
"sources": {
"fileA": {
"content": "import \"fileB\"; contract A { function f() public { new B(); } }"
},
"fileB": {
"content": "contract B { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[{\"inputs\":[],\"name\":\"f\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]");
}
BOOST_AUTO_TEST_CASE(filename_with_colon)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"http://github.com/ethereum/solidity/std/StandardToken.sol": {
"A": [
"abi"
]
}
}
},
"sources": {
"http://github.com/ethereum/solidity/std/StandardToken.sol": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "http://github.com/ethereum/solidity/std/StandardToken.sol", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["abi"].isArray());
BOOST_CHECK_EQUAL(util::jsonCompactPrint(contract["abi"]), "[]");
}
BOOST_AUTO_TEST_CASE(library_filename_with_colon)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": {
"A": [
"evm.bytecode"
]
}
}
},
"sources": {
"fileA": {
"content": "import \"git:library.sol\"; contract A { function f() public returns (uint) { return L.g(); } }"
},
"git:library.sol": {
"content": "library L { function g() public returns (uint) { return 1; } }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["evm"]["bytecode"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"]["git:library.sol"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"]["git:library.sol"]["L"].isArray());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"]["git:library.sol"]["L"][0].isObject());
}
BOOST_AUTO_TEST_CASE(libraries_invalid_top_level)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"libraries": "42"
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "\"libraries\" is not a JSON object."));
}
BOOST_AUTO_TEST_CASE(libraries_invalid_entry)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"libraries": {
"L": "42"
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "Library entry is not a JSON object."));
}
BOOST_AUTO_TEST_CASE(libraries_invalid_hex)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"libraries": {
"library.sol": {
"L": "0x4200000000000000000000000000000000000xx1"
}
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "Invalid library address (\"0x4200000000000000000000000000000000000xx1\") supplied."));
}
BOOST_AUTO_TEST_CASE(libraries_invalid_length)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"libraries": {
"library.sol": {
"L1": "0x42",
"L2": "0x4200000000000000000000000000000000000001ff"
}
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "Library address is of invalid length."));
}
BOOST_AUTO_TEST_CASE(libraries_missing_hex_prefix)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"libraries": {
"library.sol": {
"L": "4200000000000000000000000000000000000001"
}
}
},
"sources": {
"empty": {
"content": ""
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsError(result, "JSONError", "Library address is not prefixed with \"0x\"."));
}
BOOST_AUTO_TEST_CASE(library_linking)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"libraries": {
"library.sol": {
"L": "0x4200000000000000000000000000000000000001"
}
},
"outputSelection": {
"fileA": {
"A": [
"evm.bytecode"
]
}
}
},
"sources": {
"fileA": {
"content": "import \"library.sol\"; import \"library2.sol\"; contract A { function f() public returns (uint) { L2.g(); return L.g(); } }"
},
"library.sol": {
"content": "library L { function g() public returns (uint) { return 1; } }"
},
"library2.sol": {
"content": "library L2 { function g() public { } }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["evm"]["bytecode"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"].isObject());
BOOST_CHECK(!contract["evm"]["bytecode"]["linkReferences"]["library.sol"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"]["library2.sol"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"]["library2.sol"]["L2"].isArray());
BOOST_CHECK(contract["evm"]["bytecode"]["linkReferences"]["library2.sol"]["L2"][0].isObject());
}
BOOST_AUTO_TEST_CASE(evm_version)
{
auto inputForVersion = [](string const& _version)
{
return R"(
{
"language": "Solidity",
"sources": { "fileA": { "content": "contract A { }" } },
"settings": {
)" + _version + R"(
"outputSelection": {
"fileA": {
"A": [ "metadata" ]
}
}
}
}
)";
};
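// The compiler records settings.evmVersion inside the metadata output, so each check greps the serialised metadata string.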
Json::Value result;
result = compile(inputForVersion("\"evmVersion\": \"homestead\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"homestead\"") != string::npos);
result = compile(inputForVersion("\"evmVersion\": \"tangerineWhistle\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"tangerineWhistle\"") != string::npos);
result = compile(inputForVersion("\"evmVersion\": \"spuriousDragon\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"spuriousDragon\"") != string::npos);
result = compile(inputForVersion("\"evmVersion\": \"byzantium\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"byzantium\"") != string::npos);
result = compile(inputForVersion("\"evmVersion\": \"constantinople\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"constantinople\"") != string::npos);
result = compile(inputForVersion("\"evmVersion\": \"petersburg\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"petersburg\"") != string::npos);
result = compile(inputForVersion("\"evmVersion\": \"istanbul\","));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"istanbul\"") != string::npos);
// test default
result = compile(inputForVersion(""));
BOOST_CHECK(result["contracts"]["fileA"]["A"]["metadata"].asString().find("\"evmVersion\":\"istanbul\"") != string::npos);
// test invalid
result = compile(inputForVersion("\"evmVersion\": \"invalid\","));
BOOST_CHECK(result["errors"][0]["message"].asString() == "Invalid EVM version requested.");
}
BOOST_AUTO_TEST_CASE(optimizer_settings_default_disabled)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": { "A": [ "metadata" ] }
}
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["metadata"].isString());
Json::Value metadata;
BOOST_CHECK(util::jsonParseStrict(contract["metadata"].asString(), metadata));
Json::Value const& optimizer = metadata["settings"]["optimizer"];
BOOST_CHECK(optimizer.isMember("enabled"));
BOOST_CHECK(optimizer["enabled"].asBool() == false);
BOOST_CHECK(!optimizer.isMember("details"));
BOOST_CHECK(optimizer["runs"].asUInt() == 200);
}
BOOST_AUTO_TEST_CASE(optimizer_settings_default_enabled)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": { "A": [ "metadata" ] }
},
"optimizer": { "enabled": true }
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["metadata"].isString());
Json::Value metadata;
BOOST_CHECK(util::jsonParseStrict(contract["metadata"].asString(), metadata));
Json::Value const& optimizer = metadata["settings"]["optimizer"];
BOOST_CHECK(optimizer.isMember("enabled"));
BOOST_CHECK(optimizer["enabled"].asBool() == true);
BOOST_CHECK(!optimizer.isMember("details"));
BOOST_CHECK(optimizer["runs"].asUInt() == 200);
}
BOOST_AUTO_TEST_CASE(optimizer_settings_details_exactly_as_default_disabled)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": { "A": [ "metadata" ] }
},
"optimizer": { "details": {
"constantOptimizer" : false,
"cse" : false,
"deduplicate" : false,
"jumpdestRemover" : true,
"orderLiterals" : false,
"peephole" : true
} }
},
"sources": {
"fileA": {
"content": "contract A { }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["metadata"].isString());
Json::Value metadata;
BOOST_CHECK(util::jsonParseStrict(contract["metadata"].asString(), metadata));
Json::Value const& optimizer = metadata["settings"]["optimizer"];
BOOST_CHECK(optimizer.isMember("enabled"));
// details that exactly match the defaults are not stored; "enabled" is switched to false instead!
BOOST_CHECK(optimizer["enabled"].asBool() == false);
BOOST_CHECK(!optimizer.isMember("details"));
BOOST_CHECK(optimizer["runs"].asUInt() == 200);
}
BOOST_AUTO_TEST_CASE(optimizer_settings_details_different)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": { "A": [ "metadata" ] }
},
"optimizer": { "runs": 600, "details": {
"constantOptimizer" : true,
"cse" : false,
"deduplicate" : true,
"jumpdestRemover" : true,
"orderLiterals" : false,
"peephole" : true,
"yul": true
} }
},<|fim▁hole|> }
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["metadata"].isString());
Json::Value metadata;
BOOST_CHECK(util::jsonParseStrict(contract["metadata"].asString(), metadata));
Json::Value const& optimizer = metadata["settings"]["optimizer"];
BOOST_CHECK(!optimizer.isMember("enabled"));
BOOST_CHECK(optimizer.isMember("details"));
BOOST_CHECK(optimizer["details"]["constantOptimizer"].asBool() == true);
BOOST_CHECK(optimizer["details"]["cse"].asBool() == false);
BOOST_CHECK(optimizer["details"]["deduplicate"].asBool() == true);
BOOST_CHECK(optimizer["details"]["jumpdestRemover"].asBool() == true);
BOOST_CHECK(optimizer["details"]["orderLiterals"].asBool() == false);
BOOST_CHECK(optimizer["details"]["peephole"].asBool() == true);
BOOST_CHECK(optimizer["details"]["yul"].asBool() == true);
BOOST_CHECK(optimizer["details"]["yulDetails"].isObject());
BOOST_CHECK(optimizer["details"]["yulDetails"].getMemberNames() == vector<string>{"stackAllocation"});
BOOST_CHECK(optimizer["details"]["yulDetails"]["stackAllocation"].asBool() == true);
BOOST_CHECK_EQUAL(optimizer["details"].getMemberNames().size(), 8);
BOOST_CHECK(optimizer["runs"].asUInt() == 600);
}
BOOST_AUTO_TEST_CASE(metadata_without_compilation)
{
// NOTE: the contract code here should fail to compile due to "out of stack"
// If the metadata is successfully returned, that means no compilation was attempted.
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"fileA": { "A": [ "metadata" ] }
}
},
"sources": {
"fileA": {
"content": "contract A {
function x(uint a, uint b, uint c, uint d, uint e, uint f, uint g, uint h, uint i, uint j, uint k, uint l, uint m, uint n, uint o, uint p) pure public {}
function y() pure public {
uint a; uint b; uint c; uint d; uint e; uint f; uint g; uint h; uint i; uint j; uint k; uint l; uint m; uint n; uint o; uint p;
x(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p);
}
}"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["metadata"].isString());
BOOST_CHECK(solidity::test::isValidMetadata(contract["metadata"].asString()));
}
BOOST_AUTO_TEST_CASE(common_pattern)
{
char const* input = R"(
{
"language": "Solidity",
"settings": {
"outputSelection": {
"*": {
"*": [ "evm.bytecode.object", "metadata" ]
}
}
},
"sources": {
"fileA": {
"content": "contract A { function f() pure public {} }"
}
}
}
)";
Json::Value result = compile(input);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_CHECK(contract.isObject());
BOOST_CHECK(contract["metadata"].isString());
BOOST_CHECK(solidity::test::isValidMetadata(contract["metadata"].asString()));
BOOST_CHECK(contract["evm"]["bytecode"].isObject());
BOOST_CHECK(contract["evm"]["bytecode"]["object"].isString());
}
BOOST_AUTO_TEST_CASE(use_stack_optimization)
{
// NOTE: the contract code here should fail to compile due to "out of stack"
// If we enable stack optimization, though, it will compile.
char const* input = R"(
{
"language": "Solidity",
"settings": {
"optimizer": { "enabled": true, "details": { "yul": true } },
"outputSelection": {
"fileA": { "A": [ "evm.bytecode.object" ] }
}
},
"sources": {
"fileA": {
"content": "contract A {
function y() public {
assembly {
function fun() -> a3, b3, c3, d3, e3, f3, g3, h3, i3, j3, k3, l3, m3, n3, o3, p3
{
let a := 1
let b := 1
let z3 := 1
sstore(a, b)
sstore(add(a, 1), b)
sstore(add(a, 2), b)
sstore(add(a, 3), b)
sstore(add(a, 4), b)
sstore(add(a, 5), b)
sstore(add(a, 6), b)
sstore(add(a, 7), b)
sstore(add(a, 8), b)
sstore(add(a, 9), b)
sstore(add(a, 10), b)
sstore(add(a, 11), b)
sstore(add(a, 12), b)
}
let a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1, p1 := fun()
let a2, b2, c2, d2, e2, f2, g2, h2, i2, j2, k2, l2, m2, n2, o2, p2 := fun()
sstore(a1, a2)
}
}
}"
}
}
}
)";
Json::Value parsedInput;
BOOST_REQUIRE(util::jsonParseStrict(input, parsedInput));
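// Parse the input into a mutable Json::Value so the settings can be tweaked and the same document recompiled below.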
solidity::frontend::StandardCompiler compiler;
Json::Value result = compiler.compile(parsedInput);
BOOST_CHECK(containsAtMostWarnings(result));
Json::Value contract = getContractResult(result, "fileA", "A");
BOOST_REQUIRE(contract.isObject());
BOOST_REQUIRE(contract["evm"]["bytecode"]["object"].isString());
BOOST_CHECK(contract["evm"]["bytecode"]["object"].asString().length() > 20);
// Now disable stack optimizations; this results in "stack too deep"
parsedInput["settings"]["optimizer"]["details"]["yulDetails"]["stackAllocation"] = false;
result = compiler.compile(parsedInput);
BOOST_REQUIRE(result["errors"].isArray());
BOOST_CHECK(result["errors"][0]["severity"] == "error");
BOOST_REQUIRE(result["errors"][0]["message"].isString());
BOOST_CHECK(result["errors"][0]["message"].asString().find("Stack too deep when compiling inline assembly") != std::string::npos);
BOOST_CHECK(result["errors"][0]["type"] == "YulException");
}
BOOST_AUTO_TEST_CASE(standard_output_selection_wildcard)
{
char const* input = R"(
{
"language": "Solidity",
"sources":
{
"A":
{
"content": "pragma solidity >=0.0; contract C { function f() public pure {} }"
}
},
"settings":
{
"outputSelection":
{
"*": { "C": ["evm.bytecode"] }
}
}
}
)";
Json::Value parsedInput;
BOOST_REQUIRE(util::jsonParseStrict(input, parsedInput));
solidity::frontend::StandardCompiler compiler;
Json::Value result = compiler.compile(parsedInput);
BOOST_REQUIRE(result["contracts"].isObject());
BOOST_REQUIRE(result["contracts"].size() == 1);
BOOST_REQUIRE(result["contracts"]["A"].isObject());
BOOST_REQUIRE(result["contracts"]["A"].size() == 1);
BOOST_REQUIRE(result["contracts"]["A"]["C"].isObject());
BOOST_REQUIRE(result["contracts"]["A"]["C"]["evm"].isObject());
BOOST_REQUIRE(result["contracts"]["A"]["C"]["evm"]["bytecode"].isObject());
BOOST_REQUIRE(result["sources"].isObject());
BOOST_REQUIRE(result["sources"].size() == 1);
BOOST_REQUIRE(result["sources"]["A"].isObject());
}
BOOST_AUTO_TEST_CASE(standard_output_selection_wildcard_colon_source)
{
char const* input = R"(
{
"language": "Solidity",
"sources":
{
":A":
{
"content": "pragma solidity >=0.0; contract C { function f() public pure {} }"
}
},
"settings":
{
"outputSelection":
{
"*": { "C": ["evm.bytecode"] }
}
}
}
)";
Json::Value parsedInput;
BOOST_REQUIRE(util::jsonParseStrict(input, parsedInput));
solidity::frontend::StandardCompiler compiler;
Json::Value result = compiler.compile(parsedInput);
BOOST_REQUIRE(result["contracts"].isObject());
BOOST_REQUIRE(result["contracts"].size() == 1);
BOOST_REQUIRE(result["contracts"][":A"].isObject());
BOOST_REQUIRE(result["contracts"][":A"].size() == 1);
BOOST_REQUIRE(result["contracts"][":A"]["C"].isObject());
BOOST_REQUIRE(result["contracts"][":A"]["C"]["evm"].isObject());
BOOST_REQUIRE(result["contracts"][":A"]["C"]["evm"]["bytecode"].isObject());
BOOST_REQUIRE(result["sources"].isObject());
BOOST_REQUIRE(result["sources"].size() == 1);
BOOST_REQUIRE(result["sources"][":A"].isObject());
}
BOOST_AUTO_TEST_CASE(standard_output_selection_wildcard_empty_source)
{
char const* input = R"(
{
"language": "Solidity",
"sources":
{
"":
{
"content": "pragma solidity >=0.0; contract C { function f() public pure {} }"
}
},
"settings":
{
"outputSelection":
{
"*": { "C": ["evm.bytecode"] }
}
}
}
)";
Json::Value parsedInput;
BOOST_REQUIRE(util::jsonParseStrict(input, parsedInput));
solidity::frontend::StandardCompiler compiler;
Json::Value result = compiler.compile(parsedInput);
BOOST_REQUIRE(result["contracts"].isObject());
BOOST_REQUIRE(result["contracts"].size() == 1);
BOOST_REQUIRE(result["contracts"][""].isObject());
BOOST_REQUIRE(result["contracts"][""].size() == 1);
BOOST_REQUIRE(result["contracts"][""]["C"].isObject());
BOOST_REQUIRE(result["contracts"][""]["C"]["evm"].isObject());
BOOST_REQUIRE(result["contracts"][""]["C"]["evm"]["bytecode"].isObject());
BOOST_REQUIRE(result["sources"].isObject());
BOOST_REQUIRE(result["sources"].size() == 1);
BOOST_REQUIRE(result["sources"][""].isObject());
}
BOOST_AUTO_TEST_CASE(standard_output_selection_wildcard_multiple_sources)
{
char const* input = R"(
{
"language": "Solidity",
"sources":
{
"A":
{
"content": "pragma solidity >=0.0; contract C { function f() public pure {} }"
},
"B":
{
"content": "pragma solidity >=0.0; contract D { function f() public pure {} }"
}
},
"settings":
{
"outputSelection":
{
"*": { "D": ["evm.bytecode"] }
}
}
}
)";
Json::Value parsedInput;
BOOST_REQUIRE(util::jsonParseStrict(input, parsedInput));
solidity::frontend::StandardCompiler compiler;
Json::Value result = compiler.compile(parsedInput);
BOOST_REQUIRE(result["contracts"].isObject());
BOOST_REQUIRE(result["contracts"].size() == 1);
BOOST_REQUIRE(result["contracts"]["B"].isObject());
BOOST_REQUIRE(result["contracts"]["B"].size() == 1);
BOOST_REQUIRE(result["contracts"]["B"]["D"].isObject());
BOOST_REQUIRE(result["contracts"]["B"]["D"]["evm"].isObject());
BOOST_REQUIRE(result["contracts"]["B"]["D"]["evm"]["bytecode"].isObject());
BOOST_REQUIRE(result["sources"].isObject());
BOOST_REQUIRE(result["sources"].size() == 2);
BOOST_REQUIRE(result["sources"]["A"].isObject());
BOOST_REQUIRE(result["sources"]["B"].isObject());
}
BOOST_AUTO_TEST_SUITE_END()
} // end namespaces<|fim▁end|> | "sources": {
"fileA": {
"content": "contract A { }"
} |
<|file_name|>cryptographer.py<|end_file_name|><|fim▁begin|>from base64 import b64decode, b64encode
from hashlib import sha256
from Crypto import Random
from Crypto.Cipher import AES
from frontstage import app
class Cryptographer:
"""Manage the encryption and decryption of random byte strings"""
def __init__(self):
"""<|fim▁hole|>
:param key: The encryption key to use when encrypting the data
"""
key = app.config["SECRET_KEY"]
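# Hash the configured secret down to exactly 32 bytes, giving a valid AES-256 key.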
self._key = sha256(key.encode("utf-8")).digest()
def encrypt(self, raw_text):
"""
Encrypt the supplied text
:param raw_text: The data to encrypt; must be a byte string
:return: The encrypted text
"""
raw_text = self.pad(raw_text)
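# Generate a fresh random IV for every message; it is prepended to the ciphertext below (and the pair base64-encoded) so decrypt() can recover it.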
init_vector = Random.new().read(AES.block_size)
ons_cipher = AES.new(self._key, AES.MODE_CBC, init_vector)
return b64encode(init_vector + ons_cipher.encrypt(raw_text))
def decrypt(self, encrypted_text):
"""
Decrypt the supplied text
:param encrypted_text: The data to decrypt; must be a byte string
:return: The unencrypted text
"""
encrypted_text = b64decode(encrypted_text)
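# The first 16 bytes (AES.block_size) are the IV that encrypt() prepended.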
init_vector = encrypted_text[:16]
ons_cipher = AES.new(self._key, AES.MODE_CBC, init_vector)
return self.unpad(ons_cipher.decrypt(encrypted_text[16:]))
def pad(self, data):
"""
Pad the data out to the selected block size.
:param data: The data we're about to encrypt
:return: The data padded out to our given block size
"""
vector = AES.block_size - len(data) % AES.block_size
return data + ((bytes([vector])) * vector)
def unpad(self, data):
"""
Remove the padding from the supplied data.
:param data: The padded data
:return: The data with the padding removed
"""
return data[0 : -data[-1]]<|fim▁end|> | Set up the encryption key; this will come from an .ini file or from
an environment variable. Change the block size to suit the data supplied
or performance required. |
<|file_name|>datastore_v3_pb.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
from google.appengine.datastore.action_pb import *
import google.appengine.datastore.action_pb
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
from google.appengine.datastore.snapshot_pb import *
import google.appengine.datastore.snapshot_pb
class InternalHeader(ProtocolBuffer.ProtocolMessage):
has_requesting_app_id_ = 0
requesting_app_id_ = ""
has_requesting_project_id_ = 0
requesting_project_id_ = ""
has_requesting_version_id_ = 0
requesting_version_id_ = ""
has_api_settings_ = 0
api_settings_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def requesting_app_id(self): return self.requesting_app_id_
def set_requesting_app_id(self, x):
self.has_requesting_app_id_ = 1
self.requesting_app_id_ = x
def clear_requesting_app_id(self):
if self.has_requesting_app_id_:
self.has_requesting_app_id_ = 0
self.requesting_app_id_ = ""
def has_requesting_app_id(self): return self.has_requesting_app_id_
def requesting_project_id(self): return self.requesting_project_id_
def set_requesting_project_id(self, x):
self.has_requesting_project_id_ = 1
self.requesting_project_id_ = x
def clear_requesting_project_id(self):
if self.has_requesting_project_id_:
self.has_requesting_project_id_ = 0
self.requesting_project_id_ = ""
def has_requesting_project_id(self): return self.has_requesting_project_id_
def requesting_version_id(self): return self.requesting_version_id_
def set_requesting_version_id(self, x):
self.has_requesting_version_id_ = 1
self.requesting_version_id_ = x
def clear_requesting_version_id(self):
if self.has_requesting_version_id_:
self.has_requesting_version_id_ = 0
self.requesting_version_id_ = ""
def has_requesting_version_id(self): return self.has_requesting_version_id_
def api_settings(self): return self.api_settings_
def set_api_settings(self, x):
self.has_api_settings_ = 1
self.api_settings_ = x
def clear_api_settings(self):
if self.has_api_settings_:
self.has_api_settings_ = 0
self.api_settings_ = ""
def has_api_settings(self): return self.has_api_settings_
def MergeFrom(self, x):
assert x is not self
if (x.has_requesting_app_id()): self.set_requesting_app_id(x.requesting_app_id())
if (x.has_requesting_project_id()): self.set_requesting_project_id(x.requesting_project_id())
if (x.has_requesting_version_id()): self.set_requesting_version_id(x.requesting_version_id())
if (x.has_api_settings()): self.set_api_settings(x.api_settings())
def Equals(self, x):
if x is self: return 1
if self.has_requesting_app_id_ != x.has_requesting_app_id_: return 0
if self.has_requesting_app_id_ and self.requesting_app_id_ != x.requesting_app_id_: return 0
if self.has_requesting_project_id_ != x.has_requesting_project_id_: return 0
if self.has_requesting_project_id_ and self.requesting_project_id_ != x.requesting_project_id_: return 0
if self.has_requesting_version_id_ != x.has_requesting_version_id_: return 0
if self.has_requesting_version_id_ and self.requesting_version_id_ != x.requesting_version_id_: return 0
if self.has_api_settings_ != x.has_api_settings_: return 0
if self.has_api_settings_ and self.api_settings_ != x.api_settings_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_requesting_app_id_): n += 1 + self.lengthString(len(self.requesting_app_id_))
if (self.has_requesting_project_id_): n += 1 + self.lengthString(len(self.requesting_project_id_))
if (self.has_requesting_version_id_): n += 1 + self.lengthString(len(self.requesting_version_id_))
if (self.has_api_settings_): n += 1 + self.lengthString(len(self.api_settings_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_requesting_app_id_): n += 1 + self.lengthString(len(self.requesting_app_id_))
if (self.has_requesting_project_id_): n += 1 + self.lengthString(len(self.requesting_project_id_))
if (self.has_requesting_version_id_): n += 1 + self.lengthString(len(self.requesting_version_id_))
if (self.has_api_settings_): n += 1 + self.lengthString(len(self.api_settings_))
return n
def Clear(self):
self.clear_requesting_app_id()
self.clear_requesting_project_id()
self.clear_requesting_version_id()
self.clear_api_settings()
def OutputUnchecked(self, out):
if (self.has_requesting_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.requesting_app_id_)
if (self.has_api_settings_):
out.putVarInt32(26)
out.putPrefixedString(self.api_settings_)
if (self.has_requesting_project_id_):
out.putVarInt32(34)
out.putPrefixedString(self.requesting_project_id_)
if (self.has_requesting_version_id_):
out.putVarInt32(42)
out.putPrefixedString(self.requesting_version_id_)
def OutputPartial(self, out):
if (self.has_requesting_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.requesting_app_id_)
if (self.has_api_settings_):
out.putVarInt32(26)
out.putPrefixedString(self.api_settings_)
if (self.has_requesting_project_id_):
out.putVarInt32(34)
out.putPrefixedString(self.requesting_project_id_)
if (self.has_requesting_version_id_):
out.putVarInt32(42)
out.putPrefixedString(self.requesting_version_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
self.set_requesting_app_id(d.getPrefixedString())
continue
if tt == 26:
self.set_api_settings(d.getPrefixedString())
continue
if tt == 34:
self.set_requesting_project_id(d.getPrefixedString())
continue
if tt == 42:
self.set_requesting_version_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_requesting_app_id_: res+=prefix+("requesting_app_id: %s\n" % self.DebugFormatString(self.requesting_app_id_))
if self.has_requesting_project_id_: res+=prefix+("requesting_project_id: %s\n" % self.DebugFormatString(self.requesting_project_id_))
if self.has_requesting_version_id_: res+=prefix+("requesting_version_id: %s\n" % self.DebugFormatString(self.requesting_version_id_))
if self.has_api_settings_: res+=prefix+("api_settings: %s\n" % self.DebugFormatString(self.api_settings_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
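# Expand the sparse {tag: value} mapping into a dense tuple indexed by tag number, using the default for missing tags.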
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
krequesting_app_id = 2
krequesting_project_id = 4
krequesting_version_id = 5
kapi_settings = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "requesting_app_id",
3: "api_settings",
4: "requesting_project_id",
5: "requesting_version_id",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.InternalHeader'
class Transaction(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_handle_ = 0
handle_ = 0
has_app_ = 0
app_ = ""
has_mark_changes_ = 0
mark_changes_ = 0
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
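# lazy_init_lock_ guards on-demand construction of the optional header sub-message (see header()).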
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def handle(self): return self.handle_
def set_handle(self, x):
self.has_handle_ = 1
self.handle_ = x
def clear_handle(self):
if self.has_handle_:
self.has_handle_ = 0
self.handle_ = 0
def has_handle(self): return self.has_handle_
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def mark_changes(self): return self.mark_changes_
def set_mark_changes(self, x):
self.has_mark_changes_ = 1
self.mark_changes_ = x
def clear_mark_changes(self):
if self.has_mark_changes_:
self.has_mark_changes_ = 0
self.mark_changes_ = 0
def has_mark_changes(self): return self.has_mark_changes_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
if (x.has_handle()): self.set_handle(x.handle())
if (x.has_app()): self.set_app(x.app())
if (x.has_mark_changes()): self.set_mark_changes(x.mark_changes())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if self.has_handle_ != x.has_handle_: return 0
if self.has_handle_ and self.handle_ != x.handle_: return 0
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_mark_changes_ != x.has_mark_changes_: return 0
if self.has_mark_changes_ and self.mark_changes_ != x.mark_changes_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_handle_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: handle not set.')
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += self.lengthString(len(self.app_))
if (self.has_mark_changes_): n += 2
return n + 10
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
if (self.has_handle_):
n += 9
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_mark_changes_): n += 2
return n
def Clear(self):
self.clear_header()
self.clear_handle()
self.clear_app()
self.clear_mark_changes()
def OutputUnchecked(self, out):
out.putVarInt32(9)
out.put64(self.handle_)
out.putVarInt32(18)
out.putPrefixedString(self.app_)
if (self.has_mark_changes_):
out.putVarInt32(24)
out.putBoolean(self.mark_changes_)
if (self.has_header_):
out.putVarInt32(34)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_handle_):
out.putVarInt32(9)
out.put64(self.handle_)
if (self.has_app_):
out.putVarInt32(18)
out.putPrefixedString(self.app_)
if (self.has_mark_changes_):
out.putVarInt32(24)
out.putBoolean(self.mark_changes_)
if (self.has_header_):
out.putVarInt32(34)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 9:
self.set_handle(d.get64())
continue
if tt == 18:
self.set_app(d.getPrefixedString())
continue
if tt == 24:
self.set_mark_changes(d.getBoolean())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_handle_: res+=prefix+("handle: %s\n" % self.DebugFormatFixed64(self.handle_))
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_mark_changes_: res+=prefix+("mark_changes: %s\n" % self.DebugFormatBool(self.mark_changes_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 4
khandle = 1
kapp = 2
kmark_changes = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "handle",
2: "app",
3: "mark_changes",
4: "header",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.DOUBLE,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Transaction'
class Query_Filter(ProtocolBuffer.ProtocolMessage):
LESS_THAN = 1
LESS_THAN_OR_EQUAL = 2
GREATER_THAN = 3
GREATER_THAN_OR_EQUAL = 4
EQUAL = 5
IN = 6
EXISTS = 7
_Operator_NAMES = {
1: "LESS_THAN",
2: "LESS_THAN_OR_EQUAL",
3: "GREATER_THAN",
4: "GREATER_THAN_OR_EQUAL",
5: "EQUAL",
6: "IN",
7: "EXISTS",
}
def Operator_Name(cls, x): return cls._Operator_NAMES.get(x, "")
Operator_Name = classmethod(Operator_Name)
has_op_ = 0
op_ = 0
def __init__(self, contents=None):
self.property_ = []
if contents is not None: self.MergeFromString(contents)
def op(self): return self.op_
def set_op(self, x):
self.has_op_ = 1
self.op_ = x
def clear_op(self):
if self.has_op_:
self.has_op_ = 0
self.op_ = 0
def has_op(self): return self.has_op_
def property_size(self): return len(self.property_)
def property_list(self): return self.property_
def property(self, i):
return self.property_[i]
def mutable_property(self, i):
return self.property_[i]
def add_property(self):
x = Property()
self.property_.append(x)
return x
def clear_property(self):
self.property_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_op()): self.set_op(x.op())
for i in xrange(x.property_size()): self.add_property().CopyFrom(x.property(i))
def Equals(self, x):
if x is self: return 1
if self.has_op_ != x.has_op_: return 0
if self.has_op_ and self.op_ != x.op_: return 0
if len(self.property_) != len(x.property_): return 0
for e1, e2 in zip(self.property_, x.property_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_op_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: op not set.')
for p in self.property_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.op_)
n += 1 * len(self.property_)
for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_op_):
n += 1
n += self.lengthVarInt64(self.op_)
n += 1 * len(self.property_)
for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_op()
self.clear_property()
def OutputUnchecked(self, out):
out.putVarInt32(48)
out.putVarInt32(self.op_)
for i in xrange(len(self.property_)):
out.putVarInt32(114)
out.putVarInt32(self.property_[i].ByteSize())
self.property_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_op_):
out.putVarInt32(48)
out.putVarInt32(self.op_)
for i in xrange(len(self.property_)):
out.putVarInt32(114)
out.putVarInt32(self.property_[i].ByteSizePartial())
self.property_[i].OutputPartial(out)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
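# Tag 36 (field number 4, wire type 4) is the END_GROUP marker that closes this Query_Filter group.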
if tt == 36: break
if tt == 48:
self.set_op(d.getVarInt32())
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_property().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_op_: res+=prefix+("op: %s\n" % self.DebugFormatInt32(self.op_))
cnt=0
for e in self.property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("property%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
class Query_Order(ProtocolBuffer.ProtocolMessage):
ASCENDING = 1
DESCENDING = 2
_Direction_NAMES = {
1: "ASCENDING",
2: "DESCENDING",
}
def Direction_Name(cls, x): return cls._Direction_NAMES.get(x, "")
Direction_Name = classmethod(Direction_Name)
has_property_ = 0
property_ = ""
has_direction_ = 0
direction_ = 1
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def property(self): return self.property_
def set_property(self, x):
self.has_property_ = 1
self.property_ = x
def clear_property(self):
if self.has_property_:
self.has_property_ = 0
self.property_ = ""
def has_property(self): return self.has_property_
def direction(self): return self.direction_
def set_direction(self, x):
self.has_direction_ = 1
self.direction_ = x
def clear_direction(self):
if self.has_direction_:
self.has_direction_ = 0
self.direction_ = 1
def has_direction(self): return self.has_direction_
def MergeFrom(self, x):
assert x is not self
if (x.has_property()): self.set_property(x.property())
if (x.has_direction()): self.set_direction(x.direction())
def Equals(self, x):
if x is self: return 1
if self.has_property_ != x.has_property_: return 0
if self.has_property_ and self.property_ != x.property_: return 0
if self.has_direction_ != x.has_direction_: return 0
if self.has_direction_ and self.direction_ != x.direction_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_property_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: property not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.property_))
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_property_):
n += 1
n += self.lengthString(len(self.property_))
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
return n
def Clear(self):
self.clear_property()
self.clear_direction()
def OutputUnchecked(self, out):
out.putVarInt32(82)
out.putPrefixedString(self.property_)
if (self.has_direction_):
out.putVarInt32(88)
out.putVarInt32(self.direction_)
def OutputPartial(self, out):
if (self.has_property_):
out.putVarInt32(82)
out.putPrefixedString(self.property_)
if (self.has_direction_):
out.putVarInt32(88)
out.putVarInt32(self.direction_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
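# Tag 76 (field number 9, wire type 4) is the END_GROUP marker that closes this Query_Order group.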
if tt == 76: break
if tt == 82:
self.set_property(d.getPrefixedString())
continue
if tt == 88:
self.set_direction(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_: res+=prefix+("property: %s\n" % self.DebugFormatString(self.property_))
if self.has_direction_: res+=prefix+("direction: %s\n" % self.DebugFormatInt32(self.direction_))
return res
class Query(ProtocolBuffer.ProtocolMessage):
ORDER_FIRST = 1
ANCESTOR_FIRST = 2
FILTER_FIRST = 3
_Hint_NAMES = {
1: "ORDER_FIRST",
2: "ANCESTOR_FIRST",
3: "FILTER_FIRST",
}
def Hint_Name(cls, x): return cls._Hint_NAMES.get(x, "")
Hint_Name = classmethod(Hint_Name)
has_header_ = 0
header_ = None
has_app_ = 0
app_ = ""
has_name_space_ = 0
name_space_ = ""
has_kind_ = 0
kind_ = ""
has_ancestor_ = 0
ancestor_ = None
has_search_query_ = 0
search_query_ = ""
has_hint_ = 0
hint_ = 0
has_count_ = 0
count_ = 0
has_offset_ = 0
offset_ = 0
has_limit_ = 0
limit_ = 0
has_compiled_cursor_ = 0
compiled_cursor_ = None
has_end_compiled_cursor_ = 0
end_compiled_cursor_ = None
has_require_perfect_plan_ = 0
require_perfect_plan_ = 0
has_keys_only_ = 0
keys_only_ = 0
has_transaction_ = 0
transaction_ = None
has_compile_ = 0
compile_ = 0
has_failover_ms_ = 0
failover_ms_ = 0
has_strong_ = 0
strong_ = 0
has_distinct_ = 0
distinct_ = 0
has_min_safe_time_seconds_ = 0
min_safe_time_seconds_ = 0
has_persist_offset_ = 0
persist_offset_ = 1
def __init__(self, contents=None):
self.filter_ = []
self.order_ = []
self.composite_index_ = []
self.property_name_ = []
self.group_by_property_name_ = []
self.safe_replica_name_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def name_space(self): return self.name_space_
def set_name_space(self, x):
self.has_name_space_ = 1
self.name_space_ = x
def clear_name_space(self):
if self.has_name_space_:
self.has_name_space_ = 0
self.name_space_ = ""
def has_name_space(self): return self.has_name_space_
def kind(self): return self.kind_
def set_kind(self, x):
self.has_kind_ = 1
self.kind_ = x
def clear_kind(self):
if self.has_kind_:
self.has_kind_ = 0
self.kind_ = ""
def has_kind(self): return self.has_kind_
def ancestor(self):
if self.ancestor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.ancestor_ is None: self.ancestor_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.ancestor_
def mutable_ancestor(self): self.has_ancestor_ = 1; return self.ancestor()
def clear_ancestor(self):
if self.has_ancestor_:
self.has_ancestor_ = 0;
if self.ancestor_ is not None: self.ancestor_.Clear()
def has_ancestor(self): return self.has_ancestor_
def filter_size(self): return len(self.filter_)
def filter_list(self): return self.filter_
def filter(self, i):
return self.filter_[i]
def mutable_filter(self, i):
return self.filter_[i]
def add_filter(self):
x = Query_Filter()
self.filter_.append(x)
return x
def clear_filter(self):
self.filter_ = []
def search_query(self): return self.search_query_
def set_search_query(self, x):
self.has_search_query_ = 1
self.search_query_ = x
def clear_search_query(self):
if self.has_search_query_:
self.has_search_query_ = 0
self.search_query_ = ""
def has_search_query(self): return self.has_search_query_
def order_size(self): return len(self.order_)
def order_list(self): return self.order_
def order(self, i):
return self.order_[i]
def mutable_order(self, i):
return self.order_[i]
def add_order(self):
x = Query_Order()
self.order_.append(x)
return x
def clear_order(self):
self.order_ = []
def hint(self): return self.hint_
def set_hint(self, x):
self.has_hint_ = 1
self.hint_ = x
def clear_hint(self):
if self.has_hint_:
self.has_hint_ = 0
self.hint_ = 0
def has_hint(self): return self.has_hint_
def count(self): return self.count_
def set_count(self, x):
self.has_count_ = 1
self.count_ = x
def clear_count(self):
if self.has_count_:
self.has_count_ = 0
self.count_ = 0
def has_count(self): return self.has_count_
def offset(self): return self.offset_
def set_offset(self, x):
self.has_offset_ = 1
self.offset_ = x
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0
self.offset_ = 0
def has_offset(self): return self.has_offset_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 0
def has_limit(self): return self.has_limit_
def compiled_cursor(self):
if self.compiled_cursor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.compiled_cursor_ is None: self.compiled_cursor_ = CompiledCursor()
finally:
self.lazy_init_lock_.release()
return self.compiled_cursor_
def mutable_compiled_cursor(self): self.has_compiled_cursor_ = 1; return self.compiled_cursor()
def clear_compiled_cursor(self):
if self.has_compiled_cursor_:
self.has_compiled_cursor_ = 0;
if self.compiled_cursor_ is not None: self.compiled_cursor_.Clear()
def has_compiled_cursor(self): return self.has_compiled_cursor_
def end_compiled_cursor(self):
if self.end_compiled_cursor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.end_compiled_cursor_ is None: self.end_compiled_cursor_ = CompiledCursor()
finally:
self.lazy_init_lock_.release()
return self.end_compiled_cursor_
def mutable_end_compiled_cursor(self): self.has_end_compiled_cursor_ = 1; return self.end_compiled_cursor()
def clear_end_compiled_cursor(self):
if self.has_end_compiled_cursor_:
self.has_end_compiled_cursor_ = 0;
if self.end_compiled_cursor_ is not None: self.end_compiled_cursor_.Clear()
def has_end_compiled_cursor(self): return self.has_end_compiled_cursor_
def composite_index_size(self): return len(self.composite_index_)
def composite_index_list(self): return self.composite_index_
def composite_index(self, i):
return self.composite_index_[i]
def mutable_composite_index(self, i):
return self.composite_index_[i]
def add_composite_index(self):
x = CompositeIndex()
self.composite_index_.append(x)
return x
def clear_composite_index(self):
self.composite_index_ = []
def require_perfect_plan(self): return self.require_perfect_plan_
def set_require_perfect_plan(self, x):
self.has_require_perfect_plan_ = 1
self.require_perfect_plan_ = x
def clear_require_perfect_plan(self):
if self.has_require_perfect_plan_:
self.has_require_perfect_plan_ = 0
self.require_perfect_plan_ = 0
def has_require_perfect_plan(self): return self.has_require_perfect_plan_
def keys_only(self): return self.keys_only_
def set_keys_only(self, x):
self.has_keys_only_ = 1
self.keys_only_ = x
def clear_keys_only(self):
if self.has_keys_only_:
self.has_keys_only_ = 0
self.keys_only_ = 0
def has_keys_only(self): return self.has_keys_only_
def transaction(self):
if self.transaction_ is None:
self.lazy_init_lock_.acquire()
try:
if self.transaction_ is None: self.transaction_ = Transaction()
finally:
self.lazy_init_lock_.release()
return self.transaction_
def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
def clear_transaction(self):
if self.has_transaction_:
self.has_transaction_ = 0;
if self.transaction_ is not None: self.transaction_.Clear()
def has_transaction(self): return self.has_transaction_
def compile(self): return self.compile_
def set_compile(self, x):
self.has_compile_ = 1
self.compile_ = x
def clear_compile(self):
if self.has_compile_:
self.has_compile_ = 0
self.compile_ = 0
def has_compile(self): return self.has_compile_
def failover_ms(self): return self.failover_ms_
def set_failover_ms(self, x):
self.has_failover_ms_ = 1
self.failover_ms_ = x
def clear_failover_ms(self):
if self.has_failover_ms_:
self.has_failover_ms_ = 0
self.failover_ms_ = 0
def has_failover_ms(self): return self.has_failover_ms_
def strong(self): return self.strong_
def set_strong(self, x):
self.has_strong_ = 1
self.strong_ = x
def clear_strong(self):
if self.has_strong_:
self.has_strong_ = 0
self.strong_ = 0
def has_strong(self): return self.has_strong_
def property_name_size(self): return len(self.property_name_)
def property_name_list(self): return self.property_name_
def property_name(self, i):
return self.property_name_[i]
def set_property_name(self, i, x):
self.property_name_[i] = x
def add_property_name(self, x):
self.property_name_.append(x)
def clear_property_name(self):
self.property_name_ = []
def group_by_property_name_size(self): return len(self.group_by_property_name_)
def group_by_property_name_list(self): return self.group_by_property_name_
def group_by_property_name(self, i):
return self.group_by_property_name_[i]
def set_group_by_property_name(self, i, x):
self.group_by_property_name_[i] = x
def add_group_by_property_name(self, x):
self.group_by_property_name_.append(x)
def clear_group_by_property_name(self):
self.group_by_property_name_ = []
def distinct(self): return self.distinct_
def set_distinct(self, x):
self.has_distinct_ = 1
self.distinct_ = x
def clear_distinct(self):
if self.has_distinct_:
self.has_distinct_ = 0
self.distinct_ = 0
def has_distinct(self): return self.has_distinct_
def min_safe_time_seconds(self): return self.min_safe_time_seconds_
def set_min_safe_time_seconds(self, x):
self.has_min_safe_time_seconds_ = 1
self.min_safe_time_seconds_ = x
def clear_min_safe_time_seconds(self):
if self.has_min_safe_time_seconds_:
self.has_min_safe_time_seconds_ = 0
self.min_safe_time_seconds_ = 0
def has_min_safe_time_seconds(self): return self.has_min_safe_time_seconds_
def safe_replica_name_size(self): return len(self.safe_replica_name_)
def safe_replica_name_list(self): return self.safe_replica_name_
def safe_replica_name(self, i):
return self.safe_replica_name_[i]
def set_safe_replica_name(self, i, x):
self.safe_replica_name_[i] = x
def add_safe_replica_name(self, x):
self.safe_replica_name_.append(x)
def clear_safe_replica_name(self):
self.safe_replica_name_ = []
def persist_offset(self): return self.persist_offset_
def set_persist_offset(self, x):
self.has_persist_offset_ = 1
self.persist_offset_ = x
def clear_persist_offset(self):
if self.has_persist_offset_:
self.has_persist_offset_ = 0
self.persist_offset_ = 1
def has_persist_offset(self): return self.has_persist_offset_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
if (x.has_app()): self.set_app(x.app())
if (x.has_name_space()): self.set_name_space(x.name_space())
if (x.has_kind()): self.set_kind(x.kind())
if (x.has_ancestor()): self.mutable_ancestor().MergeFrom(x.ancestor())
for i in xrange(x.filter_size()): self.add_filter().CopyFrom(x.filter(i))
if (x.has_search_query()): self.set_search_query(x.search_query())
for i in xrange(x.order_size()): self.add_order().CopyFrom(x.order(i))
if (x.has_hint()): self.set_hint(x.hint())
if (x.has_count()): self.set_count(x.count())
if (x.has_offset()): self.set_offset(x.offset())
if (x.has_limit()): self.set_limit(x.limit())
if (x.has_compiled_cursor()): self.mutable_compiled_cursor().MergeFrom(x.compiled_cursor())
if (x.has_end_compiled_cursor()): self.mutable_end_compiled_cursor().MergeFrom(x.end_compiled_cursor())
for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
if (x.has_require_perfect_plan()): self.set_require_perfect_plan(x.require_perfect_plan())
if (x.has_keys_only()): self.set_keys_only(x.keys_only())
if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
if (x.has_compile()): self.set_compile(x.compile())
if (x.has_failover_ms()): self.set_failover_ms(x.failover_ms())
if (x.has_strong()): self.set_strong(x.strong())
for i in xrange(x.property_name_size()): self.add_property_name(x.property_name(i))
for i in xrange(x.group_by_property_name_size()): self.add_group_by_property_name(x.group_by_property_name(i))
if (x.has_distinct()): self.set_distinct(x.distinct())
if (x.has_min_safe_time_seconds()): self.set_min_safe_time_seconds(x.min_safe_time_seconds())
for i in xrange(x.safe_replica_name_size()): self.add_safe_replica_name(x.safe_replica_name(i))
if (x.has_persist_offset()): self.set_persist_offset(x.persist_offset())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_name_space_ != x.has_name_space_: return 0
if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
if self.has_kind_ != x.has_kind_: return 0
if self.has_kind_ and self.kind_ != x.kind_: return 0
if self.has_ancestor_ != x.has_ancestor_: return 0
if self.has_ancestor_ and self.ancestor_ != x.ancestor_: return 0
if len(self.filter_) != len(x.filter_): return 0
for e1, e2 in zip(self.filter_, x.filter_):
if e1 != e2: return 0
if self.has_search_query_ != x.has_search_query_: return 0
if self.has_search_query_ and self.search_query_ != x.search_query_: return 0
if len(self.order_) != len(x.order_): return 0
for e1, e2 in zip(self.order_, x.order_):
if e1 != e2: return 0
if self.has_hint_ != x.has_hint_: return 0
if self.has_hint_ and self.hint_ != x.hint_: return 0
if self.has_count_ != x.has_count_: return 0
if self.has_count_ and self.count_ != x.count_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
if self.has_compiled_cursor_ != x.has_compiled_cursor_: return 0
if self.has_compiled_cursor_ and self.compiled_cursor_ != x.compiled_cursor_: return 0
if self.has_end_compiled_cursor_ != x.has_end_compiled_cursor_: return 0
if self.has_end_compiled_cursor_ and self.end_compiled_cursor_ != x.end_compiled_cursor_: return 0
if len(self.composite_index_) != len(x.composite_index_): return 0
for e1, e2 in zip(self.composite_index_, x.composite_index_):
if e1 != e2: return 0
if self.has_require_perfect_plan_ != x.has_require_perfect_plan_: return 0
if self.has_require_perfect_plan_ and self.require_perfect_plan_ != x.require_perfect_plan_: return 0
if self.has_keys_only_ != x.has_keys_only_: return 0
if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if self.has_compile_ != x.has_compile_: return 0
if self.has_compile_ and self.compile_ != x.compile_: return 0
if self.has_failover_ms_ != x.has_failover_ms_: return 0
if self.has_failover_ms_ and self.failover_ms_ != x.failover_ms_: return 0
if self.has_strong_ != x.has_strong_: return 0
if self.has_strong_ and self.strong_ != x.strong_: return 0
if len(self.property_name_) != len(x.property_name_): return 0
for e1, e2 in zip(self.property_name_, x.property_name_):
if e1 != e2: return 0
if len(self.group_by_property_name_) != len(x.group_by_property_name_): return 0
for e1, e2 in zip(self.group_by_property_name_, x.group_by_property_name_):
if e1 != e2: return 0
if self.has_distinct_ != x.has_distinct_: return 0
if self.has_distinct_ and self.distinct_ != x.distinct_: return 0
if self.has_min_safe_time_seconds_ != x.has_min_safe_time_seconds_: return 0
if self.has_min_safe_time_seconds_ and self.min_safe_time_seconds_ != x.min_safe_time_seconds_: return 0
if len(self.safe_replica_name_) != len(x.safe_replica_name_): return 0
for e1, e2 in zip(self.safe_replica_name_, x.safe_replica_name_):
if e1 != e2: return 0
if self.has_persist_offset_ != x.has_persist_offset_: return 0
if self.has_persist_offset_ and self.persist_offset_ != x.persist_offset_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
if (self.has_ancestor_ and not self.ancestor_.IsInitialized(debug_strs)): initialized = 0
for p in self.filter_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.order_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_compiled_cursor_ and not self.compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
if (self.has_end_compiled_cursor_ and not self.end_compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
for p in self.composite_index_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 2 + self.lengthString(self.header_.ByteSize())
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
if (self.has_kind_): n += 1 + self.lengthString(len(self.kind_))
if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSize())
n += 2 * len(self.filter_)
for i in xrange(len(self.filter_)): n += self.filter_[i].ByteSize()
if (self.has_search_query_): n += 1 + self.lengthString(len(self.search_query_))
n += 2 * len(self.order_)
for i in xrange(len(self.order_)): n += self.order_[i].ByteSize()
if (self.has_hint_): n += 2 + self.lengthVarInt64(self.hint_)
if (self.has_count_): n += 2 + self.lengthVarInt64(self.count_)
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_limit_): n += 2 + self.lengthVarInt64(self.limit_)
if (self.has_compiled_cursor_): n += 2 + self.lengthString(self.compiled_cursor_.ByteSize())
if (self.has_end_compiled_cursor_): n += 2 + self.lengthString(self.end_compiled_cursor_.ByteSize())
n += 2 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
if (self.has_require_perfect_plan_): n += 3
if (self.has_keys_only_): n += 3
if (self.has_transaction_): n += 2 + self.lengthString(self.transaction_.ByteSize())
if (self.has_compile_): n += 3
if (self.has_failover_ms_): n += 2 + self.lengthVarInt64(self.failover_ms_)
if (self.has_strong_): n += 3
n += 2 * len(self.property_name_)
for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
n += 2 * len(self.group_by_property_name_)
for i in xrange(len(self.group_by_property_name_)): n += self.lengthString(len(self.group_by_property_name_[i]))
if (self.has_distinct_): n += 3
if (self.has_min_safe_time_seconds_): n += 2 + self.lengthVarInt64(self.min_safe_time_seconds_)
n += 2 * len(self.safe_replica_name_)
for i in xrange(len(self.safe_replica_name_)): n += self.lengthString(len(self.safe_replica_name_[i]))
if (self.has_persist_offset_): n += 3
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 2 + self.lengthString(self.header_.ByteSizePartial())
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
if (self.has_kind_): n += 1 + self.lengthString(len(self.kind_))
if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSizePartial())
n += 2 * len(self.filter_)
for i in xrange(len(self.filter_)): n += self.filter_[i].ByteSizePartial()
if (self.has_search_query_): n += 1 + self.lengthString(len(self.search_query_))
n += 2 * len(self.order_)
for i in xrange(len(self.order_)): n += self.order_[i].ByteSizePartial()
if (self.has_hint_): n += 2 + self.lengthVarInt64(self.hint_)
if (self.has_count_): n += 2 + self.lengthVarInt64(self.count_)
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_limit_): n += 2 + self.lengthVarInt64(self.limit_)
if (self.has_compiled_cursor_): n += 2 + self.lengthString(self.compiled_cursor_.ByteSizePartial())
if (self.has_end_compiled_cursor_): n += 2 + self.lengthString(self.end_compiled_cursor_.ByteSizePartial())
n += 2 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
if (self.has_require_perfect_plan_): n += 3
if (self.has_keys_only_): n += 3
if (self.has_transaction_): n += 2 + self.lengthString(self.transaction_.ByteSizePartial())
if (self.has_compile_): n += 3
if (self.has_failover_ms_): n += 2 + self.lengthVarInt64(self.failover_ms_)
if (self.has_strong_): n += 3
n += 2 * len(self.property_name_)
for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
n += 2 * len(self.group_by_property_name_)
for i in xrange(len(self.group_by_property_name_)): n += self.lengthString(len(self.group_by_property_name_[i]))
if (self.has_distinct_): n += 3
if (self.has_min_safe_time_seconds_): n += 2 + self.lengthVarInt64(self.min_safe_time_seconds_)
n += 2 * len(self.safe_replica_name_)
for i in xrange(len(self.safe_replica_name_)): n += self.lengthString(len(self.safe_replica_name_[i]))
if (self.has_persist_offset_): n += 3
return n
def Clear(self):
self.clear_header()
self.clear_app()
self.clear_name_space()
self.clear_kind()
self.clear_ancestor()
self.clear_filter()
self.clear_search_query()
self.clear_order()
self.clear_hint()
self.clear_count()
self.clear_offset()
self.clear_limit()
self.clear_compiled_cursor()
self.clear_end_compiled_cursor()
self.clear_composite_index()
self.clear_require_perfect_plan()
self.clear_keys_only()
self.clear_transaction()
self.clear_compile()
self.clear_failover_ms()
self.clear_strong()
self.clear_property_name()
self.clear_group_by_property_name()
self.clear_distinct()
self.clear_min_safe_time_seconds()
self.clear_safe_replica_name()
self.clear_persist_offset()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_)
if (self.has_kind_):
out.putVarInt32(26)
out.putPrefixedString(self.kind_)
for i in xrange(len(self.filter_)):
out.putVarInt32(35)
self.filter_[i].OutputUnchecked(out)
out.putVarInt32(36)
if (self.has_search_query_):
out.putVarInt32(66)
out.putPrefixedString(self.search_query_)
for i in xrange(len(self.order_)):
out.putVarInt32(75)
self.order_[i].OutputUnchecked(out)
out.putVarInt32(76)
if (self.has_offset_):
out.putVarInt32(96)
out.putVarInt32(self.offset_)
if (self.has_limit_):
out.putVarInt32(128)
out.putVarInt32(self.limit_)
if (self.has_ancestor_):
out.putVarInt32(138)
out.putVarInt32(self.ancestor_.ByteSize())
self.ancestor_.OutputUnchecked(out)
if (self.has_hint_):
out.putVarInt32(144)
out.putVarInt32(self.hint_)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(154)
out.putVarInt32(self.composite_index_[i].ByteSize())
self.composite_index_[i].OutputUnchecked(out)
if (self.has_require_perfect_plan_):
out.putVarInt32(160)
out.putBoolean(self.require_perfect_plan_)
if (self.has_keys_only_):
out.putVarInt32(168)
out.putBoolean(self.keys_only_)
if (self.has_transaction_):
out.putVarInt32(178)
out.putVarInt32(self.transaction_.ByteSize())
self.transaction_.OutputUnchecked(out)
if (self.has_count_):
out.putVarInt32(184)
out.putVarInt32(self.count_)
if (self.has_distinct_):
out.putVarInt32(192)
out.putBoolean(self.distinct_)
if (self.has_compile_):
out.putVarInt32(200)
out.putBoolean(self.compile_)
if (self.has_failover_ms_):
out.putVarInt32(208)
out.putVarInt64(self.failover_ms_)
if (self.has_name_space_):
out.putVarInt32(234)
out.putPrefixedString(self.name_space_)
if (self.has_compiled_cursor_):
out.putVarInt32(242)
out.putVarInt32(self.compiled_cursor_.ByteSize())
self.compiled_cursor_.OutputUnchecked(out)
if (self.has_end_compiled_cursor_):
out.putVarInt32(250)
out.putVarInt32(self.end_compiled_cursor_.ByteSize())
self.end_compiled_cursor_.OutputUnchecked(out)
if (self.has_strong_):
out.putVarInt32(256)
out.putBoolean(self.strong_)
for i in xrange(len(self.property_name_)):
out.putVarInt32(266)
out.putPrefixedString(self.property_name_[i])
for i in xrange(len(self.group_by_property_name_)):
out.putVarInt32(274)
out.putPrefixedString(self.group_by_property_name_[i])
if (self.has_min_safe_time_seconds_):
out.putVarInt32(280)
out.putVarInt64(self.min_safe_time_seconds_)
for i in xrange(len(self.safe_replica_name_)):
out.putVarInt32(290)
out.putPrefixedString(self.safe_replica_name_[i])
if (self.has_persist_offset_):
out.putVarInt32(296)
out.putBoolean(self.persist_offset_)
if (self.has_header_):
out.putVarInt32(314)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_app_):
out.putVarInt32(10)
out.putPrefixedString(self.app_)
if (self.has_kind_):
out.putVarInt32(26)
out.putPrefixedString(self.kind_)
for i in xrange(len(self.filter_)):
out.putVarInt32(35)
self.filter_[i].OutputPartial(out)
out.putVarInt32(36)
if (self.has_search_query_):
out.putVarInt32(66)
out.putPrefixedString(self.search_query_)
for i in xrange(len(self.order_)):
out.putVarInt32(75)
self.order_[i].OutputPartial(out)
out.putVarInt32(76)
if (self.has_offset_):
out.putVarInt32(96)
out.putVarInt32(self.offset_)
if (self.has_limit_):
out.putVarInt32(128)
out.putVarInt32(self.limit_)
if (self.has_ancestor_):
out.putVarInt32(138)
out.putVarInt32(self.ancestor_.ByteSizePartial())
self.ancestor_.OutputPartial(out)
if (self.has_hint_):
out.putVarInt32(144)
out.putVarInt32(self.hint_)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(154)
out.putVarInt32(self.composite_index_[i].ByteSizePartial())
self.composite_index_[i].OutputPartial(out)
if (self.has_require_perfect_plan_):
out.putVarInt32(160)
out.putBoolean(self.require_perfect_plan_)
if (self.has_keys_only_):
out.putVarInt32(168)
out.putBoolean(self.keys_only_)
if (self.has_transaction_):
out.putVarInt32(178)
out.putVarInt32(self.transaction_.ByteSizePartial())
self.transaction_.OutputPartial(out)
if (self.has_count_):
out.putVarInt32(184)
out.putVarInt32(self.count_)
if (self.has_distinct_):
out.putVarInt32(192)
out.putBoolean(self.distinct_)
if (self.has_compile_):
out.putVarInt32(200)
out.putBoolean(self.compile_)
if (self.has_failover_ms_):
out.putVarInt32(208)
out.putVarInt64(self.failover_ms_)
if (self.has_name_space_):
out.putVarInt32(234)
out.putPrefixedString(self.name_space_)
if (self.has_compiled_cursor_):
out.putVarInt32(242)
out.putVarInt32(self.compiled_cursor_.ByteSizePartial())
self.compiled_cursor_.OutputPartial(out)
if (self.has_end_compiled_cursor_):
out.putVarInt32(250)
out.putVarInt32(self.end_compiled_cursor_.ByteSizePartial())
self.end_compiled_cursor_.OutputPartial(out)
if (self.has_strong_):
out.putVarInt32(256)
out.putBoolean(self.strong_)
for i in xrange(len(self.property_name_)):
out.putVarInt32(266)
out.putPrefixedString(self.property_name_[i])
for i in xrange(len(self.group_by_property_name_)):
out.putVarInt32(274)
out.putPrefixedString(self.group_by_property_name_[i])
if (self.has_min_safe_time_seconds_):
out.putVarInt32(280)
out.putVarInt64(self.min_safe_time_seconds_)
for i in xrange(len(self.safe_replica_name_)):
out.putVarInt32(290)
out.putPrefixedString(self.safe_replica_name_[i])
if (self.has_persist_offset_):
out.putVarInt32(296)
out.putBoolean(self.persist_offset_)
if (self.has_header_):
out.putVarInt32(314)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app(d.getPrefixedString())
continue
if tt == 26:
self.set_kind(d.getPrefixedString())
continue
if tt == 35:
self.add_filter().TryMerge(d)
continue
if tt == 66:
self.set_search_query(d.getPrefixedString())
continue
if tt == 75:
self.add_order().TryMerge(d)
continue
if tt == 96:
self.set_offset(d.getVarInt32())
continue
if tt == 128:
self.set_limit(d.getVarInt32())
continue
if tt == 138:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_ancestor().TryMerge(tmp)
continue
if tt == 144:
self.set_hint(d.getVarInt32())
continue
if tt == 154:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_composite_index().TryMerge(tmp)
continue
if tt == 160:
self.set_require_perfect_plan(d.getBoolean())
continue
if tt == 168:
self.set_keys_only(d.getBoolean())
continue
if tt == 178:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_transaction().TryMerge(tmp)
continue
if tt == 184:
self.set_count(d.getVarInt32())
continue
if tt == 192:
self.set_distinct(d.getBoolean())
continue
if tt == 200:
self.set_compile(d.getBoolean())
continue
if tt == 208:
self.set_failover_ms(d.getVarInt64())
continue
if tt == 234:
self.set_name_space(d.getPrefixedString())
continue
if tt == 242:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_compiled_cursor().TryMerge(tmp)
continue
if tt == 250:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_end_compiled_cursor().TryMerge(tmp)
continue
if tt == 256:
self.set_strong(d.getBoolean())
continue
if tt == 266:
self.add_property_name(d.getPrefixedString())
continue
if tt == 274:
self.add_group_by_property_name(d.getPrefixedString())
continue
if tt == 280:
self.set_min_safe_time_seconds(d.getVarInt64())
continue
if tt == 290:
self.add_safe_replica_name(d.getPrefixedString())
continue
if tt == 296:
self.set_persist_offset(d.getBoolean())
continue
if tt == 314:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
if self.has_kind_: res+=prefix+("kind: %s\n" % self.DebugFormatString(self.kind_))
if self.has_ancestor_:
res+=prefix+"ancestor <\n"
res+=self.ancestor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.filter_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Filter%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_search_query_: res+=prefix+("search_query: %s\n" % self.DebugFormatString(self.search_query_))
cnt=0
for e in self.order_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Order%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_hint_: res+=prefix+("hint: %s\n" % self.DebugFormatInt32(self.hint_))
if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt32(self.count_))
if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
if self.has_compiled_cursor_:
res+=prefix+"compiled_cursor <\n"
res+=self.compiled_cursor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_end_compiled_cursor_:
res+=prefix+"end_compiled_cursor <\n"
res+=self.end_compiled_cursor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.composite_index_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("composite_index%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_require_perfect_plan_: res+=prefix+("require_perfect_plan: %s\n" % self.DebugFormatBool(self.require_perfect_plan_))
if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
if self.has_transaction_:
res+=prefix+"transaction <\n"
res+=self.transaction_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_compile_: res+=prefix+("compile: %s\n" % self.DebugFormatBool(self.compile_))
if self.has_failover_ms_: res+=prefix+("failover_ms: %s\n" % self.DebugFormatInt64(self.failover_ms_))
if self.has_strong_: res+=prefix+("strong: %s\n" % self.DebugFormatBool(self.strong_))
cnt=0
for e in self.property_name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("property_name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
cnt=0
for e in self.group_by_property_name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("group_by_property_name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_distinct_: res+=prefix+("distinct: %s\n" % self.DebugFormatBool(self.distinct_))
if self.has_min_safe_time_seconds_: res+=prefix+("min_safe_time_seconds: %s\n" % self.DebugFormatInt64(self.min_safe_time_seconds_))
cnt=0
for e in self.safe_replica_name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("safe_replica_name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_persist_offset_: res+=prefix+("persist_offset: %s\n" % self.DebugFormatBool(self.persist_offset_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
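  # The k* constants below are the proto field numbers of this message; _TEXT
  # and _TYPES map each field number to its name and wire type for the generic
  # text-format printer and decoder.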
kheader = 39
kapp = 1
kname_space = 29
kkind = 3
kancestor = 17
kFilterGroup = 4
kFilterop = 6
kFilterproperty = 14
ksearch_query = 8
kOrderGroup = 9
kOrderproperty = 10
kOrderdirection = 11
khint = 18
kcount = 23
koffset = 12
klimit = 16
kcompiled_cursor = 30
kend_compiled_cursor = 31
kcomposite_index = 19
krequire_perfect_plan = 20
kkeys_only = 21
ktransaction = 22
kcompile = 25
kfailover_ms = 26
kstrong = 32
kproperty_name = 33
kgroup_by_property_name = 34
kdistinct = 24
kmin_safe_time_seconds = 35
ksafe_replica_name = 36
kpersist_offset = 37
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app",
3: "kind",
4: "Filter",
6: "op",
8: "search_query",
9: "Order",
10: "property",
11: "direction",
12: "offset",
14: "property",
16: "limit",
17: "ancestor",
18: "hint",
19: "composite_index",
20: "require_perfect_plan",
21: "keys_only",
22: "transaction",
23: "count",
24: "distinct",
25: "compile",
26: "failover_ms",
29: "name_space",
30: "compiled_cursor",
31: "end_compiled_cursor",
32: "strong",
33: "property_name",
34: "group_by_property_name",
35: "min_safe_time_seconds",
36: "safe_replica_name",
37: "persist_offset",
39: "header",
}, 39)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STARTGROUP,
6: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.STARTGROUP,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.NUMERIC,
14: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.NUMERIC,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.NUMERIC,
19: ProtocolBuffer.Encoder.STRING,
20: ProtocolBuffer.Encoder.NUMERIC,
21: ProtocolBuffer.Encoder.NUMERIC,
22: ProtocolBuffer.Encoder.STRING,
23: ProtocolBuffer.Encoder.NUMERIC,
24: ProtocolBuffer.Encoder.NUMERIC,
25: ProtocolBuffer.Encoder.NUMERIC,
26: ProtocolBuffer.Encoder.NUMERIC,
29: ProtocolBuffer.Encoder.STRING,
30: ProtocolBuffer.Encoder.STRING,
31: ProtocolBuffer.Encoder.STRING,
32: ProtocolBuffer.Encoder.NUMERIC,
33: ProtocolBuffer.Encoder.STRING,
34: ProtocolBuffer.Encoder.STRING,
35: ProtocolBuffer.Encoder.NUMERIC,
36: ProtocolBuffer.Encoder.STRING,
37: ProtocolBuffer.Encoder.NUMERIC,
39: ProtocolBuffer.Encoder.STRING,
}, 39, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Query'
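# CompiledQuery_PrimaryScan describes the main index range scan of a compiled
# query plan: the index to scan, its start_key/end_key bounds with per-end
# inclusivity flags, optional start/end postfix values that further bound the
# scan, and a microsecond cutoff for unapplied log entries.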
class CompiledQuery_PrimaryScan(ProtocolBuffer.ProtocolMessage):
has_index_name_ = 0
index_name_ = ""
has_start_key_ = 0
start_key_ = ""
has_start_inclusive_ = 0
start_inclusive_ = 0
has_end_key_ = 0
end_key_ = ""
has_end_inclusive_ = 0
end_inclusive_ = 0
has_end_unapplied_log_timestamp_us_ = 0
end_unapplied_log_timestamp_us_ = 0
def __init__(self, contents=None):
self.start_postfix_value_ = []
self.end_postfix_value_ = []
if contents is not None: self.MergeFromString(contents)
def index_name(self): return self.index_name_
def set_index_name(self, x):
self.has_index_name_ = 1
self.index_name_ = x
def clear_index_name(self):
if self.has_index_name_:
self.has_index_name_ = 0
self.index_name_ = ""
def has_index_name(self): return self.has_index_name_
def start_key(self): return self.start_key_
def set_start_key(self, x):
self.has_start_key_ = 1
self.start_key_ = x
def clear_start_key(self):
if self.has_start_key_:
self.has_start_key_ = 0
self.start_key_ = ""
def has_start_key(self): return self.has_start_key_
def start_inclusive(self): return self.start_inclusive_
def set_start_inclusive(self, x):
self.has_start_inclusive_ = 1
self.start_inclusive_ = x
def clear_start_inclusive(self):
if self.has_start_inclusive_:
self.has_start_inclusive_ = 0
self.start_inclusive_ = 0
def has_start_inclusive(self): return self.has_start_inclusive_
def end_key(self): return self.end_key_
def set_end_key(self, x):
self.has_end_key_ = 1
self.end_key_ = x
def clear_end_key(self):
if self.has_end_key_:
self.has_end_key_ = 0
self.end_key_ = ""
def has_end_key(self): return self.has_end_key_
def end_inclusive(self): return self.end_inclusive_
def set_end_inclusive(self, x):
self.has_end_inclusive_ = 1
self.end_inclusive_ = x
def clear_end_inclusive(self):
if self.has_end_inclusive_:
self.has_end_inclusive_ = 0
self.end_inclusive_ = 0
def has_end_inclusive(self): return self.has_end_inclusive_
def start_postfix_value_size(self): return len(self.start_postfix_value_)
def start_postfix_value_list(self): return self.start_postfix_value_
def start_postfix_value(self, i):
return self.start_postfix_value_[i]
def set_start_postfix_value(self, i, x):
self.start_postfix_value_[i] = x
def add_start_postfix_value(self, x):
self.start_postfix_value_.append(x)
def clear_start_postfix_value(self):
self.start_postfix_value_ = []
def end_postfix_value_size(self): return len(self.end_postfix_value_)
def end_postfix_value_list(self): return self.end_postfix_value_
def end_postfix_value(self, i):
return self.end_postfix_value_[i]
def set_end_postfix_value(self, i, x):
self.end_postfix_value_[i] = x
def add_end_postfix_value(self, x):
self.end_postfix_value_.append(x)
def clear_end_postfix_value(self):
self.end_postfix_value_ = []
def end_unapplied_log_timestamp_us(self): return self.end_unapplied_log_timestamp_us_
def set_end_unapplied_log_timestamp_us(self, x):
self.has_end_unapplied_log_timestamp_us_ = 1
self.end_unapplied_log_timestamp_us_ = x
def clear_end_unapplied_log_timestamp_us(self):
if self.has_end_unapplied_log_timestamp_us_:
self.has_end_unapplied_log_timestamp_us_ = 0
self.end_unapplied_log_timestamp_us_ = 0
def has_end_unapplied_log_timestamp_us(self): return self.has_end_unapplied_log_timestamp_us_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_name()): self.set_index_name(x.index_name())
if (x.has_start_key()): self.set_start_key(x.start_key())
if (x.has_start_inclusive()): self.set_start_inclusive(x.start_inclusive())
if (x.has_end_key()): self.set_end_key(x.end_key())
if (x.has_end_inclusive()): self.set_end_inclusive(x.end_inclusive())
for i in xrange(x.start_postfix_value_size()): self.add_start_postfix_value(x.start_postfix_value(i))
for i in xrange(x.end_postfix_value_size()): self.add_end_postfix_value(x.end_postfix_value(i))
if (x.has_end_unapplied_log_timestamp_us()): self.set_end_unapplied_log_timestamp_us(x.end_unapplied_log_timestamp_us())
def Equals(self, x):
if x is self: return 1
if self.has_index_name_ != x.has_index_name_: return 0
if self.has_index_name_ and self.index_name_ != x.index_name_: return 0
if self.has_start_key_ != x.has_start_key_: return 0
if self.has_start_key_ and self.start_key_ != x.start_key_: return 0
if self.has_start_inclusive_ != x.has_start_inclusive_: return 0
if self.has_start_inclusive_ and self.start_inclusive_ != x.start_inclusive_: return 0
if self.has_end_key_ != x.has_end_key_: return 0
if self.has_end_key_ and self.end_key_ != x.end_key_: return 0
if self.has_end_inclusive_ != x.has_end_inclusive_: return 0
if self.has_end_inclusive_ and self.end_inclusive_ != x.end_inclusive_: return 0
if len(self.start_postfix_value_) != len(x.start_postfix_value_): return 0
for e1, e2 in zip(self.start_postfix_value_, x.start_postfix_value_):
if e1 != e2: return 0
if len(self.end_postfix_value_) != len(x.end_postfix_value_): return 0
for e1, e2 in zip(self.end_postfix_value_, x.end_postfix_value_):
if e1 != e2: return 0
if self.has_end_unapplied_log_timestamp_us_ != x.has_end_unapplied_log_timestamp_us_: return 0
if self.has_end_unapplied_log_timestamp_us_ and self.end_unapplied_log_timestamp_us_ != x.end_unapplied_log_timestamp_us_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_index_name_): n += 1 + self.lengthString(len(self.index_name_))
if (self.has_start_key_): n += 1 + self.lengthString(len(self.start_key_))
if (self.has_start_inclusive_): n += 2
if (self.has_end_key_): n += 1 + self.lengthString(len(self.end_key_))
if (self.has_end_inclusive_): n += 2
n += 2 * len(self.start_postfix_value_)
for i in xrange(len(self.start_postfix_value_)): n += self.lengthString(len(self.start_postfix_value_[i]))
n += 2 * len(self.end_postfix_value_)
for i in xrange(len(self.end_postfix_value_)): n += self.lengthString(len(self.end_postfix_value_[i]))
if (self.has_end_unapplied_log_timestamp_us_): n += 2 + self.lengthVarInt64(self.end_unapplied_log_timestamp_us_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_index_name_): n += 1 + self.lengthString(len(self.index_name_))
if (self.has_start_key_): n += 1 + self.lengthString(len(self.start_key_))
if (self.has_start_inclusive_): n += 2
if (self.has_end_key_): n += 1 + self.lengthString(len(self.end_key_))
if (self.has_end_inclusive_): n += 2
n += 2 * len(self.start_postfix_value_)
for i in xrange(len(self.start_postfix_value_)): n += self.lengthString(len(self.start_postfix_value_[i]))
n += 2 * len(self.end_postfix_value_)
for i in xrange(len(self.end_postfix_value_)): n += self.lengthString(len(self.end_postfix_value_[i]))
if (self.has_end_unapplied_log_timestamp_us_): n += 2 + self.lengthVarInt64(self.end_unapplied_log_timestamp_us_)
return n
def Clear(self):
self.clear_index_name()
self.clear_start_key()
self.clear_start_inclusive()
self.clear_end_key()
self.clear_end_inclusive()
self.clear_start_postfix_value()
self.clear_end_postfix_value()
self.clear_end_unapplied_log_timestamp_us()
def OutputUnchecked(self, out):
if (self.has_index_name_):
out.putVarInt32(18)
out.putPrefixedString(self.index_name_)
if (self.has_start_key_):
out.putVarInt32(26)
out.putPrefixedString(self.start_key_)
if (self.has_start_inclusive_):
out.putVarInt32(32)
out.putBoolean(self.start_inclusive_)
if (self.has_end_key_):
out.putVarInt32(42)
out.putPrefixedString(self.end_key_)
if (self.has_end_inclusive_):
out.putVarInt32(48)
out.putBoolean(self.end_inclusive_)
if (self.has_end_unapplied_log_timestamp_us_):
out.putVarInt32(152)
out.putVarInt64(self.end_unapplied_log_timestamp_us_)
for i in xrange(len(self.start_postfix_value_)):
out.putVarInt32(178)
out.putPrefixedString(self.start_postfix_value_[i])
for i in xrange(len(self.end_postfix_value_)):
out.putVarInt32(186)
out.putPrefixedString(self.end_postfix_value_[i])
def OutputPartial(self, out):
if (self.has_index_name_):
out.putVarInt32(18)
out.putPrefixedString(self.index_name_)
if (self.has_start_key_):
out.putVarInt32(26)
out.putPrefixedString(self.start_key_)
if (self.has_start_inclusive_):
out.putVarInt32(32)
out.putBoolean(self.start_inclusive_)
if (self.has_end_key_):
out.putVarInt32(42)
out.putPrefixedString(self.end_key_)
if (self.has_end_inclusive_):
out.putVarInt32(48)
out.putBoolean(self.end_inclusive_)
if (self.has_end_unapplied_log_timestamp_us_):
out.putVarInt32(152)
out.putVarInt64(self.end_unapplied_log_timestamp_us_)
for i in xrange(len(self.start_postfix_value_)):
out.putVarInt32(178)
out.putPrefixedString(self.start_postfix_value_[i])
for i in xrange(len(self.end_postfix_value_)):
out.putVarInt32(186)
out.putPrefixedString(self.end_postfix_value_[i])
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
self.set_index_name(d.getPrefixedString())
continue
if tt == 26:
self.set_start_key(d.getPrefixedString())
continue
if tt == 32:
self.set_start_inclusive(d.getBoolean())
continue
if tt == 42:
self.set_end_key(d.getPrefixedString())
continue
if tt == 48:
self.set_end_inclusive(d.getBoolean())
continue
if tt == 152:
self.set_end_unapplied_log_timestamp_us(d.getVarInt64())
continue
if tt == 178:
self.add_start_postfix_value(d.getPrefixedString())
continue
if tt == 186:
self.add_end_postfix_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_name_: res+=prefix+("index_name: %s\n" % self.DebugFormatString(self.index_name_))
if self.has_start_key_: res+=prefix+("start_key: %s\n" % self.DebugFormatString(self.start_key_))
if self.has_start_inclusive_: res+=prefix+("start_inclusive: %s\n" % self.DebugFormatBool(self.start_inclusive_))
if self.has_end_key_: res+=prefix+("end_key: %s\n" % self.DebugFormatString(self.end_key_))
if self.has_end_inclusive_: res+=prefix+("end_inclusive: %s\n" % self.DebugFormatBool(self.end_inclusive_))
cnt=0
for e in self.start_postfix_value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("start_postfix_value%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
cnt=0
for e in self.end_postfix_value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("end_postfix_value%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_end_unapplied_log_timestamp_us_: res+=prefix+("end_unapplied_log_timestamp_us: %s\n" % self.DebugFormatInt64(self.end_unapplied_log_timestamp_us_))
return res
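# CompiledQuery_MergeJoinScan names one additional index consulted by a
# merge-join style plan, with the prefix values that pin that index's leading
# columns; value_prefix evidently marks prefix rather than exact matching.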
class CompiledQuery_MergeJoinScan(ProtocolBuffer.ProtocolMessage):
has_index_name_ = 0
index_name_ = ""
has_value_prefix_ = 0
value_prefix_ = 0
def __init__(self, contents=None):
self.prefix_value_ = []
if contents is not None: self.MergeFromString(contents)
def index_name(self): return self.index_name_
def set_index_name(self, x):
self.has_index_name_ = 1
self.index_name_ = x
def clear_index_name(self):
if self.has_index_name_:
self.has_index_name_ = 0
self.index_name_ = ""
def has_index_name(self): return self.has_index_name_
def prefix_value_size(self): return len(self.prefix_value_)
def prefix_value_list(self): return self.prefix_value_
def prefix_value(self, i):
return self.prefix_value_[i]
def set_prefix_value(self, i, x):
self.prefix_value_[i] = x
def add_prefix_value(self, x):
self.prefix_value_.append(x)
def clear_prefix_value(self):
self.prefix_value_ = []
def value_prefix(self): return self.value_prefix_
def set_value_prefix(self, x):
self.has_value_prefix_ = 1
self.value_prefix_ = x
def clear_value_prefix(self):
if self.has_value_prefix_:
self.has_value_prefix_ = 0
self.value_prefix_ = 0
def has_value_prefix(self): return self.has_value_prefix_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_name()): self.set_index_name(x.index_name())
for i in xrange(x.prefix_value_size()): self.add_prefix_value(x.prefix_value(i))
if (x.has_value_prefix()): self.set_value_prefix(x.value_prefix())
def Equals(self, x):
if x is self: return 1
if self.has_index_name_ != x.has_index_name_: return 0
if self.has_index_name_ and self.index_name_ != x.index_name_: return 0
if len(self.prefix_value_) != len(x.prefix_value_): return 0
for e1, e2 in zip(self.prefix_value_, x.prefix_value_):
if e1 != e2: return 0
if self.has_value_prefix_ != x.has_value_prefix_: return 0
if self.has_value_prefix_ and self.value_prefix_ != x.value_prefix_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.index_name_))
n += 1 * len(self.prefix_value_)
for i in xrange(len(self.prefix_value_)): n += self.lengthString(len(self.prefix_value_[i]))
if (self.has_value_prefix_): n += 3
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_name_):
n += 1
n += self.lengthString(len(self.index_name_))
n += 1 * len(self.prefix_value_)
for i in xrange(len(self.prefix_value_)): n += self.lengthString(len(self.prefix_value_[i]))
if (self.has_value_prefix_): n += 3
return n
def Clear(self):
self.clear_index_name()
self.clear_prefix_value()
self.clear_value_prefix()
def OutputUnchecked(self, out):
out.putVarInt32(66)
out.putPrefixedString(self.index_name_)
for i in xrange(len(self.prefix_value_)):
out.putVarInt32(74)
out.putPrefixedString(self.prefix_value_[i])
if (self.has_value_prefix_):
out.putVarInt32(160)
out.putBoolean(self.value_prefix_)
def OutputPartial(self, out):
if (self.has_index_name_):
out.putVarInt32(66)
out.putPrefixedString(self.index_name_)
for i in xrange(len(self.prefix_value_)):
out.putVarInt32(74)
out.putPrefixedString(self.prefix_value_[i])
if (self.has_value_prefix_):
out.putVarInt32(160)
out.putBoolean(self.value_prefix_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 60: break
if tt == 66:
self.set_index_name(d.getPrefixedString())
continue
if tt == 74:
self.add_prefix_value(d.getPrefixedString())
continue
if tt == 160:
self.set_value_prefix(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_name_: res+=prefix+("index_name: %s\n" % self.DebugFormatString(self.index_name_))
cnt=0
for e in self.prefix_value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("prefix_value%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_value_prefix_: res+=prefix+("value_prefix: %s\n" % self.DebugFormatBool(self.value_prefix_))
return res
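# CompiledQuery_EntityFilter carries filtering applied to entities after the
# index scans: an optional kind, an optional ancestor Reference, and a
# distinct flag.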
class CompiledQuery_EntityFilter(ProtocolBuffer.ProtocolMessage):
has_distinct_ = 0
distinct_ = 0
has_kind_ = 0
kind_ = ""
has_ancestor_ = 0
ancestor_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def distinct(self): return self.distinct_
def set_distinct(self, x):
self.has_distinct_ = 1
self.distinct_ = x
def clear_distinct(self):
if self.has_distinct_:
self.has_distinct_ = 0
self.distinct_ = 0
def has_distinct(self): return self.has_distinct_
def kind(self): return self.kind_
def set_kind(self, x):
self.has_kind_ = 1
self.kind_ = x
def clear_kind(self):
if self.has_kind_:
self.has_kind_ = 0
self.kind_ = ""
def has_kind(self): return self.has_kind_
def ancestor(self):
if self.ancestor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.ancestor_ is None: self.ancestor_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.ancestor_
def mutable_ancestor(self): self.has_ancestor_ = 1; return self.ancestor()
def clear_ancestor(self):
if self.has_ancestor_:
self.has_ancestor_ = 0;
if self.ancestor_ is not None: self.ancestor_.Clear()
def has_ancestor(self): return self.has_ancestor_
def MergeFrom(self, x):
assert x is not self
if (x.has_distinct()): self.set_distinct(x.distinct())
if (x.has_kind()): self.set_kind(x.kind())
if (x.has_ancestor()): self.mutable_ancestor().MergeFrom(x.ancestor())
def Equals(self, x):
if x is self: return 1
if self.has_distinct_ != x.has_distinct_: return 0
if self.has_distinct_ and self.distinct_ != x.distinct_: return 0
if self.has_kind_ != x.has_kind_: return 0
if self.has_kind_ and self.kind_ != x.kind_: return 0
if self.has_ancestor_ != x.has_ancestor_: return 0
if self.has_ancestor_ and self.ancestor_ != x.ancestor_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_ancestor_ and not self.ancestor_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_distinct_): n += 2
if (self.has_kind_): n += 2 + self.lengthString(len(self.kind_))
if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_distinct_): n += 2
if (self.has_kind_): n += 2 + self.lengthString(len(self.kind_))
if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSizePartial())
return n
def Clear(self):
self.clear_distinct()
self.clear_kind()
self.clear_ancestor()
def OutputUnchecked(self, out):
if (self.has_distinct_):
out.putVarInt32(112)
out.putBoolean(self.distinct_)
if (self.has_kind_):
out.putVarInt32(138)
out.putPrefixedString(self.kind_)
if (self.has_ancestor_):
out.putVarInt32(146)
out.putVarInt32(self.ancestor_.ByteSize())
self.ancestor_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_distinct_):
out.putVarInt32(112)
out.putBoolean(self.distinct_)
if (self.has_kind_):
out.putVarInt32(138)
out.putPrefixedString(self.kind_)
if (self.has_ancestor_):
out.putVarInt32(146)
out.putVarInt32(self.ancestor_.ByteSizePartial())
self.ancestor_.OutputPartial(out)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 108: break
if tt == 112:
self.set_distinct(d.getBoolean())
continue
if tt == 138:
self.set_kind(d.getPrefixedString())
continue
if tt == 146:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_ancestor().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_distinct_: res+=prefix+("distinct: %s\n" % self.DebugFormatBool(self.distinct_))
if self.has_kind_: res+=prefix+("kind: %s\n" % self.DebugFormatString(self.kind_))
if self.has_ancestor_:
res+=prefix+"ancestor <\n"
res+=self.ancestor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
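# CompiledQuery is the datastore's compiled execution plan for a Query: a
# required primary index scan (group 1), zero or more merge-join scans,
# optional post-scan entity filtering, offset/limit, and what appears to be
# projection metadata (property_name, distinct_infix_size). keys_only is the
# other required field, hence the two extra tag-byte pairs in ByteSize's
# "return n + 4".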
class CompiledQuery(ProtocolBuffer.ProtocolMessage):
has_primaryscan_ = 0
has_index_def_ = 0
index_def_ = None
has_offset_ = 0
offset_ = 0
has_limit_ = 0
limit_ = 0
has_keys_only_ = 0
keys_only_ = 0
has_distinct_infix_size_ = 0
distinct_infix_size_ = 0
has_entityfilter_ = 0
entityfilter_ = None
has_plan_label_ = 0
plan_label_ = ""
def __init__(self, contents=None):
self.primaryscan_ = CompiledQuery_PrimaryScan()
self.mergejoinscan_ = []
self.property_name_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def primaryscan(self): return self.primaryscan_
def mutable_primaryscan(self): self.has_primaryscan_ = 1; return self.primaryscan_
def clear_primaryscan(self):self.has_primaryscan_ = 0; self.primaryscan_.Clear()
def has_primaryscan(self): return self.has_primaryscan_
def mergejoinscan_size(self): return len(self.mergejoinscan_)
def mergejoinscan_list(self): return self.mergejoinscan_
def mergejoinscan(self, i):
return self.mergejoinscan_[i]
def mutable_mergejoinscan(self, i):
return self.mergejoinscan_[i]
def add_mergejoinscan(self):
x = CompiledQuery_MergeJoinScan()
self.mergejoinscan_.append(x)
return x
def clear_mergejoinscan(self):
self.mergejoinscan_ = []
def index_def(self):
if self.index_def_ is None:
self.lazy_init_lock_.acquire()
try:
if self.index_def_ is None: self.index_def_ = Index()
finally:
self.lazy_init_lock_.release()
return self.index_def_
def mutable_index_def(self): self.has_index_def_ = 1; return self.index_def()
def clear_index_def(self):
if self.has_index_def_:
self.has_index_def_ = 0;
if self.index_def_ is not None: self.index_def_.Clear()
def has_index_def(self): return self.has_index_def_
def offset(self): return self.offset_
def set_offset(self, x):
self.has_offset_ = 1
self.offset_ = x
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0
self.offset_ = 0
def has_offset(self): return self.has_offset_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 0
def has_limit(self): return self.has_limit_
def keys_only(self): return self.keys_only_
def set_keys_only(self, x):
self.has_keys_only_ = 1
self.keys_only_ = x
def clear_keys_only(self):
if self.has_keys_only_:
self.has_keys_only_ = 0
self.keys_only_ = 0
def has_keys_only(self): return self.has_keys_only_
def property_name_size(self): return len(self.property_name_)
def property_name_list(self): return self.property_name_
def property_name(self, i):
return self.property_name_[i]
def set_property_name(self, i, x):
self.property_name_[i] = x
def add_property_name(self, x):
self.property_name_.append(x)
def clear_property_name(self):
self.property_name_ = []
def distinct_infix_size(self): return self.distinct_infix_size_
def set_distinct_infix_size(self, x):
self.has_distinct_infix_size_ = 1
self.distinct_infix_size_ = x
def clear_distinct_infix_size(self):
if self.has_distinct_infix_size_:
self.has_distinct_infix_size_ = 0
self.distinct_infix_size_ = 0
def has_distinct_infix_size(self): return self.has_distinct_infix_size_
def entityfilter(self):
if self.entityfilter_ is None:
self.lazy_init_lock_.acquire()
try:
if self.entityfilter_ is None: self.entityfilter_ = CompiledQuery_EntityFilter()
finally:
self.lazy_init_lock_.release()
return self.entityfilter_
def mutable_entityfilter(self): self.has_entityfilter_ = 1; return self.entityfilter()
def clear_entityfilter(self):
if self.has_entityfilter_:
self.has_entityfilter_ = 0;
if self.entityfilter_ is not None: self.entityfilter_.Clear()
def has_entityfilter(self): return self.has_entityfilter_
def plan_label(self): return self.plan_label_
def set_plan_label(self, x):
self.has_plan_label_ = 1
self.plan_label_ = x
def clear_plan_label(self):
if self.has_plan_label_:
self.has_plan_label_ = 0
self.plan_label_ = ""
def has_plan_label(self): return self.has_plan_label_
def MergeFrom(self, x):
assert x is not self
if (x.has_primaryscan()): self.mutable_primaryscan().MergeFrom(x.primaryscan())
for i in xrange(x.mergejoinscan_size()): self.add_mergejoinscan().CopyFrom(x.mergejoinscan(i))
if (x.has_index_def()): self.mutable_index_def().MergeFrom(x.index_def())
if (x.has_offset()): self.set_offset(x.offset())
if (x.has_limit()): self.set_limit(x.limit())
if (x.has_keys_only()): self.set_keys_only(x.keys_only())
for i in xrange(x.property_name_size()): self.add_property_name(x.property_name(i))
if (x.has_distinct_infix_size()): self.set_distinct_infix_size(x.distinct_infix_size())
if (x.has_entityfilter()): self.mutable_entityfilter().MergeFrom(x.entityfilter())
if (x.has_plan_label()): self.set_plan_label(x.plan_label())
def Equals(self, x):
if x is self: return 1
if self.has_primaryscan_ != x.has_primaryscan_: return 0
if self.has_primaryscan_ and self.primaryscan_ != x.primaryscan_: return 0
if len(self.mergejoinscan_) != len(x.mergejoinscan_): return 0
for e1, e2 in zip(self.mergejoinscan_, x.mergejoinscan_):
if e1 != e2: return 0
if self.has_index_def_ != x.has_index_def_: return 0
if self.has_index_def_ and self.index_def_ != x.index_def_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
if self.has_keys_only_ != x.has_keys_only_: return 0
if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
if len(self.property_name_) != len(x.property_name_): return 0
for e1, e2 in zip(self.property_name_, x.property_name_):
if e1 != e2: return 0
if self.has_distinct_infix_size_ != x.has_distinct_infix_size_: return 0
if self.has_distinct_infix_size_ and self.distinct_infix_size_ != x.distinct_infix_size_: return 0
if self.has_entityfilter_ != x.has_entityfilter_: return 0
if self.has_entityfilter_ and self.entityfilter_ != x.entityfilter_: return 0
if self.has_plan_label_ != x.has_plan_label_: return 0
if self.has_plan_label_ and self.plan_label_ != x.plan_label_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_primaryscan_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: primaryscan not set.')
elif not self.primaryscan_.IsInitialized(debug_strs): initialized = 0
for p in self.mergejoinscan_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_index_def_ and not self.index_def_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_keys_only_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: keys_only not set.')
if (self.has_entityfilter_ and not self.entityfilter_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.primaryscan_.ByteSize()
n += 2 * len(self.mergejoinscan_)
for i in xrange(len(self.mergejoinscan_)): n += self.mergejoinscan_[i].ByteSize()
if (self.has_index_def_): n += 2 + self.lengthString(self.index_def_.ByteSize())
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
n += 2 * len(self.property_name_)
for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
if (self.has_distinct_infix_size_): n += 2 + self.lengthVarInt64(self.distinct_infix_size_)
if (self.has_entityfilter_): n += 2 + self.entityfilter_.ByteSize()
if (self.has_plan_label_): n += 2 + self.lengthString(len(self.plan_label_))
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_primaryscan_):
n += 2
n += self.primaryscan_.ByteSizePartial()
n += 2 * len(self.mergejoinscan_)<|fim▁hole|> for i in xrange(len(self.mergejoinscan_)): n += self.mergejoinscan_[i].ByteSizePartial()
if (self.has_index_def_): n += 2 + self.lengthString(self.index_def_.ByteSizePartial())
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_keys_only_):
n += 2
n += 2 * len(self.property_name_)
for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
if (self.has_distinct_infix_size_): n += 2 + self.lengthVarInt64(self.distinct_infix_size_)
if (self.has_entityfilter_): n += 2 + self.entityfilter_.ByteSizePartial()
if (self.has_plan_label_): n += 2 + self.lengthString(len(self.plan_label_))
return n
def Clear(self):
self.clear_primaryscan()
self.clear_mergejoinscan()
self.clear_index_def()
self.clear_offset()
self.clear_limit()
self.clear_keys_only()
self.clear_property_name()
self.clear_distinct_infix_size()
self.clear_entityfilter()
self.clear_plan_label()
def OutputUnchecked(self, out):
out.putVarInt32(11)
self.primaryscan_.OutputUnchecked(out)
out.putVarInt32(12)
for i in xrange(len(self.mergejoinscan_)):
out.putVarInt32(59)
self.mergejoinscan_[i].OutputUnchecked(out)
out.putVarInt32(60)
if (self.has_offset_):
out.putVarInt32(80)
out.putVarInt32(self.offset_)
if (self.has_limit_):
out.putVarInt32(88)
out.putVarInt32(self.limit_)
out.putVarInt32(96)
out.putBoolean(self.keys_only_)
if (self.has_entityfilter_):
out.putVarInt32(107)
self.entityfilter_.OutputUnchecked(out)
out.putVarInt32(108)
if (self.has_index_def_):
out.putVarInt32(170)
out.putVarInt32(self.index_def_.ByteSize())
self.index_def_.OutputUnchecked(out)
for i in xrange(len(self.property_name_)):
out.putVarInt32(194)
out.putPrefixedString(self.property_name_[i])
if (self.has_distinct_infix_size_):
out.putVarInt32(200)
out.putVarInt32(self.distinct_infix_size_)
if (self.has_plan_label_):
out.putVarInt32(210)
out.putPrefixedString(self.plan_label_)
def OutputPartial(self, out):
if (self.has_primaryscan_):
out.putVarInt32(11)
self.primaryscan_.OutputPartial(out)
out.putVarInt32(12)
for i in xrange(len(self.mergejoinscan_)):
out.putVarInt32(59)
self.mergejoinscan_[i].OutputPartial(out)
out.putVarInt32(60)
if (self.has_offset_):
out.putVarInt32(80)
out.putVarInt32(self.offset_)
if (self.has_limit_):
out.putVarInt32(88)
out.putVarInt32(self.limit_)
if (self.has_keys_only_):
out.putVarInt32(96)
out.putBoolean(self.keys_only_)
if (self.has_entityfilter_):
out.putVarInt32(107)
self.entityfilter_.OutputPartial(out)
out.putVarInt32(108)
if (self.has_index_def_):
out.putVarInt32(170)
out.putVarInt32(self.index_def_.ByteSizePartial())
self.index_def_.OutputPartial(out)
for i in xrange(len(self.property_name_)):
out.putVarInt32(194)
out.putPrefixedString(self.property_name_[i])
if (self.has_distinct_infix_size_):
out.putVarInt32(200)
out.putVarInt32(self.distinct_infix_size_)
if (self.has_plan_label_):
out.putVarInt32(210)
out.putPrefixedString(self.plan_label_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.mutable_primaryscan().TryMerge(d)
continue
if tt == 59:
self.add_mergejoinscan().TryMerge(d)
continue
if tt == 80:
self.set_offset(d.getVarInt32())
continue
if tt == 88:
self.set_limit(d.getVarInt32())
continue
if tt == 96:
self.set_keys_only(d.getBoolean())
continue
if tt == 107:
self.mutable_entityfilter().TryMerge(d)
continue
if tt == 170:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_def().TryMerge(tmp)
continue
if tt == 194:
self.add_property_name(d.getPrefixedString())
continue
if tt == 200:
self.set_distinct_infix_size(d.getVarInt32())
continue
if tt == 210:
self.set_plan_label(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_primaryscan_:
res+=prefix+"PrimaryScan {\n"
res+=self.primaryscan_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt=0
for e in self.mergejoinscan_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("MergeJoinScan%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_index_def_:
res+=prefix+"index_def <\n"
res+=self.index_def_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
cnt=0
for e in self.property_name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("property_name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_distinct_infix_size_: res+=prefix+("distinct_infix_size: %s\n" % self.DebugFormatInt32(self.distinct_infix_size_))
if self.has_entityfilter_:
res+=prefix+"EntityFilter {\n"
res+=self.entityfilter_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_plan_label_: res+=prefix+("plan_label: %s\n" % self.DebugFormatString(self.plan_label_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kPrimaryScanGroup = 1
kPrimaryScanindex_name = 2
kPrimaryScanstart_key = 3
kPrimaryScanstart_inclusive = 4
kPrimaryScanend_key = 5
kPrimaryScanend_inclusive = 6
kPrimaryScanstart_postfix_value = 22
kPrimaryScanend_postfix_value = 23
kPrimaryScanend_unapplied_log_timestamp_us = 19
kMergeJoinScanGroup = 7
kMergeJoinScanindex_name = 8
kMergeJoinScanprefix_value = 9
kMergeJoinScanvalue_prefix = 20
kindex_def = 21
koffset = 10
klimit = 11
kkeys_only = 12
kproperty_name = 24
kdistinct_infix_size = 25
kEntityFilterGroup = 13
kEntityFilterdistinct = 14
kEntityFilterkind = 17
kEntityFilterancestor = 18
kplan_label = 26
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "PrimaryScan",
2: "index_name",
3: "start_key",
4: "start_inclusive",
5: "end_key",
6: "end_inclusive",
7: "MergeJoinScan",
8: "index_name",
9: "prefix_value",
10: "offset",
11: "limit",
12: "keys_only",
13: "EntityFilter",
14: "distinct",
17: "kind",
18: "ancestor",
19: "end_unapplied_log_timestamp_us",
20: "value_prefix",
21: "index_def",
22: "start_postfix_value",
23: "end_postfix_value",
24: "property_name",
25: "distinct_infix_size",
26: "plan_label",
}, 26)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.STARTGROUP,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.NUMERIC,
13: ProtocolBuffer.Encoder.STARTGROUP,
14: ProtocolBuffer.Encoder.NUMERIC,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.STRING,
19: ProtocolBuffer.Encoder.NUMERIC,
20: ProtocolBuffer.Encoder.NUMERIC,
21: ProtocolBuffer.Encoder.STRING,
22: ProtocolBuffer.Encoder.STRING,
23: ProtocolBuffer.Encoder.STRING,
24: ProtocolBuffer.Encoder.STRING,
25: ProtocolBuffer.Encoder.NUMERIC,
26: ProtocolBuffer.Encoder.STRING,
}, 26, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CompiledQuery'
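# CompiledCursor_PositionIndexValue is one (property, value) pair of the index
# row at which a cursor is positioned; value is a required PropertyValue.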
class CompiledCursor_PositionIndexValue(ProtocolBuffer.ProtocolMessage):
has_property_ = 0
property_ = ""
has_value_ = 0
def __init__(self, contents=None):
self.value_ = PropertyValue()
if contents is not None: self.MergeFromString(contents)
def property(self): return self.property_
def set_property(self, x):
self.has_property_ = 1
self.property_ = x
def clear_property(self):
if self.has_property_:
self.has_property_ = 0
self.property_ = ""
def has_property(self): return self.has_property_
def value(self): return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value_
def clear_value(self):self.has_value_ = 0; self.value_.Clear()
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_property()): self.set_property(x.property())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_property_ != x.has_property_: return 0
if self.has_property_ and self.property_ != x.property_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
elif not self.value_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_property_): n += 2 + self.lengthString(len(self.property_))
n += self.lengthString(self.value_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_property_): n += 2 + self.lengthString(len(self.property_))
if (self.has_value_):
n += 2
n += self.lengthString(self.value_.ByteSizePartial())
return n
def Clear(self):
self.clear_property()
self.clear_value()
def OutputUnchecked(self, out):
if (self.has_property_):
out.putVarInt32(242)
out.putPrefixedString(self.property_)
out.putVarInt32(250)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_property_):
out.putVarInt32(242)
out.putPrefixedString(self.property_)
if (self.has_value_):
out.putVarInt32(250)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 236: break
if tt == 242:
self.set_property(d.getPrefixedString())
continue
if tt == 250:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_: res+=prefix+("property: %s\n" % self.DebugFormatString(self.property_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
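# CompiledCursor_Position pinpoints where a scan should resume: either an
# opaque start_key or a decomposed position (IndexValue pairs plus the entity
# key). start_inclusive (default true) and before_ascending evidently refine
# on which side of that position the scan restarts.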
class CompiledCursor_Position(ProtocolBuffer.ProtocolMessage):
has_start_key_ = 0
start_key_ = ""
has_key_ = 0
key_ = None
has_start_inclusive_ = 0
start_inclusive_ = 1
has_before_ascending_ = 0
before_ascending_ = 0
def __init__(self, contents=None):
self.indexvalue_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def start_key(self): return self.start_key_
def set_start_key(self, x):
self.has_start_key_ = 1
self.start_key_ = x
def clear_start_key(self):
if self.has_start_key_:
self.has_start_key_ = 0
self.start_key_ = ""
def has_start_key(self): return self.has_start_key_
def indexvalue_size(self): return len(self.indexvalue_)
def indexvalue_list(self): return self.indexvalue_
def indexvalue(self, i):
return self.indexvalue_[i]
def mutable_indexvalue(self, i):
return self.indexvalue_[i]
def add_indexvalue(self):
x = CompiledCursor_PositionIndexValue()
self.indexvalue_.append(x)
return x
def clear_indexvalue(self):
self.indexvalue_ = []
def key(self):
if self.key_ is None:
self.lazy_init_lock_.acquire()
try:
if self.key_ is None: self.key_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key()
def clear_key(self):
if self.has_key_:
self.has_key_ = 0;
if self.key_ is not None: self.key_.Clear()
def has_key(self): return self.has_key_
def start_inclusive(self): return self.start_inclusive_
def set_start_inclusive(self, x):
self.has_start_inclusive_ = 1
self.start_inclusive_ = x
def clear_start_inclusive(self):
if self.has_start_inclusive_:
self.has_start_inclusive_ = 0
self.start_inclusive_ = 1
def has_start_inclusive(self): return self.has_start_inclusive_
def before_ascending(self): return self.before_ascending_
def set_before_ascending(self, x):
self.has_before_ascending_ = 1
self.before_ascending_ = x
def clear_before_ascending(self):
if self.has_before_ascending_:
self.has_before_ascending_ = 0
self.before_ascending_ = 0
def has_before_ascending(self): return self.has_before_ascending_
def MergeFrom(self, x):
assert x is not self
if (x.has_start_key()): self.set_start_key(x.start_key())
for i in xrange(x.indexvalue_size()): self.add_indexvalue().CopyFrom(x.indexvalue(i))
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_start_inclusive()): self.set_start_inclusive(x.start_inclusive())
if (x.has_before_ascending()): self.set_before_ascending(x.before_ascending())
def Equals(self, x):
if x is self: return 1
if self.has_start_key_ != x.has_start_key_: return 0
if self.has_start_key_ and self.start_key_ != x.start_key_: return 0
if len(self.indexvalue_) != len(x.indexvalue_): return 0
for e1, e2 in zip(self.indexvalue_, x.indexvalue_):
if e1 != e2: return 0
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_start_inclusive_ != x.has_start_inclusive_: return 0
if self.has_start_inclusive_ and self.start_inclusive_ != x.start_inclusive_: return 0
if self.has_before_ascending_ != x.has_before_ascending_: return 0
if self.has_before_ascending_ and self.before_ascending_ != x.before_ascending_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.indexvalue_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_start_key_): n += 2 + self.lengthString(len(self.start_key_))
n += 4 * len(self.indexvalue_)
for i in xrange(len(self.indexvalue_)): n += self.indexvalue_[i].ByteSize()
if (self.has_key_): n += 2 + self.lengthString(self.key_.ByteSize())
if (self.has_start_inclusive_): n += 3
if (self.has_before_ascending_): n += 3
return n
def ByteSizePartial(self):
n = 0
if (self.has_start_key_): n += 2 + self.lengthString(len(self.start_key_))
n += 4 * len(self.indexvalue_)
for i in xrange(len(self.indexvalue_)): n += self.indexvalue_[i].ByteSizePartial()
if (self.has_key_): n += 2 + self.lengthString(self.key_.ByteSizePartial())
if (self.has_start_inclusive_): n += 3
if (self.has_before_ascending_): n += 3
return n
def Clear(self):
self.clear_start_key()
self.clear_indexvalue()
self.clear_key()
self.clear_start_inclusive()
self.clear_before_ascending()
def OutputUnchecked(self, out):
if (self.has_start_key_):
out.putVarInt32(218)
out.putPrefixedString(self.start_key_)
if (self.has_start_inclusive_):
out.putVarInt32(224)
out.putBoolean(self.start_inclusive_)
for i in xrange(len(self.indexvalue_)):
out.putVarInt32(235)
self.indexvalue_[i].OutputUnchecked(out)
out.putVarInt32(236)
if (self.has_key_):
out.putVarInt32(258)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
if (self.has_before_ascending_):
out.putVarInt32(264)
out.putBoolean(self.before_ascending_)
def OutputPartial(self, out):
if (self.has_start_key_):
out.putVarInt32(218)
out.putPrefixedString(self.start_key_)
if (self.has_start_inclusive_):
out.putVarInt32(224)
out.putBoolean(self.start_inclusive_)
for i in xrange(len(self.indexvalue_)):
out.putVarInt32(235)
self.indexvalue_[i].OutputPartial(out)
out.putVarInt32(236)
if (self.has_key_):
out.putVarInt32(258)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
if (self.has_before_ascending_):
out.putVarInt32(264)
out.putBoolean(self.before_ascending_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 20: break
if tt == 218:
self.set_start_key(d.getPrefixedString())
continue
if tt == 224:
self.set_start_inclusive(d.getBoolean())
continue
if tt == 235:
self.add_indexvalue().TryMerge(d)
continue
if tt == 258:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 264:
self.set_before_ascending(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_start_key_: res+=prefix+("start_key: %s\n" % self.DebugFormatString(self.start_key_))
cnt=0
for e in self.indexvalue_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("IndexValue%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_start_inclusive_: res+=prefix+("start_inclusive: %s\n" % self.DebugFormatBool(self.start_inclusive_))
if self.has_before_ascending_: res+=prefix+("before_ascending: %s\n" % self.DebugFormatBool(self.before_ascending_))
return res
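# CompiledCursor: a resumable query position, carried as a group-encoded
# Position, an IndexPostfix postfix_position, or an IndexPosition
# absolute_position (normally only one of the three is populated).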
class CompiledCursor(ProtocolBuffer.ProtocolMessage):
has_position_ = 0
position_ = None
has_postfix_position_ = 0
postfix_position_ = None
has_absolute_position_ = 0
absolute_position_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def position(self):
if self.position_ is None:
self.lazy_init_lock_.acquire()
try:
if self.position_ is None: self.position_ = CompiledCursor_Position()
finally:
self.lazy_init_lock_.release()
return self.position_
def mutable_position(self): self.has_position_ = 1; return self.position()
def clear_position(self):
if self.has_position_:
      self.has_position_ = 0
if self.position_ is not None: self.position_.Clear()
def has_position(self): return self.has_position_
def postfix_position(self):
if self.postfix_position_ is None:
self.lazy_init_lock_.acquire()
try:
if self.postfix_position_ is None: self.postfix_position_ = IndexPostfix()
finally:
self.lazy_init_lock_.release()
return self.postfix_position_
def mutable_postfix_position(self): self.has_postfix_position_ = 1; return self.postfix_position()
def clear_postfix_position(self):
if self.has_postfix_position_:
      self.has_postfix_position_ = 0
if self.postfix_position_ is not None: self.postfix_position_.Clear()
def has_postfix_position(self): return self.has_postfix_position_
def absolute_position(self):
if self.absolute_position_ is None:
self.lazy_init_lock_.acquire()
try:
if self.absolute_position_ is None: self.absolute_position_ = IndexPosition()
finally:
self.lazy_init_lock_.release()
return self.absolute_position_
def mutable_absolute_position(self): self.has_absolute_position_ = 1; return self.absolute_position()
def clear_absolute_position(self):
if self.has_absolute_position_:
      self.has_absolute_position_ = 0
if self.absolute_position_ is not None: self.absolute_position_.Clear()
def has_absolute_position(self): return self.has_absolute_position_
def MergeFrom(self, x):
assert x is not self
if (x.has_position()): self.mutable_position().MergeFrom(x.position())
if (x.has_postfix_position()): self.mutable_postfix_position().MergeFrom(x.postfix_position())
if (x.has_absolute_position()): self.mutable_absolute_position().MergeFrom(x.absolute_position())
def Equals(self, x):
if x is self: return 1
if self.has_position_ != x.has_position_: return 0
if self.has_position_ and self.position_ != x.position_: return 0
if self.has_postfix_position_ != x.has_postfix_position_: return 0
if self.has_postfix_position_ and self.postfix_position_ != x.postfix_position_: return 0
if self.has_absolute_position_ != x.has_absolute_position_: return 0
if self.has_absolute_position_ and self.absolute_position_ != x.absolute_position_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_position_ and not self.position_.IsInitialized(debug_strs)): initialized = 0
if (self.has_postfix_position_ and not self.postfix_position_.IsInitialized(debug_strs)): initialized = 0
if (self.has_absolute_position_ and not self.absolute_position_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_position_): n += 2 + self.position_.ByteSize()
if (self.has_postfix_position_): n += 1 + self.lengthString(self.postfix_position_.ByteSize())
if (self.has_absolute_position_): n += 1 + self.lengthString(self.absolute_position_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_position_): n += 2 + self.position_.ByteSizePartial()
if (self.has_postfix_position_): n += 1 + self.lengthString(self.postfix_position_.ByteSizePartial())
if (self.has_absolute_position_): n += 1 + self.lengthString(self.absolute_position_.ByteSizePartial())
return n
def Clear(self):
self.clear_position()
self.clear_postfix_position()
self.clear_absolute_position()
def OutputUnchecked(self, out):
if (self.has_postfix_position_):
out.putVarInt32(10)
out.putVarInt32(self.postfix_position_.ByteSize())
self.postfix_position_.OutputUnchecked(out)
if (self.has_position_):
out.putVarInt32(19)
self.position_.OutputUnchecked(out)
out.putVarInt32(20)
if (self.has_absolute_position_):
out.putVarInt32(26)
out.putVarInt32(self.absolute_position_.ByteSize())
self.absolute_position_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_postfix_position_):
out.putVarInt32(10)
out.putVarInt32(self.postfix_position_.ByteSizePartial())
self.postfix_position_.OutputPartial(out)
if (self.has_position_):
out.putVarInt32(19)
self.position_.OutputPartial(out)
out.putVarInt32(20)
if (self.has_absolute_position_):
out.putVarInt32(26)
out.putVarInt32(self.absolute_position_.ByteSizePartial())
self.absolute_position_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_postfix_position().TryMerge(tmp)
continue
if tt == 19:
self.mutable_position().TryMerge(d)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_absolute_position().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_position_:
res+=prefix+"Position {\n"
res+=self.position_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_postfix_position_:
res+=prefix+"postfix_position <\n"
res+=self.postfix_position_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_absolute_position_:
res+=prefix+"absolute_position <\n"
res+=self.absolute_position_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
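  # Per-field tag numbers and the _TEXT/_TYPES lookup tables consumed by the
  # generic decoder and text formatter; the same pattern repeats for every
  # message in this module.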
kPositionGroup = 2
kPositionstart_key = 27
kPositionIndexValueGroup = 29
kPositionIndexValueproperty = 30
kPositionIndexValuevalue = 31
kPositionkey = 32
kPositionstart_inclusive = 28
kPositionbefore_ascending = 33
kpostfix_position = 1
kabsolute_position = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "postfix_position",
2: "Position",
3: "absolute_position",
27: "start_key",
28: "start_inclusive",
29: "IndexValue",
30: "property",
31: "value",
32: "key",
33: "before_ascending",
}, 33)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STARTGROUP,
3: ProtocolBuffer.Encoder.STRING,
27: ProtocolBuffer.Encoder.STRING,
28: ProtocolBuffer.Encoder.NUMERIC,
29: ProtocolBuffer.Encoder.STARTGROUP,
30: ProtocolBuffer.Encoder.STRING,
31: ProtocolBuffer.Encoder.STRING,
32: ProtocolBuffer.Encoder.STRING,
33: ProtocolBuffer.Encoder.NUMERIC,
}, 33, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CompiledCursor'
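# Cursor: a lightweight cursor handle -- a required fixed64 cursor id plus an
# optional owning app id. A minimal round-trip sketch (Encode() is assumed to
# come from the ProtocolMessage base class, which is defined elsewhere; only
# MergeFromString is visible in this file):
#
#   c = Cursor()
#   c.set_cursor(12345)   # required: IsInitialized() reports it if unset
#   data = c.Encode()     # serialize to wire bytes
#   c2 = Cursor(data)     # __init__ parses via MergeFromString
#   assert c2.cursor() == 12345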
class Cursor(ProtocolBuffer.ProtocolMessage):
has_cursor_ = 0
cursor_ = 0
has_app_ = 0
app_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def cursor(self): return self.cursor_
def set_cursor(self, x):
self.has_cursor_ = 1
self.cursor_ = x
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0
self.cursor_ = 0
def has_cursor(self): return self.has_cursor_
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def MergeFrom(self, x):
assert x is not self
if (x.has_cursor()): self.set_cursor(x.cursor())
if (x.has_app()): self.set_app(x.app())
def Equals(self, x):
if x is self: return 1
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_cursor_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: cursor not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_app_): n += 1 + self.lengthString(len(self.app_))
return n + 9
def ByteSizePartial(self):
n = 0
if (self.has_cursor_):
n += 9
if (self.has_app_): n += 1 + self.lengthString(len(self.app_))
return n
def Clear(self):
self.clear_cursor()
self.clear_app()
def OutputUnchecked(self, out):
out.putVarInt32(9)
out.put64(self.cursor_)
if (self.has_app_):
out.putVarInt32(18)
out.putPrefixedString(self.app_)
def OutputPartial(self, out):
if (self.has_cursor_):
out.putVarInt32(9)
out.put64(self.cursor_)
if (self.has_app_):
out.putVarInt32(18)
out.putPrefixedString(self.app_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 9:
self.set_cursor(d.get64())
continue
if tt == 18:
self.set_app(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatFixed64(self.cursor_))
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcursor = 1
kapp = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cursor",
2: "app",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.DOUBLE,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Cursor'
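# Error: an empty message that exists to host the ErrorCode enum naming the
# datastore failure modes (BAD_REQUEST, CONCURRENT_TRANSACTION, TIMEOUT, ...).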
class Error(ProtocolBuffer.ProtocolMessage):
BAD_REQUEST = 1
CONCURRENT_TRANSACTION = 2
INTERNAL_ERROR = 3
NEED_INDEX = 4
TIMEOUT = 5
PERMISSION_DENIED = 6
BIGTABLE_ERROR = 7
COMMITTED_BUT_STILL_APPLYING = 8
CAPABILITY_DISABLED = 9
TRY_ALTERNATE_BACKEND = 10
SAFE_TIME_TOO_OLD = 11
_ErrorCode_NAMES = {
1: "BAD_REQUEST",
2: "CONCURRENT_TRANSACTION",
3: "INTERNAL_ERROR",
4: "NEED_INDEX",
5: "TIMEOUT",
6: "PERMISSION_DENIED",
7: "BIGTABLE_ERROR",
8: "COMMITTED_BUT_STILL_APPLYING",
9: "CAPABILITY_DISABLED",
10: "TRY_ALTERNATE_BACKEND",
11: "SAFE_TIME_TOO_OLD",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Error'
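# Cost_CommitCost: the group-encoded CommitCost inside Cost; counts the
# entity puts and deletes requested by a commit.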
class Cost_CommitCost(ProtocolBuffer.ProtocolMessage):
has_requested_entity_puts_ = 0
requested_entity_puts_ = 0
has_requested_entity_deletes_ = 0
requested_entity_deletes_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def requested_entity_puts(self): return self.requested_entity_puts_
def set_requested_entity_puts(self, x):
self.has_requested_entity_puts_ = 1
self.requested_entity_puts_ = x
def clear_requested_entity_puts(self):
if self.has_requested_entity_puts_:
self.has_requested_entity_puts_ = 0
self.requested_entity_puts_ = 0
def has_requested_entity_puts(self): return self.has_requested_entity_puts_
def requested_entity_deletes(self): return self.requested_entity_deletes_
def set_requested_entity_deletes(self, x):
self.has_requested_entity_deletes_ = 1
self.requested_entity_deletes_ = x
def clear_requested_entity_deletes(self):
if self.has_requested_entity_deletes_:
self.has_requested_entity_deletes_ = 0
self.requested_entity_deletes_ = 0
def has_requested_entity_deletes(self): return self.has_requested_entity_deletes_
def MergeFrom(self, x):
assert x is not self
if (x.has_requested_entity_puts()): self.set_requested_entity_puts(x.requested_entity_puts())
if (x.has_requested_entity_deletes()): self.set_requested_entity_deletes(x.requested_entity_deletes())
def Equals(self, x):
if x is self: return 1
if self.has_requested_entity_puts_ != x.has_requested_entity_puts_: return 0
if self.has_requested_entity_puts_ and self.requested_entity_puts_ != x.requested_entity_puts_: return 0
if self.has_requested_entity_deletes_ != x.has_requested_entity_deletes_: return 0
if self.has_requested_entity_deletes_ and self.requested_entity_deletes_ != x.requested_entity_deletes_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_requested_entity_puts_): n += 1 + self.lengthVarInt64(self.requested_entity_puts_)
if (self.has_requested_entity_deletes_): n += 1 + self.lengthVarInt64(self.requested_entity_deletes_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_requested_entity_puts_): n += 1 + self.lengthVarInt64(self.requested_entity_puts_)
if (self.has_requested_entity_deletes_): n += 1 + self.lengthVarInt64(self.requested_entity_deletes_)
return n
def Clear(self):
self.clear_requested_entity_puts()
self.clear_requested_entity_deletes()
def OutputUnchecked(self, out):
if (self.has_requested_entity_puts_):
out.putVarInt32(48)
out.putVarInt32(self.requested_entity_puts_)
if (self.has_requested_entity_deletes_):
out.putVarInt32(56)
out.putVarInt32(self.requested_entity_deletes_)
def OutputPartial(self, out):
if (self.has_requested_entity_puts_):
out.putVarInt32(48)
out.putVarInt32(self.requested_entity_puts_)
if (self.has_requested_entity_deletes_):
out.putVarInt32(56)
out.putVarInt32(self.requested_entity_deletes_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 44: break
if tt == 48:
self.set_requested_entity_puts(d.getVarInt32())
continue
if tt == 56:
self.set_requested_entity_deletes(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_requested_entity_puts_: res+=prefix+("requested_entity_puts: %s\n" % self.DebugFormatInt32(self.requested_entity_puts_))
if self.has_requested_entity_deletes_: res+=prefix+("requested_entity_deletes: %s\n" % self.DebugFormatInt32(self.requested_entity_deletes_))
return res
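# Cost: the write cost of an operation -- index/entity write counts and byte
# totals, an optional CommitCost group, an approximate storage delta, and the
# number of id-sequence updates.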
class Cost(ProtocolBuffer.ProtocolMessage):
has_index_writes_ = 0
index_writes_ = 0
has_index_write_bytes_ = 0
index_write_bytes_ = 0
has_entity_writes_ = 0
entity_writes_ = 0
has_entity_write_bytes_ = 0
entity_write_bytes_ = 0
has_commitcost_ = 0
commitcost_ = None
has_approximate_storage_delta_ = 0
approximate_storage_delta_ = 0
has_id_sequence_updates_ = 0
id_sequence_updates_ = 0
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def index_writes(self): return self.index_writes_
def set_index_writes(self, x):
self.has_index_writes_ = 1
self.index_writes_ = x
def clear_index_writes(self):
if self.has_index_writes_:
self.has_index_writes_ = 0
self.index_writes_ = 0
def has_index_writes(self): return self.has_index_writes_
def index_write_bytes(self): return self.index_write_bytes_
def set_index_write_bytes(self, x):
self.has_index_write_bytes_ = 1
self.index_write_bytes_ = x
def clear_index_write_bytes(self):
if self.has_index_write_bytes_:
self.has_index_write_bytes_ = 0
self.index_write_bytes_ = 0
def has_index_write_bytes(self): return self.has_index_write_bytes_
def entity_writes(self): return self.entity_writes_
def set_entity_writes(self, x):
self.has_entity_writes_ = 1
self.entity_writes_ = x
def clear_entity_writes(self):
if self.has_entity_writes_:
self.has_entity_writes_ = 0
self.entity_writes_ = 0
def has_entity_writes(self): return self.has_entity_writes_
def entity_write_bytes(self): return self.entity_write_bytes_
def set_entity_write_bytes(self, x):
self.has_entity_write_bytes_ = 1
self.entity_write_bytes_ = x
def clear_entity_write_bytes(self):
if self.has_entity_write_bytes_:
self.has_entity_write_bytes_ = 0
self.entity_write_bytes_ = 0
def has_entity_write_bytes(self): return self.has_entity_write_bytes_
def commitcost(self):
if self.commitcost_ is None:
self.lazy_init_lock_.acquire()
try:
if self.commitcost_ is None: self.commitcost_ = Cost_CommitCost()
finally:
self.lazy_init_lock_.release()
return self.commitcost_
def mutable_commitcost(self): self.has_commitcost_ = 1; return self.commitcost()
def clear_commitcost(self):
if self.has_commitcost_:
      self.has_commitcost_ = 0
if self.commitcost_ is not None: self.commitcost_.Clear()
def has_commitcost(self): return self.has_commitcost_
def approximate_storage_delta(self): return self.approximate_storage_delta_
def set_approximate_storage_delta(self, x):
self.has_approximate_storage_delta_ = 1
self.approximate_storage_delta_ = x
def clear_approximate_storage_delta(self):
if self.has_approximate_storage_delta_:
self.has_approximate_storage_delta_ = 0
self.approximate_storage_delta_ = 0
def has_approximate_storage_delta(self): return self.has_approximate_storage_delta_
def id_sequence_updates(self): return self.id_sequence_updates_
def set_id_sequence_updates(self, x):
self.has_id_sequence_updates_ = 1
self.id_sequence_updates_ = x
def clear_id_sequence_updates(self):
if self.has_id_sequence_updates_:
self.has_id_sequence_updates_ = 0
self.id_sequence_updates_ = 0
def has_id_sequence_updates(self): return self.has_id_sequence_updates_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_writes()): self.set_index_writes(x.index_writes())
if (x.has_index_write_bytes()): self.set_index_write_bytes(x.index_write_bytes())
if (x.has_entity_writes()): self.set_entity_writes(x.entity_writes())
if (x.has_entity_write_bytes()): self.set_entity_write_bytes(x.entity_write_bytes())
if (x.has_commitcost()): self.mutable_commitcost().MergeFrom(x.commitcost())
if (x.has_approximate_storage_delta()): self.set_approximate_storage_delta(x.approximate_storage_delta())
if (x.has_id_sequence_updates()): self.set_id_sequence_updates(x.id_sequence_updates())
def Equals(self, x):
if x is self: return 1
if self.has_index_writes_ != x.has_index_writes_: return 0
if self.has_index_writes_ and self.index_writes_ != x.index_writes_: return 0
if self.has_index_write_bytes_ != x.has_index_write_bytes_: return 0
if self.has_index_write_bytes_ and self.index_write_bytes_ != x.index_write_bytes_: return 0
if self.has_entity_writes_ != x.has_entity_writes_: return 0
if self.has_entity_writes_ and self.entity_writes_ != x.entity_writes_: return 0
if self.has_entity_write_bytes_ != x.has_entity_write_bytes_: return 0
if self.has_entity_write_bytes_ and self.entity_write_bytes_ != x.entity_write_bytes_: return 0
if self.has_commitcost_ != x.has_commitcost_: return 0
if self.has_commitcost_ and self.commitcost_ != x.commitcost_: return 0
if self.has_approximate_storage_delta_ != x.has_approximate_storage_delta_: return 0
if self.has_approximate_storage_delta_ and self.approximate_storage_delta_ != x.approximate_storage_delta_: return 0
if self.has_id_sequence_updates_ != x.has_id_sequence_updates_: return 0
if self.has_id_sequence_updates_ and self.id_sequence_updates_ != x.id_sequence_updates_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_commitcost_ and not self.commitcost_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_index_writes_): n += 1 + self.lengthVarInt64(self.index_writes_)
if (self.has_index_write_bytes_): n += 1 + self.lengthVarInt64(self.index_write_bytes_)
if (self.has_entity_writes_): n += 1 + self.lengthVarInt64(self.entity_writes_)
if (self.has_entity_write_bytes_): n += 1 + self.lengthVarInt64(self.entity_write_bytes_)
if (self.has_commitcost_): n += 2 + self.commitcost_.ByteSize()
if (self.has_approximate_storage_delta_): n += 1 + self.lengthVarInt64(self.approximate_storage_delta_)
if (self.has_id_sequence_updates_): n += 1 + self.lengthVarInt64(self.id_sequence_updates_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_index_writes_): n += 1 + self.lengthVarInt64(self.index_writes_)
if (self.has_index_write_bytes_): n += 1 + self.lengthVarInt64(self.index_write_bytes_)
if (self.has_entity_writes_): n += 1 + self.lengthVarInt64(self.entity_writes_)
if (self.has_entity_write_bytes_): n += 1 + self.lengthVarInt64(self.entity_write_bytes_)
if (self.has_commitcost_): n += 2 + self.commitcost_.ByteSizePartial()
if (self.has_approximate_storage_delta_): n += 1 + self.lengthVarInt64(self.approximate_storage_delta_)
if (self.has_id_sequence_updates_): n += 1 + self.lengthVarInt64(self.id_sequence_updates_)
return n
def Clear(self):
self.clear_index_writes()
self.clear_index_write_bytes()
self.clear_entity_writes()
self.clear_entity_write_bytes()
self.clear_commitcost()
self.clear_approximate_storage_delta()
self.clear_id_sequence_updates()
def OutputUnchecked(self, out):
if (self.has_index_writes_):
out.putVarInt32(8)
out.putVarInt32(self.index_writes_)
if (self.has_index_write_bytes_):
out.putVarInt32(16)
out.putVarInt32(self.index_write_bytes_)
if (self.has_entity_writes_):
out.putVarInt32(24)
out.putVarInt32(self.entity_writes_)
if (self.has_entity_write_bytes_):
out.putVarInt32(32)
out.putVarInt32(self.entity_write_bytes_)
if (self.has_commitcost_):
out.putVarInt32(43)
self.commitcost_.OutputUnchecked(out)
out.putVarInt32(44)
if (self.has_approximate_storage_delta_):
out.putVarInt32(64)
out.putVarInt32(self.approximate_storage_delta_)
if (self.has_id_sequence_updates_):
out.putVarInt32(72)
out.putVarInt32(self.id_sequence_updates_)
def OutputPartial(self, out):
if (self.has_index_writes_):
out.putVarInt32(8)
out.putVarInt32(self.index_writes_)
if (self.has_index_write_bytes_):
out.putVarInt32(16)
out.putVarInt32(self.index_write_bytes_)
if (self.has_entity_writes_):
out.putVarInt32(24)
out.putVarInt32(self.entity_writes_)
if (self.has_entity_write_bytes_):
out.putVarInt32(32)
out.putVarInt32(self.entity_write_bytes_)
if (self.has_commitcost_):
out.putVarInt32(43)
self.commitcost_.OutputPartial(out)
out.putVarInt32(44)
if (self.has_approximate_storage_delta_):
out.putVarInt32(64)
out.putVarInt32(self.approximate_storage_delta_)
if (self.has_id_sequence_updates_):
out.putVarInt32(72)
out.putVarInt32(self.id_sequence_updates_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_index_writes(d.getVarInt32())
continue
if tt == 16:
self.set_index_write_bytes(d.getVarInt32())
continue
if tt == 24:
self.set_entity_writes(d.getVarInt32())
continue
if tt == 32:
self.set_entity_write_bytes(d.getVarInt32())
continue
if tt == 43:
self.mutable_commitcost().TryMerge(d)
continue
if tt == 64:
self.set_approximate_storage_delta(d.getVarInt32())
continue
if tt == 72:
self.set_id_sequence_updates(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_writes_: res+=prefix+("index_writes: %s\n" % self.DebugFormatInt32(self.index_writes_))
if self.has_index_write_bytes_: res+=prefix+("index_write_bytes: %s\n" % self.DebugFormatInt32(self.index_write_bytes_))
if self.has_entity_writes_: res+=prefix+("entity_writes: %s\n" % self.DebugFormatInt32(self.entity_writes_))
if self.has_entity_write_bytes_: res+=prefix+("entity_write_bytes: %s\n" % self.DebugFormatInt32(self.entity_write_bytes_))
if self.has_commitcost_:
res+=prefix+"CommitCost {\n"
res+=self.commitcost_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_approximate_storage_delta_: res+=prefix+("approximate_storage_delta: %s\n" % self.DebugFormatInt32(self.approximate_storage_delta_))
if self.has_id_sequence_updates_: res+=prefix+("id_sequence_updates: %s\n" % self.DebugFormatInt32(self.id_sequence_updates_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_writes = 1
kindex_write_bytes = 2
kentity_writes = 3
kentity_write_bytes = 4
kCommitCostGroup = 5
kCommitCostrequested_entity_puts = 6
kCommitCostrequested_entity_deletes = 7
kapproximate_storage_delta = 8
kid_sequence_updates = 9
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_writes",
2: "index_write_bytes",
3: "entity_writes",
4: "entity_write_bytes",
5: "CommitCost",
6: "requested_entity_puts",
7: "requested_entity_deletes",
8: "approximate_storage_delta",
9: "id_sequence_updates",
}, 9)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STARTGROUP,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.NUMERIC,
}, 9, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Cost'
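# GetRequest: a batch entity lookup by key, optionally within a transaction.
# failover_ms and strong tune read consistency, and allow_deferred permits
# the backend to defer some keys (see GetResponse.deferred).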
class GetRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_transaction_ = 0
transaction_ = None
has_failover_ms_ = 0
failover_ms_ = 0
has_strong_ = 0
strong_ = 0
has_allow_deferred_ = 0
allow_deferred_ = 0
def __init__(self, contents=None):
self.key_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
      self.has_header_ = 0
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def key_size(self): return len(self.key_)
def key_list(self): return self.key_
def key(self, i):
return self.key_[i]
def mutable_key(self, i):
return self.key_[i]
def add_key(self):
x = Reference()
self.key_.append(x)
return x
def clear_key(self):
self.key_ = []
def transaction(self):
if self.transaction_ is None:
self.lazy_init_lock_.acquire()
try:
if self.transaction_ is None: self.transaction_ = Transaction()
finally:
self.lazy_init_lock_.release()
return self.transaction_
def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
def clear_transaction(self):
if self.has_transaction_:
      self.has_transaction_ = 0
if self.transaction_ is not None: self.transaction_.Clear()
def has_transaction(self): return self.has_transaction_
def failover_ms(self): return self.failover_ms_
def set_failover_ms(self, x):
self.has_failover_ms_ = 1
self.failover_ms_ = x
def clear_failover_ms(self):
if self.has_failover_ms_:
self.has_failover_ms_ = 0
self.failover_ms_ = 0
def has_failover_ms(self): return self.has_failover_ms_
def strong(self): return self.strong_
def set_strong(self, x):
self.has_strong_ = 1
self.strong_ = x
def clear_strong(self):
if self.has_strong_:
self.has_strong_ = 0
self.strong_ = 0
def has_strong(self): return self.has_strong_
def allow_deferred(self): return self.allow_deferred_
def set_allow_deferred(self, x):
self.has_allow_deferred_ = 1
self.allow_deferred_ = x
def clear_allow_deferred(self):
if self.has_allow_deferred_:
self.has_allow_deferred_ = 0
self.allow_deferred_ = 0
def has_allow_deferred(self): return self.has_allow_deferred_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
if (x.has_failover_ms()): self.set_failover_ms(x.failover_ms())
if (x.has_strong()): self.set_strong(x.strong())
if (x.has_allow_deferred()): self.set_allow_deferred(x.allow_deferred())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if len(self.key_) != len(x.key_): return 0
for e1, e2 in zip(self.key_, x.key_):
if e1 != e2: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if self.has_failover_ms_ != x.has_failover_ms_: return 0
if self.has_failover_ms_ and self.failover_ms_ != x.failover_ms_: return 0
if self.has_strong_ != x.has_strong_: return 0
if self.has_strong_ and self.strong_ != x.strong_: return 0
if self.has_allow_deferred_ != x.has_allow_deferred_: return 0
if self.has_allow_deferred_ and self.allow_deferred_ != x.allow_deferred_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
for p in self.key_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
if (self.has_failover_ms_): n += 1 + self.lengthVarInt64(self.failover_ms_)
if (self.has_strong_): n += 2
if (self.has_allow_deferred_): n += 2
return n
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
if (self.has_failover_ms_): n += 1 + self.lengthVarInt64(self.failover_ms_)
if (self.has_strong_): n += 2
if (self.has_allow_deferred_): n += 2
return n
def Clear(self):
self.clear_header()
self.clear_key()
self.clear_transaction()
self.clear_failover_ms()
self.clear_strong()
self.clear_allow_deferred()
def OutputUnchecked(self, out):
for i in xrange(len(self.key_)):
out.putVarInt32(10)
out.putVarInt32(self.key_[i].ByteSize())
self.key_[i].OutputUnchecked(out)
if (self.has_transaction_):
out.putVarInt32(18)
out.putVarInt32(self.transaction_.ByteSize())
self.transaction_.OutputUnchecked(out)
if (self.has_failover_ms_):
out.putVarInt32(24)
out.putVarInt64(self.failover_ms_)
if (self.has_strong_):
out.putVarInt32(32)
out.putBoolean(self.strong_)
if (self.has_allow_deferred_):
out.putVarInt32(40)
out.putBoolean(self.allow_deferred_)
if (self.has_header_):
out.putVarInt32(50)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.key_)):
out.putVarInt32(10)
out.putVarInt32(self.key_[i].ByteSizePartial())
self.key_[i].OutputPartial(out)
if (self.has_transaction_):
out.putVarInt32(18)
out.putVarInt32(self.transaction_.ByteSizePartial())
self.transaction_.OutputPartial(out)
if (self.has_failover_ms_):
out.putVarInt32(24)
out.putVarInt64(self.failover_ms_)
if (self.has_strong_):
out.putVarInt32(32)
out.putBoolean(self.strong_)
if (self.has_allow_deferred_):
out.putVarInt32(40)
out.putBoolean(self.allow_deferred_)
if (self.has_header_):
out.putVarInt32(50)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_key().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_transaction().TryMerge(tmp)
continue
if tt == 24:
self.set_failover_ms(d.getVarInt64())
continue
if tt == 32:
self.set_strong(d.getBoolean())
continue
if tt == 40:
self.set_allow_deferred(d.getBoolean())
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("key%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_transaction_:
res+=prefix+"transaction <\n"
res+=self.transaction_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_failover_ms_: res+=prefix+("failover_ms: %s\n" % self.DebugFormatInt64(self.failover_ms_))
if self.has_strong_: res+=prefix+("strong: %s\n" % self.DebugFormatBool(self.strong_))
if self.has_allow_deferred_: res+=prefix+("allow_deferred: %s\n" % self.DebugFormatBool(self.allow_deferred_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 6
kkey = 1
ktransaction = 2
kfailover_ms = 3
kstrong = 4
kallow_deferred = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "transaction",
3: "failover_ms",
4: "strong",
5: "allow_deferred",
6: "header",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.STRING,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.GetRequest'
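# GetResponse_Entity: one group-encoded result slot holding the fetched
# entity and/or its key, plus an optional version stamp.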
class GetResponse_Entity(ProtocolBuffer.ProtocolMessage):
has_entity_ = 0
entity_ = None
has_key_ = 0
key_ = None
has_version_ = 0
version_ = 0
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def entity(self):
if self.entity_ is None:
self.lazy_init_lock_.acquire()
try:
if self.entity_ is None: self.entity_ = EntityProto()
finally:
self.lazy_init_lock_.release()
return self.entity_
def mutable_entity(self): self.has_entity_ = 1; return self.entity()
def clear_entity(self):
if self.has_entity_:
      self.has_entity_ = 0
if self.entity_ is not None: self.entity_.Clear()
def has_entity(self): return self.has_entity_
def key(self):
if self.key_ is None:
self.lazy_init_lock_.acquire()
try:
if self.key_ is None: self.key_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key()
def clear_key(self):
if self.has_key_:
      self.has_key_ = 0
if self.key_ is not None: self.key_.Clear()
def has_key(self): return self.has_key_
def version(self): return self.version_
def set_version(self, x):
self.has_version_ = 1
self.version_ = x
def clear_version(self):
if self.has_version_:
self.has_version_ = 0
self.version_ = 0
def has_version(self): return self.has_version_
def MergeFrom(self, x):
assert x is not self
if (x.has_entity()): self.mutable_entity().MergeFrom(x.entity())
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_version()): self.set_version(x.version())
def Equals(self, x):
if x is self: return 1
if self.has_entity_ != x.has_entity_: return 0
if self.has_entity_ and self.entity_ != x.entity_: return 0
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_version_ != x.has_version_: return 0
if self.has_version_ and self.version_ != x.version_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_entity_ and not self.entity_.IsInitialized(debug_strs)): initialized = 0
if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_entity_): n += 1 + self.lengthString(self.entity_.ByteSize())
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSize())
if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_entity_): n += 1 + self.lengthString(self.entity_.ByteSizePartial())
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSizePartial())
if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
return n
def Clear(self):
self.clear_entity()
self.clear_key()
self.clear_version()
def OutputUnchecked(self, out):
if (self.has_entity_):
out.putVarInt32(18)
out.putVarInt32(self.entity_.ByteSize())
self.entity_.OutputUnchecked(out)
if (self.has_version_):
out.putVarInt32(24)
out.putVarInt64(self.version_)
if (self.has_key_):
out.putVarInt32(34)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_entity_):
out.putVarInt32(18)
out.putVarInt32(self.entity_.ByteSizePartial())
self.entity_.OutputPartial(out)
if (self.has_version_):
out.putVarInt32(24)
out.putVarInt64(self.version_)
if (self.has_key_):
out.putVarInt32(34)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity().TryMerge(tmp)
continue
if tt == 24:
self.set_version(d.getVarInt64())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_entity_:
res+=prefix+"entity <\n"
res+=self.entity_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt64(self.version_))
return res
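# GetResponse: results of a GetRequest -- one Entity group per resolved key,
# any keys whose lookup was deferred, and in_order (default true) indicating
# whether results follow the request order.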
class GetResponse(ProtocolBuffer.ProtocolMessage):
has_in_order_ = 0
in_order_ = 1
def __init__(self, contents=None):
self.entity_ = []
self.deferred_ = []
if contents is not None: self.MergeFromString(contents)
def entity_size(self): return len(self.entity_)
def entity_list(self): return self.entity_
def entity(self, i):
return self.entity_[i]
def mutable_entity(self, i):
return self.entity_[i]
def add_entity(self):
x = GetResponse_Entity()
self.entity_.append(x)
return x
def clear_entity(self):
self.entity_ = []
def deferred_size(self): return len(self.deferred_)
def deferred_list(self): return self.deferred_
def deferred(self, i):
return self.deferred_[i]
def mutable_deferred(self, i):
return self.deferred_[i]
def add_deferred(self):
x = Reference()
self.deferred_.append(x)
return x
def clear_deferred(self):
self.deferred_ = []
def in_order(self): return self.in_order_
def set_in_order(self, x):
self.has_in_order_ = 1
self.in_order_ = x
def clear_in_order(self):
if self.has_in_order_:
self.has_in_order_ = 0
self.in_order_ = 1
def has_in_order(self): return self.has_in_order_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.entity_size()): self.add_entity().CopyFrom(x.entity(i))
for i in xrange(x.deferred_size()): self.add_deferred().CopyFrom(x.deferred(i))
if (x.has_in_order()): self.set_in_order(x.in_order())
def Equals(self, x):
if x is self: return 1
if len(self.entity_) != len(x.entity_): return 0
for e1, e2 in zip(self.entity_, x.entity_):
if e1 != e2: return 0
if len(self.deferred_) != len(x.deferred_): return 0
for e1, e2 in zip(self.deferred_, x.deferred_):
if e1 != e2: return 0
if self.has_in_order_ != x.has_in_order_: return 0
if self.has_in_order_ and self.in_order_ != x.in_order_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.entity_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.deferred_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.entity_)
for i in xrange(len(self.entity_)): n += self.entity_[i].ByteSize()
n += 1 * len(self.deferred_)
for i in xrange(len(self.deferred_)): n += self.lengthString(self.deferred_[i].ByteSize())
if (self.has_in_order_): n += 2
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.entity_)
for i in xrange(len(self.entity_)): n += self.entity_[i].ByteSizePartial()
n += 1 * len(self.deferred_)
for i in xrange(len(self.deferred_)): n += self.lengthString(self.deferred_[i].ByteSizePartial())
if (self.has_in_order_): n += 2
return n
def Clear(self):
self.clear_entity()
self.clear_deferred()
self.clear_in_order()
def OutputUnchecked(self, out):
for i in xrange(len(self.entity_)):
out.putVarInt32(11)
self.entity_[i].OutputUnchecked(out)
out.putVarInt32(12)
for i in xrange(len(self.deferred_)):
out.putVarInt32(42)
out.putVarInt32(self.deferred_[i].ByteSize())
self.deferred_[i].OutputUnchecked(out)
if (self.has_in_order_):
out.putVarInt32(48)
out.putBoolean(self.in_order_)
def OutputPartial(self, out):
for i in xrange(len(self.entity_)):
out.putVarInt32(11)
self.entity_[i].OutputPartial(out)
out.putVarInt32(12)
for i in xrange(len(self.deferred_)):
out.putVarInt32(42)
out.putVarInt32(self.deferred_[i].ByteSizePartial())
self.deferred_[i].OutputPartial(out)
if (self.has_in_order_):
out.putVarInt32(48)
out.putBoolean(self.in_order_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_entity().TryMerge(d)
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_deferred().TryMerge(tmp)
continue
if tt == 48:
self.set_in_order(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.entity_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Entity%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
cnt=0
for e in self.deferred_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("deferred%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_in_order_: res+=prefix+("in_order: %s\n" % self.DebugFormatBool(self.in_order_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kEntityGroup = 1
kEntityentity = 2
kEntitykey = 4
kEntityversion = 3
kdeferred = 5
kin_order = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Entity",
2: "entity",
3: "version",
4: "key",
5: "deferred",
6: "in_order",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.GetResponse'
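# PutRequest: a batch entity write, optionally transactional, carrying the
# composite indexes to maintain, trusted/force/mark_changes flags, snapshot
# data, and an AutoIdPolicy (CURRENT or SEQUENTIAL) for id allocation.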
class PutRequest(ProtocolBuffer.ProtocolMessage):
CURRENT = 0
SEQUENTIAL = 1
_AutoIdPolicy_NAMES = {
0: "CURRENT",
1: "SEQUENTIAL",
}
def AutoIdPolicy_Name(cls, x): return cls._AutoIdPolicy_NAMES.get(x, "")
AutoIdPolicy_Name = classmethod(AutoIdPolicy_Name)
has_header_ = 0
header_ = None
has_transaction_ = 0
transaction_ = None
has_trusted_ = 0
trusted_ = 0
has_force_ = 0
force_ = 0
has_mark_changes_ = 0
mark_changes_ = 0
has_auto_id_policy_ = 0
auto_id_policy_ = 0
def __init__(self, contents=None):
self.entity_ = []
self.composite_index_ = []
self.snapshot_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
      self.has_header_ = 0
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def entity_size(self): return len(self.entity_)
def entity_list(self): return self.entity_
def entity(self, i):
return self.entity_[i]
def mutable_entity(self, i):
return self.entity_[i]
def add_entity(self):
x = EntityProto()
self.entity_.append(x)
return x
def clear_entity(self):
self.entity_ = []
def transaction(self):
if self.transaction_ is None:
self.lazy_init_lock_.acquire()
try:
if self.transaction_ is None: self.transaction_ = Transaction()
finally:
self.lazy_init_lock_.release()
return self.transaction_
def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
def clear_transaction(self):
if self.has_transaction_:
      self.has_transaction_ = 0
if self.transaction_ is not None: self.transaction_.Clear()
def has_transaction(self): return self.has_transaction_
def composite_index_size(self): return len(self.composite_index_)
def composite_index_list(self): return self.composite_index_
def composite_index(self, i):
return self.composite_index_[i]
def mutable_composite_index(self, i):
return self.composite_index_[i]
def add_composite_index(self):
x = CompositeIndex()
self.composite_index_.append(x)
return x
def clear_composite_index(self):
self.composite_index_ = []
def trusted(self): return self.trusted_
def set_trusted(self, x):
self.has_trusted_ = 1
self.trusted_ = x
def clear_trusted(self):
if self.has_trusted_:
self.has_trusted_ = 0
self.trusted_ = 0
def has_trusted(self): return self.has_trusted_
def force(self): return self.force_
def set_force(self, x):
self.has_force_ = 1
self.force_ = x
def clear_force(self):
if self.has_force_:
self.has_force_ = 0
self.force_ = 0
def has_force(self): return self.has_force_
def mark_changes(self): return self.mark_changes_
def set_mark_changes(self, x):
self.has_mark_changes_ = 1
self.mark_changes_ = x
def clear_mark_changes(self):
if self.has_mark_changes_:
self.has_mark_changes_ = 0
self.mark_changes_ = 0
def has_mark_changes(self): return self.has_mark_changes_
def snapshot_size(self): return len(self.snapshot_)
def snapshot_list(self): return self.snapshot_
def snapshot(self, i):
return self.snapshot_[i]
def mutable_snapshot(self, i):
return self.snapshot_[i]
def add_snapshot(self):
x = Snapshot()
self.snapshot_.append(x)
return x
def clear_snapshot(self):
self.snapshot_ = []
def auto_id_policy(self): return self.auto_id_policy_
def set_auto_id_policy(self, x):
self.has_auto_id_policy_ = 1
self.auto_id_policy_ = x
def clear_auto_id_policy(self):
if self.has_auto_id_policy_:
self.has_auto_id_policy_ = 0
self.auto_id_policy_ = 0
def has_auto_id_policy(self): return self.has_auto_id_policy_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
for i in xrange(x.entity_size()): self.add_entity().CopyFrom(x.entity(i))
if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
if (x.has_trusted()): self.set_trusted(x.trusted())
if (x.has_force()): self.set_force(x.force())
if (x.has_mark_changes()): self.set_mark_changes(x.mark_changes())
for i in xrange(x.snapshot_size()): self.add_snapshot().CopyFrom(x.snapshot(i))
if (x.has_auto_id_policy()): self.set_auto_id_policy(x.auto_id_policy())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if len(self.entity_) != len(x.entity_): return 0
for e1, e2 in zip(self.entity_, x.entity_):
if e1 != e2: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if len(self.composite_index_) != len(x.composite_index_): return 0
for e1, e2 in zip(self.composite_index_, x.composite_index_):
if e1 != e2: return 0
if self.has_trusted_ != x.has_trusted_: return 0
if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
if self.has_force_ != x.has_force_: return 0
if self.has_force_ and self.force_ != x.force_: return 0
if self.has_mark_changes_ != x.has_mark_changes_: return 0
if self.has_mark_changes_ and self.mark_changes_ != x.mark_changes_: return 0
if len(self.snapshot_) != len(x.snapshot_): return 0
for e1, e2 in zip(self.snapshot_, x.snapshot_):
if e1 != e2: return 0
if self.has_auto_id_policy_ != x.has_auto_id_policy_: return 0
if self.has_auto_id_policy_ and self.auto_id_policy_ != x.auto_id_policy_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
for p in self.entity_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
for p in self.composite_index_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.snapshot_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += 1 * len(self.entity_)
for i in xrange(len(self.entity_)): n += self.lengthString(self.entity_[i].ByteSize())
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
n += 1 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
if (self.has_trusted_): n += 2
if (self.has_force_): n += 2
if (self.has_mark_changes_): n += 2
n += 1 * len(self.snapshot_)
for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSize())
if (self.has_auto_id_policy_): n += 1 + self.lengthVarInt64(self.auto_id_policy_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
n += 1 * len(self.entity_)
for i in xrange(len(self.entity_)): n += self.lengthString(self.entity_[i].ByteSizePartial())
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
n += 1 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
if (self.has_trusted_): n += 2
if (self.has_force_): n += 2
if (self.has_mark_changes_): n += 2
n += 1 * len(self.snapshot_)
for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSizePartial())
if (self.has_auto_id_policy_): n += 1 + self.lengthVarInt64(self.auto_id_policy_)
return n
def Clear(self):
self.clear_header()
self.clear_entity()
self.clear_transaction()
self.clear_composite_index()
self.clear_trusted()
self.clear_force()
self.clear_mark_changes()
self.clear_snapshot()
self.clear_auto_id_policy()
def OutputUnchecked(self, out):
for i in xrange(len(self.entity_)):
out.putVarInt32(10)
out.putVarInt32(self.entity_[i].ByteSize())
self.entity_[i].OutputUnchecked(out)
if (self.has_transaction_):
out.putVarInt32(18)
out.putVarInt32(self.transaction_.ByteSize())
self.transaction_.OutputUnchecked(out)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(26)
out.putVarInt32(self.composite_index_[i].ByteSize())
self.composite_index_[i].OutputUnchecked(out)
if (self.has_trusted_):
out.putVarInt32(32)
out.putBoolean(self.trusted_)
if (self.has_force_):
out.putVarInt32(56)
out.putBoolean(self.force_)
if (self.has_mark_changes_):
out.putVarInt32(64)
out.putBoolean(self.mark_changes_)
for i in xrange(len(self.snapshot_)):
out.putVarInt32(74)
out.putVarInt32(self.snapshot_[i].ByteSize())
self.snapshot_[i].OutputUnchecked(out)
if (self.has_auto_id_policy_):
out.putVarInt32(80)
out.putVarInt32(self.auto_id_policy_)
if (self.has_header_):
out.putVarInt32(90)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.entity_)):
out.putVarInt32(10)
out.putVarInt32(self.entity_[i].ByteSizePartial())
self.entity_[i].OutputPartial(out)
if (self.has_transaction_):
out.putVarInt32(18)
out.putVarInt32(self.transaction_.ByteSizePartial())
self.transaction_.OutputPartial(out)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(26)
out.putVarInt32(self.composite_index_[i].ByteSizePartial())
self.composite_index_[i].OutputPartial(out)
if (self.has_trusted_):
out.putVarInt32(32)
out.putBoolean(self.trusted_)
if (self.has_force_):
out.putVarInt32(56)
out.putBoolean(self.force_)
if (self.has_mark_changes_):
out.putVarInt32(64)
out.putBoolean(self.mark_changes_)
for i in xrange(len(self.snapshot_)):
out.putVarInt32(74)
out.putVarInt32(self.snapshot_[i].ByteSizePartial())
self.snapshot_[i].OutputPartial(out)
if (self.has_auto_id_policy_):
out.putVarInt32(80)
out.putVarInt32(self.auto_id_policy_)
if (self.has_header_):
out.putVarInt32(90)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
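      # tt packs the field number in its high bits and the wire type in the low 3.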
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_entity().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_transaction().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_composite_index().TryMerge(tmp)
continue
if tt == 32:
self.set_trusted(d.getBoolean())
continue
if tt == 56:
self.set_force(d.getBoolean())
continue
if tt == 64:
self.set_mark_changes(d.getBoolean())
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_snapshot().TryMerge(tmp)
continue
if tt == 80:
self.set_auto_id_policy(d.getVarInt32())
continue
if tt == 90:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
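      # Any other tag is an unknown field; its payload is skipped so data
      # written by a newer schema still decodes here.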
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.entity_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("entity%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_transaction_:
res+=prefix+"transaction <\n"
res+=self.transaction_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.composite_index_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("composite_index%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
if self.has_mark_changes_: res+=prefix+("mark_changes: %s\n" % self.DebugFormatBool(self.mark_changes_))
cnt=0
for e in self.snapshot_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("snapshot%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_auto_id_policy_: res+=prefix+("auto_id_policy: %s\n" % self.DebugFormatInt32(self.auto_id_policy_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 11
kentity = 1
ktransaction = 2
kcomposite_index = 3
ktrusted = 4
kforce = 7
kmark_changes = 8
ksnapshot = 9
kauto_id_policy = 10
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "entity",
2: "transaction",
3: "composite_index",
4: "trusted",
7: "force",
8: "mark_changes",
9: "snapshot",
10: "auto_id_policy",
11: "header",
}, 11)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.STRING,
}, 11, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.PutRequest'
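# Hand-written note (not generator output): every tag byte hard-coded in the
# Output* methods above is (field_number << 3) | wire_type, where wire type 2
# marks length-delimited strings/submessages and wire type 0 marks varints.
def _wire_tag_sketch(field_number, wire_type):
  # e.g. _wire_tag_sketch(1, 2) == 10 (repeated entity submessage),
  #      _wire_tag_sketch(10, 0) == 80 (auto_id_policy varint),
  #      _wire_tag_sketch(11, 2) == 90 (header submessage).
  return (field_number << 3) | wire_type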
class PutResponse(ProtocolBuffer.ProtocolMessage):
has_cost_ = 0
cost_ = None
def __init__(self, contents=None):
self.key_ = []
self.version_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def key_size(self): return len(self.key_)
def key_list(self): return self.key_
def key(self, i):
return self.key_[i]
def mutable_key(self, i):
return self.key_[i]
def add_key(self):
x = Reference()
self.key_.append(x)
return x
def clear_key(self):
self.key_ = []
def cost(self):
if self.cost_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cost_ is None: self.cost_ = Cost()
finally:
self.lazy_init_lock_.release()
return self.cost_
def mutable_cost(self): self.has_cost_ = 1; return self.cost()
def clear_cost(self):
if self.has_cost_:
self.has_cost_ = 0;
if self.cost_ is not None: self.cost_.Clear()
def has_cost(self): return self.has_cost_
def version_size(self): return len(self.version_)
def version_list(self): return self.version_
def version(self, i):
return self.version_[i]
def set_version(self, i, x):
self.version_[i] = x
def add_version(self, x):
self.version_.append(x)
def clear_version(self):
self.version_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
for i in xrange(x.version_size()): self.add_version(x.version(i))
def Equals(self, x):
if x is self: return 1
if len(self.key_) != len(x.key_): return 0
for e1, e2 in zip(self.key_, x.key_):
if e1 != e2: return 0
if self.has_cost_ != x.has_cost_: return 0
if self.has_cost_ and self.cost_ != x.cost_: return 0
if len(self.version_) != len(x.version_): return 0
for e1, e2 in zip(self.version_, x.version_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.key_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
n += 1 * len(self.version_)
for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
n += 1 * len(self.version_)
for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
return n
def Clear(self):
self.clear_key()
self.clear_cost()
self.clear_version()
def OutputUnchecked(self, out):
for i in xrange(len(self.key_)):
out.putVarInt32(10)
out.putVarInt32(self.key_[i].ByteSize())
self.key_[i].OutputUnchecked(out)
if (self.has_cost_):
out.putVarInt32(18)
out.putVarInt32(self.cost_.ByteSize())
self.cost_.OutputUnchecked(out)
for i in xrange(len(self.version_)):
out.putVarInt32(24)
out.putVarInt64(self.version_[i])
def OutputPartial(self, out):
for i in xrange(len(self.key_)):
out.putVarInt32(10)
out.putVarInt32(self.key_[i].ByteSizePartial())
self.key_[i].OutputPartial(out)
if (self.has_cost_):
out.putVarInt32(18)
out.putVarInt32(self.cost_.ByteSizePartial())
self.cost_.OutputPartial(out)
for i in xrange(len(self.version_)):
out.putVarInt32(24)
out.putVarInt64(self.version_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_key().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cost().TryMerge(tmp)
continue
if tt == 24:
self.add_version(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("key%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_cost_:
res+=prefix+"cost <\n"
res+=self.cost_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 1
kcost = 2
kversion = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "cost",
3: "version",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.PutResponse'
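# Usage sketch (hand-written; exercises only the accessors generated above):
def _put_response_sketch():
  resp = PutResponse()
  resp.add_version(7)       # repeated int64, emitted with tag byte 24
  copy = PutResponse()
  copy.MergeFrom(resp)      # copies every version entry
  assert copy.Equals(resp)  # only scalar lists are set, so comparison is exact
  return resp.ByteSize()    # 1 tag byte + lengthVarInt64(7)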
class TouchRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_force_ = 0
force_ = 0
def __init__(self, contents=None):
self.key_ = []
self.composite_index_ = []
self.snapshot_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def key_size(self): return len(self.key_)
def key_list(self): return self.key_
def key(self, i):
return self.key_[i]
def mutable_key(self, i):
return self.key_[i]
def add_key(self):
x = Reference()
self.key_.append(x)
return x
def clear_key(self):
self.key_ = []
def composite_index_size(self): return len(self.composite_index_)
def composite_index_list(self): return self.composite_index_
def composite_index(self, i):
return self.composite_index_[i]
def mutable_composite_index(self, i):
return self.composite_index_[i]
def add_composite_index(self):
x = CompositeIndex()
self.composite_index_.append(x)
return x
def clear_composite_index(self):
self.composite_index_ = []
def force(self): return self.force_
def set_force(self, x):
self.has_force_ = 1
self.force_ = x
def clear_force(self):
if self.has_force_:
self.has_force_ = 0
self.force_ = 0
def has_force(self): return self.has_force_
def snapshot_size(self): return len(self.snapshot_)
def snapshot_list(self): return self.snapshot_
def snapshot(self, i):
return self.snapshot_[i]
def mutable_snapshot(self, i):
return self.snapshot_[i]
def add_snapshot(self):
x = Snapshot()
self.snapshot_.append(x)
return x
def clear_snapshot(self):
self.snapshot_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
if (x.has_force()): self.set_force(x.force())
for i in xrange(x.snapshot_size()): self.add_snapshot().CopyFrom(x.snapshot(i))
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if len(self.key_) != len(x.key_): return 0
for e1, e2 in zip(self.key_, x.key_):
if e1 != e2: return 0
if len(self.composite_index_) != len(x.composite_index_): return 0
for e1, e2 in zip(self.composite_index_, x.composite_index_):
if e1 != e2: return 0
if self.has_force_ != x.has_force_: return 0
if self.has_force_ and self.force_ != x.force_: return 0
if len(self.snapshot_) != len(x.snapshot_): return 0
for e1, e2 in zip(self.snapshot_, x.snapshot_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
for p in self.key_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.composite_index_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.snapshot_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
n += 1 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
if (self.has_force_): n += 2
n += 1 * len(self.snapshot_)
for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
n += 1 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
if (self.has_force_): n += 2
n += 1 * len(self.snapshot_)
for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_header()
self.clear_key()
self.clear_composite_index()
self.clear_force()
self.clear_snapshot()
def OutputUnchecked(self, out):
for i in xrange(len(self.key_)):
out.putVarInt32(10)
out.putVarInt32(self.key_[i].ByteSize())
self.key_[i].OutputUnchecked(out)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(18)
out.putVarInt32(self.composite_index_[i].ByteSize())
self.composite_index_[i].OutputUnchecked(out)
if (self.has_force_):
out.putVarInt32(24)
out.putBoolean(self.force_)
for i in xrange(len(self.snapshot_)):
out.putVarInt32(74)
out.putVarInt32(self.snapshot_[i].ByteSize())
self.snapshot_[i].OutputUnchecked(out)
if (self.has_header_):
out.putVarInt32(82)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.key_)):
out.putVarInt32(10)
out.putVarInt32(self.key_[i].ByteSizePartial())
self.key_[i].OutputPartial(out)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(18)
out.putVarInt32(self.composite_index_[i].ByteSizePartial())
self.composite_index_[i].OutputPartial(out)
if (self.has_force_):
out.putVarInt32(24)
out.putBoolean(self.force_)
for i in xrange(len(self.snapshot_)):
out.putVarInt32(74)
out.putVarInt32(self.snapshot_[i].ByteSizePartial())
self.snapshot_[i].OutputPartial(out)
if (self.has_header_):
out.putVarInt32(82)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_key().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_composite_index().TryMerge(tmp)
continue
if tt == 24:
self.set_force(d.getBoolean())
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_snapshot().TryMerge(tmp)
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("key%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.composite_index_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("composite_index%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
cnt=0
for e in self.snapshot_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("snapshot%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 10
kkey = 1
kcomposite_index = 2
kforce = 3
ksnapshot = 9
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "composite_index",
3: "force",
9: "snapshot",
10: "header",
}, 10)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
}, 10, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.TouchRequest'
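# Hand-written note: ksnapshot is tag 9 in PutRequest, TouchRequest and
# DeleteRequest alike, so the shared snapshot field keeps one wire layout
# across all three mutation requests.  Quick sketch of the optional fields:
def _touch_request_sketch():
  req = TouchRequest()
  req.set_force(1)
  assert req.has_force() and req.IsInitialized()   # no required fields here
  req.clear_force()
  assert not req.has_force() and req.force() == 0  # cleared back to default
  return req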
class TouchResponse(ProtocolBuffer.ProtocolMessage):
has_cost_ = 0
cost_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def cost(self):
if self.cost_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cost_ is None: self.cost_ = Cost()
finally:
self.lazy_init_lock_.release()
return self.cost_
def mutable_cost(self): self.has_cost_ = 1; return self.cost()
def clear_cost(self):
if self.has_cost_:
self.has_cost_ = 0;
if self.cost_ is not None: self.cost_.Clear()
def has_cost(self): return self.has_cost_
def MergeFrom(self, x):
assert x is not self
if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
def Equals(self, x):
if x is self: return 1
if self.has_cost_ != x.has_cost_: return 0
if self.has_cost_ and self.cost_ != x.cost_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
return n
def Clear(self):
self.clear_cost()
def OutputUnchecked(self, out):
if (self.has_cost_):
out.putVarInt32(10)
out.putVarInt32(self.cost_.ByteSize())
self.cost_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_cost_):
out.putVarInt32(10)
out.putVarInt32(self.cost_.ByteSizePartial())
self.cost_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cost().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_cost_:
res+=prefix+"cost <\n"
res+=self.cost_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcost = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cost",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.TouchResponse'
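# Hand-written note: the cost()/header() accessors above all use double-checked
# locking on lazy_init_lock_, allocating a submessage at most once even under
# concurrent first access.  The same pattern, distilled:
def _lazy_init_sketch(holder, lock, factory):
  # holder is a one-element list standing in for the instance attribute.
  if holder[0] is None:
    lock.acquire()
    try:
      if holder[0] is None: holder[0] = factory()
    finally:
      lock.release()
  return holder[0]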
class DeleteRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_transaction_ = 0
transaction_ = None
has_trusted_ = 0
trusted_ = 0
has_force_ = 0
force_ = 0
has_mark_changes_ = 0
mark_changes_ = 0
def __init__(self, contents=None):
self.key_ = []
self.composite_index_ = []
self.snapshot_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def key_size(self): return len(self.key_)
def key_list(self): return self.key_
def key(self, i):
return self.key_[i]
def mutable_key(self, i):
return self.key_[i]
def add_key(self):
x = Reference()
self.key_.append(x)
return x
def clear_key(self):
self.key_ = []
def transaction(self):
if self.transaction_ is None:
self.lazy_init_lock_.acquire()
try:
if self.transaction_ is None: self.transaction_ = Transaction()
finally:
self.lazy_init_lock_.release()
return self.transaction_
def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
def clear_transaction(self):
if self.has_transaction_:
self.has_transaction_ = 0;
if self.transaction_ is not None: self.transaction_.Clear()
def has_transaction(self): return self.has_transaction_
def composite_index_size(self): return len(self.composite_index_)
def composite_index_list(self): return self.composite_index_
def composite_index(self, i):
return self.composite_index_[i]
def mutable_composite_index(self, i):
return self.composite_index_[i]
def add_composite_index(self):
x = CompositeIndex()
self.composite_index_.append(x)
return x
def clear_composite_index(self):
self.composite_index_ = []
def trusted(self): return self.trusted_
def set_trusted(self, x):
self.has_trusted_ = 1
self.trusted_ = x
def clear_trusted(self):
if self.has_trusted_:
self.has_trusted_ = 0
self.trusted_ = 0
def has_trusted(self): return self.has_trusted_
def force(self): return self.force_
def set_force(self, x):
self.has_force_ = 1
self.force_ = x
def clear_force(self):
if self.has_force_:
self.has_force_ = 0
self.force_ = 0
def has_force(self): return self.has_force_
def mark_changes(self): return self.mark_changes_
def set_mark_changes(self, x):
self.has_mark_changes_ = 1
self.mark_changes_ = x
def clear_mark_changes(self):
if self.has_mark_changes_:
self.has_mark_changes_ = 0
self.mark_changes_ = 0
def has_mark_changes(self): return self.has_mark_changes_
def snapshot_size(self): return len(self.snapshot_)
def snapshot_list(self): return self.snapshot_
def snapshot(self, i):
return self.snapshot_[i]
def mutable_snapshot(self, i):
return self.snapshot_[i]
def add_snapshot(self):
x = Snapshot()
self.snapshot_.append(x)
return x
def clear_snapshot(self):
self.snapshot_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
if (x.has_trusted()): self.set_trusted(x.trusted())
if (x.has_force()): self.set_force(x.force())
if (x.has_mark_changes()): self.set_mark_changes(x.mark_changes())
for i in xrange(x.snapshot_size()): self.add_snapshot().CopyFrom(x.snapshot(i))
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if len(self.key_) != len(x.key_): return 0
for e1, e2 in zip(self.key_, x.key_):
if e1 != e2: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if len(self.composite_index_) != len(x.composite_index_): return 0
for e1, e2 in zip(self.composite_index_, x.composite_index_):
if e1 != e2: return 0
if self.has_trusted_ != x.has_trusted_: return 0
if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
if self.has_force_ != x.has_force_: return 0
if self.has_force_ and self.force_ != x.force_: return 0
if self.has_mark_changes_ != x.has_mark_changes_: return 0
if self.has_mark_changes_ and self.mark_changes_ != x.mark_changes_: return 0
if len(self.snapshot_) != len(x.snapshot_): return 0
for e1, e2 in zip(self.snapshot_, x.snapshot_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
for p in self.key_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
for p in self.composite_index_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.snapshot_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
n += 1 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
if (self.has_trusted_): n += 2
if (self.has_force_): n += 2
if (self.has_mark_changes_): n += 2
n += 1 * len(self.snapshot_)
for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
n += 1 * len(self.composite_index_)
for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
if (self.has_trusted_): n += 2
if (self.has_force_): n += 2
if (self.has_mark_changes_): n += 2
n += 1 * len(self.snapshot_)
for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_header()
self.clear_key()
self.clear_transaction()
self.clear_composite_index()
self.clear_trusted()
self.clear_force()
self.clear_mark_changes()
self.clear_snapshot()
def OutputUnchecked(self, out):
if (self.has_trusted_):
out.putVarInt32(32)
out.putBoolean(self.trusted_)
if (self.has_transaction_):
out.putVarInt32(42)
out.putVarInt32(self.transaction_.ByteSize())
self.transaction_.OutputUnchecked(out)
for i in xrange(len(self.key_)):
out.putVarInt32(50)
out.putVarInt32(self.key_[i].ByteSize())
self.key_[i].OutputUnchecked(out)
if (self.has_force_):
out.putVarInt32(56)
out.putBoolean(self.force_)
if (self.has_mark_changes_):
out.putVarInt32(64)
out.putBoolean(self.mark_changes_)
for i in xrange(len(self.snapshot_)):
out.putVarInt32(74)
out.putVarInt32(self.snapshot_[i].ByteSize())
self.snapshot_[i].OutputUnchecked(out)
if (self.has_header_):
out.putVarInt32(82)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(90)
out.putVarInt32(self.composite_index_[i].ByteSize())
self.composite_index_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_trusted_):
out.putVarInt32(32)
out.putBoolean(self.trusted_)
if (self.has_transaction_):
out.putVarInt32(42)
out.putVarInt32(self.transaction_.ByteSizePartial())
self.transaction_.OutputPartial(out)
for i in xrange(len(self.key_)):
out.putVarInt32(50)
out.putVarInt32(self.key_[i].ByteSizePartial())
self.key_[i].OutputPartial(out)
if (self.has_force_):
out.putVarInt32(56)
out.putBoolean(self.force_)
if (self.has_mark_changes_):
out.putVarInt32(64)
out.putBoolean(self.mark_changes_)
for i in xrange(len(self.snapshot_)):
out.putVarInt32(74)
out.putVarInt32(self.snapshot_[i].ByteSizePartial())
self.snapshot_[i].OutputPartial(out)
if (self.has_header_):
out.putVarInt32(82)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
for i in xrange(len(self.composite_index_)):
out.putVarInt32(90)
out.putVarInt32(self.composite_index_[i].ByteSizePartial())
self.composite_index_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 32:
self.set_trusted(d.getBoolean())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_transaction().TryMerge(tmp)
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_key().TryMerge(tmp)
continue
if tt == 56:
self.set_force(d.getBoolean())
continue
if tt == 64:
self.set_mark_changes(d.getBoolean())
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_snapshot().TryMerge(tmp)
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if tt == 90:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_composite_index().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("key%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_transaction_:
res+=prefix+"transaction <\n"
res+=self.transaction_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.composite_index_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("composite_index%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
if self.has_mark_changes_: res+=prefix+("mark_changes: %s\n" % self.DebugFormatBool(self.mark_changes_))
cnt=0
for e in self.snapshot_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("snapshot%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 10
kkey = 6
ktransaction = 5
kcomposite_index = 11
ktrusted = 4
kforce = 7
kmark_changes = 8
ksnapshot = 9
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
4: "trusted",
5: "transaction",
6: "key",
7: "force",
8: "mark_changes",
9: "snapshot",
10: "header",
11: "composite_index",
}, 11)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.STRING,
}, 11, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.DeleteRequest'
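# Usage sketch (hand-written; Reference is defined earlier in this module):
def _delete_request_sketch():
  req = DeleteRequest()
  ref = req.add_key()                # appends a fresh Reference, tag byte 50
  assert req.key_size() == 1 and req.key(0) is ref
  req.set_trusted(1)
  # ByteSizePartial tolerates unset required subfields, so it is safe to call
  # while ref is still being filled in.
  return req.ByteSizePartial()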
class DeleteResponse(ProtocolBuffer.ProtocolMessage):
has_cost_ = 0
cost_ = None
def __init__(self, contents=None):
self.version_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def cost(self):
if self.cost_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cost_ is None: self.cost_ = Cost()
finally:
self.lazy_init_lock_.release()
return self.cost_
def mutable_cost(self): self.has_cost_ = 1; return self.cost()
def clear_cost(self):
if self.has_cost_:
self.has_cost_ = 0;
if self.cost_ is not None: self.cost_.Clear()
def has_cost(self): return self.has_cost_
def version_size(self): return len(self.version_)
def version_list(self): return self.version_
def version(self, i):
return self.version_[i]
def set_version(self, i, x):
self.version_[i] = x
def add_version(self, x):
self.version_.append(x)
def clear_version(self):
self.version_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
for i in xrange(x.version_size()): self.add_version(x.version(i))
def Equals(self, x):
if x is self: return 1
if self.has_cost_ != x.has_cost_: return 0
if self.has_cost_ and self.cost_ != x.cost_: return 0
if len(self.version_) != len(x.version_): return 0
for e1, e2 in zip(self.version_, x.version_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
n += 1 * len(self.version_)
for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
return n
def ByteSizePartial(self):
n = 0
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
n += 1 * len(self.version_)
for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
return n
def Clear(self):
self.clear_cost()
self.clear_version()
def OutputUnchecked(self, out):
if (self.has_cost_):
out.putVarInt32(10)
out.putVarInt32(self.cost_.ByteSize())
self.cost_.OutputUnchecked(out)
for i in xrange(len(self.version_)):
out.putVarInt32(24)
out.putVarInt64(self.version_[i])
def OutputPartial(self, out):
if (self.has_cost_):
out.putVarInt32(10)
out.putVarInt32(self.cost_.ByteSizePartial())
self.cost_.OutputPartial(out)
for i in xrange(len(self.version_)):
out.putVarInt32(24)
out.putVarInt64(self.version_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cost().TryMerge(tmp)
continue
if tt == 24:
self.add_version(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_cost_:
res+=prefix+"cost <\n"
res+=self.cost_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcost = 1
kversion = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cost",
3: "version",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.DeleteResponse'
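# Usage sketch (hand-written): version entries are plain integers, so the
# repeated-field helpers operate on a Python list rather than message objects.
def _delete_response_sketch():
  resp = DeleteResponse()
  resp.add_version(3)
  resp.set_version(0, 4)
  assert resp.version_list() == [4]
  resp.clear_version()
  return resp.version_size()         # 0 after clearing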
class NextRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_cursor_ = 0
has_count_ = 0
count_ = 0
has_offset_ = 0
offset_ = 0
has_compile_ = 0
compile_ = 0
def __init__(self, contents=None):
self.cursor_ = Cursor()
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def cursor(self): return self.cursor_
def mutable_cursor(self): self.has_cursor_ = 1; return self.cursor_
  def clear_cursor(self): self.has_cursor_ = 0; self.cursor_.Clear()
def has_cursor(self): return self.has_cursor_
def count(self): return self.count_
def set_count(self, x):
self.has_count_ = 1
self.count_ = x
def clear_count(self):
if self.has_count_:
self.has_count_ = 0
self.count_ = 0
def has_count(self): return self.has_count_
def offset(self): return self.offset_
def set_offset(self, x):
self.has_offset_ = 1
self.offset_ = x
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0
self.offset_ = 0
def has_offset(self): return self.has_offset_
def compile(self): return self.compile_
def set_compile(self, x):
self.has_compile_ = 1
self.compile_ = x
def clear_compile(self):
if self.has_compile_:
self.has_compile_ = 0
self.compile_ = 0
def has_compile(self): return self.has_compile_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
if (x.has_cursor()): self.mutable_cursor().MergeFrom(x.cursor())
if (x.has_count()): self.set_count(x.count())
if (x.has_offset()): self.set_offset(x.offset())
if (x.has_compile()): self.set_compile(x.compile())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
if self.has_count_ != x.has_count_: return 0
if self.has_count_ and self.count_ != x.count_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_compile_ != x.has_compile_: return 0
if self.has_compile_ and self.compile_ != x.compile_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_cursor_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: cursor not set.')
elif not self.cursor_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += self.lengthString(self.cursor_.ByteSize())
if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_compile_): n += 2
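    # The trailing +1 is the tag byte of the required cursor field, which
    # OutputUnchecked writes unconditionally.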
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
if (self.has_cursor_):
n += 1
n += self.lengthString(self.cursor_.ByteSizePartial())
if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_compile_): n += 2
return n
def Clear(self):
self.clear_header()
self.clear_cursor()
self.clear_count()
self.clear_offset()
self.clear_compile()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.cursor_.ByteSize())
self.cursor_.OutputUnchecked(out)
if (self.has_count_):
out.putVarInt32(16)
out.putVarInt32(self.count_)
if (self.has_compile_):
out.putVarInt32(24)
out.putBoolean(self.compile_)
if (self.has_offset_):
out.putVarInt32(32)
out.putVarInt32(self.offset_)
if (self.has_header_):
out.putVarInt32(42)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_cursor_):
out.putVarInt32(10)
out.putVarInt32(self.cursor_.ByteSizePartial())
self.cursor_.OutputPartial(out)
if (self.has_count_):
out.putVarInt32(16)
out.putVarInt32(self.count_)
if (self.has_compile_):
out.putVarInt32(24)
out.putBoolean(self.compile_)
if (self.has_offset_):
out.putVarInt32(32)
out.putVarInt32(self.offset_)
if (self.has_header_):
out.putVarInt32(42)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cursor().TryMerge(tmp)
continue
if tt == 16:
self.set_count(d.getVarInt32())
continue
if tt == 24:
self.set_compile(d.getBoolean())
continue
if tt == 32:
self.set_offset(d.getVarInt32())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_cursor_:
res+=prefix+"cursor <\n"
res+=self.cursor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt32(self.count_))
if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
if self.has_compile_: res+=prefix+("compile: %s\n" % self.DebugFormatBool(self.compile_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 5
kcursor = 1
kcount = 2
koffset = 4
kcompile = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cursor",
2: "count",
3: "compile",
4: "offset",
5: "header",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.NextRequest'
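# Required-field sketch (hand-written): cursor is the lone required field of
# NextRequest, which is why IsInitialized reports it when missing.
def _next_request_sketch():
  req = NextRequest()
  errs = []
  assert not req.IsInitialized(errs)
  assert errs == ['Required field: cursor not set.']
  req.mutable_cursor()                # marks the eagerly-built Cursor present
  assert req.has_cursor()
  return req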
class QueryResult(ProtocolBuffer.ProtocolMessage):
has_cursor_ = 0
cursor_ = None
has_skipped_results_ = 0
skipped_results_ = 0
has_more_results_ = 0
more_results_ = 0
has_keys_only_ = 0
keys_only_ = 0
has_index_only_ = 0
index_only_ = 0
has_small_ops_ = 0
small_ops_ = 0
has_compiled_query_ = 0
compiled_query_ = None
has_compiled_cursor_ = 0
compiled_cursor_ = None
has_skipped_results_compiled_cursor_ = 0
skipped_results_compiled_cursor_ = None
def __init__(self, contents=None):
self.result_ = []
self.index_ = []
self.version_ = []
self.result_compiled_cursor_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def cursor(self):
if self.cursor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cursor_ is None: self.cursor_ = Cursor()
finally:
self.lazy_init_lock_.release()
return self.cursor_
def mutable_cursor(self): self.has_cursor_ = 1; return self.cursor()
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0;
if self.cursor_ is not None: self.cursor_.Clear()
def has_cursor(self): return self.has_cursor_
def result_size(self): return len(self.result_)
def result_list(self): return self.result_
def result(self, i):
return self.result_[i]
def mutable_result(self, i):
return self.result_[i]
def add_result(self):
x = EntityProto()
self.result_.append(x)
return x
def clear_result(self):
self.result_ = []
def skipped_results(self): return self.skipped_results_
def set_skipped_results(self, x):
self.has_skipped_results_ = 1
self.skipped_results_ = x
def clear_skipped_results(self):
if self.has_skipped_results_:
self.has_skipped_results_ = 0
self.skipped_results_ = 0
def has_skipped_results(self): return self.has_skipped_results_
def more_results(self): return self.more_results_
def set_more_results(self, x):
self.has_more_results_ = 1
self.more_results_ = x
def clear_more_results(self):
if self.has_more_results_:
self.has_more_results_ = 0
self.more_results_ = 0
def has_more_results(self): return self.has_more_results_
def keys_only(self): return self.keys_only_
def set_keys_only(self, x):
self.has_keys_only_ = 1
self.keys_only_ = x
def clear_keys_only(self):
if self.has_keys_only_:
self.has_keys_only_ = 0
self.keys_only_ = 0
def has_keys_only(self): return self.has_keys_only_
def index_only(self): return self.index_only_
def set_index_only(self, x):
self.has_index_only_ = 1
self.index_only_ = x
def clear_index_only(self):
if self.has_index_only_:
self.has_index_only_ = 0
self.index_only_ = 0
def has_index_only(self): return self.has_index_only_
def small_ops(self): return self.small_ops_
def set_small_ops(self, x):
self.has_small_ops_ = 1
self.small_ops_ = x
def clear_small_ops(self):
if self.has_small_ops_:
self.has_small_ops_ = 0
self.small_ops_ = 0
def has_small_ops(self): return self.has_small_ops_
def compiled_query(self):
if self.compiled_query_ is None:
self.lazy_init_lock_.acquire()
try:
if self.compiled_query_ is None: self.compiled_query_ = CompiledQuery()
finally:
self.lazy_init_lock_.release()
return self.compiled_query_
def mutable_compiled_query(self): self.has_compiled_query_ = 1; return self.compiled_query()
def clear_compiled_query(self):
if self.has_compiled_query_:
self.has_compiled_query_ = 0;
if self.compiled_query_ is not None: self.compiled_query_.Clear()
def has_compiled_query(self): return self.has_compiled_query_
def compiled_cursor(self):
if self.compiled_cursor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.compiled_cursor_ is None: self.compiled_cursor_ = CompiledCursor()
finally:
self.lazy_init_lock_.release()
return self.compiled_cursor_
def mutable_compiled_cursor(self): self.has_compiled_cursor_ = 1; return self.compiled_cursor()
def clear_compiled_cursor(self):
if self.has_compiled_cursor_:
self.has_compiled_cursor_ = 0;
if self.compiled_cursor_ is not None: self.compiled_cursor_.Clear()
def has_compiled_cursor(self): return self.has_compiled_cursor_
def index_size(self): return len(self.index_)
def index_list(self): return self.index_
def index(self, i):
return self.index_[i]
def mutable_index(self, i):
return self.index_[i]
def add_index(self):
x = CompositeIndex()
self.index_.append(x)
return x
def clear_index(self):
self.index_ = []
def version_size(self): return len(self.version_)
def version_list(self): return self.version_
def version(self, i):
return self.version_[i]
def set_version(self, i, x):
self.version_[i] = x
def add_version(self, x):
self.version_.append(x)
def clear_version(self):
self.version_ = []
def result_compiled_cursor_size(self): return len(self.result_compiled_cursor_)
def result_compiled_cursor_list(self): return self.result_compiled_cursor_
def result_compiled_cursor(self, i):
return self.result_compiled_cursor_[i]
def mutable_result_compiled_cursor(self, i):
return self.result_compiled_cursor_[i]
def add_result_compiled_cursor(self):
x = CompiledCursor()
self.result_compiled_cursor_.append(x)
return x
def clear_result_compiled_cursor(self):
self.result_compiled_cursor_ = []
def skipped_results_compiled_cursor(self):
if self.skipped_results_compiled_cursor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.skipped_results_compiled_cursor_ is None: self.skipped_results_compiled_cursor_ = CompiledCursor()
finally:
self.lazy_init_lock_.release()
return self.skipped_results_compiled_cursor_
def mutable_skipped_results_compiled_cursor(self): self.has_skipped_results_compiled_cursor_ = 1; return self.skipped_results_compiled_cursor()
def clear_skipped_results_compiled_cursor(self):
if self.has_skipped_results_compiled_cursor_:
self.has_skipped_results_compiled_cursor_ = 0;
if self.skipped_results_compiled_cursor_ is not None: self.skipped_results_compiled_cursor_.Clear()
def has_skipped_results_compiled_cursor(self): return self.has_skipped_results_compiled_cursor_
def MergeFrom(self, x):
assert x is not self
if (x.has_cursor()): self.mutable_cursor().MergeFrom(x.cursor())
for i in xrange(x.result_size()): self.add_result().CopyFrom(x.result(i))
if (x.has_skipped_results()): self.set_skipped_results(x.skipped_results())
if (x.has_more_results()): self.set_more_results(x.more_results())
if (x.has_keys_only()): self.set_keys_only(x.keys_only())
if (x.has_index_only()): self.set_index_only(x.index_only())
if (x.has_small_ops()): self.set_small_ops(x.small_ops())
if (x.has_compiled_query()): self.mutable_compiled_query().MergeFrom(x.compiled_query())
if (x.has_compiled_cursor()): self.mutable_compiled_cursor().MergeFrom(x.compiled_cursor())
for i in xrange(x.index_size()): self.add_index().CopyFrom(x.index(i))
for i in xrange(x.version_size()): self.add_version(x.version(i))
for i in xrange(x.result_compiled_cursor_size()): self.add_result_compiled_cursor().CopyFrom(x.result_compiled_cursor(i))
if (x.has_skipped_results_compiled_cursor()): self.mutable_skipped_results_compiled_cursor().MergeFrom(x.skipped_results_compiled_cursor())
def Equals(self, x):
if x is self: return 1
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
if len(self.result_) != len(x.result_): return 0
for e1, e2 in zip(self.result_, x.result_):
if e1 != e2: return 0
if self.has_skipped_results_ != x.has_skipped_results_: return 0
if self.has_skipped_results_ and self.skipped_results_ != x.skipped_results_: return 0
if self.has_more_results_ != x.has_more_results_: return 0
if self.has_more_results_ and self.more_results_ != x.more_results_: return 0
if self.has_keys_only_ != x.has_keys_only_: return 0
if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
if self.has_index_only_ != x.has_index_only_: return 0
if self.has_index_only_ and self.index_only_ != x.index_only_: return 0
if self.has_small_ops_ != x.has_small_ops_: return 0
if self.has_small_ops_ and self.small_ops_ != x.small_ops_: return 0
if self.has_compiled_query_ != x.has_compiled_query_: return 0
if self.has_compiled_query_ and self.compiled_query_ != x.compiled_query_: return 0
if self.has_compiled_cursor_ != x.has_compiled_cursor_: return 0
if self.has_compiled_cursor_ and self.compiled_cursor_ != x.compiled_cursor_: return 0
if len(self.index_) != len(x.index_): return 0
for e1, e2 in zip(self.index_, x.index_):
if e1 != e2: return 0
if len(self.version_) != len(x.version_): return 0
for e1, e2 in zip(self.version_, x.version_):
if e1 != e2: return 0
if len(self.result_compiled_cursor_) != len(x.result_compiled_cursor_): return 0
for e1, e2 in zip(self.result_compiled_cursor_, x.result_compiled_cursor_):
if e1 != e2: return 0
if self.has_skipped_results_compiled_cursor_ != x.has_skipped_results_compiled_cursor_: return 0
if self.has_skipped_results_compiled_cursor_ and self.skipped_results_compiled_cursor_ != x.skipped_results_compiled_cursor_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_cursor_ and not self.cursor_.IsInitialized(debug_strs)): initialized = 0
for p in self.result_:
if not p.IsInitialized(debug_strs): initialized=0
if (not self.has_more_results_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: more_results not set.')
if (self.has_compiled_query_ and not self.compiled_query_.IsInitialized(debug_strs)): initialized = 0
if (self.has_compiled_cursor_ and not self.compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
for p in self.index_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.result_compiled_cursor_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_skipped_results_compiled_cursor_ and not self.skipped_results_compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_cursor_): n += 1 + self.lengthString(self.cursor_.ByteSize())
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSize())
if (self.has_skipped_results_): n += 1 + self.lengthVarInt64(self.skipped_results_)
if (self.has_keys_only_): n += 2
if (self.has_index_only_): n += 2
if (self.has_small_ops_): n += 2
if (self.has_compiled_query_): n += 1 + self.lengthString(self.compiled_query_.ByteSize())
if (self.has_compiled_cursor_): n += 1 + self.lengthString(self.compiled_cursor_.ByteSize())
n += 1 * len(self.index_)
for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSize())
n += 1 * len(self.version_)
for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
n += 1 * len(self.result_compiled_cursor_)
for i in xrange(len(self.result_compiled_cursor_)): n += self.lengthString(self.result_compiled_cursor_[i].ByteSize())
if (self.has_skipped_results_compiled_cursor_): n += 1 + self.lengthString(self.skipped_results_compiled_cursor_.ByteSize())
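    # The trailing +2 is the required more_results field: tag byte 24 plus
    # one boolean byte, both written unconditionally by OutputUnchecked.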
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_cursor_): n += 1 + self.lengthString(self.cursor_.ByteSizePartial())
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSizePartial())
if (self.has_skipped_results_): n += 1 + self.lengthVarInt64(self.skipped_results_)
if (self.has_more_results_):
n += 2
if (self.has_keys_only_): n += 2
if (self.has_index_only_): n += 2
if (self.has_small_ops_): n += 2
if (self.has_compiled_query_): n += 1 + self.lengthString(self.compiled_query_.ByteSizePartial())
if (self.has_compiled_cursor_): n += 1 + self.lengthString(self.compiled_cursor_.ByteSizePartial())
n += 1 * len(self.index_)
for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSizePartial())
n += 1 * len(self.version_)
for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
n += 1 * len(self.result_compiled_cursor_)
for i in xrange(len(self.result_compiled_cursor_)): n += self.lengthString(self.result_compiled_cursor_[i].ByteSizePartial())
if (self.has_skipped_results_compiled_cursor_): n += 1 + self.lengthString(self.skipped_results_compiled_cursor_.ByteSizePartial())
return n
def Clear(self):
self.clear_cursor()
self.clear_result()
self.clear_skipped_results()
self.clear_more_results()
self.clear_keys_only()
self.clear_index_only()
self.clear_small_ops()
self.clear_compiled_query()
self.clear_compiled_cursor()
self.clear_index()
self.clear_version()
self.clear_result_compiled_cursor()
self.clear_skipped_results_compiled_cursor()
def OutputUnchecked(self, out):
if (self.has_cursor_):
out.putVarInt32(10)
out.putVarInt32(self.cursor_.ByteSize())
self.cursor_.OutputUnchecked(out)
for i in xrange(len(self.result_)):
out.putVarInt32(18)
out.putVarInt32(self.result_[i].ByteSize())
self.result_[i].OutputUnchecked(out)
out.putVarInt32(24)
out.putBoolean(self.more_results_)
if (self.has_keys_only_):
out.putVarInt32(32)
out.putBoolean(self.keys_only_)
if (self.has_compiled_query_):
out.putVarInt32(42)
out.putVarInt32(self.compiled_query_.ByteSize())
self.compiled_query_.OutputUnchecked(out)
if (self.has_compiled_cursor_):
out.putVarInt32(50)
out.putVarInt32(self.compiled_cursor_.ByteSize())
self.compiled_cursor_.OutputUnchecked(out)
if (self.has_skipped_results_):
out.putVarInt32(56)
out.putVarInt32(self.skipped_results_)
for i in xrange(len(self.index_)):
out.putVarInt32(66)
out.putVarInt32(self.index_[i].ByteSize())
self.index_[i].OutputUnchecked(out)
if (self.has_index_only_):
out.putVarInt32(72)
out.putBoolean(self.index_only_)
if (self.has_small_ops_):
out.putVarInt32(80)
out.putBoolean(self.small_ops_)
for i in xrange(len(self.version_)):
out.putVarInt32(88)
out.putVarInt64(self.version_[i])
for i in xrange(len(self.result_compiled_cursor_)):
out.putVarInt32(98)
out.putVarInt32(self.result_compiled_cursor_[i].ByteSize())
self.result_compiled_cursor_[i].OutputUnchecked(out)
if (self.has_skipped_results_compiled_cursor_):
out.putVarInt32(106)
out.putVarInt32(self.skipped_results_compiled_cursor_.ByteSize())
self.skipped_results_compiled_cursor_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_cursor_):
out.putVarInt32(10)
out.putVarInt32(self.cursor_.ByteSizePartial())
self.cursor_.OutputPartial(out)
for i in xrange(len(self.result_)):
out.putVarInt32(18)
out.putVarInt32(self.result_[i].ByteSizePartial())
self.result_[i].OutputPartial(out)
if (self.has_more_results_):
out.putVarInt32(24)
out.putBoolean(self.more_results_)
if (self.has_keys_only_):
out.putVarInt32(32)
out.putBoolean(self.keys_only_)
if (self.has_compiled_query_):
out.putVarInt32(42)
out.putVarInt32(self.compiled_query_.ByteSizePartial())
self.compiled_query_.OutputPartial(out)
if (self.has_compiled_cursor_):
out.putVarInt32(50)
out.putVarInt32(self.compiled_cursor_.ByteSizePartial())
self.compiled_cursor_.OutputPartial(out)
if (self.has_skipped_results_):
out.putVarInt32(56)
out.putVarInt32(self.skipped_results_)
for i in xrange(len(self.index_)):
out.putVarInt32(66)
out.putVarInt32(self.index_[i].ByteSizePartial())
self.index_[i].OutputPartial(out)
if (self.has_index_only_):
out.putVarInt32(72)
out.putBoolean(self.index_only_)
if (self.has_small_ops_):
out.putVarInt32(80)
out.putBoolean(self.small_ops_)
for i in xrange(len(self.version_)):
out.putVarInt32(88)
out.putVarInt64(self.version_[i])
for i in xrange(len(self.result_compiled_cursor_)):
out.putVarInt32(98)
out.putVarInt32(self.result_compiled_cursor_[i].ByteSizePartial())
self.result_compiled_cursor_[i].OutputPartial(out)
if (self.has_skipped_results_compiled_cursor_):
out.putVarInt32(106)
out.putVarInt32(self.skipped_results_compiled_cursor_.ByteSizePartial())
self.skipped_results_compiled_cursor_.OutputPartial(out)
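  # A note on the wire format decoded below: each tag value is
  # (field_number << 3) | wire_type, so tag 10 is field 1 as a
  # length-delimited value (nested message/string), tag 24 is field 3 as
  # a varint, and so on for every branch in TryMerge.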
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cursor().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_result().TryMerge(tmp)
continue
if tt == 24:
self.set_more_results(d.getBoolean())
continue
if tt == 32:
self.set_keys_only(d.getBoolean())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_compiled_query().TryMerge(tmp)
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_compiled_cursor().TryMerge(tmp)
continue
if tt == 56:
self.set_skipped_results(d.getVarInt32())
continue
if tt == 66:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_index().TryMerge(tmp)
continue
if tt == 72:
self.set_index_only(d.getBoolean())
continue
if tt == 80:
self.set_small_ops(d.getBoolean())
continue
if tt == 88:
self.add_version(d.getVarInt64())
continue
if tt == 98:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_result_compiled_cursor().TryMerge(tmp)
continue
if tt == 106:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_skipped_results_compiled_cursor().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_cursor_:
res+=prefix+"cursor <\n"
res+=self.cursor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.result_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("result%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_skipped_results_: res+=prefix+("skipped_results: %s\n" % self.DebugFormatInt32(self.skipped_results_))
if self.has_more_results_: res+=prefix+("more_results: %s\n" % self.DebugFormatBool(self.more_results_))
if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
if self.has_index_only_: res+=prefix+("index_only: %s\n" % self.DebugFormatBool(self.index_only_))
if self.has_small_ops_: res+=prefix+("small_ops: %s\n" % self.DebugFormatBool(self.small_ops_))
if self.has_compiled_query_:
res+=prefix+"compiled_query <\n"
res+=self.compiled_query_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_compiled_cursor_:
res+=prefix+"compiled_cursor <\n"
res+=self.compiled_cursor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.index_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("index%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
cnt=0
for e in self.result_compiled_cursor_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("result_compiled_cursor%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_skipped_results_compiled_cursor_:
res+=prefix+"skipped_results_compiled_cursor <\n"
res+=self.skipped_results_compiled_cursor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
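  # The field-number constants and the _TEXT/_TYPES lookup tables below
  # are boilerplate emitted for every generated message: _TEXT maps field
  # numbers to names for debug printing, _TYPES to encoder wire types.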
kcursor = 1
kresult = 2
kskipped_results = 7
kmore_results = 3
kkeys_only = 4
kindex_only = 9
ksmall_ops = 10
kcompiled_query = 5
kcompiled_cursor = 6
kindex = 8
kversion = 11
kresult_compiled_cursor = 12
kskipped_results_compiled_cursor = 13
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cursor",
2: "result",
3: "more_results",
4: "keys_only",
5: "compiled_query",
6: "compiled_cursor",
7: "skipped_results",
8: "index",
9: "index_only",
10: "small_ops",
11: "version",
12: "result_compiled_cursor",
13: "skipped_results_compiled_cursor",
}, 13)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.STRING,
13: ProtocolBuffer.Encoder.STRING,
}, 13, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.QueryResult'
class AllocateIdsRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_model_key_ = 0
model_key_ = None
has_size_ = 0
size_ = 0
has_max_ = 0
max_ = 0
has_trusted_ = 0
trusted_ = 0
def __init__(self, contents=None):
self.reserve_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def model_key(self):
if self.model_key_ is None:
self.lazy_init_lock_.acquire()
try:
if self.model_key_ is None: self.model_key_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.model_key_
def mutable_model_key(self): self.has_model_key_ = 1; return self.model_key()
def clear_model_key(self):
if self.has_model_key_:
self.has_model_key_ = 0;
if self.model_key_ is not None: self.model_key_.Clear()
def has_model_key(self): return self.has_model_key_
def size(self): return self.size_
def set_size(self, x):
self.has_size_ = 1
self.size_ = x
def clear_size(self):
if self.has_size_:
self.has_size_ = 0
self.size_ = 0
def has_size(self): return self.has_size_
def max(self): return self.max_
def set_max(self, x):
self.has_max_ = 1
self.max_ = x
def clear_max(self):
if self.has_max_:
self.has_max_ = 0
self.max_ = 0
def has_max(self): return self.has_max_
def reserve_size(self): return len(self.reserve_)
def reserve_list(self): return self.reserve_
def reserve(self, i):
return self.reserve_[i]
def mutable_reserve(self, i):
return self.reserve_[i]
def add_reserve(self):
x = Reference()
self.reserve_.append(x)
return x
def clear_reserve(self):
self.reserve_ = []
def trusted(self): return self.trusted_
def set_trusted(self, x):
self.has_trusted_ = 1
self.trusted_ = x
def clear_trusted(self):
if self.has_trusted_:
self.has_trusted_ = 0
self.trusted_ = 0
def has_trusted(self): return self.has_trusted_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
if (x.has_model_key()): self.mutable_model_key().MergeFrom(x.model_key())
if (x.has_size()): self.set_size(x.size())
if (x.has_max()): self.set_max(x.max())
for i in xrange(x.reserve_size()): self.add_reserve().CopyFrom(x.reserve(i))
if (x.has_trusted()): self.set_trusted(x.trusted())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if self.has_model_key_ != x.has_model_key_: return 0
if self.has_model_key_ and self.model_key_ != x.model_key_: return 0
if self.has_size_ != x.has_size_: return 0
if self.has_size_ and self.size_ != x.size_: return 0
if self.has_max_ != x.has_max_: return 0
if self.has_max_ and self.max_ != x.max_: return 0
if len(self.reserve_) != len(x.reserve_): return 0
for e1, e2 in zip(self.reserve_, x.reserve_):
if e1 != e2: return 0
if self.has_trusted_ != x.has_trusted_: return 0
if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
if (self.has_model_key_ and not self.model_key_.IsInitialized(debug_strs)): initialized = 0
for p in self.reserve_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
if (self.has_model_key_): n += 1 + self.lengthString(self.model_key_.ByteSize())
if (self.has_size_): n += 1 + self.lengthVarInt64(self.size_)
if (self.has_max_): n += 1 + self.lengthVarInt64(self.max_)
n += 1 * len(self.reserve_)
for i in xrange(len(self.reserve_)): n += self.lengthString(self.reserve_[i].ByteSize())
if (self.has_trusted_): n += 2
return n
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
if (self.has_model_key_): n += 1 + self.lengthString(self.model_key_.ByteSizePartial())
if (self.has_size_): n += 1 + self.lengthVarInt64(self.size_)
if (self.has_max_): n += 1 + self.lengthVarInt64(self.max_)
n += 1 * len(self.reserve_)
for i in xrange(len(self.reserve_)): n += self.lengthString(self.reserve_[i].ByteSizePartial())
if (self.has_trusted_): n += 2
return n
def Clear(self):
self.clear_header()
self.clear_model_key()
self.clear_size()
self.clear_max()
self.clear_reserve()
self.clear_trusted()
def OutputUnchecked(self, out):
if (self.has_model_key_):
out.putVarInt32(10)
out.putVarInt32(self.model_key_.ByteSize())
self.model_key_.OutputUnchecked(out)
if (self.has_size_):
out.putVarInt32(16)
out.putVarInt64(self.size_)
if (self.has_max_):
out.putVarInt32(24)
out.putVarInt64(self.max_)
if (self.has_header_):
out.putVarInt32(34)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
for i in xrange(len(self.reserve_)):
out.putVarInt32(42)
out.putVarInt32(self.reserve_[i].ByteSize())
self.reserve_[i].OutputUnchecked(out)
if (self.has_trusted_):
out.putVarInt32(48)
out.putBoolean(self.trusted_)
def OutputPartial(self, out):
if (self.has_model_key_):
out.putVarInt32(10)
out.putVarInt32(self.model_key_.ByteSizePartial())
self.model_key_.OutputPartial(out)
if (self.has_size_):
out.putVarInt32(16)
out.putVarInt64(self.size_)
if (self.has_max_):
out.putVarInt32(24)
out.putVarInt64(self.max_)
if (self.has_header_):
out.putVarInt32(34)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
for i in xrange(len(self.reserve_)):
out.putVarInt32(42)
out.putVarInt32(self.reserve_[i].ByteSizePartial())
self.reserve_[i].OutputPartial(out)
if (self.has_trusted_):
out.putVarInt32(48)
out.putBoolean(self.trusted_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_model_key().TryMerge(tmp)
continue
if tt == 16:
self.set_size(d.getVarInt64())
continue
if tt == 24:
self.set_max(d.getVarInt64())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_reserve().TryMerge(tmp)
continue
if tt == 48:
self.set_trusted(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_model_key_:
res+=prefix+"model_key <\n"
res+=self.model_key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_size_: res+=prefix+("size: %s\n" % self.DebugFormatInt64(self.size_))
if self.has_max_: res+=prefix+("max: %s\n" % self.DebugFormatInt64(self.max_))
cnt=0
for e in self.reserve_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("reserve%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 4
kmodel_key = 1
ksize = 2
kmax = 3
kreserve = 5
ktrusted = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "model_key",
2: "size",
3: "max",
4: "header",
5: "reserve",
6: "trusted",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AllocateIdsRequest'
class AllocateIdsResponse(ProtocolBuffer.ProtocolMessage):
has_start_ = 0
start_ = 0
has_end_ = 0
end_ = 0
has_cost_ = 0
cost_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def start(self): return self.start_
def set_start(self, x):
self.has_start_ = 1
self.start_ = x
def clear_start(self):
if self.has_start_:
self.has_start_ = 0
self.start_ = 0
def has_start(self): return self.has_start_
def end(self): return self.end_
def set_end(self, x):
self.has_end_ = 1
self.end_ = x
def clear_end(self):
if self.has_end_:
self.has_end_ = 0
self.end_ = 0
def has_end(self): return self.has_end_
def cost(self):
if self.cost_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cost_ is None: self.cost_ = Cost()
finally:
self.lazy_init_lock_.release()
return self.cost_
def mutable_cost(self): self.has_cost_ = 1; return self.cost()
def clear_cost(self):
if self.has_cost_:
self.has_cost_ = 0;
if self.cost_ is not None: self.cost_.Clear()
def has_cost(self): return self.has_cost_
def MergeFrom(self, x):
assert x is not self
if (x.has_start()): self.set_start(x.start())
if (x.has_end()): self.set_end(x.end())
if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
def Equals(self, x):
if x is self: return 1
if self.has_start_ != x.has_start_: return 0
if self.has_start_ and self.start_ != x.start_: return 0
if self.has_end_ != x.has_end_: return 0
if self.has_end_ and self.end_ != x.end_: return 0
if self.has_cost_ != x.has_cost_: return 0
if self.has_cost_ and self.cost_ != x.cost_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_start_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start not set.')
if (not self.has_end_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: end not set.')
if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.start_)
n += self.lengthVarInt64(self.end_)
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
return n + 2
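  # ByteSize above assumes the required start/end fields are set (the
  # flat "+ 2" covers their two tag bytes); ByteSizePartial below counts
  # only fields actually present, pairing with OutputPartial.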
def ByteSizePartial(self):
n = 0
if (self.has_start_):
n += 1
n += self.lengthVarInt64(self.start_)
if (self.has_end_):
n += 1
n += self.lengthVarInt64(self.end_)
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
return n
def Clear(self):
self.clear_start()
self.clear_end()
self.clear_cost()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.start_)
out.putVarInt32(16)
out.putVarInt64(self.end_)
if (self.has_cost_):
out.putVarInt32(26)
out.putVarInt32(self.cost_.ByteSize())
self.cost_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_start_):
out.putVarInt32(8)
out.putVarInt64(self.start_)
if (self.has_end_):
out.putVarInt32(16)
out.putVarInt64(self.end_)
if (self.has_cost_):
out.putVarInt32(26)
out.putVarInt32(self.cost_.ByteSizePartial())
self.cost_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_start(d.getVarInt64())
continue
if tt == 16:
self.set_end(d.getVarInt64())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cost().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_start_: res+=prefix+("start: %s\n" % self.DebugFormatInt64(self.start_))
if self.has_end_: res+=prefix+("end: %s\n" % self.DebugFormatInt64(self.end_))
if self.has_cost_:
res+=prefix+"cost <\n"
res+=self.cost_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstart = 1
kend = 2
kcost = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "start",
2: "end",
3: "cost",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AllocateIdsResponse'
class CompositeIndices(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.index_ = []
if contents is not None: self.MergeFromString(contents)
def index_size(self): return len(self.index_)
def index_list(self): return self.index_
def index(self, i):
return self.index_[i]
def mutable_index(self, i):
return self.index_[i]
def add_index(self):
x = CompositeIndex()
self.index_.append(x)
return x
def clear_index(self):
self.index_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.index_size()): self.add_index().CopyFrom(x.index(i))
def Equals(self, x):
if x is self: return 1
if len(self.index_) != len(x.index_): return 0
for e1, e2 in zip(self.index_, x.index_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.index_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.index_)
for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.index_)
for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_index()
def OutputUnchecked(self, out):
for i in xrange(len(self.index_)):
out.putVarInt32(10)
out.putVarInt32(self.index_[i].ByteSize())
self.index_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.index_)):
out.putVarInt32(10)
out.putVarInt32(self.index_[i].ByteSizePartial())
self.index_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_index().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.index_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("index%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CompositeIndices'
class AddActionsRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_transaction_ = 0
def __init__(self, contents=None):
self.transaction_ = Transaction()
self.action_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def transaction(self): return self.transaction_
def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction_
def clear_transaction(self):self.has_transaction_ = 0; self.transaction_.Clear()
def has_transaction(self): return self.has_transaction_
def action_size(self): return len(self.action_)
def action_list(self): return self.action_
def action(self, i):
return self.action_[i]
def mutable_action(self, i):
return self.action_[i]
def add_action(self):
x = Action()
self.action_.append(x)
return x
def clear_action(self):
self.action_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
for i in xrange(x.action_size()): self.add_action().CopyFrom(x.action(i))
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if len(self.action_) != len(x.action_): return 0
for e1, e2 in zip(self.action_, x.action_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_transaction_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: transaction not set.')
elif not self.transaction_.IsInitialized(debug_strs): initialized = 0
for p in self.action_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += self.lengthString(self.transaction_.ByteSize())
n += 1 * len(self.action_)
for i in xrange(len(self.action_)): n += self.lengthString(self.action_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
if (self.has_transaction_):
n += 1
n += self.lengthString(self.transaction_.ByteSizePartial())
n += 1 * len(self.action_)
for i in xrange(len(self.action_)): n += self.lengthString(self.action_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_header()
self.clear_transaction()
self.clear_action()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.transaction_.ByteSize())
self.transaction_.OutputUnchecked(out)
for i in xrange(len(self.action_)):
out.putVarInt32(18)
out.putVarInt32(self.action_[i].ByteSize())
self.action_[i].OutputUnchecked(out)
if (self.has_header_):
out.putVarInt32(26)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_transaction_):
out.putVarInt32(10)
out.putVarInt32(self.transaction_.ByteSizePartial())
self.transaction_.OutputPartial(out)
for i in xrange(len(self.action_)):
out.putVarInt32(18)
out.putVarInt32(self.action_[i].ByteSizePartial())
self.action_[i].OutputPartial(out)
if (self.has_header_):
out.putVarInt32(26)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_transaction().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_action().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_transaction_:
res+=prefix+"transaction <\n"
res+=self.transaction_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.action_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("action%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 3
ktransaction = 1
kaction = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "transaction",
2: "action",
3: "header",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AddActionsRequest'
class AddActionsResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AddActionsResponse'
class BeginTransactionRequest(ProtocolBuffer.ProtocolMessage):
has_header_ = 0
header_ = None
has_app_ = 0
app_ = ""
has_allow_multiple_eg_ = 0
allow_multiple_eg_ = 0
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def header(self):
if self.header_ is None:
self.lazy_init_lock_.acquire()
try:
if self.header_ is None: self.header_ = InternalHeader()
finally:
self.lazy_init_lock_.release()
return self.header_
def mutable_header(self): self.has_header_ = 1; return self.header()
def clear_header(self):
if self.has_header_:
self.has_header_ = 0;
if self.header_ is not None: self.header_.Clear()
def has_header(self): return self.has_header_
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def allow_multiple_eg(self): return self.allow_multiple_eg_
def set_allow_multiple_eg(self, x):
self.has_allow_multiple_eg_ = 1
self.allow_multiple_eg_ = x
def clear_allow_multiple_eg(self):
if self.has_allow_multiple_eg_:
self.has_allow_multiple_eg_ = 0
self.allow_multiple_eg_ = 0
def has_allow_multiple_eg(self): return self.has_allow_multiple_eg_
def MergeFrom(self, x):
assert x is not self
if (x.has_header()): self.mutable_header().MergeFrom(x.header())
if (x.has_app()): self.set_app(x.app())
if (x.has_allow_multiple_eg()): self.set_allow_multiple_eg(x.allow_multiple_eg())
def Equals(self, x):
if x is self: return 1
if self.has_header_ != x.has_header_: return 0
if self.has_header_ and self.header_ != x.header_: return 0
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_allow_multiple_eg_ != x.has_allow_multiple_eg_: return 0
if self.has_allow_multiple_eg_ and self.allow_multiple_eg_ != x.allow_multiple_eg_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
n += self.lengthString(len(self.app_))
if (self.has_allow_multiple_eg_): n += 2
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_allow_multiple_eg_): n += 2
return n
def Clear(self):
self.clear_header()
self.clear_app()
self.clear_allow_multiple_eg()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_)
if (self.has_allow_multiple_eg_):
out.putVarInt32(16)
out.putBoolean(self.allow_multiple_eg_)
if (self.has_header_):
out.putVarInt32(26)
out.putVarInt32(self.header_.ByteSize())
self.header_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_app_):
out.putVarInt32(10)
out.putPrefixedString(self.app_)
if (self.has_allow_multiple_eg_):
out.putVarInt32(16)
out.putBoolean(self.allow_multiple_eg_)
if (self.has_header_):
out.putVarInt32(26)
out.putVarInt32(self.header_.ByteSizePartial())
self.header_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app(d.getPrefixedString())
continue
if tt == 16:
self.set_allow_multiple_eg(d.getBoolean())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_header().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_header_:
res+=prefix+"header <\n"
res+=self.header_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_allow_multiple_eg_: res+=prefix+("allow_multiple_eg: %s\n" % self.DebugFormatBool(self.allow_multiple_eg_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kheader = 3
kapp = 1
kallow_multiple_eg = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app",
2: "allow_multiple_eg",
3: "header",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.BeginTransactionRequest'
class CommitResponse_Version(ProtocolBuffer.ProtocolMessage):
has_root_entity_key_ = 0
has_version_ = 0
version_ = 0
def __init__(self, contents=None):
self.root_entity_key_ = Reference()
if contents is not None: self.MergeFromString(contents)
def root_entity_key(self): return self.root_entity_key_
def mutable_root_entity_key(self): self.has_root_entity_key_ = 1; return self.root_entity_key_
def clear_root_entity_key(self):self.has_root_entity_key_ = 0; self.root_entity_key_.Clear()
def has_root_entity_key(self): return self.has_root_entity_key_
def version(self): return self.version_
def set_version(self, x):
self.has_version_ = 1
self.version_ = x
def clear_version(self):
if self.has_version_:
self.has_version_ = 0
self.version_ = 0
def has_version(self): return self.has_version_
def MergeFrom(self, x):
assert x is not self
if (x.has_root_entity_key()): self.mutable_root_entity_key().MergeFrom(x.root_entity_key())
if (x.has_version()): self.set_version(x.version())
def Equals(self, x):
if x is self: return 1
if self.has_root_entity_key_ != x.has_root_entity_key_: return 0
if self.has_root_entity_key_ and self.root_entity_key_ != x.root_entity_key_: return 0
if self.has_version_ != x.has_version_: return 0
if self.has_version_ and self.version_ != x.version_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_root_entity_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: root_entity_key not set.')
elif not self.root_entity_key_.IsInitialized(debug_strs): initialized = 0
if (not self.has_version_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: version not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.root_entity_key_.ByteSize())
n += self.lengthVarInt64(self.version_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_root_entity_key_):
n += 1
n += self.lengthString(self.root_entity_key_.ByteSizePartial())
if (self.has_version_):
n += 1
n += self.lengthVarInt64(self.version_)
return n
def Clear(self):
self.clear_root_entity_key()
self.clear_version()
def OutputUnchecked(self, out):
out.putVarInt32(34)
out.putVarInt32(self.root_entity_key_.ByteSize())
self.root_entity_key_.OutputUnchecked(out)
out.putVarInt32(40)
out.putVarInt64(self.version_)
def OutputPartial(self, out):
if (self.has_root_entity_key_):
out.putVarInt32(34)
out.putVarInt32(self.root_entity_key_.ByteSizePartial())
self.root_entity_key_.OutputPartial(out)
if (self.has_version_):
out.putVarInt32(40)
out.putVarInt64(self.version_)
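  # This message is encoded as a group: the enclosing CommitResponse
  # writes a STARTGROUP tag (27) before and an ENDGROUP tag (28) after
  # each element, which is why TryMerge below stops on tag 28 rather
  # than on end-of-buffer.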
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 28: break
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_root_entity_key().TryMerge(tmp)
continue
if tt == 40:
self.set_version(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_root_entity_key_:
res+=prefix+"root_entity_key <\n"
res+=self.root_entity_key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt64(self.version_))
return res
class CommitResponse(ProtocolBuffer.ProtocolMessage):
has_cost_ = 0
cost_ = None
def __init__(self, contents=None):
self.version_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def cost(self):
if self.cost_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cost_ is None: self.cost_ = Cost()
finally:
self.lazy_init_lock_.release()
return self.cost_
def mutable_cost(self): self.has_cost_ = 1; return self.cost()
def clear_cost(self):
if self.has_cost_:
self.has_cost_ = 0;
if self.cost_ is not None: self.cost_.Clear()
def has_cost(self): return self.has_cost_
def version_size(self): return len(self.version_)
def version_list(self): return self.version_
def version(self, i):
return self.version_[i]
def mutable_version(self, i):
return self.version_[i]
def add_version(self):
x = CommitResponse_Version()
self.version_.append(x)
return x
def clear_version(self):
self.version_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
for i in xrange(x.version_size()): self.add_version().CopyFrom(x.version(i))
def Equals(self, x):
if x is self: return 1
if self.has_cost_ != x.has_cost_: return 0
if self.has_cost_ and self.cost_ != x.cost_: return 0
if len(self.version_) != len(x.version_): return 0
for e1, e2 in zip(self.version_, x.version_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
for p in self.version_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
n += 2 * len(self.version_)
for i in xrange(len(self.version_)): n += self.version_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
n += 2 * len(self.version_)
for i in xrange(len(self.version_)): n += self.version_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_cost()
self.clear_version()
def OutputUnchecked(self, out):
if (self.has_cost_):
out.putVarInt32(10)
out.putVarInt32(self.cost_.ByteSize())
self.cost_.OutputUnchecked(out)
for i in xrange(len(self.version_)):
out.putVarInt32(27)
self.version_[i].OutputUnchecked(out)
out.putVarInt32(28)
def OutputPartial(self, out):
if (self.has_cost_):
out.putVarInt32(10)
out.putVarInt32(self.cost_.ByteSizePartial())
self.cost_.OutputPartial(out)
for i in xrange(len(self.version_)):
out.putVarInt32(27)
self.version_[i].OutputPartial(out)
out.putVarInt32(28)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cost().TryMerge(tmp)
continue
if tt == 27:
self.add_version().TryMerge(d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_cost_:
res+=prefix+"cost <\n"
res+=self.cost_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Version%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcost = 1
kVersionGroup = 3
kVersionroot_entity_key = 4
kVersionversion = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cost",
3: "Version",
4: "root_entity_key",
5: "version",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STARTGROUP,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CommitResponse'
if _extension_runtime:
pass
__all__ = ['InternalHeader','Transaction','Query','Query_Filter','Query_Order','CompiledQuery','CompiledQuery_PrimaryScan','CompiledQuery_MergeJoinScan','CompiledQuery_EntityFilter','CompiledCursor','CompiledCursor_PositionIndexValue','CompiledCursor_Position','Cursor','Error','Cost','Cost_CommitCost','GetRequest','GetResponse','GetResponse_Entity','PutRequest','PutResponse','TouchRequest','TouchResponse','DeleteRequest','DeleteResponse','NextRequest','QueryResult','AllocateIdsRequest','AllocateIdsResponse','CompositeIndices','AddActionsRequest','AddActionsResponse','BeginTransactionRequest','CommitResponse','CommitResponse_Version']<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
var mean = require('meanio');
exports.render = function (req, res) {
function isAdmin() {
return req.user && req.user.roles.indexOf('admin') !== -1;
}
// Send some basic starting info to the view
res.render('index', {
user: req.user ? {
name: req.user.name,
_id: req.user._id,
username: req.user.username,
roles: req.user.roles
} : {},
modules: 'ho',<|fim▁hole|> });
};<|fim▁end|> | motti: 'motti is cool',
isAdmin: 'motti',
adminEnabled: isAdmin() && mean.moduleEnabled('mean-admin') |
<|file_name|>tree.ui.tablet-onepanel.js<|end_file_name|><|fim▁begin|>Joshfire.define(['joshfire/class', 'joshfire/tree.ui', 'joshfire/uielements/list', 'joshfire/uielements/panel', 'joshfire/uielements/panel.manager', 'joshfire/uielements/button', 'src/ui-components'], function(Class, UITree, List, Panel, PanelManager, Button, UI) {
return Class(UITree, {
buildTree: function() {
var app = this.app;
return [
{
id: 'sidebarleft',
type: Panel,
children: [
{
id: 'menu',
type: List,
dataPath: '/datasourcelist/',
itemInnerTemplate: '<div class="picto item-<%= item.config.col %>"></div><div class="name"><%= item.name %></div>',
onData: function() {} // trigger data, WTF?
}
]
},
{
id: 'sidebarright',
type: Panel,
children: [
{
id: 'header',
type: Panel,
htmlClass: 'header',
children: [
{
id: 'prev',
type: Button,
label: 'Prev',
autoShow: false
},
{
id: 'title', // the title or the logo
type: Panel,
innerTemplate: UI.tplHeader
}
]
},
{
id: 'content',
type: PanelManager,
uiMaster: '/sidebarleft/menu',
children: [
{
id: 'itemList',
type: List,
loadingTemplate: '<div class="loading"></div>',
itemTemplate: "<li id='<%=itemHtmlId%>' " +
"data-josh-ui-path='<%= path %>' data-josh-grid-id='<%= item.id %>'" +
"class='josh-List joshover item-<%= (item['@type'] || item.itemType).replace('/', '') %> mainitemlist " +
// grid view
"<% if ((item['@type'] || item.itemType) === 'ImageObject') { %>" +
"grid" +
"<% } else if ((item['@type'] || item.itemType) === 'VideoObject') { %>" +
// two rows
"rows" +
"<% } else { %>" +
// list view
"list" +
"<% } %>" +
"' >" +
"<%= itemInner %>" +
"</li>",
itemInnerTemplate:
'<% if ((item["@type"] || item.itemType) === "VideoObject") { %>' +
'<div class="title"><%= item.name %></div>' +
UI.getItemDescriptionTemplate(130) +
UI.tplItemPreview +
'<span class="list-arrow"></span>' +
'<% } else if ((item["@type"] || item.itemType) === "ImageObject") { %>' +
UI.tplItemThumbnail +
'<% } else if ((item["@type"] || item.itemType) === "Article/Status") { %>' +
UI.tplTweetItem +
'<% } else if ((item["@type"] || item.itemType) === "Event") { %>' +
UI.tplEventItem +
'<% } else { %>' +
'<%= item.name %><span class="list-arrow"></span>' +
'<% } %>'
},
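              // Each detail child below renders one schema.org item type
              // and toggles its own visibility in onData based on the
              // selected item's @type, so only one pane shows at a time.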
{
id: 'detail',
type: Panel,
htmlClass: 'detailView',
uiDataMaster: '/sidebarright/content/itemList',
loadingTemplate: '<div class="loading"></div>',
autoShow: false,
children: [
{
// Article (default)
id: 'article',
type: Panel,
uiDataMaster: '/sidebarright/content/itemList',
forceDataPathRefresh: true,
loadingTemplate: '<div class="loading"></div>',
innerTemplate:
'<div class="title"><h1><%= data.name %></h1>' +
UI.tplDataAuthor +
'<% if (data.articleBody) { print(data.articleBody); } %>',
onData: function(ui) {
var thisEl = app.ui.element('/sidebarright/content/detail/article').htmlEl;
var type = ui.data['@type'] || ui.data.itemType;
if (type === 'VideoObject' ||
type === 'ImageObject' ||
type === 'Event' ||
type === 'Article/Status'
) {
$(thisEl).hide();
}
else {
$(thisEl).show();
}
}
},
{
// Twitter
id: 'twitter',
type: Panel,<|fim▁hole|> onData: function(ui) {
var thisEl = app.ui.element('/sidebarright/content/detail/twitter').htmlEl;
if ((ui.data['@type'] || ui.data.itemType) === 'Article/Status') {
$(thisEl).show();
} else {
$(thisEl).hide();
}
}
},
{
// Flickr
id: 'image',
type: Panel,
uiDataMaster: '/sidebarright/content/itemList',
forceDataPathRefresh: true,
loadingTemplate: '<div class="loading"></div>',
innerTemplate: '<img src="<%= data.contentURL %>" />',
onData: function(ui) {
var thisEl = app.ui.element('/sidebarright/content/detail/image').htmlEl;
if ((ui.data['@type'] || ui.data.itemType) === 'ImageObject') {
$(thisEl).show();
} else {
$(thisEl).hide();
}
}
},
{
// Event
id: 'event',
type: Panel,
uiDataMaster: '/sidebarright/content/itemList',
forceDataPathRefresh: true,
loadingTemplate: '<div class="loading"></div>',
innerTemplate: UI.tplEventPage,
onData: function(ui) {
var thisEl = app.ui.element('/sidebarright/content/detail/event').htmlEl;
if ((ui.data['@type'] || ui.data.itemType) === 'Event') {
$(thisEl).show();
} else {
$(thisEl).hide();
}
}
},
{
// Video
id: 'video',
type: Panel,
uiDataMaster: '/sidebarright/content/itemList',
forceDataPathRefresh: true,
loadingTemplate: '<div class="loading"></div>',
onData: function(ui) {
var thisEl = app.ui.element('/sidebarright/content/detail/video').htmlEl,
player = app.ui.element('/sidebarright/content/detail/video/player.youtube');
if (((ui.data['@type'] || ui.data.itemType) === 'VideoObject') && ui.data.publisher && (ui.data.publisher.name === 'Youtube')) {
player.playWithStaticUrl({
url: ui.data.url.replace('http://www.youtube.com/watch?v=', ''),
width: '480px'
});
$(thisEl).show();
} else {
$(thisEl).hide();
}
},
children: [
{
id: 'title',
type: Panel,
uiDataMaster: '/sidebarright/content/itemList',
innerTemplate:
'<div class="title"><h1><%= data.name %></h1>' +
UI.tplDataAuthor +
'</div>'
},
{
id: 'player.youtube',
type: 'video.youtube',
autoShow: true,
controls: true,
noAutoPlay: false
}
]
}
]
},
{
id: 'about',
type: Panel,
loadingTemplate: '<div class="loading"></div>',
autoShow: false,
innerTemplate: UI.tplAboutPage
}
]
}
]
}
];
}
});
});<|fim▁end|> | uiDataMaster: '/sidebarright/content/itemList',
forceDataPathRefresh: true,
loadingTemplate: '<div class="loading"></div>',
innerTemplate: UI.tplTweetPage, |
<|file_name|>block_base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp 7/C-mode block storage systems.
"""
import math
import sys
import uuid
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class NetAppLun(object):
"""Represents a LUN on NetApp storage."""
def __init__(self, handle, name, size, metadata_dict):
self.handle = handle
self.name = name
self.size = size
self.metadata = metadata_dict or {}
def get_metadata_property(self, prop):
"""Get the metadata property of a LUN."""
if prop in self.metadata:
return self.metadata[prop]
name = self.name
LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s",
{'prop': prop, 'name': name})
def __str__(self, *args, **kwargs):
return 'NetApp LUN [handle:%s, name:%s, size:%s, metadata:%s]' % (
self.handle, self.name, self.size, self.metadata)
class NetAppBlockStorageLibrary(object):
"""NetApp block storage library for Data ONTAP."""
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
ALLOWED_LUN_OS_TYPES = ['linux', 'aix', 'hpux', 'image', 'windows',
'windows_2008', 'windows_gpt', 'solaris',
'solaris_efi', 'netware', 'openvms', 'hyper_v']
ALLOWED_IGROUP_HOST_TYPES = ['linux', 'aix', 'hpux', 'windows', 'solaris',
'netware', 'default', 'vmware', 'openvms',
'xen', 'hyper_v']
DEFAULT_LUN_OS = 'linux'
DEFAULT_HOST_TYPE = 'linux'
def __init__(self, driver_name, driver_protocol, **kwargs):
na_utils.validate_instantiation(**kwargs)
self.driver_name = driver_name
self.driver_protocol = driver_protocol
self.zapi_client = None
self._stats = {}
self.lun_table = {}
self.lun_ostype = None
self.host_type = None
self.lookup_service = fczm_utils.create_lookup_service()
self.app_version = kwargs.get("app_version", "unknown")
self.configuration = kwargs['configuration']
self.configuration.append_config_values(na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(
na_opts.netapp_provisioning_opts)
self.configuration.append_config_values(na_opts.netapp_san_opts)
def do_setup(self, context):
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.lun_ostype = (self.configuration.netapp_lun_ostype
or self.DEFAULT_LUN_OS)
self.host_type = (self.configuration.netapp_host_type
or self.DEFAULT_HOST_TYPE)
def check_for_setup_error(self):
"""Check that the driver is working and can communicate.
Discovers the LUNs on the NetApp server.
"""
if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES:
msg = _("Invalid value for NetApp configuration"
" option netapp_lun_ostype.")
LOG.error(msg)
raise exception.NetAppDriverException(msg)
if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES:
msg = _("Invalid value for NetApp configuration"
" option netapp_host_type.")
LOG.error(msg)
raise exception.NetAppDriverException(msg)
lun_list = self.zapi_client.get_lun_list()
self._extract_and_populate_luns(lun_list)
LOG.debug("Success getting list of LUNs from server.")
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata') or dict()
return metadata.get('Volume', None)
def create_volume(self, volume):
"""Driver entry point for creating a new volume (Data ONTAP LUN)."""
LOG.debug('create_volume on %s', volume['host'])
# get Data ONTAP volume name as pool name
pool_name = volume_utils.extract_host(volume['host'], level='pool')
if pool_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
lun_name = volume['name']
size = int(volume['size']) * units.Gi
metadata = {'OsType': self.lun_ostype,
'SpaceReserved': 'true',
'Path': '/vol/%s/%s' % (pool_name, lun_name)}
qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
qos_policy_group_name = (
na_utils.get_qos_policy_group_name_from_info(
qos_policy_group_info))
try:
self._create_lun(pool_name, lun_name, size, metadata,
qos_policy_group_name)
except Exception:
LOG.exception(_LE("Exception creating LUN %(name)s in pool "
"%(pool)s."),
{'name': lun_name, 'pool': pool_name})
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
msg = _("Volume %s could not be created.")
raise exception.VolumeBackendAPIException(data=msg % (
volume['name']))
LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s',
{'name': lun_name, 'qos': qos_policy_group_info})
metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name)
metadata['Volume'] = pool_name
metadata['Qtree'] = None
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def _setup_qos_for_volume(self, volume, extra_specs):
return None
def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
return
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
LOG.warning(_LW("No entry in LUN table for volume/snapshot"
" %(name)s."), {'name': name})
return
self.zapi_client.destroy_lun(metadata['Path'])
self.lun_table.pop(name)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
"""
pass
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot.
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self._get_lun_from_table(vol_name)
self._clone_lun(lun.name, snapshot_name, space_reserved='false')
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
source = {'name': snapshot['name'], 'size': snapshot['volume_size']}
return self._clone_source_to_destination(source, volume)
def create_cloned_volume(self, volume, src_vref):
src_lun = self._get_lun_from_table(src_vref['name'])
source = {'name': src_lun.name, 'size': src_vref['size']}
return self._clone_source_to_destination(source, volume)
def _clone_source_to_destination(self, source, destination_volume):
source_size = source['size']
destination_size = destination_volume['size']
source_name = source['name']
destination_name = destination_volume['name']
extra_specs = na_utils.get_volume_extra_specs(destination_volume)
qos_policy_group_info = self._setup_qos_for_volume(
destination_volume, extra_specs)
qos_policy_group_name = (
na_utils.get_qos_policy_group_name_from_info(
qos_policy_group_info))
try:
self._clone_lun(source_name, destination_name,
space_reserved='true',
qos_policy_group_name=qos_policy_group_name)
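            # A clone comes up at the source's size; if the new volume was
            # requested larger, extend it in place and delete the clone if
            # the resize fails.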
if destination_size != source_size:
try:
self.extend_volume(
destination_volume, destination_size,
qos_policy_group_name=qos_policy_group_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_LE("Resizing %s failed. Cleaning volume."),
destination_volume['id'])
self.delete_volume(destination_volume)
except Exception:
LOG.exception(_LE("Exception cloning volume %(name)s from source "
"volume %(source)s."),
{'name': destination_name, 'source': source_name})
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
msg = _("Volume %s could not be created from source volume.")
raise exception.VolumeBackendAPIException(
data=msg % destination_name)
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
raise NotImplementedError()
def _create_lun_handle(self, metadata):
"""Returns LUN handle based on filer type."""
raise NotImplementedError()
def _extract_lun_info(self, lun):
"""Extracts the LUNs from API and populates the LUN table."""
meta_dict = self._create_lun_meta(lun)
path = lun.get_child_content('path')
(_rest, _splitter, name) = path.rpartition('/')
handle = self._create_lun_handle(meta_dict)
size = lun.get_child_content('size')
return NetAppLun(handle, name, size, meta_dict)
def _extract_and_populate_luns(self, api_luns):
"""Extracts the LUNs from API and populates the LUN table."""
for lun in api_luns:
discovered_lun = self._extract_lun_info(lun)
self._add_lun_to_table(discovered_lun)
def _map_lun(self, name, initiator_list, initiator_type, lun_id=None):
"""Maps LUN to the initiator(s) and returns LUN ID assigned."""
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
igroup_name, ig_host_os, ig_type = self._get_or_create_igroup(
initiator_list, initiator_type, self.host_type)
if ig_host_os != self.host_type:
LOG.warning(_LW("LUN misalignment may occur for current"
" initiator group %(ig_nm)s) with host OS type"
" %(ig_os)s. Please configure initiator group"
" manually according to the type of the"
" host OS."),
{'ig_nm': igroup_name, 'ig_os': ig_host_os})
try:
return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
except na_api.NaApiError:
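            # Mapping can fail if the LUN is already mapped to this igroup;
            # keep the exception info so the original error can be re-raised
            # (Python 2 three-argument raise) when no existing mapping is found.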
exc_info = sys.exc_info()
(_igroup, lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
if lun_id is not None:
return lun_id
else:
raise exc_info[0], exc_info[1], exc_info[2]
def _unmap_lun(self, path, initiator_list):
"""Unmaps a LUN from given initiator."""
(igroup_name, _lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
self.zapi_client.unmap_lun(path, igroup_name)
def _find_mapped_lun_igroup(self, path, initiator_list):
"""Find an igroup for a LUN mapped to the given initiator(s)."""
raise NotImplementedError()
def _has_luns_mapped_to_initiators(self, initiator_list):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
return self.zapi_client.has_luns_mapped_to_initiators(initiator_list)
def _get_or_create_igroup(self, initiator_list, initiator_group_type,
host_os_type):
"""Checks for an igroup for a set of one or more initiators.
        Creates an igroup with the given host OS type and igroup type,
        and adds the initiators, if a matching igroup is not already present.
"""
igroups = self.zapi_client.get_igroup_by_initiators(initiator_list)
igroup_name = None
if igroups:
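            # Reuse the first matching igroup and report its actual OS and
            # group type back to the caller (used for the misalignment warning).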
igroup = igroups[0]
igroup_name = igroup['initiator-group-name']
host_os_type = igroup['initiator-group-os-type']
initiator_group_type = igroup['initiator-group-type']
if not igroup_name:
igroup_name = self._create_igroup_add_initiators(
initiator_group_type, host_os_type, initiator_list)
return igroup_name, host_os_type, initiator_group_type
def _create_igroup_add_initiators(self, initiator_group_type,
host_os_type, initiator_list):
"""Creates igroup and adds initiators."""
igroup_name = na_utils.OPENSTACK_PREFIX + six.text_type(uuid.uuid4())
self.zapi_client.create_igroup(igroup_name, initiator_group_type,
host_os_type)
for initiator in initiator_list:
self.zapi_client.add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):
msg = _("Object is not a NetApp LUN.")
raise exception.VolumeBackendAPIException(data=msg)
self.lun_table[lun.name] = lun
def _get_lun_from_table(self, name):
"""Gets LUN from cache table.
Refreshes cache if LUN not found in cache.
"""
lun = self.lun_table.get(name)
if lun is None:
lun_list = self.zapi_client.get_lun_list()
self._extract_and_populate_luns(lun_list)
lun = self.lun_table.get(name)
if lun is None:
raise exception.VolumeNotFound(volume_id=name)
return lun
def _clone_lun(self, name, new_name, space_reserved='true',
qos_policy_group_name=None, src_block=0, dest_block=0,
block_count=0):
"""Clone LUN with the given name to the new name."""
raise NotImplementedError()
def _get_lun_attr(self, name, attr):
"""Get the LUN attribute if found else None."""
try:
attr = getattr(self._get_lun_from_table(name), attr)
return attr
except exception.VolumeNotFound as e:
LOG.error(_LE("Message: %s"), e.msg)
except Exception as e:
LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e)
return None
def _create_lun_meta(self, lun):
raise NotImplementedError()
def _get_fc_target_wwpns(self, include_partner=True):
raise NotImplementedError()
def get_volume_stats(self, refresh=False):
"""Get volume stats.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
raise NotImplementedError()
def extend_volume(self, volume, new_size, qos_policy_group_name=None):
"""Extend an existing volume to the new size."""
name = volume['name']
lun = self._get_lun_from_table(name)
path = lun.metadata['Path']
curr_size_bytes = six.text_type(lun.size)
new_size_bytes = six.text_type(int(new_size) * units.Gi)
# Reused by clone scenarios.
# Hence comparing the stored size.
if curr_size_bytes != new_size_bytes:
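            # Prefer an in-place resize when the LUN geometry allows it;
            # otherwise fall back to a sub-LUN clone into a larger LUN.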
lun_geometry = self.zapi_client.get_lun_geometry(path)
if (lun_geometry and lun_geometry.get("max_resize")
and int(lun_geometry.get("max_resize")) >=
int(new_size_bytes)):
self.zapi_client.do_direct_resize(path, new_size_bytes)
else:
self._do_sub_clone_resize(
path, new_size_bytes,
qos_policy_group_name=qos_policy_group_name)
self.lun_table[name].size = new_size_bytes
else:
LOG.info(_LI("No need to extend volume %s"
" as it is already the requested new size."), name)
def _get_vol_option(self, volume_name, option_name):
"""Get the value for the volume option."""
value = None
options = self.zapi_client.get_volume_options(volume_name)
for opt in options:
if opt.get_child_content('name') == option_name:
value = opt.get_child_content('value')
break
return value
def _do_sub_clone_resize(self, path, new_size_bytes,
qos_policy_group_name=None):
"""Does sub LUN clone after verification.
        Clones the block ranges, swaps the LUNs, and deletes the
        older LUN after a successful clone.
"""
seg = path.split("/")
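        # path has the form /vol/<volume>/<lun>, so seg[2] is the volume
        # name and seg[-1] is the LUN name.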
LOG.info(_LI("Resizing LUN %s to new size using clone operation."),
seg[-1])
name = seg[-1]
vol_name = seg[2]
lun = self._get_lun_from_table(name)
metadata = lun.metadata
compression = self._get_vol_option(vol_name, 'compression')
if compression == "on":
msg = _('%s cannot be resized using clone operation'
' as it is hosted on compressed volume')
raise exception.VolumeBackendAPIException(data=msg % name)
else:
block_count = self._get_lun_block_count(path)
if block_count == 0:
msg = _('%s cannot be resized using clone operation'
' as it contains no blocks.')
raise exception.VolumeBackendAPIException(data=msg % name)
new_lun = 'new-%s' % name
self.zapi_client.create_lun(
vol_name, new_lun, new_size_bytes, metadata,
qos_policy_group_name=qos_policy_group_name)
try:
self._clone_lun(name, new_lun, block_count=block_count,
qos_policy_group_name=qos_policy_group_name)
self._post_sub_clone_resize(path)
except Exception:
with excutils.save_and_reraise_exception():
new_path = '/vol/%s/%s' % (vol_name, new_lun)
self.zapi_client.destroy_lun(new_path)
def _post_sub_clone_resize(self, path):
"""Try post sub clone resize in a transactional manner."""
st_tm_mv, st_nw_mv, st_del_old = None, None, None
seg = path.split("/")
LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
new_lun = 'new-%s' % (seg[-1])
tmp_lun = 'tmp-%s' % (seg[-1])
tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
new_path = "/vol/%s/%s" % (seg[2], new_lun)
try:
st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
st_nw_mv = self.zapi_client.move_lun(new_path, path)
st_del_old = self.zapi_client.destroy_lun(tmp_path)
except Exception as e:
if st_tm_mv is None:
msg = _("Failure staging LUN %s to tmp.")
raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
else:
if st_nw_mv is None:
self.zapi_client.move_lun(tmp_path, path)
msg = _("Failure moving new cloned LUN to %s.")
raise exception.VolumeBackendAPIException(
data=msg % (seg[-1]))
elif st_del_old is None:
LOG.error(_LE("Failure deleting staged tmp LUN %s."),
tmp_lun)
else:
LOG.error(_LE("Unknown exception in"
" post clone resize LUN %s."), seg[-1])
LOG.error(_LE("Exception details: %s"), e)
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
LOG.debug("Getting LUN block count.")
lun_infos = self.zapi_client.get_lun_by_args(path=path)
if not lun_infos:
seg = path.split('/')
msg = _('Failure getting LUN info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
bs = int(lun_info.get_child_content('block-size'))
ls = int(lun_info.get_child_content('size'))
block_count = ls / bs
return block_count
def _check_volume_type_for_lun(self, volume, lun, existing_ref,
extra_specs):
"""Checks if lun satifies the volume type."""
raise NotImplementedError()
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management.
existing_ref can contain source-id or source-name or both.
source-id: lun uuid.
        source-name: complete LUN path, e.g. /vol/vol0/lun.
"""
lun = self._get_existing_vol_with_manage_ref(existing_ref)
extra_specs = na_utils.get_volume_extra_specs(volume)
self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs)
qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
qos_policy_group_name = (
na_utils.get_qos_policy_group_name_from_info(
qos_policy_group_info))
path = lun.get_metadata_property('Path')
if lun.name == volume['name']:
new_path = path
LOG.info(_LI("LUN with given ref %s need not be renamed "
"during manage operation."), existing_ref)
else:
(rest, splitter, name) = path.rpartition('/')
new_path = '%s/%s' % (rest, volume['name'])
self.zapi_client.move_lun(path, new_path)
lun = self._get_existing_vol_with_manage_ref(
{'source-name': new_path})
if qos_policy_group_name is not None:
self.zapi_client.set_lun_qos_policy_group(new_path,
qos_policy_group_name)
self._add_lun_to_table(lun)
LOG.info(_LI("Manage operation completed for LUN with new path"
" %(path)s and uuid %(uuid)s."),
{'path': lun.get_metadata_property('Path'),
'uuid': lun.get_metadata_property('UUID')})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
lun = self._get_existing_vol_with_manage_ref(existing_ref)
return int(math.ceil(float(lun.size) / units.Gi))
def _get_existing_vol_with_manage_ref(self, existing_ref):
"""Get the corresponding LUN from the storage server."""
uuid = existing_ref.get('source-id')
path = existing_ref.get('source-name')
if not (uuid or path):
reason = _('Reference must contain either source-id'
' or source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lun_info = {}
lun_info.setdefault('path', path if path else None)
if hasattr(self, 'vserver') and uuid:
lun_info['uuid'] = uuid
luns = self.zapi_client.get_lun_by_args(**lun_info)
if luns:
for lun in luns:
netapp_lun = self._extract_lun_info(lun)
storage_valid = self._is_lun_valid_on_storage(netapp_lun)
                uuid_valid = (not uuid or
                              netapp_lun.get_metadata_property('UUID') == uuid)
if storage_valid and uuid_valid:
return netapp_lun
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=(_('LUN not found with given ref %s.') % existing_ref))
def _is_lun_valid_on_storage(self, lun):
"""Validate lun specific to storage system."""
return True
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
"""
managed_lun = self._get_lun_from_table(volume['name'])
LOG.info(_LI("Unmanaged LUN with current path %(path)s and uuid "
"%(uuid)s."),
{'path': managed_lun.get_metadata_property('Path'),
'uuid': managed_lun.get_metadata_property('UUID')
or 'unknown'})
def initialize_connection_iscsi(self, volume, connector):
"""Driver entry point to attach a volume to an instance.
Do the LUN masking on the storage system so the initiator can access
the LUN on the target. Also return the iSCSI properties so the
initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because we cannot store the
LUN number in the database. We only find out what the LUN number will
be during this method call so we construct the properties dictionary
ourselves.
"""
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
target_list = self.zapi_client.get_iscsi_target_details()
if not target_list:
raise exception.VolumeBackendAPIException(
data=_('Failed to get LUN target list for the LUN %s') % name)
LOG.debug("Successfully fetched target list for LUN %(name)s and "
"initiator %(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
preferred_target = self._get_preferred_target_from_list(
target_list)
if preferred_target is None:
msg = _('Failed to get target portal for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
(address, port) = (preferred_target['address'],
preferred_target['port'])
iqn = self.zapi_client.get_iscsi_service_details()
if not iqn:
msg = _('Failed to get target IQN for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def _get_preferred_target_from_list(self, target_details_list,
filter=None):
preferred_target = None
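        # Prefer the first target with an enabled interface; fall back to
        # the first target in the list if none is explicitly enabled.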
for target in target_details_list:
if filter and target['address'] not in filter:
continue
if target.get('interface-enabled', 'true') == 'true':
preferred_target = target
break
if preferred_target is None and len(target_details_list) > 0:
preferred_target = target_details_list[0]
return preferred_target
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, [initiator_name])
LOG.debug("Unmapped LUN %(name)s from the initiator "
"%(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '500a098280feeba5',
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5'],
'21000024ff406cc2': ['500a098280feeba5']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5'],
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5',
'500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5',
'500a098180feeba5']
}
}
}
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
volume_name = volume['name']
lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
target_wwpns, initiator_target_map, num_paths = (
self._build_initiator_target_map(connector))
if target_wwpns:
LOG.debug("Successfully fetched target details for LUN %(name)s "
"and initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
else:
raise exception.VolumeBackendAPIException(
data=_('Failed to get LUN target details for '
'the LUN %s') % volume_name)
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map}}
return target_info
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:returns: data - the target_wwns and initiator_target_map if the
zone is to be removed, otherwise the same map with
an empty dict for the 'data' key
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, initiators)
LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s",
{'name': name, 'initiators': initiators})
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if not self._has_luns_mapped_to_initiators(initiators):
# No more exports for this host, so tear down zone.
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map"))
target_wwpns, initiator_target_map, num_paths = (
self._build_initiator_target_map(connector))
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
def _build_initiator_target_map(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._get_fc_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service is not None:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)<|fim▁hole|>
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths<|fim▁end|> | |
<|file_name|>dashboard.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import time
from datetime import datetime
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from desktop.conf import TIME_ZONE
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str, smart_unicode
from desktop.lib.rest.http_client import RestException
from desktop.lib.view_util import format_duration_in_millis
from desktop.log.access import access_warn
from desktop.models import Document, Document2
from hadoop.fs.hadoopfs import Hdfs
from liboozie.oozie_api import get_oozie
from liboozie.credentials import Credentials
from liboozie.submission2 import Submission
from oozie.conf import OOZIE_JOBS_COUNT, ENABLE_CRON_SCHEDULING, ENABLE_V2
from oozie.forms import RerunForm, ParameterForm, RerunCoordForm, RerunBundleForm, UpdateCoordinatorForm
from oozie.models import Workflow as OldWorkflow, Job, utc_datetime_format, Bundle, Coordinator, get_link, History as OldHistory
from oozie.models2 import History, Workflow, WORKFLOW_NODE_PROPERTIES
from oozie.settings import DJANGO_APPS
from oozie.utils import convert_to_server_timezone
def get_history():
if ENABLE_V2.get():
return History
else:
return OldHistory
def get_workflow():
if ENABLE_V2.get():
return Workflow
else:
return OldWorkflow
LOG = logging.getLogger(__name__)
"""
Permissions:
A Workflow/Coordinator/Bundle can:
* be accessed only by its owner or a superuser or by a user with 'dashboard_jobs_access' permissions
* be submitted/modified only by its owner or a superuser
Permissions checking happens by calling:
* check_job_access_permission()
* check_job_edition_permission()
"""
def _get_workflows(user):
return [{
'name': workflow.name,
'owner': workflow.owner.username,
'value': workflow.uuid,
'id': workflow.id
} for workflow in [d.content_object for d in Document.objects.get_docs(user, Document2, extra='workflow2')]
]
def manage_oozie_jobs(request, job_id, action):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage an Oozie job.'))
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
response = {'status': -1, 'data': ''}
try:
oozie_api = get_oozie(request.user)
params = None
if action == 'change':
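      # Oozie's 'change' action takes a single semicolon-separated
      # key=value string covering endtime, pausetime and concurrency.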
pause_time_val = request.POST.get('pause_time')
if request.POST.get('clear_pause_time') == 'true':
pause_time_val = ''
end_time_val = request.POST.get('end_time')
if end_time_val:
end_time_val = convert_to_server_timezone(end_time_val, TIME_ZONE.get())
if pause_time_val:
pause_time_val = convert_to_server_timezone(pause_time_val, TIME_ZONE.get())
params = {'value': 'endtime=%s' % (end_time_val) + ';'
'pausetime=%s' % (pause_time_val) + ';'
'concurrency=%s' % (request.POST.get('concurrency'))}
elif action == 'ignore':
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'type': 'action',
'scope': ','.join(job.aggreate(request.POST.get('actions').split())),
}
response['data'] = oozie_api.job_control(job_id, action, parameters=params)
response['status'] = 0
if 'notification' in request.POST:
request.info(_(request.POST.get('notification')))
except RestException, ex:
ex_message = ex.message
if ex._headers.get('oozie-error-message'):
ex_message = ex._headers.get('oozie-error-message')
msg = "Error performing %s on Oozie job %s: %s." % (action, job_id, ex_message)
LOG.exception(msg)
response['data'] = _(msg)
return JsonResponse(response)
def bulk_manage_oozie_jobs(request):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage the Oozie jobs.'))
response = {'status': -1, 'data': ''}
if 'job_ids' in request.POST and 'action' in request.POST:
jobs = request.POST.get('job_ids').split()
response = {'totalRequests': len(jobs), 'totalErrors': 0, 'messages': ''}
oozie_api = get_oozie(request.user)
for job_id in jobs:
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
oozie_api.job_control(job_id, request.POST.get('action'))
except RestException, ex:
LOG.exception("Error performing bulk operation for job_id=%s", job_id)
response['totalErrors'] = response['totalErrors'] + 1
response['messages'] += str(ex)
return JsonResponse(response)
def show_oozie_error(view_func):
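  """Decorator translating Oozie REST errors into user-facing popup errors."""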
def decorate(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except RestException, ex:
LOG.exception("Error communicating with Oozie in %s", view_func.__name__)
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail) or 'Connection refused' in str(detail):
detail = _('The Oozie server is not running')
raise PopupException(_('An error occurred with Oozie.'), detail=detail)
return wraps(view_func)(decorate)
@show_oozie_error
def list_oozie_workflows(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
just_sla = request.GET.get('justsla') == 'true'
if request.GET.get('startcreatedtime'):
kwargs['filters'].extend([('startcreatedtime', request.GET.get('startcreatedtime'))])
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
wf_list = oozie_api.get_workflows(**kwargs)
json_jobs = wf_list.jobs
total_jobs = wf_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_job(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user, just_sla)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_workflows.mako', request, {
'user': request.user,
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_coordinators(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
co_list = oozie_api.get_coordinators(**kwargs)
json_jobs = co_list.jobs
total_jobs = co_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_coordinators.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
'enable_cron_scheduling': enable_cron_scheduling,
})
@show_oozie_error
def list_oozie_bundles(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
bundle_list = oozie_api.get_bundles(**kwargs)
json_jobs = bundle_list.jobs
total_jobs = bundle_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_bundles.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
if oozie_coordinator is not None:
setattr(oozie_workflow, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(oozie_workflow, 'oozie_bundle', oozie_bundle)
oozie_parent = oozie_workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
workflow_data = None
credentials = None
doc = None
hue_workflow = None
workflow_graph = 'MISSING' # default to prevent loading the graph tab for deleted workflows
full_node_list = None
if ENABLE_V2.get():
try:
# To update with the new History document model
hue_coord = get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: hue_coord.workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow: hue_workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow:
full_node_list = hue_workflow.nodes
workflow_id = hue_workflow.id
wid = {
'id': workflow_id
}
doc = Document2.objects.get(type='oozie-workflow2', **wid)
new_workflow = get_workflow()(document=doc)
workflow_data = new_workflow.get_data()
else:
try:
workflow_data = Workflow.gen_workflow_data_from_xml(request.user, oozie_workflow)
except Exception, e:
LOG.exception('Graph data could not be generated from Workflow %s: %s' % (oozie_workflow.id, e))
workflow_graph = ''
credentials = Credentials()
except:
LOG.exception("Error generating full page for running workflow %s" % job_id)
else:
history = get_history().cross_reference_submission_history(request.user, job_id)
hue_coord = history and history.get_coordinator() or get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or (history and history.get_workflow()) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: Job.objects.can_read_or_exception(request, hue_coord.workflow.id)
if hue_workflow: Job.objects.can_read_or_exception(request, hue_workflow.id)
if hue_workflow:
workflow_graph = hue_workflow.gen_status_graph(oozie_workflow)
full_node_list = hue_workflow.node_list
else:
workflow_graph, full_node_list = get_workflow().gen_status_graph_from_xml(request.user, oozie_workflow)
parameters = oozie_workflow.conf_dict.copy()
for action in oozie_workflow.actions:
action.oozie_coordinator = oozie_coordinator
action.oozie_bundle = oozie_bundle
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(full_node_list),
'graph': workflow_graph,
'actions': massaged_workflow_actions_for_json(oozie_workflow.get_working_actions(), oozie_coordinator, oozie_bundle)
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_workflow.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_workflow.id,
'parent_id': oozie_workflow.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
return render('dashboard/list_oozie_workflow.mako', request, {
'oozie_workflow': oozie_workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
'oozie_slas': oozie_slas,
'hue_workflow': hue_workflow,
'hue_coord': hue_coord,
'parameters': parameters,
'has_job_edition_permission': has_job_edition_permission,
'workflow_graph': workflow_graph,
'layout_json': json.dumps(workflow_data['layout'], cls=JSONEncoderForHTML) if workflow_data else '',
'workflow_json': json.dumps(workflow_data['workflow'], cls=JSONEncoderForHTML) if workflow_data else '',
'credentials_json': json.dumps(credentials.credentials.keys(), cls=JSONEncoderForHTML) if credentials else '',
'workflow_properties_json': json.dumps(WORKFLOW_NODE_PROPERTIES, cls=JSONEncoderForHTML),
'doc1_id': doc.doc.get().id if doc else -1,
'subworkflows_json': json.dumps(_get_workflows(request.user), cls=JSONEncoderForHTML),
'can_edit_json': json.dumps(doc is None or doc.doc.get().is_editable(request.user))
})
@show_oozie_error
def list_oozie_coordinator(request, job_id):
kwargs = {'cnt': 50, 'filters': []}
kwargs['offset'] = request.GET.get('offset', 1)
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
oozie_coordinator = check_job_access_permission(request, job_id, **kwargs)
# Cross reference the submission history (if any)
coordinator = get_history().get_coordinator_from_config(oozie_coordinator.conf_dict)
try:
if not ENABLE_V2.get():
coordinator = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
LOG.exception("Ignoring error getting oozie job coordinator for job_id=%s", job_id)
oozie_bundle = None
if request.GET.get('bundle_job_id'):
try:
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
except:
LOG.exception("Ignoring error getting oozie bundle for job_id=%s", job_id)
if request.GET.get('format') == 'json':
actions = massaged_coordinator_actions_for_json(oozie_coordinator, oozie_bundle)
return_obj = {
'id': oozie_coordinator.id,
'status': oozie_coordinator.status,
'progress': oozie_coordinator.get_progress(),
'nextTime': format_time(oozie_coordinator.nextMaterializedTime),
'endTime': format_time(oozie_coordinator.endTime),
'actions': actions,
'total_actions': oozie_coordinator.total
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_coordinator.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_coordinator.id,
'parent_id': oozie_coordinator.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
update_coord_form = UpdateCoordinatorForm(oozie_coordinator=oozie_coordinator)
return render('dashboard/list_oozie_coordinator.mako', request, {
'oozie_coordinator': oozie_coordinator,
'oozie_slas': oozie_slas,
'coordinator': coordinator,
'oozie_bundle': oozie_bundle,
'has_job_edition_permission': has_job_edition_permission,
'enable_cron_scheduling': enable_cron_scheduling,
'update_coord_form': update_coord_form,
})
@show_oozie_error
def list_oozie_bundle(request, job_id):
oozie_bundle = check_job_access_permission(request, job_id)
# Cross reference the submission history (if any)
bundle = None
try:
if ENABLE_V2.get():
bundle = get_history().get_bundle_from_config(oozie_bundle.conf_dict)
else:
bundle = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
LOG.exception("Ignoring error getting oozie job bundle for job_id=%s", job_id)
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_bundle.id,
'status': oozie_bundle.status,
'progress': oozie_bundle.get_progress(),
'endTime': format_time(oozie_bundle.endTime),
'actions': massaged_bundle_actions_for_json(oozie_bundle)
}
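    # Un-escape the double backslashes that json.dumps introduces before
    # returning the raw JSON response.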
return HttpResponse(json.dumps(return_obj).replace('\\\\', '\\'), content_type="application/json")
return render('dashboard/list_oozie_bundle.mako', request, {
'oozie_bundle': oozie_bundle,
'bundle': bundle,
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow_action(request, action):
try:
action = get_oozie(request.user).get_action(action)
workflow = check_job_access_permission(request, action.id.split('@')[0])
except RestException, ex:
msg = _("Error accessing Oozie action %s.") % (action,)
LOG.exception(msg)
raise PopupException(msg, detail=ex.message)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
workflow.oozie_coordinator = oozie_coordinator
workflow.oozie_bundle = oozie_bundle
oozie_parent = workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
return render('dashboard/list_oozie_workflow_action.mako', request, {
'action': action,
'workflow': workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
})
@show_oozie_error
def get_oozie_job_log(request, job_id):
oozie_api = get_oozie(request.user, api_version="v2")
check_job_access_permission(request, job_id)
kwargs = {'logfilter' : []}
if request.GET.get('format') == 'json':
if request.GET.get('recent'):
kwargs['logfilter'].extend([('recent', val) for val in request.GET.get('recent').split(':')])
if request.GET.get('limit'):
kwargs['logfilter'].extend([('limit', request.GET.get('limit'))])
if request.GET.get('loglevel'):
kwargs['logfilter'].extend([('loglevel', request.GET.get('loglevel'))])
if request.GET.get('text'):
kwargs['logfilter'].extend([('text', request.GET.get('text'))])
status_resp = oozie_api.get_job_status(job_id)
log = oozie_api.get_job_log(job_id, **kwargs)
return_obj = {
'id': job_id,
'status': status_resp['status'],
'log': log,
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
@show_oozie_error
def list_oozie_info(request):
api = get_oozie(request.user)
configuration = api.get_configuration()
oozie_status = api.get_oozie_status()
instrumentation = {}
metrics = {}
if 'org.apache.oozie.service.MetricsInstrumentationService' in [c.strip() for c in configuration.get('oozie.services.ext', '').split(',')]:
api2 = get_oozie(request.user, api_version="v2")
metrics = api2.get_metrics()
else:
instrumentation = api.get_instrumentation()
return render('dashboard/list_oozie_info.mako', request, {
'instrumentation': instrumentation,
'metrics': metrics,
'configuration': configuration,
'oozie_status': oozie_status,
})
@show_oozie_error
def list_oozie_sla(request):
oozie_api = get_oozie(request.user, api_version="v2")
if request.method == 'POST':
params = {}
job_name = request.POST.get('job_name')
if re.match('.*-oozie-oozi-[WCB]', job_name):
params['id'] = job_name
params['parent_id'] = job_name
else:
params['app_name'] = job_name
if 'useDates' in request.POST:
if request.POST.get('start'):
params['nominal_start'] = request.POST.get('start')
if request.POST.get('end'):
params['nominal_end'] = request.POST.get('end')
oozie_slas = oozie_api.get_oozie_slas(**params)
else:
oozie_slas = [] # or get latest?
if request.REQUEST.get('format') == 'json':
    massaged_slas = [massaged_sla_for_json(sla, request)
                     for sla in oozie_slas]
return HttpResponse(json.dumps({'oozie_slas': massaged_slas}), content_type="text/json")
configuration = oozie_api.get_configuration()
show_slas_hint = 'org.apache.oozie.sla.service.SLAService' not in configuration.get('oozie.services.ext', '')
return render('dashboard/list_oozie_sla.mako', request, {
'oozie_slas': oozie_slas,
'show_slas_hint': show_slas_hint
})
def massaged_sla_for_json(sla, request):
massaged_sla = {
'slaStatus': sla['slaStatus'],
'id': sla['id'],
'appType': sla['appType'],
'appName': sla['appName'],
'appUrl': get_link(sla['id']),
'user': sla['user'],
'nominalTime': sla['nominalTime'],
'expectedStart': sla['expectedStart'],
'actualStart': sla['actualStart'],
'expectedEnd': sla['expectedEnd'],
'actualEnd': sla['actualEnd'],
'jobStatus': sla['jobStatus'],
'expectedDuration': sla['expectedDuration'],
'actualDuration': sla['actualDuration'],
'lastModified': sla['lastModified']
}
return massaged_sla
@show_oozie_error
def sync_coord_workflow(request, job_id):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
hue_coord = get_history().get_coordinator_from_config(job.conf_dict)
hue_wf = (hue_coord and hue_coord.workflow) or get_history().get_workflow_from_config(job.conf_dict)
wf_application_path = job.conf_dict.get('wf_application_path') and Hdfs.urlsplit(job.conf_dict['wf_application_path'])[2] or ''
coord_application_path = job.conf_dict.get('oozie.coord.application.path') and Hdfs.urlsplit(job.conf_dict['oozie.coord.application.path'])[2] or ''
properties = hue_coord and hue_coord.properties and dict([(param['name'], param['value']) for param in hue_coord.properties]) or None
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
# Update workflow params in coordinator
hue_coord.clear_workflow_params()
properties = dict([(param['name'], param['value']) for param in hue_coord.properties])
# Deploy WF XML
submission = Submission(user=request.user, job=hue_wf, fs=request.fs, jt=request.jt, properties=properties)
submission._create_file(wf_application_path, hue_wf.XML_FILE_NAME, hue_wf.to_xml(mapping=properties), do_as=True)
# Deploy Coordinator XML
job.conf_dict.update(mapping)
submission = Submission(user=request.user, job=hue_coord, fs=request.fs, jt=request.jt, properties=job.conf_dict, oozie_id=job.id)
submission._create_file(coord_application_path, hue_coord.XML_FILE_NAME, hue_coord.to_xml(mapping=job.conf_dict), do_as=True)
# Server picks up deployed Coordinator XML changes after running 'update' action
submission.update_coord()
request.info(_('Successfully updated Workflow definition'))
return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
else:
      request.error(_('Invalid submission form: %s') % params_form.errors)
else:
new_params = hue_wf and hue_wf.find_all_parameters() or []
new_params = dict([(param['name'], param['value']) for param in new_params])
# Set previous values
if properties:
new_params = dict([(key, properties[key]) if key in properties.keys() else (key, new_params[key]) for key, value in new_params.iteritems()])
initial_params = ParameterForm.get_initial_params(new_params)
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor2/submit_job_popup.mako', request, {
'params_form': params_form,
'name': _('Job'),
'header': _('Sync Workflow definition?'),
'action': reverse('oozie:sync_coord_workflow', kwargs={'job_id': job_id})
}, force_template=True).content
return JsonResponse(popup, safe=False)
@show_oozie_error
def rerun_oozie_job(request, job_id, app_path):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
oozie_workflow = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_workflow, request.user)
if request.method == 'POST':
rerun_form = RerunForm(request.POST, oozie_workflow=oozie_workflow)
params_form = ParametersFormSet(request.POST)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
if request.POST['rerun_form_choice'] == 'fail_nodes':
args['fail_nodes'] = 'true'
else:
args['skip_nodes'] = ','.join(rerun_form.cleaned_data['skip_nodes'])
args['deployment_dir'] = app_path
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_workflow(request, job_id, args, mapping)
request.info(_('Workflow re-running.'))
return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
else:
      request.error(_('Invalid submission form: %s %s') % (rerun_form.errors, params_form.errors))
else:
rerun_form = RerunForm(oozie_workflow=oozie_workflow)
initial_params = ParameterForm.get_initial_params(oozie_workflow.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_job_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_job', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_workflow(request, oozie_id, run_args, mapping):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, properties=mapping, oozie_id=oozie_id)
job_id = submission.rerun(**run_args)
return job_id
except RestException, ex:
msg = _("Error re-running workflow %s.") % (oozie_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_coordinator(request, job_id, app_path):
oozie_coordinator = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_coordinator, request.user)
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
rerun_form = RerunCoordForm(request.POST, oozie_coordinator=oozie_coordinator)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
args['deployment_dir'] = app_path
params = {
'type': 'action',
'scope': ','.join(oozie_coordinator.aggreate(rerun_form.cleaned_data['actions'])),
'refresh': rerun_form.cleaned_data['refresh'],
'nocleanup': rerun_form.cleaned_data['nocleanup'],
}
properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_coordinator(request, job_id, args, params, properties)
request.info(_('Coordinator re-running.'))
return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % smart_unicode(rerun_form.errors))
return list_oozie_coordinator(request, job_id)
else:
rerun_form = RerunCoordForm(oozie_coordinator=oozie_coordinator)
initial_params = ParameterForm.get_initial_params(oozie_coordinator.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_coord_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_coord', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_coordinator(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
job_id = submission.rerun_coord(params=params, **args)
return job_id
except RestException, ex:
msg = _("Error re-running coordinator %s.") % (oozie_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_bundle(request, job_id, app_path):
oozie_bundle = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_bundle, request.user)
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
rerun_form = RerunBundleForm(request.POST, oozie_bundle=oozie_bundle)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
args['deployment_dir'] = app_path
params = {
'coord-scope': ','.join(rerun_form.cleaned_data['coordinators']),
'refresh': rerun_form.cleaned_data['refresh'],
'nocleanup': rerun_form.cleaned_data['nocleanup'],
}
if rerun_form.cleaned_data['start'] and rerun_form.cleaned_data['end']:
date = {
'date-scope':
'%(start)s::%(end)s' % {
'start': utc_datetime_format(rerun_form.cleaned_data['start']),
'end': utc_datetime_format(rerun_form.cleaned_data['end'])
}
}
params.update(date)
properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_bundle(request, job_id, args, params, properties)
request.info(_('Bundle re-running.'))
return redirect(reverse('oozie:list_oozie_bundle', kwargs={'job_id': job_id}))
else:
      request.error(_('Invalid submission form: %s') % (rerun_form.errors,))
return list_oozie_bundle(request, job_id)
else:
rerun_form = RerunBundleForm(oozie_bundle=oozie_bundle)
initial_params = ParameterForm.get_initial_params(oozie_bundle.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_bundle_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_bundle', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_bundle(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
job_id = submission.rerun_bundle(params=params, **args)
return job_id
except RestException, ex:
msg = _("Error re-running bundle %s.") % (oozie_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ex))
def submit_external_job(request, application_path):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
mapping['dryrun'] = request.POST.get('dryrun_checkbox') == 'on'
application_name = os.path.basename(application_path)
application_class = Bundle if application_name == 'bundle.xml' else Coordinator if application_name == 'coordinator.xml' else get_workflow()
mapping[application_class.get_application_path_key()] = application_path
try:
submission = Submission(request.user, fs=request.fs, jt=request.jt, properties=mapping)
job_id = submission.run(application_path)
except RestException, ex:
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail):
detail = '%s: %s' % (_('The Oozie server is not running'), detail)
LOG.exception(smart_str(detail))
raise PopupException(_("Error submitting job %s") % (application_path,), detail=detail)
request.info(_('Oozie job submitted'))
view = 'list_oozie_bundle' if application_name == 'bundle.xml' else 'list_oozie_coordinator' if application_name == 'coordinator.xml' else 'list_oozie_workflow'
return redirect(reverse('oozie:%s' % view, kwargs={'job_id': job_id}))
else:
      request.error(_('Invalid submission form: %s') % params_form.errors)
else:
parameters = Submission(request.user, fs=request.fs, jt=request.jt).get_external_parameters(application_path)
initial_params = ParameterForm.get_initial_params(parameters)
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor/submit_job_popup.mako', request, {
'params_form': params_form,
'name': _('Job'),
'action': reverse('oozie:submit_external_job', kwargs={'application_path': application_path}),
'show_dryrun': os.path.basename(application_path) != 'bundle.xml'
}, force_template=True).content
return JsonResponse(popup, safe=False)
def massaged_workflow_actions_for_json(workflow_actions, oozie_coordinator, oozie_bundle):
actions = []
for action in workflow_actions:
if oozie_coordinator is not None:
setattr(action, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(action, 'oozie_bundle', oozie_bundle)
massaged_action = {
'id': action.id,
'log': action.get_absolute_log_url(),
'url': action.get_absolute_url(),
'name': action.name,
'type': action.type,
'status': action.status,
'externalIdUrl': action.get_external_id_url(),
'externalId': action.externalId,
'startTime': format_time(action.startTime),
'endTime': format_time(action.endTime),
'retries': action.retries,
'errorCode': action.errorCode,
'errorMessage': action.errorMessage,
'transition': action.transition,
'data': action.data,
}
actions.append(massaged_action)
return actions
def massaged_coordinator_actions_for_json(coordinator, oozie_bundle):
coordinator_id = coordinator.id
coordinator_actions = coordinator.get_working_actions()
actions = []
related_job_ids = []
related_job_ids.append('coordinator_job_id=%s' % coordinator_id)
if oozie_bundle is not None:
    related_job_ids.append('bundle_job_id=%s' % oozie_bundle.id)
for action in coordinator_actions:
massaged_action = {
'id': action.id,
'url': action.externalId and reverse('oozie:list_oozie_workflow', kwargs={'job_id': action.externalId}) + '?%s' % '&'.join(related_job_ids) or '',
'number': action.actionNumber,
'type': action.type,
'status': action.status,
'externalId': action.externalId or '-',
'externalIdUrl': action.externalId and reverse('oozie:list_oozie_workflow_action', kwargs={'action': action.externalId}) or '',
'nominalTime': format_time(action.nominalTime),
'title': action.title,
'createdTime': format_time(action.createdTime),
'lastModifiedTime': format_time(action.lastModifiedTime),
'errorCode': action.errorCode,
'errorMessage': action.errorMessage,
'missingDependencies': action.missingDependencies
}
actions.append(massaged_action)
# Sorting for Oozie < 4.1 backward compatibility
actions.sort(key=lambda k: k['number'], reverse=True)
return actions
def massaged_bundle_actions_for_json(bundle):
bundle_actions = bundle.get_working_actions()
actions = []
for action in bundle_actions:
massaged_action = {
'id': action.coordJobId,
'url': action.coordJobId and reverse('oozie:list_oozie_coordinator', kwargs={'job_id': action.coordJobId}) + '?bundle_job_id=%s' % bundle.id or '',
'name': action.coordJobName,
'type': action.type,
'status': action.status,
'externalId': action.coordExternalId or '-',
'frequency': action.frequency,
'timeUnit': action.timeUnit,
'nextMaterializedTime': action.nextMaterializedTime,
'concurrency': action.concurrency,
'pauseTime': action.pauseTime,
'user': action.user,
'acl': action.acl,
'timeOut': action.timeOut,
'coordJobPath': action.coordJobPath,
'executionPolicy': action.executionPolicy,
'startTime': action.startTime,
'endTime': action.endTime,
'lastAction': action.lastAction
}
actions.insert(0, massaged_action)
return actions
def format_time(st_time):
if st_time is None:
return '-'
elif type(st_time) == time.struct_time:
return time.strftime("%a, %d %b %Y %H:%M:%S", st_time)
else:
return st_time
def catch_unicode_time(u_time):
if type(u_time) == time.struct_time:
return u_time
else:
    return datetime.strptime(u_time, '%a, %d %b %Y %H:%M:%S %Z').timetuple()
def massaged_oozie_jobs_for_json(oozie_jobs, user, just_sla=False):
jobs = []
for job in oozie_jobs:
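    # Note: 'and' binds tighter than 'or', so the appName check only applies
    # when filtering on SLAs; the parentheses below make that grouping explicit.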
    if not just_sla or ((just_sla and job.has_sla) and job.appName != 'pig-app-hue-script'):
last_modified_time_millis = hasattr(job, 'lastModTime') and job.lastModTime and (time.time() - time.mktime(job.lastModTime)) * 1000 or 0
duration_millis = job.endTime and job.startTime and ((time.mktime(job.endTime) - time.mktime(job.startTime)) * 1000) or 0
massaged_job = {
'id': job.id,
'lastModTime': hasattr(job, 'lastModTime') and job.lastModTime and format_time(job.lastModTime) or None,
'lastModTimeInMillis': last_modified_time_millis,
'lastModTimeFormatted': last_modified_time_millis and format_duration_in_millis(last_modified_time_millis) or None,
'kickoffTime': hasattr(job, 'kickoffTime') and job.kickoffTime and format_time(job.kickoffTime) or '',
'kickoffTimeInMillis': hasattr(job, 'kickoffTime') and job.kickoffTime and time.mktime(catch_unicode_time(job.kickoffTime)) or 0,
'nextMaterializedTime': hasattr(job, 'nextMaterializedTime') and job.nextMaterializedTime and format_time(job.nextMaterializedTime) or '',
'nextMaterializedTimeInMillis': hasattr(job, 'nextMaterializedTime') and job.nextMaterializedTime and time.mktime(job.nextMaterializedTime) or 0,<|fim▁hole|> 'concurrency': hasattr(job, 'concurrency') and job.concurrency or None,
'endTimeInMillis': job.endTime and time.mktime(job.endTime) or 0,
'status': job.status,
'isRunning': job.is_running(),
'duration': duration_millis and format_duration_in_millis(duration_millis) or None,
'durationInMillis': duration_millis,
'appName': job.appName,
'progress': job.get_progress(),
'user': job.user,
'absoluteUrl': job.get_absolute_url(),
'canEdit': has_job_edition_permission(job, user),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'kill'}),
'suspendUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'suspend'}),
'resumeUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'resume'}),
'created': hasattr(job, 'createdTime') and job.createdTime and format_time(job.createdTime) or '',
'createdInMillis': hasattr(job, 'createdTime') and job.createdTime and time.mktime(catch_unicode_time(job.createdTime)) or 0,
'startTime': hasattr(job, 'startTime') and format_time(job.startTime) or None,
'startTimeInMillis': hasattr(job, 'startTime') and job.startTime and time.mktime(job.startTime) or 0,
'run': hasattr(job, 'run') and job.run or 0,
'frequency': hasattr(job, 'frequency') and Coordinator.CRON_MAPPING.get(job.frequency, job.frequency) or None,
'timeUnit': hasattr(job, 'timeUnit') and job.timeUnit or None,
'parentUrl': hasattr(job, 'parentId') and job.parentId and get_link(job.parentId) or '',
'submittedManually': hasattr(job, 'parentId') and (job.parentId is None or 'C@' not in job.parentId)
}
jobs.append(massaged_job)
return { 'jobs': jobs }
def check_job_access_permission(request, job_id, **kwargs):
"""
  Ensure that the user has access to the job submitted to Oozie.
  Arg: Oozie 'workflow', 'coordinator' or 'bundle' ID.
  Return: the Oozie workflow, coordinator or bundle, or raise an exception.
  Note: it takes an ID as input and returns the full object (not an ID).
"""
if job_id is not None:
oozie_api = get_oozie(request.user)
if job_id.endswith('W'):
get_job = oozie_api.get_job
elif job_id.endswith('C'):
get_job = oozie_api.get_coordinator
else:
get_job = oozie_api.get_bundle
try:
if job_id.endswith('C'):
oozie_job = get_job(job_id, **kwargs)
else:
oozie_job = get_job(job_id)
except RestException, ex:
msg = _("Error accessing Oozie job %s.") % (job_id,)
LOG.exception(msg)
      raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ''))
if request.user.is_superuser \
or oozie_job.user == request.user.username \
or has_dashboard_jobs_access(request.user):
return oozie_job
else:
message = _("Permission denied. %(username)s does not have the permissions to access job %(id)s.") % \
{'username': request.user.username, 'id': oozie_job.id}
access_warn(request, message)
raise PopupException(message)
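# Dispatch summary for the helper above (IDs are hypothetical): a job_id ending
# in 'W' (e.g. '0000123-150801000000000-oozie-oozi-W') is fetched as a workflow,
# one ending in 'C' as a coordinator (with **kwargs forwarded), and anything
# else as a bundle.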
def check_job_edition_permission(oozie_job, user):
if has_job_edition_permission(oozie_job, user):
return oozie_job
else:
message = _("Permission denied. %(username)s does not have the permissions to modify job %(id)s.") % \
{'username': user.username, 'id': oozie_job.id}
raise PopupException(message)
def has_job_edition_permission(oozie_job, user):
return user.is_superuser or oozie_job.user == user.username
def has_dashboard_jobs_access(user):
return user.is_superuser or user.has_hue_permission(action="dashboard_jobs_access", app=DJANGO_APPS[0])<|fim▁end|> | 'timeOut': hasattr(job, 'timeOut') and job.timeOut or None,
'endTime': job.endTime and format_time(job.endTime) or None,
      'pauseTime': hasattr(job, 'pauseTime') and job.pauseTime and format_time(job.pauseTime) or None, |
<|file_name|>cow.rs<|end_file_name|><|fim▁begin|>#![allow(unused_imports)]
use geometry::prim::{Prim};<|fim▁hole|>use light::light::{Light};
use light::lights::{PointLight, SphereLight};
use material::materials::{CookTorranceMaterial, FlatMaterial, PhongMaterial};
use material::Texture;
use material::textures::{CheckerTexture, CubeMap, UVTexture, ImageTexture};
use raytracer::animator::CameraKeyframe;
use scene::{Camera, Scene};
use vec3::Vec3;
// 5000 polys, cow. Octree helps.
pub fn get_camera(image_width: u32, image_height: u32, fov: f64) -> Camera {
Camera::new(
Vec3 { x: -2.0, y: 4.0, z: 10.0 },
Vec3 { x: 0.0, y: 0.0, z: 0.0 },
Vec3 { x: 0.0, y: 1.0, z: 0.0 },
fov,
image_width,
image_height
)
}
pub fn get_scene() -> Scene {
let mut lights: Vec<Box<Light+Send+Sync>> = Vec::new();
lights.push(Box::new(SphereLight { position: Vec3 {x: 3.0, y: 10.0, z: 6.0}, color: Vec3::one(), radius: 5.0 }));
let red = CookTorranceMaterial { k_a: 0.0, k_d: 0.6, k_s: 1.0, k_sg: 0.2, k_tg: 0.0, gauss_constant: 30.0, roughness: 0.1, glossiness: 0.0, ior: 0.8, ambient: Vec3::one(), diffuse: Vec3 { x: 1.0, y: 0.25, z: 0.1 }, specular: Vec3::one(), transmission: Vec3::zero(), diffuse_texture: None };
let green = CookTorranceMaterial { k_a: 0.0, k_d: 0.5, k_s: 0.4, k_sg: 0.1, k_tg: 0.0, gauss_constant: 25.0, roughness: 0.4, glossiness: 0.0, ior: 0.95, ambient: Vec3::one(), diffuse: Vec3 { x: 0.2, y: 0.7, z: 0.2 }, specular: Vec3::one(), transmission: Vec3::zero(), diffuse_texture: None };
let mut prims: Vec<Box<Prim+Send+Sync>> = Vec::new();
prims.push(Box::new(Plane { a: 0.0, b: 1.0, c: 0.0, d: 3.6, material: Box::new(green) }));
    let cow = ::util::import::from_obj(red, true, "./docs/assets/models/cow.obj").ok().expect("failed to load obj model");
for triangle in cow.triangles.into_iter() { prims.push(triangle); }
println!("Generating octree...");
let octree = prims.into_iter().collect();
println!("Octree generated...");
Scene {
lights: lights,
octree: octree,
background: Vec3 { x: 0.3, y: 0.5, z: 0.8 },
skybox: None
}
}<|fim▁end|> | use geometry::prims::{Plane, Sphere, Triangle}; |
<|file_name|>bfi.rs<|end_file_name|><|fim▁begin|>use crate::core::bits::Bits;
use crate::core::instruction::{BfiParams, Instruction};
use crate::core::register::Reg;
#[allow(non_snake_case)]
pub fn decode_BFI_t1(opcode: u32) -> Instruction {
let rn: u8 = opcode.get_bits(16..20) as u8;
let rd: u8 = opcode.get_bits(8..12) as u8;
let imm3: u8 = opcode.get_bits(12..15) as u8;
let imm2: u8 = opcode.get_bits(6..8) as u8;
let lsbit = u32::from((imm3 << 2) + imm2);
let msbit = opcode.get_bits(0..5);
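    // Worked example (hypothetical encoding): imm3 = 0b010 and imm2 = 0b01 give
    // lsbit = (0b010 << 2) + 0b01 = 9; an msbit field of 12 then yields a
    // width of 12 - 9 + 1 = 4 bits starting at bit 9.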
<|fim▁hole|> let width = msbit - lsbit + 1;
Instruction::BFI {
params: BfiParams {
rd: Reg::from(rd),
rn: Reg::from(rn),
lsbit: lsbit as usize,
width: width as usize,
},
}
}<|fim▁end|> | // msbit = lsbit + width -1 <=>
// width = msbit - lsbit + 1 |
<|file_name|>probe_pins.rs<|end_file_name|><|fim▁begin|>extern crate serial;
use std::env;
use std::thread;
use std::time::Duration;
use serial::prelude::*;
const SETTINGS: serial::PortSettings = serial::PortSettings {
baud_rate: serial::Baud9600,
char_size: serial::Bits8,
parity: serial::ParityNone,
stop_bits: serial::Stop1,
flow_control: serial::FlowNone,
};
fn main() {
for arg in env::args_os().skip(1) {
let mut port = serial::open(&arg).unwrap();<|fim▁hole|>}
fn probe_pins<T: SerialPort>(port: &mut T) -> serial::Result<()> {
try!(port.configure(&SETTINGS));
try!(port.set_timeout(Duration::from_millis(100)));
try!(port.set_rts(false));
try!(port.set_dtr(false));
let mut rts = false;
let mut dtr = false;
let mut toggle = true;
loop {
thread::sleep(Duration::from_secs(1));
if toggle {
rts = !rts;
try!(port.set_rts(rts));
}
else {
dtr = !dtr;
try!(port.set_dtr(dtr));
}
println!("RTS={:5?} DTR={:5?} CTS={:5?} DSR={:5?} RI={:5?} CD={:?}",
rts,
dtr,
try!(port.read_cts()),
try!(port.read_dsr()),
try!(port.read_ri()),
try!(port.read_cd()));
toggle = !toggle;
}
}<|fim▁end|> | println!("opened device {:?}", arg);
probe_pins(&mut port).unwrap();
} |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>/*<|fim▁hole|> * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use anyhow::{Context, Result};
use std::{io::Write, path::Path, process::Command};
fn main() -> Result<()> {
let out_dir = std::env::var("CARGO_MANIFEST_DIR")?;
let python_script = concat!(env!("CARGO_MANIFEST_DIR"), "/src/build_resnet.py");
let synset_txt = concat!(env!("CARGO_MANIFEST_DIR"), "/synset.txt");
println!("cargo:rerun-if-changed={}", python_script);
println!("cargo:rerun-if-changed={}", synset_txt);
let output = Command::new("python3")
.arg(python_script)
.arg(&format!("--build-dir={}", out_dir))
.output()
.with_context(|| anyhow::anyhow!("failed to run python3"))?;
if !output.status.success() {
std::io::stdout()
.write_all(&output.stderr)
.context("Failed to write error")?;
panic!("Failed to execute build script");
}
assert!(
Path::new(&format!("{}/deploy_lib.o", out_dir)).exists(),
"Could not prepare demo: {}",
String::from_utf8(output.stderr)
.unwrap()
.trim()
.split("\n")
.last()
.unwrap_or("")
);
println!("cargo:rustc-link-search=native={}", out_dir);
Ok(())
}<|fim▁end|> | * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file |
<|file_name|>shift_swap_network_test.py<|end_file_name|><|fim▁begin|># Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import pytest
import cirq
import cirq.contrib.acquaintance as cca
def random_part_lens(max_n_parts, max_part_size):
return tuple(random.randint(1, max_part_size) for _ in range(random.randint(1, max_n_parts)))
@pytest.mark.parametrize(
'left_part_lens,right_part_lens',
[tuple(random_part_lens(7, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_acquaintance_opps(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
n_qubits = gate.qubit_count()
qubits = cirq.LineQubit.range(n_qubits)
strategy = cirq.Circuit(gate(*qubits))
# actual_opps
initial_mapping = {q: i for i, q in enumerate(qubits)}
actual_opps = cca.get_logical_acquaintance_opportunities(strategy, initial_mapping)
# expected opps
i = 0
sides = ('left', 'right')
parts = {side: [] for side in sides}
for side, part_lens in zip(sides, (left_part_lens, right_part_lens)):
for part_len in part_lens:
parts[side].append(set(range(i, i + part_len)))
i += part_len
expected_opps = set(
frozenset(left_part | right_part)
for left_part, right_part in itertools.product(parts['left'], parts['right'])
)
assert actual_opps == expected_opps
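# Worked example (part lengths are hypothetical): left_part_lens=(1, 2) and
# right_part_lens=(1,) give left parts {0} and {1, 2} and right part {3}, so
# expected_opps == {frozenset({0, 3}), frozenset({1, 2, 3})}.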
circuit_diagrams = {
(
'undecomposed',
(1,) * 3,
(1,) * 3,
): """
0: ───(0, 0, 0)↦(1, 0, 0)───
│
1: ───(0, 1, 0)↦(1, 1, 0)───
│
2: ───(0, 2, 0)↦(1, 2, 0)───
│
3: ───(1, 0, 0)↦(0, 0, 0)───
│
4: ───(1, 1, 0)↦(0, 1, 0)───
│
5: ───(1, 2, 0)↦(0, 2, 0)───
""",
(
'decomposed',
(1,) * 3,
(1,) * 3,
): """
0: ───────────────────────█───╲0╱───────────────────────
│ │
1: ─────────────█───╲0╱───█───╱1╲───█───╲0╱─────────────
│ │ │ │
2: ───█───╲0╱───█───╱1╲───█───╲0╱───█───╱1╲───█───╲0╱───
│ │ │ │ │ │
3: ───█───╱1╲───█───╲0╱───█───╱1╲───█───╲0╱───█───╱1╲───
│ │ │ │
4: ─────────────█───╱1╲───█───╲0╱───█───╱1╲─────────────
│ │
5: ───────────────────────█───╱1╲───────────────────────
""",
(
'undecomposed',
(2,) * 3,
(2,) * 3,
): """
0: ────(0, 0, 0)↦(1, 0, 0)───
│
1: ────(0, 0, 1)↦(1, 0, 1)───
│
2: ────(0, 1, 0)↦(1, 1, 0)───
│
3: ────(0, 1, 1)↦(1, 1, 1)───
│
4: ────(0, 2, 0)↦(1, 2, 0)───
│
5: ────(0, 2, 1)↦(1, 2, 1)───
│
6: ────(1, 0, 0)↦(0, 0, 0)───
│
7: ────(1, 0, 1)↦(0, 0, 1)───
│
8: ────(1, 1, 0)↦(0, 1, 0)───
│
9: ────(1, 1, 1)↦(0, 1, 1)───
│
10: ───(1, 2, 0)↦(0, 2, 0)───
│
11: ───(1, 2, 1)↦(0, 2, 1)───
""",
(
'decomposed',
(2,) * 3,
(2,) * 3,
): """
0: ────────────────────────█───╲0╱───────────────────────
│ │
1: ────────────────────────█───╲1╱───────────────────────
│ │
2: ──────────────█───╲0╱───█───╱2╲───█───╲0╱─────────────
│ │ │ │ │ │
3: ──────────────█───╲1╱───█───╱3╲───█───╲1╱─────────────
│ │ │ │
4: ────█───╲0╱───█───╱2╲───█───╲0╱───█───╱2╲───█───╲0╱───
│ │ │ │ │ │ │ │ │ │
5: ────█───╲1╱───█───╱3╲───█───╲1╱───█───╱3╲───█───╲1╱───
│ │ │ │ │ │
6: ────█───╱2╲───█───╲0╱───█───╱2╲───█───╲0╱───█───╱2╲───
│ │ │ │ │ │ │ │ │ │
7: ────█───╱3╲───█───╲1╱───█───╱3╲───█───╲1╱───█───╱3╲───
│ │ │ │
8: ──────────────█───╱2╲───█───╲0╱───█───╱2╲─────────────
│ │ │ │ │ │
9: ──────────────█───╱3╲───█───╲1╱───█───╱3╲─────────────
│ │
10: ───────────────────────█───╱2╲───────────────────────
│ │
11: ───────────────────────█───╱3╲───────────────────────
""",
(
'undecomposed',
(1, 2, 2),
(2, 1, 2),
): """
0: ───(0, 0, 0)↦(1, 0, 0)───
│
1: ───(0, 1, 0)↦(1, 1, 0)───
│
2: ───(0, 1, 1)↦(1, 1, 1)───
│
3: ───(0, 2, 0)↦(1, 2, 0)───
│
4: ───(0, 2, 1)↦(1, 2, 1)───
│
5: ───(1, 0, 0)↦(0, 0, 0)───
│
6: ───(1, 0, 1)↦(0, 0, 1)───
│
7: ───(1, 1, 0)↦(0, 1, 0)───
│
8: ───(1, 2, 0)↦(0, 2, 0)───
│
9: ───(1, 2, 1)↦(0, 2, 1)───
""",
(
'decomposed',
(1, 2, 2),
(2, 1, 2),
): """
0: ───────────────────────█───╲0╱───────────────────────
│ │
1: ─────────────█───╲0╱───█───╱1╲───────────────────────
│ │ │ │
2: ─────────────█───╲1╱───█───╱2╲───█───╲0╱─────────────
│ │ │ │
3: ───█───╲0╱───█───╱2╲───█───╲0╱───█───╱1╲───█───╲0╱───
│ │ │ │ │ │ │ │
4: ───█───╲1╱───█───╱3╲───█───╲1╱───█───╲0╱───█───╱1╲───
│ │ │ │ │ │ │ │
5: ───█───╱2╲───█───╲0╱───█───╱2╲───█───╲1╱───█───╱2╲───
│ │ │ │ │ │
6: ───█───╱3╲───█───╲1╱───█───╲0╱───█───╱2╲─────────────
│ │ │ │ │ │
7: ─────────────█───╱2╲───█───╲1╱───█───╱3╲─────────────
│ │
8: ───────────────────────█───╱2╲───────────────────────
│ │
9: ───────────────────────█───╱3╲───────────────────────
""",
}
@pytest.mark.parametrize('left_part_lens,right_part_lens', set(key[1:] for key in circuit_diagrams))
def test_shift_swap_network_gate_diagrams(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
n_qubits = gate.qubit_count()
qubits = cirq.LineQubit.range(n_qubits)
circuit = cirq.Circuit(gate(*qubits))
diagram = circuit_diagrams['undecomposed', left_part_lens, right_part_lens]
cirq.testing.assert_has_diagram(circuit, diagram)
cca.expose_acquaintance_gates(circuit)
diagram = circuit_diagrams['decomposed', left_part_lens, right_part_lens]
cirq.testing.assert_has_diagram(circuit, diagram)
def test_shift_swap_network_gate_bad_part_lens():
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((0, 1, 1), (2, 2))
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((-1, 1, 1), (2, 2))
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((1, 1), (2, 0, 2))
with pytest.raises(ValueError):
cca.ShiftSwapNetworkGate((1, 1), (2, -3))
@pytest.mark.parametrize(
'left_part_lens,right_part_lens',
[tuple(random_part_lens(2, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_repr(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
cirq.testing.assert_equivalent_repr(gate)
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens, cirq.ZZ)
cirq.testing.assert_equivalent_repr(gate)
@pytest.mark.parametrize(
'left_part_lens,right_part_lens',
[tuple(random_part_lens(2, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_permutation(left_part_lens, right_part_lens):
gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
n_qubits = gate.qubit_count()<|fim▁hole|><|fim▁end|> | cca.testing.assert_permutation_decomposition_equivalence(gate, n_qubits) |
<|file_name|>0021_sso_id_verification.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-11 15:33
from __future__ import unicode_literals
from django.db import migrations, models
<|fim▁hole|> ]
operations = [
migrations.AddField(
model_name='ltiproviderconfig',
name='enable_sso_id_verification',
field=models.BooleanField(default=False, help_text=b'Use the presence of a profile from a trusted third party as proof of identity verification.'),
),
migrations.AddField(
model_name='oauth2providerconfig',
name='enable_sso_id_verification',
field=models.BooleanField(default=False, help_text=b'Use the presence of a profile from a trusted third party as proof of identity verification.'),
),
migrations.AddField(
model_name='samlproviderconfig',
name='enable_sso_id_verification',
field=models.BooleanField(default=False, help_text=b'Use the presence of a profile from a trusted third party as proof of identity verification.'),
),
]<|fim▁end|> | class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0020_cleanup_slug_fields'), |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>"""
Copyright (c) 2016 Gabriel Esteban
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.<|fim▁hole|>
# Register your models here.<|fim▁end|> | """
from django.contrib import admin |
<|file_name|>format.rs<|end_file_name|><|fim▁begin|>use gl;
use ToGlEnum;
/// List of client-side pixel formats.
///
/// These are all the possible formats of data when uploading to a texture.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClientFormat {
U8,
U8U8,
U8U8U8,
U8U8U8U8,
I8,
I8I8,
I8I8I8,
I8I8I8I8,
U16,
U16U16,
U16U16U16,
U16U16U16U16,
I16,
I16I16,
I16I16I16,
I16I16I16I16,
U32,
U32U32,
U32U32U32,
U32U32U32U32,
I32,
I32I32,
I32I32I32,
I32I32I32I32,
U3U3U2,
U5U6U5,
U4U4U4U4,
U5U5U5U1,
U10U10U10U2,
F16,
F16F16,
F16F16F16,
F16F16F16F16,
F32,
F32F32,
F32F32F32,
F32F32F32F32,
}
impl ClientFormat {
/// Returns the size in bytes of a pixel of this type.
pub fn get_size(&self) -> usize {
use std::mem;
match *self {
ClientFormat::U8 => 1 * mem::size_of::<u8>(),
ClientFormat::U8U8 => 2 * mem::size_of::<u8>(),
ClientFormat::U8U8U8 => 3 * mem::size_of::<u8>(),
ClientFormat::U8U8U8U8 => 4 * mem::size_of::<u8>(),
ClientFormat::I8 => 1 * mem::size_of::<i8>(),
ClientFormat::I8I8 => 2 * mem::size_of::<i8>(),
ClientFormat::I8I8I8 => 3 * mem::size_of::<i8>(),
ClientFormat::I8I8I8I8 => 4 * mem::size_of::<i8>(),
ClientFormat::U16 => 1 * mem::size_of::<u16>(),
ClientFormat::U16U16 => 2 * mem::size_of::<u16>(),
ClientFormat::U16U16U16 => 3 * mem::size_of::<u16>(),
ClientFormat::U16U16U16U16 => 4 * mem::size_of::<u16>(),
ClientFormat::I16 => 1 * mem::size_of::<i16>(),
ClientFormat::I16I16 => 2 * mem::size_of::<i16>(),
ClientFormat::I16I16I16 => 3 * mem::size_of::<i16>(),
ClientFormat::I16I16I16I16 => 4 * mem::size_of::<i16>(),
ClientFormat::U32 => 1 * mem::size_of::<u32>(),
ClientFormat::U32U32 => 2 * mem::size_of::<u32>(),
ClientFormat::U32U32U32 => 3 * mem::size_of::<u32>(),
ClientFormat::U32U32U32U32 => 4 * mem::size_of::<u32>(),
ClientFormat::I32 => 1 * mem::size_of::<i32>(),
ClientFormat::I32I32 => 2 * mem::size_of::<i32>(),
ClientFormat::I32I32I32 => 3 * mem::size_of::<i32>(),
ClientFormat::I32I32I32I32 => 4 * mem::size_of::<i32>(),
ClientFormat::U3U3U2 => (3 + 3 + 2) / 8,
ClientFormat::U5U6U5 => (5 + 6 + 5) / 8,
ClientFormat::U4U4U4U4 => (4 + 4 + 4 + 4) / 8,
ClientFormat::U5U5U5U1 => (5 + 5 + 5 + 1) / 8,
            ClientFormat::U10U10U10U2 => (10 + 10 + 10 + 2) / 8,
ClientFormat::F16 => 16 / 8,
ClientFormat::F16F16 => (16 + 16) / 8,
ClientFormat::F16F16F16 => (16 + 16 + 16) / 8,
ClientFormat::F16F16F16F16 => (16 + 16 + 16 + 16) / 8,
ClientFormat::F32 => 1 * mem::size_of::<f32>(),
ClientFormat::F32F32 => 2 * mem::size_of::<f32>(),
ClientFormat::F32F32F32 => 3 * mem::size_of::<f32>(),
ClientFormat::F32F32F32F32 => 4 * mem::size_of::<f32>(),
}
}
/// Returns the number of components of this client format.
pub fn get_num_components(&self) -> u8 {
match *self {
ClientFormat::U8 => 1,
ClientFormat::U8U8 => 2,
ClientFormat::U8U8U8 => 3,
ClientFormat::U8U8U8U8 => 4,
ClientFormat::I8 => 1,
ClientFormat::I8I8 => 2,
ClientFormat::I8I8I8 => 3,
ClientFormat::I8I8I8I8 => 4,
ClientFormat::U16 => 1,
ClientFormat::U16U16 => 2,
ClientFormat::U16U16U16 => 3,
ClientFormat::U16U16U16U16 => 4,
ClientFormat::I16 => 1,
ClientFormat::I16I16 => 2,
ClientFormat::I16I16I16 => 3,
ClientFormat::I16I16I16I16 => 4,
ClientFormat::U32 => 1,
ClientFormat::U32U32 => 2,
ClientFormat::U32U32U32 => 3,
ClientFormat::U32U32U32U32 => 4,
ClientFormat::I32 => 1,
ClientFormat::I32I32 => 2,
ClientFormat::I32I32I32 => 3,
ClientFormat::I32I32I32I32 => 4,
ClientFormat::U3U3U2 => 3,
ClientFormat::U5U6U5 => 3,
ClientFormat::U4U4U4U4 => 4,
ClientFormat::U5U5U5U1 => 4,
ClientFormat::U10U10U10U2 => 4,
ClientFormat::F16 => 1,
ClientFormat::F16F16 => 2,
ClientFormat::F16F16F16 => 3,
ClientFormat::F16F16F16F16 => 4,
ClientFormat::F32 => 1,
ClientFormat::F32F32 => 2,
ClientFormat::F32F32F32 => 3,
ClientFormat::F32F32F32F32 => 4,
}
}
}
/// List of uncompressed pixel formats that contain floating-point-like data.
///
/// Some formats are marked as "guaranteed to be supported". What this means is that you are
/// certain that the backend will use exactly these formats. If you try to use a format that
/// is not supported by the backend, it will automatically fall back to a larger format.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UncompressedFloatFormat {
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U8,
///
///
/// Guaranteed to be supported for textures.
I8,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U16,
///
///
/// Guaranteed to be supported for textures.
I16,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U8U8,
///
///
/// Guaranteed to be supported for textures.
I8I8,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U16U16,
///
///
/// Guaranteed to be supported for textures.
I16I16,
///
    U3U3U2,
///
U4U4U4,
///
U5U5U5,
///
///
/// Guaranteed to be supported for textures.
U8U8U8,
///
///
/// Guaranteed to be supported for textures.
I8I8I8,
///
U10U10U10,
///
U12U12U12,
///
///
/// Guaranteed to be supported for textures.
I16I16I16,
///
U2U2U2U2,
///
U4U4U4U4,
///
U5U5U5U1,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U8U8U8U8,
///
///
/// Guaranteed to be supported for textures.
I8I8I8I8,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U10U10U10U2,
///
U12U12U12U12,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
U16U16U16U16,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F16,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F16F16,
///
///
/// Guaranteed to be supported for textures.
F16F16F16,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F16F16F16F16,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F32,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F32F32,
///
///
/// Guaranteed to be supported for textures.
F32F32F32,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F32F32F32F32,
///
///
/// Guaranteed to be supported for both textures and renderbuffers.
F11F11F10,
/// Uses three components of 9 bits of precision that all share the same exponent.
///
/// Use this format only if all the components are approximately equal.
///
/// Guaranteed to be supported for textures.
F9F9F9,
}
impl UncompressedFloatFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat {
TextureFormat::UncompressedFloat(self)
}
}
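// Sketch of typical use (surrounding context is assumed, not shown): formats
// without guaranteed support fall back to a larger representation, so the
// conversion below always produces a usable `TextureFormat`.
//
//     let format = UncompressedFloatFormat::F16F16F16F16.to_texture_format();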
impl ToGlEnum for UncompressedFloatFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
UncompressedFloatFormat::U8 => gl::R8,
UncompressedFloatFormat::I8 => gl::R8_SNORM,
UncompressedFloatFormat::U16 => gl::R16,
UncompressedFloatFormat::I16 => gl::R16_SNORM,
UncompressedFloatFormat::U8U8 => gl::RG8,
UncompressedFloatFormat::I8I8 => gl::RG8_SNORM,
UncompressedFloatFormat::U16U16 => gl::RG16,
UncompressedFloatFormat::I16I16 => gl::RG16_SNORM,
            UncompressedFloatFormat::U3U3U2 => gl::R3_G3_B2,
UncompressedFloatFormat::U4U4U4 => gl::RGB4,
UncompressedFloatFormat::U5U5U5 => gl::RGB5,
UncompressedFloatFormat::U8U8U8 => gl::RGB8,
UncompressedFloatFormat::I8I8I8 => gl::RGB8_SNORM,
UncompressedFloatFormat::U10U10U10 => gl::RGB10,
UncompressedFloatFormat::U12U12U12 => gl::RGB12,
UncompressedFloatFormat::I16I16I16 => gl::RGB16_SNORM,
UncompressedFloatFormat::U2U2U2U2 => gl::RGBA2,
UncompressedFloatFormat::U4U4U4U4 => gl::RGBA4,
UncompressedFloatFormat::U5U5U5U1 => gl::RGB5_A1,
UncompressedFloatFormat::U8U8U8U8 => gl::RGBA8,
UncompressedFloatFormat::I8I8I8I8 => gl::RGBA8_SNORM,
UncompressedFloatFormat::U10U10U10U2 => gl::RGB10_A2,
UncompressedFloatFormat::U12U12U12U12 => gl::RGBA12,
UncompressedFloatFormat::U16U16U16U16 => gl::RGBA16,
UncompressedFloatFormat::F16 => gl::R16F,
UncompressedFloatFormat::F16F16 => gl::RG16F,
UncompressedFloatFormat::F16F16F16 => gl::RGB16F,
UncompressedFloatFormat::F16F16F16F16 => gl::RGBA16F,
UncompressedFloatFormat::F32 => gl::R32F,
UncompressedFloatFormat::F32F32 => gl::RG32F,
UncompressedFloatFormat::F32F32F32 => gl::RGB32F,
UncompressedFloatFormat::F32F32F32F32 => gl::RGBA32F,
UncompressedFloatFormat::F11F11F10 => gl::R11F_G11F_B10F,
UncompressedFloatFormat::F9F9F9 => gl::RGB9_E5,
}
}
}
/// List of uncompressed pixel formats that contain signed integral data.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UncompressedIntFormat {
I8,
I16,
I32,
I8I8,
I16I16,
I32I32,
I8I8I8,
/// May not be supported by renderbuffers.
I16I16I16,
/// May not be supported by renderbuffers.
I32I32I32,
/// May not be supported by renderbuffers.
I8I8I8I8,
I16I16I16I16,
I32I32I32I32,
}<|fim▁hole|> TextureFormat::UncompressedIntegral(self)
}
}
impl ToGlEnum for UncompressedIntFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
UncompressedIntFormat::I8 => gl::R8I,
UncompressedIntFormat::I16 => gl::R16I,
UncompressedIntFormat::I32 => gl::R32I,
UncompressedIntFormat::I8I8 => gl::RG8I,
UncompressedIntFormat::I16I16 => gl::RG16I,
UncompressedIntFormat::I32I32 => gl::RG32I,
UncompressedIntFormat::I8I8I8 => gl::RGB8I,
UncompressedIntFormat::I16I16I16 => gl::RGB16I,
UncompressedIntFormat::I32I32I32 => gl::RGB32I,
UncompressedIntFormat::I8I8I8I8 => gl::RGBA8I,
UncompressedIntFormat::I16I16I16I16 => gl::RGBA16I,
UncompressedIntFormat::I32I32I32I32 => gl::RGBA32I,
}
}
}
/// List of uncompressed pixel formats that contain unsigned integral data.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UncompressedUintFormat {
U8,
U16,
U32,
U8U8,
U16U16,
U32U32,
U8U8U8,
/// May not be supported by renderbuffers.
U16U16U16,
/// May not be supported by renderbuffers.
U32U32U32,
/// May not be supported by renderbuffers.
U8U8U8U8,
U16U16U16U16,
U32U32U32U32,
U10U10U10U2,
}
impl UncompressedUintFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat {
TextureFormat::UncompressedUnsigned(self)
}
}
impl ToGlEnum for UncompressedUintFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
UncompressedUintFormat::U8 => gl::R8UI,
UncompressedUintFormat::U16 => gl::R16UI,
UncompressedUintFormat::U32 => gl::R32UI,
UncompressedUintFormat::U8U8 => gl::RG8UI,
UncompressedUintFormat::U16U16 => gl::RG16UI,
UncompressedUintFormat::U32U32 => gl::RG32UI,
UncompressedUintFormat::U8U8U8 => gl::RGB8UI,
UncompressedUintFormat::U16U16U16 => gl::RGB16UI,
UncompressedUintFormat::U32U32U32 => gl::RGB32UI,
UncompressedUintFormat::U8U8U8U8 => gl::RGBA8UI,
UncompressedUintFormat::U16U16U16U16 => gl::RGBA16UI,
UncompressedUintFormat::U32U32U32U32 => gl::RGBA32UI,
UncompressedUintFormat::U10U10U10U2 => gl::RGB10_A2UI,
}
}
}
/// List of compressed texture formats.
///
/// TODO: many formats are missing
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompressedFormat {
/// Red/green compressed texture with one unsigned component.
RGTCFormatU,
/// Red/green compressed texture with one signed component.
RGTCFormatI,
/// Red/green compressed texture with two unsigned components.
RGTCFormatUU,
/// Red/green compressed texture with two signed components.
RGTCFormatII,
}
impl CompressedFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat {
TextureFormat::CompressedFormat(self)
}
}
impl ToGlEnum for CompressedFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
CompressedFormat::RGTCFormatU => gl::COMPRESSED_RED_RGTC1,
CompressedFormat::RGTCFormatI => gl::COMPRESSED_SIGNED_RED_RGTC1,
CompressedFormat::RGTCFormatUU => gl::COMPRESSED_RG_RGTC2,
CompressedFormat::RGTCFormatII => gl::COMPRESSED_SIGNED_RG_RGTC2,
}
}
}
/// List of formats available for depth textures.
///
/// `I16`, `I24` and `I32` are still treated as if they were floating points.
/// Only the internal representation is integral.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DepthFormat {
I16,
I24,
/// May not be supported by all hardware.
I32,
F32,
}
impl DepthFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat {
TextureFormat::DepthFormat(self)
}
}
impl ToGlEnum for DepthFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
DepthFormat::I16 => gl::DEPTH_COMPONENT16,
DepthFormat::I24 => gl::DEPTH_COMPONENT24,
DepthFormat::I32 => gl::DEPTH_COMPONENT32,
DepthFormat::F32 => gl::DEPTH_COMPONENT32F,
}
}
}
/// List of formats available for depth-stencil textures.
// TODO: If OpenGL 4.3 or ARB_stencil_texturing is not available, then depth/stencil
// textures are treated by samplers exactly like depth-only textures
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DepthStencilFormat {
I24I8,
F32I8,
}
impl DepthStencilFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat {
TextureFormat::DepthStencilFormat(self)
}
}
impl ToGlEnum for DepthStencilFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
DepthStencilFormat::I24I8 => gl::DEPTH24_STENCIL8,
DepthStencilFormat::F32I8 => gl::DEPTH32F_STENCIL8,
}
}
}
/// List of formats available for stencil textures.
///
/// You are strongly advised to only use `I8`.
// TODO: Stencil only formats cannot be used for Textures, unless OpenGL 4.4 or
// ARB_texture_stencil8 is available.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StencilFormat {
I1,
I4,
I8,
I16,
}
impl StencilFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat {
TextureFormat::StencilFormat(self)
}
}
impl ToGlEnum for StencilFormat {
fn to_glenum(&self) -> gl::types::GLenum {
match *self {
StencilFormat::I1 => gl::STENCIL_INDEX1,
StencilFormat::I4 => gl::STENCIL_INDEX4,
StencilFormat::I8 => gl::STENCIL_INDEX8,
StencilFormat::I16 => gl::STENCIL_INDEX16,
}
}
}
/// Format of the internal representation of a texture.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum TextureFormat {
UncompressedFloat(UncompressedFloatFormat),
UncompressedIntegral(UncompressedIntFormat),
UncompressedUnsigned(UncompressedUintFormat),
CompressedFormat(CompressedFormat),
DepthFormat(DepthFormat),
StencilFormat(StencilFormat),
DepthStencilFormat(DepthStencilFormat),
}<|fim▁end|> |
impl UncompressedIntFormat {
/// Turns this format into a more generic `TextureFormat`.
pub fn to_texture_format(self) -> TextureFormat { |
<|file_name|>chart.component.spec.ts<|end_file_name|><|fim▁begin|>/* tslint:disable:no-unused-variable */
import { TestBed, async } from '@angular/core/testing';
import { DpsBarChartComponent } from './dps-bar-chart.component';
describe('Component: DpsBarChart', () => {
it('should create an instance', () => {<|fim▁hole|> expect(component).toBeTruthy();
});
});<|fim▁end|> | let component = new DpsBarChartComponent(); |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.