prompt: large_stringlengths 70 to 991k
completion: large_stringlengths 0 to 1.02k
<|file_name|>solr_test.py<|end_file_name|><|fim▁begin|>from optparse import OptionParser
import sys
from os.path import expanduser, abspath

import sunburnt

import dist_job_mgr.client as djm

QUERY_TERM="datablox"

def run_query(addresses):
    master = addresses[0]
    si = sunburnt.SolrInterface("http://%s:8983/solr" % master)
    resp = si.query(QUERY_TERM).execute()
    assert resp.status==0
    objs = resp.result.numFound
    time_ms = resp.QTime
    if time_ms>0:
        rate = "%.2f obj/sec" % (1000.0*(float(objs)/float(time_ms)))
    else:
        rate = "Rate too fast to measure"
    print "%d results in %d ms (%s)" % (objs, time_ms, rate)
    return 0

def main(argv=sys.argv[1:]):
    usage = "%prog [options] query_host [host2 host3 ...]"
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args(argv)
    if len(args)==0:
        parser.error("Need to provide at least one host name")
    djm_conn = djm.get_local_connection(abspath(expanduser("~/apps/djm")))
    addresses = []
    for name in args:
        host = djm_conn.find_node_by_name(name)
        if host==None:
            parser.error("No node named '%s' found in djm database" % name)
        addresses.append(host["contact_address"])
    return run_query(addresses)<|fim▁hole|>

if __name__ == "__main__":
    sys.exit(main())<|fim▁end|>
<|file_name|>errors.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The Govisor Authors
//<|fim▁hole|>// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use file except in compliance with the License.
// You may obtain a copy of the license at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package govisor

import (
	"errors"
)

var (
	ErrNoManager    = errors.New("No manager for service")
	ErrConflict     = errors.New("Conflicting service enabled")
	ErrIsEnabled    = errors.New("Service is enabled")
	ErrNotRunning   = errors.New("Service is not running")
	ErrBadPropType  = errors.New("Bad property type")
	ErrBadPropName  = errors.New("Bad property name")
	ErrBadPropValue = errors.New("Bad property value")
	ErrPropReadOnly = errors.New("Property not changeable")
	ErrRateLimited  = errors.New("Restarting too quickly")
)<|fim▁end|>
<|file_name|>package-info.java<|end_file_name|><|fim▁begin|>/*
 * Copyright 2016-present Open Networking Laboratory
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|>

/**
 * Implementation of YANG node bgpVrfAf's children nodes.
 */
package org.onosproject.yang.gen.v1.ne.bgpcomm.rev20141225.nebgpcomm.bgpcomm.bgpvrfs.bgpvrf.bgpvrfafs.bgpvrfaf;<|fim▁end|>
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<|file_name|>SkScalerCacheTest.cpp<|end_file_name|><|fim▁begin|>/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkFont.h"
#include "include/core/SkTypeface.h"
#include "src/core/SkScalerCache.h"
#include "src/core/SkStrikeSpec.h"
#include "src/core/SkTaskGroup.h"
#include "tests/Test.h"
#include "tools/ToolUtils.h"

#include <atomic>

class Barrier {
public:
    Barrier(int threadCount) : fThreadCount(threadCount) { }
    void waitForAll() {
        fThreadCount -= 1;
        while (fThreadCount > 0) { }
    }

private:
    std::atomic<int> fThreadCount;
};

DEF_TEST(SkScalerCacheMultiThread, Reporter) {
    sk_sp<SkTypeface> typeface =
            ToolUtils::create_portable_typeface("serif", SkFontStyle::Italic());
    static constexpr int kThreadCount = 4;

    Barrier barrier{kThreadCount};

    SkFont font;
    font.setEdging(SkFont::Edging::kAntiAlias);
    font.setSubpixel(true);
    font.setTypeface(typeface);

    SkGlyphID glyphs['z'];
    SkPoint pos['z'];
    for (int c = ' '; c < 'z'; c++) {
        glyphs[c] = font.unicharToGlyph(c);
        pos[c] = {30.0f * c + 30, 30.0f};
    }
    constexpr size_t glyphCount = 'z' - ' ';
    auto data = SkMakeZip(glyphs, pos).subspan(SkTo<int>(' '), glyphCount);

    SkPaint defaultPaint;
    SkStrikeSpec strikeSpec = SkStrikeSpec::MakeMask(
            font, defaultPaint, SkSurfaceProps(0, kUnknown_SkPixelGeometry),
            SkScalerContextFlags::kNone, SkMatrix::I());

    // Make our own executor so the --threads parameter doesn't mess things up.<|fim▁hole|>                typeface->createScalerContext(effects, &strikeSpec.descriptor())};
        SkScalerCache scalerCache{strikeSpec.descriptor(), std::move(ctx)};

        auto perThread = [&](int threadIndex) {
            barrier.waitForAll();

            auto local = data.subspan(threadIndex * 2, data.size() - kThreadCount * 2);
            for (int i = 0; i < 100; i++) {
                SkDrawableGlyphBuffer drawable;
                SkSourceGlyphBuffer rejects;

                drawable.ensureSize(glyphCount);
                rejects.setSource(local);
                drawable.startBitmapDevice(rejects.source(), {0, 0}, SkMatrix::I(),
                        scalerCache.roundingSpec());
                scalerCache.prepareForMaskDrawing(&drawable, &rejects);
                rejects.flipRejectsToSource();
                drawable.reset();
            }
        };

        SkTaskGroup(*executor).batch(kThreadCount, perThread);
    }
}<|fim▁end|>
    auto executor = SkExecutor::MakeFIFOThreadPool(kThreadCount);
    for (int tries = 0; tries < 100; tries++) {
        SkScalerContextEffects effects;
        std::unique_ptr<SkScalerContext> ctx{
<|file_name|>reference_assoc.py<|end_file_name|><|fim▁begin|>import orange

data = orange.ExampleTable("lenses")

print "\nAssociation rules"
rules = orange.AssociationRulesInducer(data, support = 0.3)
for r in rules:
    print "%5.3f %5.3f %s" % (r.support, r.confidence, r)

print "\nClassification rules"
rules = orange.AssociationRulesInducer(data, support = 0.3, classificationRules = 1)<|fim▁hole|>for r in rules:
    print "%5.3f %5.3f %s" % (r.support, r.confidence, r)<|fim▁end|>
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
import urlparse

from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.http.response import HttpResponseBadRequest

from .base import Mixin
from .. import strings


class SuccessURLAliasViewMixin(Mixin):
    def get_success_url(self):
        return reverse(self.success_url_alias)


class HttpRefererViewMixin(Mixin):
    def get(self, request, referers=None, *args, **kwargs):<|fim▁hole|>
        from_referer = urlparse.urlsplit(
            request.META.get('HTTP_REFERER', '')).path
        if referers is not None \
                and all(map(lambda r: unicode(r) != from_referer, referers)):
            return HttpResponseBadRequest(
                strings.HTTP_REFERER_VIEW_MIXIN_FORM_VIEW_BAD_REQUEST \
                    % from_referer)
        return self.base_impl(
            HttpRefererViewMixin, self).get(request, args, kwargs)


class KwargsUserFormViewMixin(Mixin):
    def get_form_kwargs(self):
        kwargs = self.base_impl(KwargsUserFormViewMixin,
                                self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs<|fim▁end|>
<|file_name|>history_traversal.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use script_thread::{CommonScriptMsg, MainThreadScriptMsg, ScriptChan};
use std::sync::mpsc::Sender;<|fim▁hole|>

#[derive(JSTraceable)]
pub struct HistoryTraversalTaskSource(pub Sender<MainThreadScriptMsg>);

impl ScriptChan for HistoryTraversalTaskSource {
    fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
        let HistoryTraversalTaskSource(ref chan) = *self;
        chan.send(MainThreadScriptMsg::Common(msg)).map_err(|_| ())
    }

    fn clone(&self) -> Box<ScriptChan + Send> {
        let HistoryTraversalTaskSource(ref chan) = *self;
        box HistoryTraversalTaskSource((*chan).clone())
    }
}<|fim▁end|>
<|file_name|>color.test.js<|end_file_name|><|fim▁begin|>goog.provide('ol.test.color');

goog.require('ol.color');

describe('ol.color', function() {

  describe('ol.color.asArray()', function() {

    it('returns the same for an array', function() {
      var color = [1, 2, 3, 0.4];
      var got = ol.color.asArray(color);
      expect(got).to.be(color);
    });

    it('returns an array given an rgba string', function() {
      var color = ol.color.asArray('rgba(1,2,3,0.4)');
      expect(color).to.eql([1, 2, 3, 0.4]);
    });

    it('returns an array given an rgb string', function() {
      var color = ol.color.asArray('rgb(1,2,3)');
      expect(color).to.eql([1, 2, 3, 1]);
    });

    it('returns an array given a hex string', function() {
      var color = ol.color.asArray('#00ccff');
      expect(color).to.eql([0, 204, 255, 1]);
    });

  });

  describe('ol.color.asString()', function() {

    it('returns the same for a string', function() {
      var color = 'rgba(0,1,2,0.3)';
      var got = ol.color.asString(color);
      expect(got).to.be(color);
    });

    it('returns a string given an rgba array', function() {
      var color = ol.color.asString([1, 2, 3, 0.4]);
      expect(color).to.eql('rgba(1,2,3,0.4)');
    });

    it('returns a string given an rgb array', function() {
      var color = ol.color.asString([1, 2, 3]);
      expect(color).to.eql('rgba(1,2,3,1)');
    });

  });

  describe('ol.color.fromString', function() {

    before(function() {
      sinon.spy(ol.color, 'fromStringInternal_');
    });

    after(function() {
      ol.color.fromStringInternal_.restore();
    });

    if (ol.ENABLE_NAMED_COLORS) {
      it('can parse named colors', function() {
        expect(ol.color.fromString('red')).to.eql([255, 0, 0, 1]);
      });
    }

    it('can parse 3-digit hex colors', function() {
      expect(ol.color.fromString('#087')).to.eql([0, 136, 119, 1]);
    });

    it('can parse 6-digit hex colors', function() {
      expect(ol.color.fromString('#56789a')).to.eql([86, 120, 154, 1]);
    });

    it('can parse rgb colors', function() {
      expect(ol.color.fromString('rgb(0, 0, 255)')).to.eql([0, 0, 255, 1]);
    });<|fim▁hole|>
      expect(ol.color.fromString('rgba(255, 255, 0, 0.1)')).to.eql(
          [255, 255, 0, 0.1]);
    });

    it('caches parsed values', function() {
      var count = ol.color.fromStringInternal_.callCount;
      ol.color.fromString('aquamarine');
      expect(ol.color.fromStringInternal_.callCount).to.be(count + 1);
      ol.color.fromString('aquamarine');
      expect(ol.color.fromStringInternal_.callCount).to.be(count + 1);
    });

    it('throws an error on invalid colors', function() {
      var invalidColors = ['tuesday', '#1234567', 'rgb(255.0,0,0)'];
      var i, ii;
      for (i = 0, ii < invalidColors.length; i < ii; ++i) {
        expect(function() {
          ol.color.fromString(invalidColors[i]);
        }).to.throwException();
      }
    });

  });

  describe('ol.color.normalize', function() {

    it('clamps out-of-range channels', function() {
      expect(ol.color.normalize([-1, 256, 0, 2])).to.eql([0, 255, 0, 1]);
    });

    it('rounds color channels to integers', function() {
      expect(ol.color.normalize([1.2, 2.5, 3.7, 1])).to.eql([1, 3, 4, 1]);
    });

  });

  describe('ol.color.toString', function() {

    it('converts valid colors', function() {
      expect(ol.color.toString([1, 2, 3, 0.4])).to.be('rgba(1,2,3,0.4)');
    });

    it('rounds to integers if needed', function() {
      expect(ol.color.toString([1.2, 2.5, 3.7, 0.4])).to.be('rgba(1,3,4,0.4)');
    });

    it('sets default alpha value if undefined', function() {
      expect(ol.color.toString([0, 0, 0])).to.be('rgba(0,0,0,1)');
    });

  });

});<|fim▁end|>
it('can parse rgba colors', function() {
<|file_name|>SystemTrayIcon.hpp<|end_file_name|><|fim▁begin|>#pragma once

#include <MellowPlayer/Presentation/Notifications/ISystemTrayIcon.hpp>
#include <QMenu>
#include <QSystemTrayIcon>

namespace MellowPlayer::Domain
{
    class ILogger;
    class IPlayer;
    class Setting;
    class Settings;
}

class SystemTrayIconStrings : public QObject
{
    Q_OBJECT
public:
    QString playPause() const;
    QString next() const;
    QString previous() const;
    QString restoreWindow() const;
    QString quit() const;
};

namespace MellowPlayer::Infrastructure
{
    class IApplication;
}

namespace MellowPlayer::Presentation
{
    class IMainWindow;

    class SystemTrayIcon : public QObject, public ISystemTrayIcon
    {
        Q_OBJECT
    public:
        SystemTrayIcon(Domain::IPlayer& player, IMainWindow& mainWindow, Domain::Settings& settings);

        void show() override;
        void hide() override;
        void showMessage(const QString& title, const QString& message) override;

    public slots:
        void onActivated(QSystemTrayIcon::ActivationReason reason);
        void togglePlayPause();
        void next();
        void previous();
        void restoreWindow();
        void quit();

    private slots:
        void onShowTrayIconSettingValueChanged();

    private:
        void setUpMenu();

        Domain::ILogger& logger_;
        Domain::IPlayer& player_;
        IMainWindow& mainWindow_;
        Domain::Settings& settings_;
        Domain::Setting& showTrayIconSetting_;
        QSystemTrayIcon qSystemTrayIcon_;
        QMenu menu_;
        QAction* playPauseAction_;
        QAction* previousSongAction_;
        QAction* nextSongAction_;
        QAction* restoreWindowAction_;
        QAction* quitApplicationAction_;<|fim▁hole|>
    };
}<|fim▁end|>
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! RustPN<|fim▁hole|>//! A stack-based scripting language. extern crate num; mod lex; pub mod item; pub mod parse; pub mod vm; pub mod builtin;<|fim▁end|>
//! A stack-based scripting language.

extern crate num;

mod lex;

pub mod item;
pub mod parse;
pub mod vm;
pub mod builtin;<|fim▁end|>
<|file_name|>konm~es_k~om~es_.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
showWord(["n. ","Boutik, magazen, kote yo vann. Nan ri sa a, gen anpil konmès." ])
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># pylint: disable=missing-docstring<|fim▁hole|>
from .cpython.testwith import *<|fim▁end|>
# pylint: disable=wildcard-import
from .test_mocks import *
from .cpython.testmock import *
<|file_name|>assoc-types.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>
// except according to those terms.

#![crate_type="lib"]

// @has assoc_types/trait.Index.html
pub trait Index<I: ?Sized> {
    // @has - '//*[@id="associatedtype.Output"]//code' 'type Output: ?Sized'
    type Output: ?Sized;
    // @has - '//*[@id="tymethod.index"]//code' \
    //      "fn index<'a>(&'a self, index: I) -> &'a Self::Output"
    fn index<'a>(&'a self, index: I) -> &'a Self::Output;
}

// @has assoc_types/fn.use_output.html
// @has - '//*[@class="rust fn"]' '-> &T::Output'
pub fn use_output<T: Index<usize>>(obj: &T, index: usize) -> &T::Output {
    obj.index(index)
}

pub trait Feed {
    type Input;
}

// @has assoc_types/fn.use_input.html
// @has - '//*[@class="rust fn"]' 'T::Input'
pub fn use_input<T: Feed>(_feed: &T, _element: T::Input) { }

// @has assoc_types/fn.cmp_input.html
// @has - '//*[@class="rust fn"]' 'where T::Input: PartialEq<U::Input>'
pub fn cmp_input<T: Feed, U: Feed>(a: &T::Input, b: &U::Input) -> bool
    where T::Input: PartialEq<U::Input>
{
    a == b
}<|fim▁end|>
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
<|file_name|>mainWindow.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# M4Baker
# Copyright (C) 2010 Kilian Lackhove
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

"""
Module implementing MainWindow.
"""

from PyQt4.QtGui import *
from PyQt4.QtCore import *

from Ui_mainWindow import Ui_MainWindow
from baseclasses import *
from splitDialog import splitDialog
from aboutDialog import aboutDialog

TITLE, CHAPTER, TRACK, DURATION, STARTTIME, FILENAME, ENDTIME = range(7)

def makeClickable(widget):
    class clickFilter(QObject):
        clicked = pyqtSignal()
        def eventFilter(self, obj, event):
            if obj == widget:
                if event.type() == QEvent.MouseButtonRelease:
                    self.clicked.emit()
                    return True
            return False
    filter = clickFilter(widget)
    widget.installEventFilter(filter)
    return filter.clicked

class MainWindow(QMainWindow, Ui_MainWindow):
    """
    Class documentation goes here.
    """
    def __init__(self, parent = None):
        """
        Constructor
        """
        class delkeyFilter(QObject):
            delkeyPressed = pyqtSignal()
            def eventFilter(self, obj, event):
                if event.type() == QEvent.KeyPress:
                    if event.key() == Qt.Key_Delete:
                        self.delkeyPressed.emit()
                        return True
                return False

        class returnkeyFilter(QObject):
            def eventFilter(self, obj, event):
                if event.type() == QEvent.KeyPress:
                    if event.key() == Qt.Key_Return:
                        current = obj.currentIndex()
                        current = obj.indexBelow(current)
                        obj.setCurrentIndex(current)
                return False

        self.audiobookList = audiobookContainer()
        self.currentDir = os.getcwd()

        QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.stackedWidget.setCurrentWidget(self.infoPage)

        makeClickable(self.coverLabel).connect(self.on_coverLabel_clicked)

        self.model = audiobookTreeModel()
        self.dataTreeView.setModel(self.model)
        self.progessDelegate = progressBarDelegate()
        self.dataTreeView.setItemDelegateForColumn(1, self.progessDelegate)

        self.connect(self.dataTreeView.selectionModel(),
                SIGNAL('currentChanged(QModelIndex, QModelIndex)'),
                self.on_dataTreeView_currentItemChanged)
        self.connect(self.model, SIGNAL('dataChanged(QModelIndex,QModelIndex)'),
                self.dataChanged)
        self.connect(self.model, SIGNAL('expand(QModelIndex)'),
                self.dataTreeView.expand)
        #trying the new style of connecting signals
        self.model.processingDone.connect(self.on_processingDone)

        self.delfilter = delkeyFilter()
        self.dataTreeView.installEventFilter(self.delfilter)
        self.connect(self.delfilter, SIGNAL('delkeyPressed()'),
                self.on_actionRemove_triggered)

        self.returnFilter = returnkeyFilter()
        self.dataTreeView.installEventFilter(self.returnFilter)

        #allow only numbers in yearEdit
        self.yearEdit.setValidator(QRegExpValidator(QRegExp(r'\d*'), self))

        #set icons
        self.actionMoveDown.setIcon(QIcon.fromTheme('go-down'))
        self.actionMoveUp_2.setIcon(QIcon.fromTheme('go-up'))
        #TODO: clean the name of this action
        self.actionRemove.setIcon(QIcon.fromTheme('edit-delete'))
        self.actionAddAudiobook.setIcon(QIcon.fromTheme('address-book-new'))
        self.actionAddChapter.setIcon(QIcon.fromTheme('document-new'))
        self.action_About.setIcon(QIcon.fromTheme('help-about'))
        self.action_help.setIcon(QIcon.fromTheme('help-browser'))
        self.actionExit.setIcon(QIcon.fromTheme('application-exit'))
        self.actionProcess.setIcon(QIcon.fromTheme('system-run'))
        self.chapterFileButton.setIcon(QIcon.fromTheme('document-open'))
        self.outfileButton.setIcon(QIcon.fromTheme('document-open'))

        self.updateTree()

    def okToQuit(self):
        reply = QMessageBox.question(self,"M4Baker - really quit?", \
                "Really quit?",QMessageBox.Yes|QMessageBox.Cancel)
        if reply == QMessageBox.Cancel:
            return False
        elif reply == QMessageBox.Yes:
            return True

    def closeEvent(self, event):
        if not self.okToQuit():
            event.ignore()

    @pyqtSignature("")
    def on_actionAddAudiobook_triggered(self):
        """
        Slot documentation goes here.
        """
        current = self.dataTreeView.currentIndex()
        formats = ["*%s" % format for format in supportedInputFiles]
        fnames = QFileDialog.getOpenFileNames(
                self,
                "Choose audio files to create audiobook from",
                self.currentDir,
                'audio files (%s)' % " ".join(formats))
        if fnames:
            #fnames = [unicode(element) for element in fnames]
            self.currentDir = fnames[-1].section(os.sep,0,-2)
            newbook = audiobook([chapter(element) for element in fnames])
            self.model.addAudiobooks(newbook, current)
            self.updateTree()

    @pyqtSignature("")
    def on_actionMoveDown_triggered(self):
        """
        Slot documentation goes here.
        """
        indexes = self.dataTreeView.selectionModel().selectedIndexes()
        #clean indexes list from double entries
        cleanIndexes = []
        for index in indexes:
            if index.column() == 0:
                cleanIndexes.append(index)
        indexes = cleanIndexes
        self.model.move(indexes, 'down')

    @pyqtSignature("")
    def on_actionRemove_triggered(self):
        """
        Slot documentation goes here.
        """
        current = self.dataTreeView.currentIndex()
        indexes = self.dataTreeView.selectionModel().selectedIndexes()
        #clean indexes list from double entries
        cleanIndexes = []
        for index in indexes:
            if index.column() == 0:
                cleanIndexes.append(index)
        indexes = cleanIndexes
        self.model.remove(indexes)
        self.updateTree()

    @pyqtSignature("")
    def on_actionAddChapter_triggered(self):
        """
        Slot documentation goes here.
        """
        formats = ["*%s" % format for format in supportedInputFiles]
        fnames = QFileDialog.getOpenFileNames(
                self,
                "Choose audio files to append to audiobook",
                self.currentDir,
                'audio files (%s)' % " ".join(formats))
        if fnames:
            self.currentDir = fnames[-1].section(os.sep,0,-2)
            #fnames = [unicode(element) for element in fnames]
            chaplist = [chapter(element) for element in fnames]
            current = self.dataTreeView.currentIndex()
            self.model.addChapters(chaplist, current)<|fim▁hole|>

    @pyqtSignature("")
    def on_actionSortByFilename_triggered(self):
        """
        Slot documentation goes here.
        """
        current = self.dataTreeView.currentIndex()
        self.model.sort(current, 'filename')
        self.updateTree()

    @pyqtSignature("")
    def on_actionSortByTracknumber_triggered(self):
        """
        Slot documentation goes here.
        """
        current = self.dataTreeView.currentIndex()
        self.model.sort(current, 'trackNumber')
        self.updateTree()

    @pyqtSignature("")
    def on_actionProcess_triggered(self):
        """
        Slot documentation goes here.
""" uiElements = (self.actionAddChapter, self.actionMoveDown, self.actionMoveUp_2, self.actionProcess, self.actionRemove, self.actionSortByFilename, self.actionSortByTracknumber, self.actionSplit, self.actionAddAudiobook) for element in uiElements: element.setEnabled(False) #switch to about docker to prevent data from being changed self.stackedWidget.setCurrentWidget(self.infoPage) #disable treeview self.dataTreeView.setEnabled(False) self.model.process() @pyqtSignature("") def on_actionMoveUp_2_triggered(self): """ Slot documentation goes here. """ indexes = self.dataTreeView.selectionModel().selectedIndexes() #clean indexes list from double entries cleanIndexes = [] for index in indexes: if index.column() == 0: cleanIndexes.append(index) indexes = cleanIndexes self.model.move(indexes, 'up') def populateChapterProperties(self): #current must be a chapter, otherwise this method wont be called current = self.dataTreeView.currentIndex() title = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.DisplayRole).toString() startTime = self.model.data(self.model.index(current.row(), STARTTIME, current.parent()), Qt.DisplayRole).toString() duration = self.model.data(self.model.index(current.row(), DURATION, current.parent()), Qt.DisplayRole).toString() filename = self.model.data(self.model.index(current.row(), FILENAME, current.parent()), Qt.DisplayRole).toString() endTime= self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['endTime'] endTime = u'%.2d:%.2d:%#06.3f' % secConverter(endTime) self.chapterTitleEdit.setText(title) self.startTimeEdit.setText(startTime) self.durationEdit.setText(duration) self.chapterFileEdit.setText(filename) self.endTimeEdit.setText(endTime) def populateAudiobookProperties(self): current = self.dataTreeView.currentIndex() title = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['title'] booknum = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['booknum'] author = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['author'] encodeString = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['encodeString'] outfileName = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['outfileName'] year = self.model.data(self.model.index(current.row(), TITLE, current.parent()), Qt.UserRole)['year'] self.authorEdit.setText(author) self.titleEdit.setText(title) self.yearEdit.setText(year) self.faacEdit.setText(encodeString) self.outfileEdit.setText(outfileName) pixmap = self.model.data(self.model.index(current.row(), 0, current.parent()), Qt.UserRole).get('cover') if pixmap: pixmap = self.model.data(self.model.index(current.row(), 0, current.parent()), Qt.UserRole)['cover'] width = self.coverLabel.size().width() pixmap = pixmap.scaledToWidth(width) self.coverLabel.setPixmap(pixmap) else: self.coverLabel.setText('(click to change)') @pyqtSignature("QModelIndex*, QModelIndex*") def on_dataTreeView_currentItemChanged(self, current, previous): """ Slot documentation goes here. 
""" uiElements = (self.actionAddChapter, self.actionMoveDown, self.actionMoveUp_2, self.actionProcess, self.actionRemove, self.actionSortByFilename, self.actionSortByTracknumber, self.actionSplit) if not current.isValid(): #current is rootItem for element in uiElements: element.setDisabled(True) return else: for element in uiElements: element.setEnabled(True) if not current.parent().isValid(): #current is audiobook self.stackedWidget.setCurrentWidget(self.audiobookPropertiesPage) self.populateAudiobookProperties() if current.row() == 0: #current is first audiobook self.actionMoveUp_2.setEnabled(False) if current.row() == self.model.rowCount(current.parent()) -1: #current is last audiobook self.actionMoveDown.setEnabled(False) else: #current is chapter self.stackedWidget.setCurrentWidget(self.chapterPropertiesPage) self.populateChapterProperties() if current.row() == 0: #current is the first chapter of its book if current.parent().row() == 0: #current is the first chapter of the first book self.actionMoveUp_2.setEnabled(False) if current.row() == self.model.rowCount(current.parent()) -1: #current is the last chapter of its book if current.parent().row() == self.model.rowCount(current.parent().parent()) -1: #current is the last chapter of the last book self.actionMoveDown.setEnabled(False) @pyqtSignature("") def on_chapterFileButton_clicked(self): """ Slot documentation goes here. """ current = self.dataTreeView.currentIndex() formats = ["*%s" % format for format in supportedInputFiles] fname = QFileDialog.getOpenFileName( self, "change chapter source file", self.currentDir, 'audio files (%s)' % " ".join(formats)) if not fname.isEmpty(): self.currentDir = fname.section(os.sep,0,-2) self.model.setData(self.model.index(current.row(), FILENAME, current.parent()), QVariant(fname)) self.populateChapterProperties() @pyqtSignature("") def on_outfileButton_clicked(self): """ Slot documentation goes here. """ current = self.dataTreeView.currentIndex() fname = QFileDialog.getSaveFileName( self, 'choose audiobook output file', self.currentDir, "Audiobook files (*.m4b)") if not fname.isEmpty(): self.currentDir = fname.section(os.sep,0,-2) if not fname.endsWith('.m4b'): fname += ".m4b" self.model.setData(self.model.index(current.row(), FILENAME, current.parent()), QVariant(fname)) self.populateAudiobookProperties() @pyqtSignature("") def on_action_About_triggered(self): dialog = aboutDialog() if dialog.exec_(): pass @pyqtSignature("") def on_actionSplit_triggered(self): """ Slot documentation goes here. 
""" current = self.dataTreeView.currentIndex() if not current.parent().isValid(): #audiobook pass else: #chapter current = current.parent() minSplitDuration = self.model.data(current, Qt.UserRole)['minSplitDuration'] hours, minutes, seconds = secConverter(minSplitDuration) minSplitDuration = QTime(hours, minutes, seconds+1) dialog = splitDialog(minSplitDuration) if dialog.exec_(): maxSplitDuration = dialog.getMaxSplitDuration() self.model.split(current, maxSplitDuration) self.updateTree() @pyqtSignature("") def on_coverLabel_clicked(self): current = self.dataTreeView.currentIndex() fname = QFileDialog.getOpenFileName( self, "Choose a cover file", self.currentDir, "image files (*.png *.jpg *.jpeg *.bmp *.gif *.pbm *.pgm *ppm *xpm *xpm)", "cover.png" ) if not fname.isEmpty(): self.currentDir = fname.section(os.sep,0,-2) self.model.setData(self.model.index(current.row(), 0, current.parent()), {'cover':QPixmap(fname)}, Qt.UserRole) self.populateAudiobookProperties() def updateTree(self): for i in range(6): self.dataTreeView.resizeColumnToContents(i) def dataChanged(self, topLeft, bottomRight): current = self.dataTreeView.currentIndex() if not current.parent().isValid(): #audiobook self.populateAudiobookProperties() else: #chapter self.populateChapterProperties() def on_processingDone(self): self.actionProcess.setEnabled(True) self.actionAddAudiobook.setEnabled(True) self.dataTreeView.setEnabled(True) self.dataTreeView.reset() @pyqtSignature("") def on_chapterTitleEdit_editingFinished(self): """ Slot documentation goes here. """ current = self.dataTreeView.currentIndex() text = self.chapterTitleEdit.text() self.model.setData(self.model.index(current.row(), TITLE, current.parent()), QVariant(text)) @pyqtSignature("") def on_faacEdit_editingFinished(self): """ Slot documentation goes here. """ text = self.faacEdit.text() current = self.dataTreeView.currentIndex() value = {'encodeString':QVariant(text)} self.model.setData(self.model.index(current.row(), 0, QModelIndex()), value, Qt.UserRole) @pyqtSignature("") def on_titleEdit_editingFinished(self): """ Slot documentation goes here. """ text = self.titleEdit.text() current = self.dataTreeView.currentIndex() self.model.setData(self.model.index(current.row(), TITLE, QModelIndex()), QVariant(text)) @pyqtSignature("") def on_yearEdit_editingFinished(self): """ Slot documentation goes here. """ text = self.titleEdit.text() current = self.dataTreeView.currentIndex() self.model.setData(self.model.index(current.row(), TITLE, QModelIndex()), QVariant(text)) @pyqtSignature("") def on_authorEdit_editingFinished(self): """ Slot documentation goes here. """ text = self.authorEdit.text() current = self.dataTreeView.currentIndex() value = {'author':QVariant(text)} self.model.setData(self.model.index(current.row(), 0, QModelIndex()), value, Qt.UserRole) @pyqtSignature("") def on_action_help_triggered(self): """ Slot documentation goes here. """ self.stackedWidget.setCurrentWidget(self.infoPage)<|fim▁end|>
self.updateTree() #TODO: maybe it is smarter to add the chapter after current item?
<|file_name|>pycapsule.rs<|end_file_name|><|fim▁begin|>use libc::{c_void, c_char, c_int};
use object::*;

#[cfg_attr(windows, link(name="pythonXY"))] extern "C" {
    pub static mut PyCapsule_Type: PyTypeObject;
}

pub type PyCapsule_Destructor =
    unsafe extern "C" fn(o: *mut PyObject);

#[inline]
pub unsafe fn PyCapsule_CheckExact(ob: *mut PyObject) -> c_int {
    (Py_TYPE(ob) == &mut PyCapsule_Type) as c_int
}<|fim▁hole|>#[cfg_attr(windows, link(name="pythonXY"))] extern "C" {
    pub fn PyCapsule_New(pointer: *mut c_void, name: *const c_char,
                         destructor: Option<PyCapsule_Destructor>) -> *mut PyObject;
    pub fn PyCapsule_GetPointer(capsule: *mut PyObject, name: *const c_char) -> *mut c_void;
    pub fn PyCapsule_GetDestructor(capsule: *mut PyObject) -> Option<PyCapsule_Destructor>;
    pub fn PyCapsule_GetName(capsule: *mut PyObject) -> *const c_char;
    pub fn PyCapsule_GetContext(capsule: *mut PyObject) -> *mut c_void;
    pub fn PyCapsule_IsValid(capsule: *mut PyObject, name: *const c_char) -> c_int;
    pub fn PyCapsule_SetPointer(capsule: *mut PyObject, pointer: *mut c_void) -> c_int;
    pub fn PyCapsule_SetDestructor(capsule: *mut PyObject,
                                   destructor: Option<PyCapsule_Destructor>) -> c_int;
    pub fn PyCapsule_SetName(capsule: *mut PyObject, name: *const c_char) -> c_int;
    pub fn PyCapsule_SetContext(capsule: *mut PyObject, context: *mut c_void) -> c_int;
    pub fn PyCapsule_Import(name: *const c_char, no_block: c_int) -> *mut c_void;
}<|fim▁end|>
<|file_name|>GrupoEstudoTests.java<|end_file_name|><|fim▁begin|>package br.edu.fumep.entity;

import org.junit.Before;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;

import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;

/**
 * Created by arabasso on 09/05/2017.
 */
public class GrupoEstudoTests {
    private GrupoEstudo grupoEstudo;
    private Aluno aluno;
    private Usuario usuario;

    @Before
    public void inicializacao() {
        grupoEstudo = new GrupoEstudo();

        grupoEstudo.setNome("Amigos");
        grupoEstudo.setCurso("Ciência da Computação");
        grupoEstudo.setMateria("Engenharia de Software II");
        grupoEstudo.setProfessor("José da Silva");
        grupoEstudo.setCoordenador("João dos Santos");

        List<GrupoEstudoAluno> gruposEstudoAlunos = new ArrayList<>();

        usuario = new Usuario();
        aluno = new Aluno("Paulo", usuario);

        gruposEstudoAlunos.add(new GrupoEstudoAluno(grupoEstudo, aluno));

        grupoEstudo.setGruposEstudoAluno(gruposEstudoAlunos);
    }

    @Test<|fim▁hole|>
    public void alunoEstaInseridoGrupo() {
        assertThat(grupoEstudo.alunoEstaInserido(aluno), is(true));
    }

    @Test
    public void alunoNuloEstaInseridoGrupo() {
        assertThat(grupoEstudo.alunoEstaInserido(null), is(false));
    }

    @Test
    public void temTags(){
        assertThat(grupoEstudo.temTags(), is(false));
    }

    @Test
    public void tagsVazia() {
        assertThat(grupoEstudo.getTags(), is(""));
    }

    @Test
    public void variasTags() {
        Tag tag1 = new Tag("Álgebra");
        Tag tag2 = new Tag("Cálculo");

        grupoEstudo.setGruposEstudoTag(new ArrayList<>());

        grupoEstudo.getGruposEstudoTag().add(new GrupoEstudoTag(grupoEstudo, tag1));
        grupoEstudo.getGruposEstudoTag().add(new GrupoEstudoTag(grupoEstudo, tag2));

        assertThat(grupoEstudo.getTags(), is("Álgebra, Cálculo"));
    }
}<|fim▁end|>
<|file_name|>netmiko_sh_arp.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Use Netmiko to execute 'show arp' on pynet-rtr1, pynet-rtr2, and juniper-srx.
from netmiko import ConnectHandler
<|fim▁hole|>def main():
    # Definition of routers
    rtr1 = {
        'device_type': 'cisco_ios',
        'ip': '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
    }
    rtr2 = {
        'device_type': 'cisco_ios',
        'ip': '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 8022,
    }
    srx = {
        'device_type': 'juniper',
        'ip': '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 9822,
    }

    # Create a list of all the routers.
    all_routers = [rtr1, rtr2, srx]

    # Loop through all the routers and show arp.
    for a_router in all_routers:
        net_connect = ConnectHandler(**a_router)
        output = net_connect.send_command("show arp")
        print "\n\n>>>>>>>>> Device {0} <<<<<<<<<".format(a_router['device_type'])
        print output
        print ">>>>>>>>> End <<<<<<<<<"

if __name__ == "__main__":
    main()<|fim▁end|>
<|file_name|>com_object.cpp<|end_file_name|><|fim▁begin|>#include "com_object.hpp"

#include <Windows.h>

namespace pw {
    com_object::com_object() {
        CoInitializeEx(nullptr, COINIT_MULTITHREADED);
    }

    com_object::~com_object() {<|fim▁hole|>
CoUninitialize(); } }
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#[cfg(test)] mod tests {<|fim▁hole|><|fim▁end|>
#[test] fn it_works() { } }
<|file_name|>c00_wulala.js<|end_file_name|><|fim▁begin|>var casper = require('casper').create();
var utils = require('utils');<|fim▁hole|>
// casper.userAgent('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36');

var x = require('casper').selectXPath;

casper.on("resource.error", function(resourceError) {
    console.log('Unable to load resource (#' + resourceError.id + 'URL:' + resourceError.url + ')');
    console.log('Error code: ' + resourceError.errorCode + '. Description: ' + resourceError.errorString);
});

casper.start('http://www.apple.com/');

casper.wait(3000, function() {
    var title = casper.getTitle();
    console.log('>>> PageTitle: ' + title);
    console.log('>>> PageHTML:\n' + casper.getHTML());
});

casper.run(function() {
    this.exit(0);
});<|fim▁end|>
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// ams - Advanced Memory Scanner
// Copyright (C) 2018 th0rex
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

use communication::{Address, MemoryRegion};

pub struct StackRegion {
    pub address: Address,<|fim▁hole|>
    pub memory_region: MemoryRegion,
}

#[cfg(target_os = "linux")]
mod linux;
#[cfg(target_os = "windows")]
mod windows;

#[cfg(target_os = "linux")]
pub use self::linux::get_stack_regions;
#[cfg(target_os = "windows")]
pub use self::windows::get_stack_regions;<|fim▁end|>
<|file_name|>index.js<|end_file_name|><|fim▁begin|>"use strict";

const Base = require('yeoman-generator');
const generatorArguments = require('./arguments');<|fim▁hole|>
const generatorOptions = require('./options');
const generatorSteps = require('./steps');

module.exports = class ResponseGenerator extends Base {
  constructor(args, options) {
    super(args, options);

    Object.keys(generatorArguments).forEach(key => this.argument(key, generatorArguments[key]));
    Object.keys(generatorOptions).forEach(key => this.option(key, generatorOptions[key]));

    this.description = 'Scaffold a new response';
  }

  get configuring() { return generatorSteps.configuring; }

  get conflicts() { return generatorSteps.conflicts; }

  get default() { return generatorSteps.default; }

  get end() { return generatorSteps.end; }

  get initializing() { return generatorSteps.initializing }

  get install() { return generatorSteps.install; }

  get prompting() { return generatorSteps.prompting }

  get writing() { return generatorSteps.writing; }
};<|fim▁end|>
<|file_name|>assignment-operator-unimplemented.rs<|end_file_name|><|fim▁begin|>struct Foo;<|fim▁hole|> fn main() { let mut a = Foo; let ref b = Foo; a += *b; //~ Error: binary assignment operation `+=` cannot be applied to type `Foo` }<|fim▁end|>
<|file_name|>meshRefinementMerge.C<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------*\ ========= | \\ / F ield | foam-extend: Open Source CFD \\ / O peration | Version: 3.2 \\ / A nd | Web: http://www.foam-extend.org \\/ M anipulation | For copyright notice see file Copyright ------------------------------------------------------------------------------- License This file is part of foam-extend. foam-extend is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. foam-extend is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with foam-extend. If not, see <http://www.gnu.org/licenses/>. \*----------------------------------------------------------------------------*/ #include "meshRefinement.H" #include "combineFaces.H" #include "directTopoChange.H" #include "removePoints.H" // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // Merge faces that are in-line. Foam::label Foam::meshRefinement::mergePatchFaces ( const scalar minCos, const scalar concaveCos, const labelList& patchIDs ) { // Patch face merging engine combineFaces faceCombiner(mesh_); const polyBoundaryMesh& patches = mesh_.boundaryMesh(); // Pick up all candidate cells on boundary labelHashSet boundaryCells(mesh_.nFaces()-mesh_.nInternalFaces()); forAll(patchIDs, i) { label patchI = patchIDs[i]; const polyPatch& patch = patches[patchI]; if (!patch.coupled()) { forAll(patch, i) { boundaryCells.insert(mesh_.faceOwner()[patch.start()+i]); } } } // Get all sets of faces that can be merged labelListList mergeSets ( faceCombiner.getMergeSets ( minCos, concaveCos, boundaryCells ) ); label nFaceSets = returnReduce(mergeSets.size(), sumOp<label>()); Info<< "mergePatchFaces : Merging " << nFaceSets << " sets of faces." << endl; if (nFaceSets > 0) { // Topology changes container directTopoChange meshMod(mesh_); // Merge all faces of a set into the first face of the set. Remove // unused points. faceCombiner.setRefinement(mergeSets, meshMod); // Change the mesh (no inflation) autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh_, false, true); // Update fields mesh_.updateMesh(map); // Move mesh (since morphing does not do this) if (map().hasMotionPoints()) { mesh_.movePoints(map().preMotionPoints()); } else { // Delete mesh volumes. No other way to do this? mesh_.clearOut(); } if (overwrite()) { mesh_.setInstance(oldInstance()); } faceCombiner.updateMesh(map); // Get the kept faces that need to be recalculated. 
// Merging two boundary faces might shift the cell centre // (unless the faces are absolutely planar) labelHashSet retestFaces(6*mergeSets.size()); forAll(mergeSets, setI) { label oldMasterI = mergeSets[setI][0]; label faceI = map().reverseFaceMap()[oldMasterI]; // faceI is always uncoupled boundary face const cell& cFaces = mesh_.cells()[mesh_.faceOwner()[faceI]]; forAll(cFaces, i) { retestFaces.insert(cFaces[i]); } } updateMesh(map, retestFaces.toc()); } return nFaceSets; } // Remove points not used by any face or points used by only two faces where // the edges are in line Foam::autoPtr<Foam::mapPolyMesh> Foam::meshRefinement::mergeEdges ( const scalar minCos ) { // Point removal analysis engine removePoints pointRemover(mesh_); // Count usage of points boolList pointCanBeDeleted; label nRemove = pointRemover.countPointUsage(minCos, pointCanBeDeleted); Info<< "Removing " << nRemove << " straight edge points." << endl; autoPtr<mapPolyMesh> map; if (nRemove > 0) { // Save my local faces that will change. These changed faces might // cause a shift in the cell centre which needs to be retested. // Have to do this before changing mesh since point will be removed. labelHashSet retestOldFaces(nRemove / Pstream::nProcs()); { const faceList& faces = mesh_.faces(); forAll(faces, faceI) { const face& f = faces[faceI]; forAll(f, fp) { if (pointCanBeDeleted[f[fp]]) { retestOldFaces.insert(faceI); break; } } } } // Topology changes container directTopoChange meshMod(mesh_); pointRemover.setRefinement(pointCanBeDeleted, meshMod); // Change the mesh (no inflation) map = meshMod.changeMesh(mesh_, false, true); // Update fields mesh_.updateMesh(map); // Move mesh (since morphing does not do this) if (map().hasMotionPoints()) { mesh_.movePoints(map().preMotionPoints()); } else { // Delete mesh volumes. No other way to do this? mesh_.clearOut(); } if (overwrite()) { mesh_.setInstance(oldInstance()); } pointRemover.updateMesh(map); <|fim▁hole|> forAllConstIter(labelHashSet, retestOldFaces, iter) { label faceI = map().reverseFaceMap()[iter.key()]; const cell& ownFaces = cells[mesh_.faceOwner()[faceI]]; forAll(ownFaces, i) { retestFaces.insert(ownFaces[i]); } if (mesh_.isInternalFace(faceI)) { const cell& neiFaces = cells[mesh_.faceNeighbour()[faceI]]; forAll(neiFaces, i) { retestFaces.insert(neiFaces[i]); } } } updateMesh(map, retestFaces.toc()); } return map; } // ************************************************************************* //<|fim▁end|>
// Get the kept faces that need to be recalculated. labelHashSet retestFaces(6*retestOldFaces.size()); const cellList& cells = mesh_.cells();
<|file_name|>test_play_iterator.py<|end_file_name|><|fim▁begin|># (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError from ansible.executor.play_iterator import HostState, PlayIterator from ansible.playbook import Playbook from ansible.playbook.task import Task from ansible.playbook.play_context import PlayContext from units.mock.loader import DictDataLoader from units.mock.path import mock_unfrackpath_noop class TestPlayIterator(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_host_state(self): hs = HostState(blocks=[x for x in range(0, 10)]) hs.tasks_child_state = HostState(blocks=[0]) hs.rescue_child_state = HostState(blocks=[1]) hs.always_child_state = HostState(blocks=[2]) hs.__repr__() hs.run_state = 100 hs.__repr__() hs.fail_state = 15 hs.__repr__() for i in range(0, 10): hs.cur_block = i self.assertEqual(hs.get_current_block(), i) new_hs = hs.copy() @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_play_iterator(self): #import epdb; epdb.st() fake_loader = DictDataLoader({ "test_play.yml": """ - hosts: all gather_facts: false roles: - test_role pre_tasks: - debug: msg="this is a pre_task" tasks: - debug: msg="this is a regular task" - block: - debug: msg="this is a block task" - block: - debug: msg="this is a sub-block in a block" rescue: - debug: msg="this is a rescue task" - block: - debug: msg="this is a sub-block in a rescue" always: - debug: msg="this is an always task" - block: - debug: msg="this is a sub-block in an always" post_tasks: - debug: msg="this is a post_task" """, '/etc/ansible/roles/test_role/tasks/main.yml': """ - name: role task debug: msg="this is a role task" - block: - name: role block task debug: msg="inside block in role" always: - name: role always task debug: msg="always task in block in role" - include: foo.yml - name: role task after include debug: msg="after include in role" - block: - name: starting role nested block 1 debug: - block: - name: role nested block 1 task 1 debug: - name: role nested block 1 task 2 debug: - name: role nested block 1 task 3 debug: - name: end of role nested block 1 debug: - name: starting role nested block 2 debug: - block: - name: role nested block 2 task 1 debug: - name: role nested block 2 task 2 debug: - name: role nested block 2 task 3 debug: - name: end of role nested block 2 debug: """, '/etc/ansible/roles/test_role/tasks/foo.yml': """ - name: role included task debug: msg="this is task in an include from a role" """ }) mock_var_manager = MagicMock() mock_var_manager._fact_cache = dict() mock_var_manager.get_vars.return_value = dict() p = 
Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) hosts = [] for i in range(0, 10): host = MagicMock() host.name = host.get_name.return_value = 'host%02d' % i hosts.append(host) mock_var_manager._fact_cache['host00'] = dict() inventory = MagicMock() inventory.get_hosts.return_value = hosts inventory.filter_hosts.return_value = hosts play_context = PlayContext(play=p._entries[0]) itr = PlayIterator( inventory=inventory, play=p._entries[0], play_context=play_context, variable_manager=mock_var_manager, all_vars=dict(), ) # lookup up an original task target_task = p._entries[0].tasks[0].block[0] task_copy = target_task.copy(exclude_parent=True) found_task = itr.get_original_task(hosts[0], task_copy) self.assertEqual(target_task, found_task) bad_task = Task() found_task = itr.get_original_task(hosts[0], bad_task) self.assertIsNone(found_task) # pre task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') # role task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.name, "role task") self.assertIsNotNone(task._role) # role block task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role block task") self.assertIsNotNone(task._role) # role block always task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role always task") self.assertIsNotNone(task._role) # role include task #(host_state, task) = itr.get_next_task_for_host(hosts[0]) #self.assertIsNotNone(task) #self.assertEqual(task.action, 'debug') #self.assertEqual(task.name, "role included task") #self.assertIsNotNone(task._role) # role task after include (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role task after include") self.assertIsNotNone(task._role) # role nested block tasks (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "starting role nested block 1") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role nested block 1 task 1") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role nested block 1 task 2") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role nested block 1 task 3") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "end of role nested block 1") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "starting role nested block 2") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role nested block 2 task 1") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) 
self.assertEqual(task.name, "role nested block 2 task 2") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "role nested block 2 task 3") self.assertIsNotNone(task._role) (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.name, "end of role nested block 2") self.assertIsNotNone(task._role) # regular play task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertIsNone(task._role) # block task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg="this is a block task")) # sub-block task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg="this is a sub-block in a block")) # mark the host failed itr.mark_host_failed(hosts[0]) # block rescue task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg="this is a rescue task")) # sub-block rescue task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue")) # block always task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg="this is an always task")) # sub-block always task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg="this is a sub-block in an always")) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') # post task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') # end of iteration (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNone(task) # host 0 shouldn't be in the failed hosts, as the error # was handled by a rescue block failed_hosts = itr.get_failed_hosts() self.assertNotIn(hosts[0], failed_hosts) def test_play_iterator_nested_blocks(self): fake_loader = DictDataLoader({<|fim▁hole|> "test_play.yml": """ - hosts: all gather_facts: false tasks: - block: - block: - block: - block: - block: - debug: msg="this is the first task" - ping: rescue: - block: - block: - block: - block: - debug: msg="this is the rescue task" always: - block: - block: - block: - block: - debug: msg="this is the always task" """, }) mock_var_manager = MagicMock() mock_var_manager._fact_cache = dict() mock_var_manager.get_vars.return_value = dict() p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) hosts = [] for i in range(0, 10): host = MagicMock() host.name = host.get_name.return_value = 'host%02d' % i hosts.append(host) inventory = MagicMock() inventory.get_hosts.return_value = hosts inventory.filter_hosts.return_value = hosts play_context = 
PlayContext(play=p._entries[0]) itr = PlayIterator( inventory=inventory, play=p._entries[0], play_context=play_context, variable_manager=mock_var_manager, all_vars=dict(), ) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') self.assertEqual(task.args, dict(_raw_params='flush_handlers')) # get the first task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg='this is the first task')) # fail the host itr.mark_host_failed(hosts[0]) # get the resuce task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg='this is the rescue task')) # get the always task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertEqual(task.args, dict(msg='this is the always task')) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') self.assertEqual(task.args, dict(_raw_params='flush_handlers')) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') self.assertEqual(task.args, dict(_raw_params='flush_handlers')) # end of iteration (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNone(task) def test_play_iterator_add_tasks(self): fake_loader = DictDataLoader({ 'test_play.yml': """ - hosts: all gather_facts: no tasks: - debug: msg="dummy task" """, }) mock_var_manager = MagicMock() mock_var_manager._fact_cache = dict() mock_var_manager.get_vars.return_value = dict() p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) hosts = [] for i in range(0, 10): host = MagicMock() host.name = host.get_name.return_value = 'host%02d' % i hosts.append(host) inventory = MagicMock() inventory.get_hosts.return_value = hosts inventory.filter_hosts.return_value = hosts play_context = PlayContext(play=p._entries[0]) itr = PlayIterator( inventory=inventory, play=p._entries[0], play_context=play_context, variable_manager=mock_var_manager, all_vars=dict(), ) # test the high-level add_tasks() method s = HostState(blocks=[0,1,2]) itr._insert_tasks_into_state = MagicMock(return_value=s) itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()]) self.assertEqual(itr._host_states[hosts[0].name], s) # now actually test the lower-level method that does the work itr = PlayIterator( inventory=inventory, play=p._entries[0], play_context=play_context, variable_manager=mock_var_manager, all_vars=dict(), ) # iterate past first task _, task = itr.get_next_task_for_host(hosts[0]) while(task and task.action != 'debug'): _, task = itr.get_next_task_for_host(hosts[0]) if task is None: raise Exception("iterated past end of play while looking for place to insert tasks") # get the current host state and copy it so we can mutate it s = itr.get_host_state(hosts[0]) s_copy = s.copy() # assert with an empty task list, or if we're in a failed state, we simply return the state as-is res_state = itr._insert_tasks_into_state(s_copy, task_list=[]) self.assertEqual(res_state, s_copy) s_copy.fail_state = itr.FAILED_TASKS res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()]) 
self.assertEqual(res_state, s_copy)

        # but if we've failed and are inside a rescue/always block, the new
        # tasks are spliced into that block rather than dropped
        mock_task = MagicMock()
        s_copy.run_state = itr.ITERATING_RESCUE
        res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
        self.assertEqual(res_state, s_copy)
        self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
        itr._host_states[hosts[0].name] = res_state
        (next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
        self.assertEqual(next_task, mock_task)
        itr._host_states[hosts[0].name] = s

        # test a regular insertion
        s_copy = s.copy()
        res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])<|fim▁end|>
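# A minimal sketch of the drive loop the tests above keep repeating by hand:
# pull tasks for a host until the iterator is exhausted. The `itr` and `host`
# objects are assumed to be set up exactly as in the test fixtures.
def drain_iterator(itr, host):
    seen = []
    while True:
        _, task = itr.get_next_task_for_host(host)
        if task is None:
            break  # the iterator signals end-of-play with a None task
        seen.append(task.action)
    return seen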
<|file_name|>viewer_controller.py<|end_file_name|><|fim▁begin|># Nessus results viewing tools # # Developed by Felix Ingram, [email protected], @lllamaboy # http://www.github.com/nccgroup/lapith # # Released under AGPL. See LICENSE for more information import wx import os from model.Nessus import NessusFile, NessusTreeItem, MergedNessusReport, NessusReport, NessusItem import difflib from drop_target import MyFileDropTarget from view import ( ViewerView, SaveDialog, ID_Load_Files, ID_Merge_Files, ID_Generate_CSV, ID_Generate_VulnXML, ID_Generate_RST, ID_About, ) from wx.lib.wordwrap import wordwrap import csv from xml.sax.saxutils import escape from datetime import datetime from jinja2 import Template SEVERITY = {0:"Other", 1:"Low", 2:"Med", 3:"High", 4:"Critical"} OUTPUT_TEMPLATE=Template("""\ {{item.name}} {{hosts_count}} hosts with this issue {% for host in hosts %} {{host}}{% endfor %} --------------------------------------------- {% for host in identical_hosts %} {{host}}{% endfor %} {{ initial_output }} """) RST_TEMPLATE=Template("""\ {%- for vuln in vulns %}{% if not vuln.name.startswith("PORT:") %}{{ vuln.name }} {% for a in vuln.name %}={% endfor %} .. affectedhosts::{% for host in merged_scans.hosts_with_pid(vuln.pid) %}{% for item in host.items_for_pid(vuln.pid) %} {{ host.address }}, {{ item.info_dict.port }}/{{ item.info_dict.protocol }} {%- endfor %}{%- endfor %} :severity:`{{ vuln.item.info_dict["severity_text"] }}` :cvss:`{{ vuln.item.info_dict["cvss_base_score"] }}` :cvss:`{{ vuln.item.info_dict["cvss_vector"] }}` Description ----------- {{ "\n".join(vuln.issue.initial_output.splitlines()[7:])|replace("Plugin Output:", "Plugin Output::\n") }} {% endif %} Recommendation -------------- References ---------- {% if vuln.item.info_dict["cve"] %} CVE: {% for cve in vuln.item.info_dict["cve"] %} {{ cve }}: `http://web.nvd.nist.gov/view/vuln/detail?vulnId={{ cve }}` {%- endfor %} {%- endif %} {% if vuln.item.info_dict["bid"] %} BID: {% for bid in vuln.item.info_dict["bid"] %} {{ bid }}: `http://www.securityfocus.com/bid/{{ bid }}` {%- endfor %} {%- endif %} {% if vuln.item.info_dict["xref"] %} Other References: {% for xref in vuln.item.info_dict["xref"] %} {{ xref }} {%- endfor %} {%- endif %} {% if vuln.item.info_dict["see_also"] %} See also: {% for xref in vuln.item.info_dict["see_also"] %} {{ xref }} {%- endfor %} {%- endif %} {% endfor %} """) VULNXML_TEMPLATE=Template("""<?xml version="1.0"?> <Results Date="{{ timestamp|e }}" Tool="Lapith"> <Hosts>{% for host in hosts %} <Host dnsname="{{ host.dns_name|e }}" ipv6="" ipv4="{{ host.address|e }}"> <Vulns> {% for vuln in host.items %}<Vuln TestPhase="" id="{{ vuln.pid|e }}"> <Data Type="afh:TCP Ports" encoding="">{{ vuln.info_dict.port }}/{{ vuln.info_dict.protocol }}</Data> </Vuln> {% endfor %}</Vulns> </Host> {% endfor %}</Hosts> <Vulns> {% for vuln in vulns %} <Vuln group="" id="{{ vuln.pid|e }}"> <Title>{{ vuln.name|e }}</Title> <Description encoding=""> {{ "\n".join(vuln.issue.initial_output.splitlines()[7:])|replace("Plugin Output:", "Plugin Output::\n") | e}} ------------------------ {{ vuln.diffs|e }} </Description> <Recommendation encoding=""></Recommendation> <References/> <Category/> <Patches/> <CVSS> <OverallScore>{% if vuln.item.info_dict["cvss_base_score"] %}{{ vuln.item.info_dict["cvss_base_score"]|e }}{% else %}{{ vuln.severity|e }}{% endif %}</OverallScore> <Vector>{{ vuln.item.info_dict["cvss_vector"]|replace("CVSS2#", "")|e }}</Vector> </CVSS> <Severity>{{ vuln.severity|e }}</Severity> </Vuln> {% endfor %} 
</Vulns> <Groups/> </Results> """) ID_Save_Results = wx.NewId() class ViewerController: def __init__(self): # def initView(self): self.view = ViewerView() ## Instance vars self.files = [] self.tests = [] self.tree_hooks = {} self._search_text = "" ## Flags self._in_search = False ## Dialog paths self._save_path = os.getcwd() self._open_path = os.getcwd() self.create_tree() drop_target = MyFileDropTarget(self.view.tree, { "nessus": self.drop_action, }, self.view.display.write ) self.view.tree.SetDropTarget(drop_target) self.bind_events() self.view.Layout() self.view.Show() #self.view.search.SetFocus() def drop_action(self, file_): self.files.append(NessusFile(file_)) self.create_scan_trees() def on_do_search(self, event): text = self.view.search.GetValue() self.search(text) def search(self, text): self._in_search = True self._search_text = text for host in self.files: pass #hook = self.hooks[host.name][FILES] #if self.view.tree.IsExpanded(hook): ## Only need to do it for expanded #files = host.get_full_files(search=text) #self.view.tree.DeleteChildren(hook) #for f in files: #item = self.view.tree.AppendItem(hook, f.name, 0) #self.view.tree.SetPyData(item, f) #self.view.tree.SortChildren(hook) self.view.search.SetFocus() self._in_search = False def add_output_page(self, title, text, font="Courier New"): display = self.view.CreateTextCtrl(font=font) display.SetValue(text) self.delete_page_with_title(title) self.view.notebook.AddPage(display, title) return self.view.notebook.GetPageIndex(display) def load_files(self, event): wildcard = "Nessus files (*.nessus)|*.nessus|" \ "All files (*.*)|*.*" dlg = wx.FileDialog( self.view, message="Choose a file", defaultDir=os.getcwd(), defaultFile="", wildcard=wildcard, style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR ) if dlg.ShowModal() == wx.ID_OK: # This returns a Python list of files that were selected. 
paths = dlg.GetPaths() if paths: for path in paths: self.files.append(NessusFile(path)) self._open_path = paths[0].rsplit(os.sep, 1)[0] dlg.Destroy() self.create_scan_trees() def delete_page_with_title(self, title): notebook = self.view.notebook page_count = notebook.GetPageCount() for i in xrange(page_count): if notebook.GetPageText(i) == title: notebook.DeletePage(i) def create_tree(self): self.view.tree.DeleteAllItems() self.view.tree.AddRoot("Scans") self.create_scan_trees() self.view.tree.Expand(self.view.tree.GetRootItem()) def create_scan_trees(self): scans = self.view.tree.GetRootItem() self.view.tree.DeleteChildren(scans) for file_ in self.files: self.create_scan_tree(file_, scans) self.view.tree.Expand(scans) def sorted_tree_items(self, report, items): list_ = list(set([NessusTreeItem(report, i) for i in items])) list_.sort() return list_ def create_scan_tree(self, file_, hosts): reports = file_.get_all_reports() scans_hook = self.view.tree.GetRootItem() file_hook = self.view.tree.AppendItem(scans_hook, file_.short_name, 0) for report in reports: scan = self.view.tree.AppendItem(file_hook, report.reportname, 0) self.view.tree.SetPyData(scan, report) info = self.view.tree.AppendItem(scan, "Info", 0) self.view.tree.SetPyData(info, report.info) if report.policy: policy = self.view.tree.AppendItem(scan, "Policy", 0) self.view.tree.SetPyData(policy, report.policy) hosts = self.view.tree.AppendItem(scan, "Hosts", 0) self.view.tree.SetPyData(hosts, "\n".join(str(h) for h in report.hosts)) items_hook = self.view.tree.AppendItem(scan, "Findings", 0) self.view.tree.SetPyData(items_hook, self.sorted_tree_items(report, report.criticals+report.highs+report.meds+report.lows+report.others)) critical_hook = self.view.tree.AppendItem(items_hook, "Criticals", 0) self.view.tree.SetPyData(critical_hook, self.sorted_tree_items(report, report.criticals)) high_hook = self.view.tree.AppendItem(items_hook, "Highs", 0) self.view.tree.SetPyData(high_hook, self.sorted_tree_items(report, report.highs)) med_hook = self.view.tree.AppendItem(items_hook, "Meds", 0) self.view.tree.SetPyData(med_hook, self.sorted_tree_items(report, report.meds)) low_hook = self.view.tree.AppendItem(items_hook, "Lows", 0) self.view.tree.SetPyData(low_hook, self.sorted_tree_items(report, report.lows)) other_hook = self.view.tree.AppendItem(items_hook, "Others", 0) self.view.tree.SetPyData(other_hook, self.sorted_tree_items(report, report.others)) for crit in self.sorted_tree_items(report, report.criticals): item = self.view.tree.AppendItem(critical_hook, str(crit), 0) self.view.tree.SetPyData(item, crit) for high in self.sorted_tree_items(report, report.highs): item = self.view.tree.AppendItem(high_hook, str(high), 0) self.view.tree.SetPyData(item, high) for med in self.sorted_tree_items(report, report.meds): item = self.view.tree.AppendItem(med_hook, str(med), 0) self.view.tree.SetPyData(item, med) for low in self.sorted_tree_items(report, report.lows): item = self.view.tree.AppendItem(low_hook, str(low), 0) self.view.tree.SetPyData(item, low) for other in [NessusTreeItem(report, o) for o in report.others]: item = self.view.tree.AppendItem(other_hook, str(other), 0) self.view.tree.SetPyData(item, other) def get_item_output(self, item): hosts = item.report.hosts_with_pid(item.pid) initial_output = hosts[0].plugin_output(item.pid) diffs = [] for host in hosts[1:]: diff = difflib.unified_diff(initial_output.splitlines(), host.plugin_output(item.pid).splitlines()) diffs.append((host, "\n".join(list(diff)))) initial_output = 
item.name.strip() + "\n\n" + initial_output diff_output = "" identical_hosts = [hosts[0]] for (host, diff) in diffs: if diff: diff_output += "=" * 70 + "\n\n%s\n%s\n\n" % (host, diff) else: identical_hosts.append(host) output = OUTPUT_TEMPLATE.render( item=item, hosts_count=len(hosts), hosts=hosts, identical_hosts=identical_hosts, initial_output=initial_output ) return output, diff_output, dict(item=item, hosts=hosts, identical_hosts=identical_hosts, initial_output=initial_output) # output = item.name+"\n" # output += "%s hosts with this issue\n" % len(hosts) # output += "\n".join(str(i).split()[0] for i in hosts) # output += "\n"+"-"*20+"\n" # output += "\n".join(str(i) for i in identical_hosts) + "\n\n" + initial_output # return output, diff_output def show_nessus_item(self, item): output, diff_output, _ = self.get_item_output(item) diff_title = "Diffs" self.delete_page_with_title(diff_title) display = self.view.display if diff_output: self.add_output_page(diff_title, diff_output, font="Courier New") display.SetValue(output) def generate_rst(self, event): saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save RST as...").get_choice() if saveas: merged_scans = MergedNessusReport(self.files) if not saveas.endswith(".rst"): saveas = saveas+".rst" sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others) with open(saveas, "wb") as f: for item in sorted_tree_items: issue, diffs, meta = self.get_item_output(item) item.issue = meta item.diffs = diffs item.severity = SEVERITY[item.item.severity] f.write(RST_TEMPLATE.render( timestamp=datetime.now(), hosts=merged_scans.hosts, vulns=sorted_tree_items, merged_scans=merged_scans, ) ) def generate_vulnxml(self, event): saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save VulnXML as...").get_choice() if saveas: merged_scans = MergedNessusReport(self.files) if not saveas.endswith(".xml"): saveas = saveas+".xml" sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others) with open(saveas, "wb") as f: for item in sorted_tree_items: issue, diffs, meta = self.get_item_output(item) item.issue = meta item.diffs = diffs item.severity = SEVERITY[item.item.severity] f.write(VULNXML_TEMPLATE.render( timestamp=datetime.now(), hosts=merged_scans.hosts, vulns=sorted_tree_items, merged_scans=merged_scans, ) ) def generate_csv(self, event): saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save csv as...").get_choice() if saveas: merged_scans = MergedNessusReport(self.files) if not saveas.endswith(".csv"): saveas = saveas+".csv" sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others) with open(saveas, "wb") as f: csv_writer = csv.writer(f) csv_writer.writerow(["PID","Severity","Hosts","Output","Diffs"]) for item in sorted_tree_items: csv_writer.writerow([ item.pid, SEVERITY[item.item.severity], "\n".join(x.address for x in merged_scans.hosts_with_pid(item.pid)), self.get_item_output(item)[0], self.get_item_output(item)[1], ] ) def combine_files(self, event): scans_hook = self.view.tree.GetRootItem() merged_scans = MergedNessusReport(self.files) if merged_scans.get_all_reports(): merge_hook = self.view.tree.AppendItem(scans_hook, "Merged Files", 0) items_hook = self.view.tree.AppendItem(merge_hook, "Findings", 0) 
self.view.tree.SetPyData(items_hook, self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others)) critical_hook = self.view.tree.AppendItem(items_hook, "Critical", 0) self.view.tree.SetPyData(critical_hook, self.sorted_tree_items(merged_scans, merged_scans.criticals)) high_hook = self.view.tree.AppendItem(items_hook, "Highs", 0) self.view.tree.SetPyData(high_hook, self.sorted_tree_items(merged_scans, merged_scans.highs)) med_hook = self.view.tree.AppendItem(items_hook, "Meds", 0) self.view.tree.SetPyData(med_hook, self.sorted_tree_items(merged_scans, merged_scans.meds)) low_hook = self.view.tree.AppendItem(items_hook, "Lows", 0) self.view.tree.SetPyData(low_hook, self.sorted_tree_items(merged_scans, merged_scans.lows)) other_hook = self.view.tree.AppendItem(items_hook, "Others", 0) self.view.tree.SetPyData(other_hook, self.sorted_tree_items(merged_scans, merged_scans.others)) for crit in self.sorted_tree_items(merged_scans, merged_scans.criticals): item = self.view.tree.AppendItem(critical_hook, str(crit), 0) self.view.tree.SetPyData(item, crit) for high in self.sorted_tree_items(merged_scans, merged_scans.highs): item = self.view.tree.AppendItem(high_hook, str(high), 0) self.view.tree.SetPyData(item, high) for med in self.sorted_tree_items(merged_scans, merged_scans.meds): item = self.view.tree.AppendItem(med_hook, str(med), 0) self.view.tree.SetPyData(item, med) for low in self.sorted_tree_items(merged_scans, merged_scans.lows): item = self.view.tree.AppendItem(low_hook, str(low), 0) self.view.tree.SetPyData(item, low) for other in merged_scans.others: item = self.view.tree.AppendItem(other_hook, str(other), 0) self.view.tree.SetPyData(item, other) self.view.tree.Expand(scans_hook) def bind_events(self): # Toolbar events self.view.Bind(wx.EVT_TOOL, self.load_files, id=ID_Load_Files) <|fim▁hole|> self.view.Bind(wx.EVT_TOOL, self.generate_vulnxml, id=ID_Generate_VulnXML) self.view.Bind(wx.EVT_TOOL, self.generate_rst, id=ID_Generate_RST) # Tree clicking and selections self.view.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_sel_changed, self.view.tree) self.view.tree.Bind(wx.EVT_TREE_ITEM_MENU, self.on_right_click, self.view.tree) # Tab close event - will prevent closing the output tab self.view.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_page_close) # Menu stuff self.view.Bind(wx.EVT_MENU, self.load_files, id=wx.ID_OPEN) self.view.Bind(wx.EVT_MENU, self.extract_results, id=ID_Save_Results) self.view.Bind(wx.EVT_MENU, self.on_exit, id=wx.ID_EXIT) self.view.Bind(wx.EVT_MENU, self.on_about, id=ID_About) ## Search #self.view.search.Bind(wx.EVT_TEXT_ENTER, self.on_do_search) #self.view.search.Bind(wx.EVT_TEXT, self.on_do_search) def extract_results(self, event): item = self.view.tree.GetSelection() data = self.view.tree.GetItemData(item).GetData() saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save results as...").get_choice() if saveas: with open(saveas, "w") as f: output = "" if isinstance(data, list): for item in data: output, diff_output, _ = self.get_item_output(item) f.write("="*20+"\n") f.write(output) f.write(diff_output) elif isinstance(data, NessusReport): pass elif isinstance(data, MergedNessusReport): pass def on_right_click(self, event): item = event.GetItem() self.view.tree.SelectItem(item) data = self.view.tree.GetItemData(item).GetData() if isinstance(data, NessusReport) or isinstance(data, MergedNessusReport) or isinstance(data, list): menu = wx.Menu() menu.Append(ID_Save_Results, 
"Save all results") self.view.PopupMenu(menu) menu.Destroy() def on_page_close(self, event): ## We don't want the user to be able to close any tabs ## TODO Find a way to diable the cross on the GUI event.Veto() def on_sel_changed(self, event): item = event.GetItem() tree = self.view.tree data = tree.GetItemData(item).GetData() if isinstance(data, NessusReport): self.view.display.Clear() self.view.display.SetValue(data.reportname) self.view.notebook.SetSelection(0) self.view.tree.SetFocus() elif isinstance(data, NessusItem): self.view.display.Clear() self.view.display.SetValue(data.output.replace('\\n', "\n")) self.view.notebook.SetSelection(0) self.view.tree.SetFocus() elif isinstance(data, NessusTreeItem): self.show_nessus_item(data) self.view.tree.SetFocus() elif isinstance(data, str): self.view.display.Clear() self.view.display.SetValue(data.replace('\\n', "\n")) self.view.notebook.SetSelection(0) self.view.tree.SetFocus() def on_exit(self, event): self.view.Close() def on_about(self, event): ## Just display a dialog box info = wx.AboutDialogInfo() info.Name = "Nessus Results - The right way around" info.Version = "1.0.2\n" info.Copyright = "(C) 2012 Felix Ingram\n" info.Description = wordwrap( "Sometimes you need Nessus results on a per-issue basis, " "sometimes you need to combine a load of reports into one.", 350, wx.ClientDC(self.view)) info.Developers = [ "Felix Ingram",] ## Then we call wx.AboutBox giving it that info object wx.AboutBox(info)<|fim▁end|>
self.view.Bind(wx.EVT_TOOL, self.combine_files, id=ID_Merge_Files) self.view.Bind(wx.EVT_TOOL, self.generate_csv, id=ID_Generate_CSV)
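# Sketch of the grouping trick get_item_output uses above: hosts whose plugin
# output diffs empty against the first host are folded together, and only
# genuinely different outputs are reported separately. Inputs are assumed to
# be (host, output_text) pairs; names here are illustrative.
import difflib

def group_by_output(host_texts):
    (first_host, base), rest = host_texts[0], host_texts[1:]
    identical, different = [first_host], []
    for host, text in rest:
        diff = "\n".join(difflib.unified_diff(base.splitlines(), text.splitlines()))
        if diff:
            different.append((host, diff))
        else:
            identical.append(host)
    return identical, different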
<|file_name|>class_example.py<|end_file_name|><|fim▁begin|>class Friend:
    def walk(self, shravan=""):
        '''
        >>> Friend().walk()
        walking
        '''
        # print without a trailing comma so the output ends with a newline
        # and the doctest expectation above matches exactly
        print "walking"
    def talk(self):
        print "talking"
    def fight(self):
        print "fighting"<|fim▁hole|>
f1 = Friend()
f1.walk()

import doctest
doctest.testmod()<|fim▁end|>
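# The record above leans on doctest.testmod() picking up the example embedded
# in Friend.walk's docstring. An equivalent standalone check, assuming the
# file is importable as a module named class_example, would be:
import doctest
import class_example

doctest.testmod(class_example, verbose=True)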
<|file_name|>upload.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__   = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'

import os, subprocess, hashlib, shutil, glob, stat, sys, time
from subprocess import check_call
from tempfile import NamedTemporaryFile, mkdtemp
from zipfile import ZipFile

if __name__ == '__main__':
    d = os.path.dirname
    sys.path.insert(0, d(d(os.path.abspath(__file__))))

from setup import Command, __version__, installer_name, __appname__

PREFIX = "/var/www/calibre-ebook.com"<|fim▁hole|>
DOWNLOADS = PREFIX + "/htdocs/downloads"
BETAS = DOWNLOADS + '/betas'
USER_MANUAL = '/var/www/localhost/htdocs/'
HTML2LRF = "calibre/ebooks/lrf/html/demo"
TXT2LRF = "src/calibre/ebooks/lrf/txt/demo"
STAGING_HOST = '67.207.135.179'
STAGING_USER = 'root'
STAGING_DIR = '/root/staging'

def installers():
    installers = list(map(installer_name, ('dmg', 'msi', 'tar.bz2')))
    installers.append(installer_name('tar.bz2', is64bit=True))
    installers.insert(0, 'dist/%s-%s.tar.xz'%(__appname__, __version__))
    installers.append('dist/%s-portable-%s.zip'%(__appname__, __version__))
    return installers

def installer_description(fname):
    if fname.endswith('.tar.xz'):
        return 'Source code'
    if fname.endswith('.tar.bz2'):
        bits = '32' if 'i686' in fname else '64'
        return bits + 'bit Linux binary'
    if fname.endswith('.msi'):
        return 'Windows installer'
    if fname.endswith('.dmg'):
        return 'OS X dmg'
    if fname.endswith('.zip'):
        return 'Calibre Portable'
    return 'Unknown file'

class ReUpload(Command): # {{{

    description = 'Re-upload any installers present in dist/'

    sub_commands = ['upload_installers']

    def pre_sub_commands(self, opts):
        opts.replace = True

    def run(self, opts):
        for x in installers():
            if os.path.exists(x):
                os.remove(x)
# }}}

# Data {{{
def get_google_data():
    with open(os.path.expanduser('~/work/kde/conf/googlecodecalibre'), 'rb') as f:
        gc_password, ga_un, pw = f.read().strip().split('|')

    return {
        'username':ga_un, 'password':pw, 'gc_password':gc_password,
        'path_map_server':'[email protected]',
        'path_map_location':'/var/www/status.calibre-ebook.com/googlepaths',
        # If you change this remember to change it in the
        # status.calibre-ebook.com server as well
        'project':'calibre-ebook'
    }

def get_sourceforge_data():
    return {'username':'kovidgoyal', 'project':'calibre'}

def send_data(loc):
    subprocess.check_call(['rsync', '--inplace', '--delete', '-r', '-z', '-h', '--progress', '-e', 'ssh -x',
        loc+'/', '%s@%s:%s'%(STAGING_USER, STAGING_HOST, STAGING_DIR)])

def gc_cmdline(ver, gdata):
    return [__appname__, ver, 'fmap', 'googlecode',
            gdata['project'], gdata['username'], gdata['password'],
            gdata['gc_password'], '--path-map-server',
            gdata['path_map_server'], '--path-map-location',
            gdata['path_map_location']]

def sf_cmdline(ver, sdata):
    return [__appname__, ver, 'fmap', 'sourceforge', sdata['project'],
            sdata['username']]

def run_remote_upload(args):
    print 'Running remotely:', ' '.join(args)
    subprocess.check_call(['ssh', '-x', '%s@%s'%(STAGING_USER, STAGING_HOST),
        'cd', STAGING_DIR, '&&', 'python', 'hosting.py']+args)

# }}}

class UploadInstallers(Command): # {{{

    def add_options(self, parser):
        parser.add_option('--replace', default=False, action='store_true', help=
            'Replace existing installers, when uploading to google')

    def run(self, opts):
        all_possible = set(installers())
        available = set(glob.glob('dist/*'))
        files = {x:installer_description(x) for x in
                all_possible.intersection(available)}
        tdir = mkdtemp()
        try:
self.upload_to_staging(tdir, files) self.upload_to_sourceforge() self.upload_to_google(opts.replace) finally: shutil.rmtree(tdir, ignore_errors=True) def upload_to_staging(self, tdir, files): os.mkdir(tdir+'/dist') hosting = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'hosting.py') shutil.copyfile(hosting, os.path.join(tdir, 'hosting.py')) for f in files: shutil.copyfile(f, os.path.join(tdir, f)) with open(os.path.join(tdir, 'fmap'), 'wb') as fo: for f, desc in files.iteritems(): fo.write('%s: %s\n'%(f, desc)) while True: try: send_data(tdir) except: print('\nUpload to staging failed, retrying in a minute') time.sleep(60) else: break def upload_to_google(self, replace): gdata = get_google_data() args = gc_cmdline(__version__, gdata) if replace: args = ['--replace'] + args run_remote_upload(args) def upload_to_sourceforge(self): sdata = get_sourceforge_data() args = sf_cmdline(__version__, sdata) run_remote_upload(args) # }}} class UploadUserManual(Command): # {{{ description = 'Build and upload the User Manual' sub_commands = ['manual'] def build_plugin_example(self, path): from calibre import CurrentDir with NamedTemporaryFile(suffix='.zip') as f: os.fchmod(f.fileno(), stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH|stat.S_IWRITE) with CurrentDir(path): with ZipFile(f, 'w') as zf: for x in os.listdir('.'): if x.endswith('.swp'): continue zf.write(x) if os.path.isdir(x): for y in os.listdir(x): zf.write(os.path.join(x, y)) bname = self.b(path) + '_plugin.zip' dest = '%s/%s'%(DOWNLOADS, bname) subprocess.check_call(['scp', f.name, 'divok:'+dest]) def run(self, opts): path = self.j(self.SRC, '..', 'manual', 'plugin_examples') for x in glob.glob(self.j(path, '*')): self.build_plugin_example(x) check_call(' '.join(['rsync', '-z', '-r', '--progress', 'manual/.build/html/', 'bugs:%s'%USER_MANUAL]), shell=True) # }}} class UploadDemo(Command): # {{{ description = 'Rebuild and upload various demos' def run(self, opts): check_call( '''ebook-convert %s/demo.html /tmp/html2lrf.lrf ''' '''--title='Demonstration of html2lrf' --authors='Kovid Goyal' ''' '''--header ''' '''--serif-family "/usr/share/fonts/corefonts, Times New Roman" ''' '''--mono-family "/usr/share/fonts/corefonts, Andale Mono" ''' ''''''%self.j(self.SRC, HTML2LRF), shell=True) check_call( 'cd src/calibre/ebooks/lrf/html/demo/ && ' 'zip -j /tmp/html-demo.zip * /tmp/html2lrf.lrf', shell=True) check_call('scp /tmp/html-demo.zip divok:%s/'%(DOWNLOADS,), shell=True) # }}} class UploadToServer(Command): # {{{ description = 'Upload miscellaneous data to calibre server' def run(self, opts): check_call('ssh divok rm -f %s/calibre-\*.tar.xz'%DOWNLOADS, shell=True) #check_call('scp dist/calibre-*.tar.xz divok:%s/'%DOWNLOADS, shell=True) check_call('gpg --armor --detach-sign dist/calibre-*.tar.xz', shell=True) check_call('scp dist/calibre-*.tar.xz.asc divok:%s/signatures/'%DOWNLOADS, shell=True) check_call('ssh divok bzr update /usr/local/calibre', shell=True) check_call('''ssh divok echo %s \\> %s/latest_version'''\ %(__version__, DOWNLOADS), shell=True) check_call('ssh divok /etc/init.d/apache2 graceful', shell=True) tdir = mkdtemp() for installer in installers(): if not os.path.exists(installer): continue with open(installer, 'rb') as f: raw = f.read() fingerprint = hashlib.sha512(raw).hexdigest() fname = os.path.basename(installer+'.sha512') with open(os.path.join(tdir, fname), 'wb') as f: f.write(fingerprint) check_call('scp %s/*.sha512 divok:%s/signatures/' % (tdir, DOWNLOADS), shell=True) shutil.rmtree(tdir) # }}} # Testing {{{ def 
write_files(fmap):
    for f in fmap:
        # use a separate name for the handle so the loop variable (the
        # file path) is not shadowed
        with open(f, 'wb') as fh:
            fh.write(os.urandom(100))
            fh.write(b'a'*1000000)
    with open('fmap', 'wb') as fo:
        for f, desc in fmap.iteritems():
            fo.write('%s: %s\n'%(f, desc))

def setup_installers():
    ver = '0.0.1'
    files = {x.replace(__version__, ver):installer_description(x) for x in
            installers()}
    tdir = mkdtemp()
    os.chdir(tdir)
    return tdir, files, ver

def test_google_uploader():
    gdata = get_google_data()
    gdata['project'] = 'calibre-hosting-uploader'
    gdata['path_map_location'] += '-test'
    hosting = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            'hosting.py')

    tdir, files, ver = setup_installers()
    try:
        os.mkdir('dist')
        write_files(files)
        shutil.copyfile(hosting, 'hosting.py')
        send_data(tdir)

        args = gc_cmdline(ver, gdata)

        print ('Doing initial upload')
        run_remote_upload(args)
        raw_input('Press Enter to proceed:')

        print ('\nDoing re-upload')
        run_remote_upload(['--replace']+args)
        raw_input('Press Enter to proceed:')

        nv = ver + '.1'
        files = {x.replace(__version__, nv):installer_description(x) for x in
                installers()}
        write_files(files)
        send_data(tdir)
        args[1] = nv
        print ('\nDoing update upload')
        run_remote_upload(args)
        print ("\nDon't forget to delete any remaining files in the %s project"%
                gdata['project'])
    finally:
        shutil.rmtree(tdir)
# }}}

if __name__ == '__main__':
    test_google_uploader()<|fim▁end|>
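# Sketch of the fingerprinting step UploadToServer performs above: hash each
# installer with SHA-512 and write the hex digest to a sibling .sha512 file.
# The path argument is illustrative.
import hashlib

def write_fingerprint(installer_path):
    with open(installer_path, 'rb') as f:
        fingerprint = hashlib.sha512(f.read()).hexdigest()
    with open(installer_path + '.sha512', 'wb') as out:
        out.write(fingerprint)
    return fingerprint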
<|file_name|>switch.rs<|end_file_name|><|fim▁begin|>use core::sync::atomic::Ordering; use arch; use super::{contexts, Context, Status, CONTEXT_ID}; /// Switch to the next context<|fim▁hole|>/// /// Do not call this while holding locks! pub unsafe fn switch() -> bool { use core::ops::DerefMut; // Set the global lock to avoid the unsafe operations below from causing issues while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) { arch::interrupt::pause(); } let cpu_id = ::cpu_id(); let from_ptr; let mut to_ptr = 0 as *mut Context; { let contexts = contexts(); { let context_lock = contexts.current().expect("context::switch: not inside of context"); let mut context = context_lock.write(); from_ptr = context.deref_mut() as *mut Context; } let check_context = |context: &mut Context| -> bool { if context.cpu_id == None && cpu_id == 0 { context.cpu_id = Some(cpu_id); // println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.lock())); } if context.status == Status::Blocked && context.wake.is_some() { let wake = context.wake.expect("context::switch: wake not set"); let current = arch::time::monotonic(); if current.0 > wake.0 || (current.0 == wake.0 && current.1 >= wake.1) { context.unblock(); } } if context.cpu_id == Some(cpu_id) { if context.status == Status::Runnable && ! context.running { return true; } } false }; for (pid, context_lock) in contexts.iter() { if *pid > (*from_ptr).id { let mut context = context_lock.write(); if check_context(&mut context) { to_ptr = context.deref_mut() as *mut Context; break; } } } if to_ptr as usize == 0 { for (pid, context_lock) in contexts.iter() { if *pid < (*from_ptr).id { let mut context = context_lock.write(); if check_context(&mut context) { to_ptr = context.deref_mut() as *mut Context; break; } } } } }; if to_ptr as usize == 0 { // Unset global lock if no context found arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst); return false; } (&mut *from_ptr).running = false; (&mut *to_ptr).running = true; if let Some(ref stack) = (*to_ptr).kstack { arch::gdt::TSS.rsp[0] = (stack.as_ptr() as usize + stack.len() - 256) as u64; } CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst); // Unset global lock before switch, as arch is only usable by the current CPU at this time arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst); (&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch); true }<|fim▁end|>
/// /// # Safety
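# Python model of the two-pass scan in switch() above: prefer runnable
# contexts with an id greater than the current one, then wrap around to the
# lower ids. `contexts` is assumed to be an id-keyed mapping and `runnable` a
# predicate standing in for the check_context closure in the Rust code.
def pick_next(contexts, current_id, runnable):
    for pid in sorted(contexts):
        if pid > current_id and runnable(contexts[pid]):
            return pid
    for pid in sorted(contexts):
        if pid < current_id and runnable(contexts[pid]):
            return pid
    return None  # nothing runnable; the caller keeps the current context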
<|file_name|>myAirSimClient.py<|end_file_name|><|fim▁begin|>import numpy as np import time import math import cv2 from pylab import array, arange, uint8 from PIL import Image import eventlet from eventlet import Timeout import multiprocessing as mp # Change the path below to point to the directoy where you installed the AirSim PythonClient #sys.path.append('C:/Users/Kjell/Google Drive/MASTER-THESIS/AirSimpy') <|fim▁hole|> def __init__(self): self.img1 = None self.img2 = None MultirotorClient.__init__(self) MultirotorClient.confirmConnection(self) self.enableApiControl(True) self.armDisarm(True) self.home_pos = self.getPosition() self.home_ori = self.getOrientation() self.z = -6 def straight(self, duration, speed): pitch, roll, yaw = self.getPitchRollYaw() vx = math.cos(yaw) * speed vy = math.sin(yaw) * speed self.moveByVelocityZ(vx, vy, self.z, duration, DrivetrainType.ForwardOnly) start = time.time() return start, duration def yaw_right(self, duration): self.rotateByYawRate(30, duration) start = time.time() return start, duration def yaw_left(self, duration): self.rotateByYawRate(-30, duration) start = time.time() return start, duration def take_action(self, action): #check if copter is on level cause sometimes he goes up without a reason x = 0 while self.getPosition().z_val < -7.0: self.moveToZ(-6, 3) time.sleep(1) print(self.getPosition().z_val, "and", x) x = x + 1 if x > 10: return True start = time.time() duration = 0 collided = False if action == 0: start, duration = self.straight(1, 4) while duration > time.time() - start: if self.getCollisionInfo().has_collided == True: return True self.moveByVelocity(0, 0, 0, 1) self.rotateByYawRate(0, 1) if action == 1: start, duration = self.yaw_right(0.8) while duration > time.time() - start: if self.getCollisionInfo().has_collided == True: return True self.moveByVelocity(0, 0, 0, 1) self.rotateByYawRate(0, 1) if action == 2: start, duration = self.yaw_left(1) while duration > time.time() - start: if self.getCollisionInfo().has_collided == True: return True self.moveByVelocity(0, 0, 0, 1) self.rotateByYawRate(0, 1) return collided def goal_direction(self, goal, pos): pitch, roll, yaw = self.getPitchRollYaw() yaw = math.degrees(yaw) pos_angle = math.atan2(goal[1] - pos.y_val, goal[0]- pos.x_val) pos_angle = math.degrees(pos_angle) % 360 track = math.radians(pos_angle - yaw) return ((math.degrees(track) - 180) % 360) - 180 def getScreenDepthVis(self, track): responses = self.simGetImages([ImageRequest(0, AirSimImageType.DepthPerspective, True, False)]) img1d = np.array(responses[0].image_data_float, dtype=np.float) img1d = 255/np.maximum(np.ones(img1d.size), img1d) img2d = np.reshape(img1d, (responses[0].height, responses[0].width)) image = np.invert(np.array(Image.fromarray(img2d.astype(np.uint8), mode='L'))) factor = 10 maxIntensity = 255.0 # depends on dtype of image data # Decrease intensity such that dark pixels become much darker, bright pixels become slightly dark newImage1 = (maxIntensity)*(image/maxIntensity)**factor newImage1 = array(newImage1,dtype=uint8) small = cv2.resize(newImage1, (0,0), fx=0.39, fy=0.38) cut = small[20:40,:] info_section = np.zeros((10,cut.shape[1]),dtype=np.uint8) + 255 info_section[9,:] = 0 line = np.int((((track - -180) * (100 - 0)) / (180 - -180)) + 0) if line != (0 or 100): info_section[:,line-1:line+2] = 0 elif line == 0: info_section[:,0:3] = 0 elif line == 100: info_section[:,info_section.shape[1]-3:info_section.shape[1]] = 0 total = np.concatenate((info_section, cut), axis=0) #cv2.imshow("Test", total) 
#cv2.waitKey(0) return total def AirSim_reset(self): self.reset() time.sleep(0.2) self.enableApiControl(True) self.armDisarm(True) time.sleep(1) self.moveToZ(self.z, 3) time.sleep(3) def AirSim_reset_old(self): reset = False z = -6.0 while reset != True: now = self.getPosition() self.simSetPose(Pose(Vector3r(now.x_val, now.y_val, -30),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True) now = self.getPosition() if (now.z_val - (-30)) == 0: self.simSetPose(Pose(Vector3r(self.home_pos.x_val, self.home_pos.y_val, -30),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True) now = self.getPosition() if (now.x_val - self.home_pos.x_val) == 0 and (now.y_val - self.home_pos.y_val) == 0 and (now.z_val - (-30)) == 0 : self.simSetPose(Pose(Vector3r(self.home_pos.x_val, self.home_pos.y_val, self.home_pos.z_val),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True) now = self.getPosition() if (now.x_val - self.home_pos.x_val) == 0 and (now.y_val - self.home_pos.y_val) == 0 and (now.z_val - self.home_pos.z_val) == 0: reset = True self.moveByVelocity(0, 0, 0, 1) time.sleep(1) self.moveToZ(z, 3) time.sleep(3)<|fim▁end|>
from AirSimClient import * class myAirSimClient(MultirotorClient):
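# Worked version of the bearing math in goal_direction above: the angle from
# the drone's position to the goal, minus the current yaw, normalised into
# [-180, 180). Arguments are (x, y) pairs and yaw in degrees.
import math

def relative_bearing(goal_xy, pos_xy, yaw_degrees):
    pos_angle = math.degrees(math.atan2(goal_xy[1] - pos_xy[1],
                                        goal_xy[0] - pos_xy[0])) % 360
    return ((pos_angle - yaw_degrees) - 180) % 360 - 180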
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::fs; use std::io::prelude::*; use std::net::TcpListener; use std::net::TcpStream; use std::thread; use std::time::Duration; use web_server::ThreadPool; fn main() { let listener = TcpListener::bind("127.0.0.1:7878").unwrap(); let pool = ThreadPool::new(4); for stream in listener.incoming().take(2) { let stream = stream.unwrap(); pool.execute(|| { handle_connection(stream); }); } println!("Shutting down.") } fn handle_connection(mut stream: TcpStream) { let mut buffer = [0; 1024];<|fim▁hole|> let (status_line, filename) = if buffer.starts_with(get) { ("HTTP/1.1 200 OK\r\n\r\n", "hello.html") } else if buffer.starts_with(sleep) { thread::sleep(Duration::from_secs(5)); ("HTTP/1.1 200 OK\r\n\r\n", "hello.html") } else { ("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html") }; let contents = fs::read_to_string(filename).unwrap(); let response = format!("{}{}", status_line, contents); stream.write(response.as_bytes()).unwrap(); stream.flush().unwrap(); }<|fim▁end|>
stream.read(&mut buffer).unwrap(); let get = b"GET / HTTP/1.1\r\n"; let sleep = b"GET /sleep HTTP/1.1\r\n";
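# The same request dispatch as handle_connection above, sketched in Python:
# match on the raw request-line prefix and pick a status line plus body file.
def route(buffer):
    if buffer.startswith(b"GET / HTTP/1.1\r\n"):
        return "HTTP/1.1 200 OK\r\n\r\n", "hello.html"
    if buffer.startswith(b"GET /sleep HTTP/1.1\r\n"):
        # the original sleeps five seconds here before answering
        return "HTTP/1.1 200 OK\r\n\r\n", "hello.html"
    return "HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html"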
<|file_name|>test_mariofile.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # python 2 support via python-future from __future__ import absolute_import, division, print_function, unicode_literals from builtins import dict import os import pytest from mariobros import mariofile SIMPLE_MARIOFILE = """[section_one] text one [section_two] text two """ COMPLEX_MARIOFILE = """default text [section] \ntext section """ GARBAGE_MARIOFILE = """default [garbage_section] # garbage """ INVALID_SECTION_MARIOFILE = """ # spaces not allowed in section name [section one] """ MORE_COMPLEX_MARIOFILE = """# default [section_one] text one # comment text two # inline comment [section_two] text three [three] [DEFAULT] last""" def test_parse_sections(): simple_mariofile_sections = dict(mariofile.parse_sections(SIMPLE_MARIOFILE.splitlines(True))) assert len(simple_mariofile_sections) == 3 complex_mariofile_sections = dict(mariofile.parse_sections(COMPLEX_MARIOFILE.splitlines(True))) assert len(complex_mariofile_sections) == 2 assert sorted(complex_mariofile_sections.keys()) == ['DEFAULT', 'section'] assert complex_mariofile_sections['DEFAULT'] == ['default text\n', '\n'] with pytest.raises(mariofile.ConfigurationFileError): dict(mariofile.parse_sections(GARBAGE_MARIOFILE.splitlines(True))) with pytest.raises(mariofile.ConfigurationFileError): dict(mariofile.parse_sections(INVALID_SECTION_MARIOFILE.splitlines(True))) more_complex_mariofile_sections = dict( mariofile.parse_sections(MORE_COMPLEX_MARIOFILE.splitlines(True)) ) more_complex_mariofile_sections_keys = ['DEFAULT', 'section_one', 'section_two', 'three'] assert sorted(more_complex_mariofile_sections.keys()) == more_complex_mariofile_sections_keys assert more_complex_mariofile_sections['three'] == [] CRASH_MARIOFILE_1 = ''' [a] name target: a = 1 ''' CRASH_MARIOFILE_2 = ''' [a] name variable = 1 ''' def test_statements(): with pytest.raises(mariofile.ConfigurationFileError): mariofile.parse_section_body(CRASH_MARIOFILE_1.splitlines()) with pytest.raises(mariofile.ConfigurationFileError): mariofile.parse_section_body(CRASH_MARIOFILE_2.splitlines()) STRING_PARSE_STATEMENTS = ''' # commento statement statement con commento #commento # altro commento ''' def test_parse_statements(): parsed_statement = mariofile.parse_statements(STRING_PARSE_STATEMENTS.splitlines()) assert '\n'.join(parsed_statement) == "statement\nstatement con commento" SECTION = """ variable = 6 target: source task """ SECTION_MULTIPLE_RULE = """ target1: source1 task1 target2: source2 task2 """ INVALID_CONFIG = """ not a definition target: source """ def test_parse_section_body(): output_section = { 'action_template': ' task', 'sources_repls': 'source', 'variable': '6', 'target_pattern': 'target', } assert mariofile.parse_section_body(SECTION.splitlines(True)) == output_section with pytest.raises(mariofile.ConfigurationFileError): mariofile.parse_section_body(SECTION_MULTIPLE_RULE.splitlines(True)) with pytest.raises(mariofile.ConfigurationFileError): mariofile.parse_section_body(INVALID_CONFIG.splitlines(True)) INCLUDE_FILE = """ include prova.ini\t include\taltrofile.ini variable_definition = None [first_section] """ INCLUDE_UNIQUE_FILE = "include prova.ini" def test_parse_include(): filepaths, current_line = mariofile.parse_include(INCLUDE_FILE.splitlines(True)) assert filepaths == ['prova.ini', 'altrofile.ini'] assert current_line == 4 filepaths, current_line = mariofile.parse_include(INCLUDE_UNIQUE_FILE.splitlines(True)) assert filepaths == ['prova.ini'] assert current_line == 1 
MARIOFILE = """[DEFAULT] variable = 1 [section_one] target1: source1 task1 """ MARIOFILE_AND_INCLUDE = """ include test_parse_config.ini [section_include_1] """ MARIOFILE_INCLUDE = """ task_cmd = task_command [section_include] variable_include_2 = 0 target_include: source_include \t${task_cmd} [section_include_1] variable_include_3 = 3 """ TOUCH_MARIOFILE = """ DEFAULT: touch [task] target: source task """ TEST_PARSE_CONFIG = """ include test_include.ini variable_default = 1 [section_main] [section_include_1] variable_include1 = 3 """ def test_parse_config(tmpdir): parsed_mariofile = { 'DEFAULT': { 'action_template': '', 'sources_repls': '', 'target_pattern': '', 'variable': '1' }, 'section_one': { 'action_template': ' task1', 'sources_repls': 'source1', 'target_pattern': 'target1'} } mariofile.parse_config(MARIOFILE.splitlines(True)) == parsed_mariofile parsed_mariofile_include_test = { 'DEFAULT': { 'action_template': '', 'sources_repls': '',<|fim▁hole|> }, 'section_include': { 'variable_include_2': '0', 'action_template': '\t${task_cmd}', 'target_pattern': 'target_include', 'sources_repls': 'source_include', }, 'section_include_1': { 'action_template': '', 'sources_repls': '', 'target_pattern': '', 'variable_include_3': '3', } } mario_folder = tmpdir.mkdir('tmpdir') f = mario_folder.join('test_parse_config.ini') f.write(MARIOFILE_INCLUDE) g = mario_folder.join('test_include.ini') g.write('') mario_folder.chdir() parsed_mariofile_include = mariofile.parse_config( MARIOFILE_AND_INCLUDE.splitlines(True), cwd=os.path.join(str(mario_folder.dirname), 'tmpdir') ) for key, value in parsed_mariofile_include.items(): assert value == parsed_mariofile_include_test[key], print(key) parsed_mariofile_multiple_include = { 'DEFAULT': { 'action_template': '', 'sources_repls': '', 'target_pattern': '', 'variable_default': '1', }, 'section_main': { 'action_template': u'', 'sources_repls': u'', 'target_pattern': u'' }, 'section_include_1': { 'action_template': '', 'sources_repls': '', 'target_pattern': '', 'variable_include1': '3', } } h = mario_folder.join('test_parse_config.ini') h.write(TEST_PARSE_CONFIG) parsed_mariofile_include = mariofile.parse_config(MARIOFILE_AND_INCLUDE.splitlines(True), cwd=os.path.join( str(mario_folder.dirname), 'tmpdir' )) assert parsed_mariofile_include == parsed_mariofile_multiple_include<|fim▁end|>
'target_pattern': '',
            'task_cmd': 'task_command',
        },
        'section_include': {
            'variable_include_2': '0',
            'action_template': '\t${task_cmd}',
            'target_pattern': 'target_include',
            'sources_repls': 'source_include',
        },
        'section_include_1': {
            'action_template': '',
            'sources_repls': '',
            'target_pattern': '',
            'variable_include_3': '3',
        }
    }
    mario_folder = tmpdir.mkdir('tmpdir')
    f = mario_folder.join('test_parse_config.ini')
    f.write(MARIOFILE_INCLUDE)
    g = mario_folder.join('test_include.ini')
    g.write('')
    mario_folder.chdir()
    parsed_mariofile_include = mariofile.parse_config(
        MARIOFILE_AND_INCLUDE.splitlines(True),
        cwd=os.path.join(str(mario_folder.dirname), 'tmpdir')
    )
    for key, value in parsed_mariofile_include.items():
        # use the key itself as the assertion message so failures name the
        # offending section
        assert value == parsed_mariofile_include_test[key], key
    parsed_mariofile_multiple_include = {
        'DEFAULT': {
            'action_template': '',
            'sources_repls': '',
            'target_pattern': '',
            'variable_default': '1',
        },
        'section_main': {
            'action_template': u'',
            'sources_repls': u'',
            'target_pattern': u''
        },
        'section_include_1': {
            'action_template': '',
            'sources_repls': '',
            'target_pattern': '',
            'variable_include1': '3',
        }
    }
    h = mario_folder.join('test_parse_config.ini')
    h.write(TEST_PARSE_CONFIG)
    parsed_mariofile_include = mariofile.parse_config(MARIOFILE_AND_INCLUDE.splitlines(True),
                                                      cwd=os.path.join(
                                                          str(mario_folder.dirname), 'tmpdir'
                                                      ))
    assert parsed_mariofile_include == parsed_mariofile_multiple_include<|fim▁end|>
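# Minimal usage sketch for the parser exercised above, assuming the mariofile
# API shown in these tests (parse_sections over an iterable of lines, always
# yielding a DEFAULT section alongside any named ones).
from mariobros import mariofile

lines = "[build]\ntarget: source\n    command\n".splitlines(True)
sections = dict(mariofile.parse_sections(lines))
assert sorted(sections) == ['DEFAULT', 'build']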
<|file_name|>storage-gen.go<|end_file_name|><|fim▁begin|>// Package storage provides access to the Cloud Storage JSON API. // // See https://developers.google.com/storage/docs/json_api/ // // Usage example: // // import "google.golang.org/api/storage/v1" // ... // storageService, err := storage.New(oauthHttpClient) package storage // import "google.golang.org/api/storage/v1" import ( "bytes" "encoding/json" "errors" "fmt" context "golang.org/x/net/context" ctxhttp "golang.org/x/net/context/ctxhttp" gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = ctxhttp.Do const apiId = "storage:v1" const apiName = "storage" const apiVersion = "v1" const basePath = "https://www.googleapis.com/storage/v1/" // OAuth2 scopes used by this API. const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View your data across Google Cloud Platform services CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" // Manage your data and permissions in Google Cloud Storage DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" // View your data in Google Cloud Storage DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" // Manage your data in Google Cloud Storage DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.BucketAccessControls = NewBucketAccessControlsService(s) s.Buckets = NewBucketsService(s) s.Channels = NewChannelsService(s) s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) s.ObjectAccessControls = NewObjectAccessControlsService(s) s.Objects = NewObjectsService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment BucketAccessControls *BucketAccessControlsService Buckets *BucketsService Channels *ChannelsService DefaultObjectAccessControls *DefaultObjectAccessControlsService ObjectAccessControls *ObjectAccessControlsService Objects *ObjectsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs } type BucketAccessControlsService struct { s *Service } func NewBucketsService(s *Service) *BucketsService { rs := &BucketsService{s: s} return rs } type BucketsService struct { s *Service } func NewChannelsService(s *Service) *ChannelsService { rs := &ChannelsService{s: s} return rs } type ChannelsService struct { s *Service } func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { rs := &DefaultObjectAccessControlsService{s: s} return rs } type DefaultObjectAccessControlsService struct { s *Service } func 
NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
	rs := &ObjectAccessControlsService{s: s}
	return rs
}

type ObjectAccessControlsService struct {
	s *Service
}

func NewObjectsService(s *Service) *ObjectsService {
	rs := &ObjectsService{s: s}
	return rs
}

type ObjectsService struct {
	s *Service
}

// Bucket: A bucket.
type Bucket struct {
	// Acl: Access controls on the bucket.
	Acl []*BucketAccessControl `json:"acl,omitempty"`

	// Cors: The bucket's Cross-Origin Resource Sharing (CORS)
	// configuration.
	Cors []*BucketCors `json:"cors,omitempty"`

	// DefaultObjectAcl: Default access controls to apply to new objects
	// when no ACL is provided.
	DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the bucket.
	Etag string `json:"etag,omitempty"`

	// Id: The ID of the bucket. For buckets, the id and name properties
	// are the same.
	Id string `json:"id,omitempty"`

	// Kind: The kind of item this is. For buckets, this is always
	// storage#bucket.
	Kind string `json:"kind,omitempty"`

	// Lifecycle: The bucket's lifecycle configuration. See lifecycle
	// management for more information.
	Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"`

	// Location: The location of the bucket. Object data for objects in the
	// bucket resides in physical storage within this region. Defaults to
	// US. See the developer's guide for the authoritative list.
	Location string `json:"location,omitempty"`

	// Logging: The bucket's logging configuration, which defines the
	// destination bucket and optional name prefix for the current bucket's
	// logs.
	Logging *BucketLogging `json:"logging,omitempty"`

	// Metageneration: The metadata generation of this bucket.
	Metageneration int64 `json:"metageneration,omitempty,string"`

	// Name: The name of the bucket.
	Name string `json:"name,omitempty"`

	// Owner: The owner of the bucket. This is always the project team's
	// owner group.
	Owner *BucketOwner `json:"owner,omitempty"`

	// ProjectNumber: The project number of the project the bucket belongs
	// to.
	ProjectNumber uint64 `json:"projectNumber,omitempty,string"`

	// SelfLink: The URI of this bucket.
	SelfLink string `json:"selfLink,omitempty"`

	// StorageClass: The bucket's default storage class, used whenever no
	// storageClass is specified for a newly-created object. This defines
	// how objects in the bucket are stored and determines the SLA and the
	// cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD,
	// NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value
	// is not specified when the bucket is created, it will default to
	// STANDARD. For more information, see storage classes.
	StorageClass string `json:"storageClass,omitempty"`

	// TimeCreated: The creation time of the bucket in RFC 3339 format.
	TimeCreated string `json:"timeCreated,omitempty"`

	// Updated: The modification time of the bucket in RFC 3339 format.
	Updated string `json:"updated,omitempty"`

	// Versioning: The bucket's versioning configuration.
	Versioning *BucketVersioning `json:"versioning,omitempty"`

	// Website: The bucket's website configuration, controlling how the
	// service behaves when accessing bucket contents as a web site. See the
	// Static Website Examples for more information.
	Website *BucketWebsite `json:"website,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Acl") to
	// unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Acl") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Bucket) MarshalJSON() ([]byte, error) { type noMethod Bucket raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type BucketCors struct { // MaxAgeSeconds: The value, in seconds, to return in the // Access-Control-Max-Age header used in preflight responses. MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` // Method: The list of HTTP methods on which to include CORS response // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list // of methods, and means "any method". Method []string `json:"method,omitempty"` // Origin: The list of Origins eligible to receive CORS response // headers. Note: "*" is permitted in the list of origins, and means // "any Origin". Origin []string `json:"origin,omitempty"` // ResponseHeader: The list of HTTP headers other than the simple // response headers to give permission for the user-agent to share // across domains. ResponseHeader []string `json:"responseHeader,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketCors) MarshalJSON() ([]byte, error) { type noMethod BucketCors raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketLifecycle: The bucket's lifecycle configuration. See lifecycle // management for more information. type BucketLifecycle struct { // Rule: A lifecycle management rule, which is made of an action to take // and the condition(s) under which the action will be taken. Rule []*BucketLifecycleRule `json:"rule,omitempty"` // ForceSendFields is a list of field names (e.g. "Rule") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Rule") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycle raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type BucketLifecycleRule struct { // Action: The action to take. Action *BucketLifecycleRuleAction `json:"action,omitempty"` // Condition: The condition(s) under which the action will be taken. Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Action") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycleRule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleAction: The action to take. type BucketLifecycleRuleAction struct { // StorageClass: Target storage class. Required iff the type of the // action is SetStorageClass. StorageClass string `json:"storageClass,omitempty"` // Type: Type of the action. Currently, only Delete and SetStorageClass // are supported. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "StorageClass") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "StorageClass") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycleRuleAction raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleCondition: The condition(s) under which the action // will be taken. 
type BucketLifecycleRuleCondition struct { // Age: Age of an object (in days). This condition is satisfied when an // object reaches the specified age. Age int64 `json:"age,omitempty"` // CreatedBefore: A date in RFC 3339 format with only the date part (for // instance, "2013-01-15"). This condition is satisfied when an object // is created before midnight of the specified date in UTC. CreatedBefore string `json:"createdBefore,omitempty"` // IsLive: Relevant only for versioned objects. If the value is true, // this condition matches live objects; if the value is false, it // matches archived objects. IsLive bool `json:"isLive,omitempty"` // MatchesStorageClass: Objects having any of the storage classes // specified by this condition will be matched. Values include // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and // DURABLE_REDUCED_AVAILABILITY. MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` // NumNewerVersions: Relevant only for versioned objects. If the value // is N, this condition is satisfied when there are at least N versions // (including the live version) newer than this version of the object. NumNewerVersions int64 `json:"numNewerVersions,omitempty"` // ForceSendFields is a list of field names (e.g. "Age") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Age") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycleRuleCondition raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketLogging: The bucket's logging configuration, which defines the // destination bucket and optional name prefix for the current bucket's // logs. type BucketLogging struct { // LogBucket: The destination bucket where the current bucket's logs // should be placed. LogBucket string `json:"logBucket,omitempty"` // LogObjectPrefix: A prefix for log object names. LogObjectPrefix string `json:"logObjectPrefix,omitempty"` // ForceSendFields is a list of field names (e.g. "LogBucket") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LogBucket") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketLogging) MarshalJSON() ([]byte, error) { type noMethod BucketLogging raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketOwner: The owner of the bucket. This is always the project // team's owner group. type BucketOwner struct { // Entity: The entity, in the form project-owner-projectId. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity. EntityId string `json:"entityId,omitempty"` // ForceSendFields is a list of field names (e.g. "Entity") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Entity") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketOwner) MarshalJSON() ([]byte, error) { type noMethod BucketOwner raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketVersioning: The bucket's versioning configuration. type BucketVersioning struct { // Enabled: While set to true, versioning is fully enabled for this // bucket. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Enabled") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketVersioning) MarshalJSON() ([]byte, error) { type noMethod BucketVersioning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketWebsite: The bucket's website configuration, controlling how // the service behaves when accessing bucket contents as a web site. See // the Static Website Examples for more information. type BucketWebsite struct { // MainPageSuffix: If the requested object path is missing, the service // will ensure the path has a trailing '/', append this suffix, and // attempt to retrieve the resulting object. This allows the creation of // index.html objects to represent directory pages. 
MainPageSuffix string `json:"mainPageSuffix,omitempty"` // NotFoundPage: If the requested object path is missing, and any // mainPageSuffix object is missing, if applicable, the service will // return the named object from this bucket as the content for a 404 Not // Found result. NotFoundPage string `json:"notFoundPage,omitempty"` // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MainPageSuffix") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *BucketWebsite) MarshalJSON() ([]byte, error) { type noMethod BucketWebsite raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketAccessControl: An access-control entry. type BucketAccessControl struct { // Bucket: The name of the bucket. Bucket string `json:"bucket,omitempty"` // Domain: The domain associated with the entity, if any. Domain string `json:"domain,omitempty"` // Email: The email address associated with the entity, if any. Email string `json:"email,omitempty"` // Entity: The entity holding the permission, in one of the following // forms: // - user-userId // - user-email // - group-groupId // - group-email // - domain-domain // - project-team-projectId // - allUsers // - allAuthenticatedUsers Examples: // - The user [email protected] would be [email protected]. // - The group [email protected] would be // [email protected]. // - To refer to all members of the Google Apps for Business domain // example.com, the entity would be domain-example.com. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity, if any. EntityId string `json:"entityId,omitempty"` // Etag: HTTP 1.1 Entity tag for the access-control entry. Etag string `json:"etag,omitempty"` // Id: The ID of the access-control entry. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For bucket access control entries, // this is always storage#bucketAccessControl. Kind string `json:"kind,omitempty"` // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` // Role: The access permission for the entity. Role string `json:"role,omitempty"` // SelfLink: The link to this access-control entry. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Bucket") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Bucket") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { type noMethod BucketAccessControl raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketAccessControlProjectTeam: The project team associated with the // entity, if any. type BucketAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` // Team: The team. Team string `json:"team,omitempty"` // ForceSendFields is a list of field names (e.g. "ProjectNumber") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ProjectNumber") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type noMethod BucketAccessControlProjectTeam raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BucketAccessControls: An access-control list. type BucketAccessControls struct { // Items: The list of items. Items []*BucketAccessControl `json:"items,omitempty"` // Kind: The kind of item this is. For lists of bucket access control // entries, this is always storage#bucketAccessControls. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Items") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Items") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
	NullFields []string `json:"-"`
}

func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
	type noMethod BucketAccessControls
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// Buckets: A list of buckets.
type Buckets struct {
	// Items: The list of items.
	Items []*Bucket `json:"items,omitempty"`

	// Kind: The kind of item this is. For lists of buckets, this is always
	// storage#buckets.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: The continuation token, used to page through large
	// result sets. Provide this value in a subsequent request to return the
	// next page of results.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Items") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Items") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

func (s *Buckets) MarshalJSON() ([]byte, error) {
	type noMethod Buckets
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// Channel: A notification channel used to watch for resource changes.
type Channel struct {
	// Address: The address where notifications are delivered for this
	// channel.
	Address string `json:"address,omitempty"`

	// Expiration: Date and time of notification channel expiration,
	// expressed as a Unix timestamp, in milliseconds. Optional.
	Expiration int64 `json:"expiration,omitempty,string"`

	// Id: A UUID or similar unique string that identifies this channel.
	Id string `json:"id,omitempty"`

	// Kind: Identifies this as a notification channel used to watch for
	// changes to a resource. Value: the fixed string "api#channel".
	Kind string `json:"kind,omitempty"`

	// Params: Additional parameters controlling delivery channel behavior.
	// Optional.
	Params map[string]string `json:"params,omitempty"`

	// Payload: A Boolean value to indicate whether payload is wanted.
	// Optional.
	Payload bool `json:"payload,omitempty"`

	// ResourceId: An opaque ID that identifies the resource being watched
	// on this channel. Stable across different API versions.
	ResourceId string `json:"resourceId,omitempty"`

	// ResourceUri: A version-specific identifier for the watched resource.
	ResourceUri string `json:"resourceUri,omitempty"`

	// Token: An arbitrary string delivered to the target address with each
	// notification delivered over this channel. Optional.
	Token string `json:"token,omitempty"`

	// Type: The type of delivery mechanism used for this channel.
	Type string `json:"type,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Address") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Address") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

func (s *Channel) MarshalJSON() ([]byte, error) {
	type noMethod Channel
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
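// A minimal sketch of filling in a Channel for a watch request. The ID,
// address, and token are placeholder assumptions; a real caller supplies
// its own HTTPS endpoint and a unique channel ID. The "web_hook" type is
// the push-style delivery mechanism used by Google API watch channels,
// but treat it as an assumption to verify against the service docs.
func exampleWatchChannel() *Channel {
	return &Channel{
		// Fixed kind string for notification channels.
		Kind: "api#channel",
		Type: "web_hook",
		// Placeholder channel ID and delivery endpoint.
		Id:      "my-unique-channel-id",
		Address: "https://example.com/notifications",
		// Echoed back with each notification so the receiver can
		// correlate messages with this channel.
		Token: "opaque-verification-token",
	}
}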
"Address") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Address") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Channel) MarshalJSON() ([]byte, error) { type noMethod Channel raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ComposeRequest: A Compose request. type ComposeRequest struct { // Destination: Properties of the resulting object. Destination *Object `json:"destination,omitempty"` // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` // SourceObjects: The list of source objects that will be concatenated // into a single object. SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` // ForceSendFields is a list of field names (e.g. "Destination") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Destination") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ComposeRequest) MarshalJSON() ([]byte, error) { type noMethod ComposeRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ComposeRequestSourceObjects struct { // Generation: The generation of this object to use as the source. Generation int64 `json:"generation,omitempty,string"` // Name: The source object's name. The source object's bucket is // implicitly the destination bucket. Name string `json:"name,omitempty"` // ObjectPreconditions: Conditions that must be met for this operation // to execute. ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` // ForceSendFields is a list of field names (e.g. "Generation") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Generation") to include in // API requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { type noMethod ComposeRequestSourceObjects raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ComposeRequestSourceObjectsObjectPreconditions: Conditions that must // be met for this operation to execute. type ComposeRequestSourceObjectsObjectPreconditions struct { // IfGenerationMatch: Only perform the composition if the generation of // the source object that would be used matches this value. If this // value and a generation are both specified, they must be the same // value or the call will fail. IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IfGenerationMatch") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { type noMethod ComposeRequestSourceObjectsObjectPreconditions raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Object: An object. type Object struct { // Acl: Access controls on the object. Acl []*ObjectAccessControl `json:"acl,omitempty"` // Bucket: The name of the bucket containing this object. Bucket string `json:"bucket,omitempty"` // CacheControl: Cache-Control directive for the object data. If // omitted, and the object is accessible to all anonymous users, the // default will be public, max-age=3600. CacheControl string `json:"cacheControl,omitempty"` // ComponentCount: Number of underlying components that make up this // object. Components are accumulated by compose operations. ComponentCount int64 `json:"componentCount,omitempty"` // ContentDisposition: Content-Disposition of the object data. ContentDisposition string `json:"contentDisposition,omitempty"` // ContentEncoding: Content-Encoding of the object data. ContentEncoding string `json:"contentEncoding,omitempty"` // ContentLanguage: Content-Language of the object data. ContentLanguage string `json:"contentLanguage,omitempty"` // ContentType: Content-Type of the object data. If contentType is not // specified, object downloads will be served as // application/octet-stream. ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; // encoded using base64 in big-endian byte order. 
For more information // about using the CRC32c checksum, see Hashes and ETags: Best // Practices. Crc32c string `json:"crc32c,omitempty"` // CustomerEncryption: Metadata of customer-supplied encryption key, if // the object is encrypted by such a key. CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` // Etag: HTTP 1.1 Entity tag for the object. Etag string `json:"etag,omitempty"` // Generation: The content generation of this object. Used for object // versioning. Generation int64 `json:"generation,omitempty,string"` // Id: The ID of the object, including the bucket name, object name, and // generation number. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For objects, this is always // storage#object. Kind string `json:"kind,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. For more // information about using the MD5 hash, see Hashes and ETags: Best // Practices. Md5Hash string `json:"md5Hash,omitempty"` // MediaLink: Media download link. MediaLink string `json:"mediaLink,omitempty"` // Metadata: User-provided metadata, in key/value pairs. Metadata map[string]string `json:"metadata,omitempty"` // Metageneration: The version of the metadata for this object at this // generation. Used for preconditions and for detecting changes in // metadata. A metageneration number is only meaningful in the context // of a particular generation of a particular object. Metageneration int64 `json:"metageneration,omitempty,string"` // Name: The name of the object. Required if not specified by URL // parameter. Name string `json:"name,omitempty"` // Owner: The owner of the object. This will always be the uploader of // the object. Owner *ObjectOwner `json:"owner,omitempty"` // SelfLink: The link to this object. SelfLink string `json:"selfLink,omitempty"` // Size: Content-Length of the data in bytes. Size uint64 `json:"size,omitempty,string"` // StorageClass: Storage class of the object. StorageClass string `json:"storageClass,omitempty"` // TimeCreated: The creation time of the object in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` // TimeDeleted: The deletion time of the object in RFC 3339 format. Will // be returned if and only if this version of the object has been // deleted. TimeDeleted string `json:"timeDeleted,omitempty"` // TimeStorageClassUpdated: The time at which the object's storage class // was last changed. When the object is initially created, it will be // set to timeCreated. TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` // Updated: The modification time of the object metadata in RFC 3339 // format. Updated string `json:"updated,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Acl") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Acl") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Object) MarshalJSON() ([]byte, error) { type noMethod Object raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ObjectCustomerEncryption: Metadata of customer-supplied encryption // key, if the object is encrypted by such a key. type ObjectCustomerEncryption struct { // EncryptionAlgorithm: The encryption algorithm. EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` // KeySha256: SHA256 hash value of the encryption key. KeySha256 string `json:"keySha256,omitempty"` // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { type noMethod ObjectCustomerEncryption raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ObjectOwner: The owner of the object. This will always be the // uploader of the object. type ObjectOwner struct { // Entity: The entity, in the form user-userId. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity. EntityId string `json:"entityId,omitempty"` // ForceSendFields is a list of field names (e.g. "Entity") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Entity") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ObjectOwner) MarshalJSON() ([]byte, error) { type noMethod ObjectOwner raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ObjectAccessControl: An access-control entry. type ObjectAccessControl struct { // Bucket: The name of the bucket. Bucket string `json:"bucket,omitempty"` // Domain: The domain associated with the entity, if any. Domain string `json:"domain,omitempty"` // Email: The email address associated with the entity, if any. 
Email string `json:"email,omitempty"` // Entity: The entity holding the permission, in one of the following // forms: // - user-userId // - user-email // - group-groupId // - group-email // - domain-domain // - project-team-projectId // - allUsers // - allAuthenticatedUsers Examples: // - The user [email protected] would be [email protected]. // - The group [email protected] would be // [email protected]. // - To refer to all members of the Google Apps for Business domain // example.com, the entity would be domain-example.com. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity, if any. EntityId string `json:"entityId,omitempty"` // Etag: HTTP 1.1 Entity tag for the access-control entry. Etag string `json:"etag,omitempty"` // Generation: The content generation of the object, if applied to an // object. Generation int64 `json:"generation,omitempty,string"` // Id: The ID of the access-control entry. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For object access control entries, // this is always storage#objectAccessControl. Kind string `json:"kind,omitempty"` // Object: The name of the object, if applied to an object. Object string `json:"object,omitempty"` // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` // Role: The access permission for the entity. Role string `json:"role,omitempty"` // SelfLink: The link to this access-control entry. SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Bucket") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Bucket") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { type noMethod ObjectAccessControl raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ObjectAccessControlProjectTeam: The project team associated with the // entity, if any. type ObjectAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` // Team: The team. Team string `json:"team,omitempty"` // ForceSendFields is a list of field names (e.g. "ProjectNumber") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"ProjectNumber") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type noMethod ObjectAccessControlProjectTeam raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ObjectAccessControls: An access-control list. type ObjectAccessControls struct { // Items: The list of items. Items []*ObjectAccessControl `json:"items,omitempty"` // Kind: The kind of item this is. For lists of object access control // entries, this is always storage#objectAccessControls. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Items") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Items") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { type noMethod ObjectAccessControls raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Objects: A list of objects. type Objects struct { // Items: The list of items. Items []*Object `json:"items,omitempty"` // Kind: The kind of item this is. For lists of objects, this is always // storage#objects. Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large // result sets. Provide this value in a subsequent request to return the // next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Prefixes: The list of prefixes of objects matching-but-not-listed up // to and including the requested delimiter. Prefixes []string `json:"prefixes,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Items") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Items") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
	// However, any field with an empty value appearing in NullFields will
	// be sent to the server as null. It is an error if a field in this
	// list has a non-empty value. This may be used to include null fields
	// in Patch requests.
	NullFields []string `json:"-"`
}

func (s *Objects) MarshalJSON() ([]byte, error) {
	type noMethod Objects
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// Policy: A bucket/object IAM policy.
type Policy struct {
	// Bindings: An association between a role, which comes with a set of
	// permissions, and members who may assume that role.
	Bindings []*PolicyBindings `json:"bindings,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the policy.
	Etag string `json:"etag,omitempty"`

	// Kind: The kind of item this is. For policies, this is always
	// storage#policy. This field is ignored on input.
	Kind string `json:"kind,omitempty"`

	// ResourceId: The ID of the resource to which this policy belongs. Will
	// be of the form buckets/bucket for buckets, and
	// buckets/bucket/objects/object for objects. A specific generation may
	// be specified by appending #generationNumber to the end of the object
	// name, e.g. buckets/my-bucket/objects/data.txt#17. The current
	// generation can be denoted with #0. This field is ignored on input.
	ResourceId string `json:"resourceId,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Bindings") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Bindings") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

func (s *Policy) MarshalJSON() ([]byte, error) {
	type noMethod Policy
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

type PolicyBindings struct {
	// Members: A collection of identifiers for members who may assume the
	// provided role. Recognized identifiers are as follows:
	// - allUsers — A special identifier that represents anyone on the
	// internet; with or without a Google account.
	// - allAuthenticatedUsers — A special identifier that represents
	// anyone who is authenticated with a Google account or a service
	// account.
	// - user:emailid — An email address that represents a specific
	// account. For example, user:alice@gmail.com or user:joe@example.com.
	// - serviceAccount:emailid — An email address that represents a
	// service account. For example,
	// serviceAccount:my-other-app@appspot.gserviceaccount.com.
	// - group:emailid — An email address that represents a Google group.
	// For example, group:admins@example.com.
	// - domain:domain — A Google Apps domain name that represents all the
	// users of that domain. For example, domain:google.com or
	// domain:example.com.
	// - projectOwner:projectid — Owners of the given project. For
	// example, projectOwner:my-example-project
	// - projectEditor:projectid — Editors of the given project. For
	// example, projectEditor:my-example-project
	// - projectViewer:projectid — Viewers of the given project. For
	// example, projectViewer:my-example-project
	Members []string `json:"members,omitempty"`

	// Role: The role to which members belong. Two types of roles are
	// supported: new IAM roles, which grant permissions that do not map
	// directly to those provided by ACLs, and legacy IAM roles, which do
	// map directly to ACL permissions. All roles are of the format
	// roles/storage.specificRole.
	// The new IAM roles are:
	// - roles/storage.admin — Full control of Google Cloud Storage
	// resources.
	// - roles/storage.objectViewer — Read-Only access to Google Cloud
	// Storage objects.
	// - roles/storage.objectCreator — Access to create objects in Google
	// Cloud Storage.
	// - roles/storage.objectAdmin — Full control of Google Cloud Storage
	// objects. The legacy IAM roles are:
	// - roles/storage.legacyObjectReader — Read-only access to objects
	// without listing. Equivalent to an ACL entry on an object with the
	// READER role.
	// - roles/storage.legacyObjectOwner — Read/write access to existing
	// objects without listing. Equivalent to an ACL entry on an object with
	// the OWNER role.
	// - roles/storage.legacyBucketReader — Read access to buckets with
	// object listing. Equivalent to an ACL entry on a bucket with the
	// READER role.
	// - roles/storage.legacyBucketWriter — Read access to buckets with
	// object listing/creation/deletion. Equivalent to an ACL entry on a
	// bucket with the WRITER role.
	// - roles/storage.legacyBucketOwner — Read and write access to
	// existing buckets with object listing/creation/deletion. Equivalent to
	// an ACL entry on a bucket with the OWNER role.
	Role string `json:"role,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Members") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Members") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

func (s *PolicyBindings) MarshalJSON() ([]byte, error) {
	type noMethod PolicyBindings
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
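// The member and role formats documented above combine into a policy
// value. A short illustrative sketch follows; the member identifiers and
// roles are assumptions chosen from the formats listed for
// PolicyBindings, not values read back from any real bucket.
func exampleObjectViewerPolicy() *Policy {
	return &Policy{
		Bindings: []*PolicyBindings{
			{
				// Grant read access to anyone on the internet.
				Role:    "roles/storage.objectViewer",
				Members: []string{"allUsers"},
			},
			{
				// Grant full control of objects to one user account.
				Role:    "roles/storage.objectAdmin",
				Members: []string{"user:alice@gmail.com"},
			},
		},
	}
}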
// RewriteResponse: A rewrite response.
type RewriteResponse struct {
	// Done: true if the copy is finished; otherwise, false if the copy is
	// in progress. This property is always present in the response.
	Done bool `json:"done,omitempty"`

	// Kind: The kind of item this is.
	Kind string `json:"kind,omitempty"`

	// ObjectSize: The total size of the object being copied in bytes. This
	// property is always present in the response.
	ObjectSize uint64 `json:"objectSize,omitempty,string"`

	// Resource: A resource containing the metadata for the copied-to
	// object. This property is present in the response only when copying
	// completes.
	Resource *Object `json:"resource,omitempty"`

	// RewriteToken: A token to use in subsequent requests to continue
	// copying data. This token is present in the response only when there
	// is more data to copy.
	RewriteToken string `json:"rewriteToken,omitempty"`

	// TotalBytesRewritten: The total bytes written so far, which can be
	// used to provide a waiting user with a progress indicator. This
	// property is always present in the response.
	TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
	type noMethod RewriteResponse
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// TestIamPermissionsResponse: A
// storage.(buckets|objects).testIamPermissions response.
type TestIamPermissionsResponse struct {
	// Kind: The kind of item this is.
	Kind string `json:"kind,omitempty"`

	// Permissions: The permissions held by the caller. Permissions are
	// always of the format storage.resource.capability, where resource is
	// one of buckets or objects. The supported permissions are as follows:
	// - storage.buckets.delete — Delete bucket.
	// - storage.buckets.get — Read bucket metadata.
	// - storage.buckets.getIamPolicy — Read bucket IAM policy.
	// - storage.buckets.create — Create bucket.
	// - storage.buckets.list — List buckets.
	// - storage.buckets.setIamPolicy — Update bucket IAM policy.
	// - storage.buckets.update — Update bucket metadata.
	// - storage.objects.delete — Delete object.
	// - storage.objects.get — Read object data and metadata.
	// - storage.objects.getIamPolicy — Read object IAM policy.
	// - storage.objects.create — Create object.
	// - storage.objects.list — List objects.
	// - storage.objects.setIamPolicy — Update object IAM policy.
	// - storage.objects.update — Update object metadata.
	Permissions []string `json:"permissions,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Kind") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Kind") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
	type noMethod TestIamPermissionsResponse
	raw := noMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
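// Permissions above arrive as a flat slice of strings, so a caller
// checks a capability with a plain membership test. A small helper
// sketch follows; it is illustrative and not part of the generated
// surface.
func hasPermission(resp *TestIamPermissionsResponse, perm string) bool {
	if resp == nil {
		return false
	}
	for _, p := range resp.Permissions {
		// Entries are exact strings such as "storage.objects.get".
		if p == perm {
			return true
		}
	}
	return false
}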
// method id "storage.bucketAccessControls.delete":

type BucketAccessControlsDeleteCall struct {
	s          *Service
	bucket     string
	entity     string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Permanently deletes the ACL entry for the specified entity on
// the specified bucket.
func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
	c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.entity = entity
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"entity": c.entity,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.bucketAccessControls.delete" call.
func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.bucketAccessControls.delete",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }
}

// method id "storage.bucketAccessControls.get":

type BucketAccessControlsGetCall struct {
	s            *Service
	bucket       string
	entity       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns the ACL entry for the specified entity on the specified
// bucket.
func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
	c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.entity = entity
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"entity": c.entity,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.bucketAccessControls.get" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &BucketAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns the ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.bucketAccessControls.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }
}
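// A sketch of the typical builder flow for the Get call above: set an
// entity tag from a previous response, attach a context, and execute
// with Do. The bucket name and entity are placeholder assumptions, and
// svc is assumed to be a *Service constructed by this package's New
// function earlier in the file.
func exampleGetACLWithCaching(ctx context.Context, svc *Service, previousEtag string) (*BucketAccessControl, error) {
	call := svc.BucketAccessControls.Get("my-bucket", "allUsers")
	if previousEtag != "" {
		// Ask the server to reply 304 if the entry is unchanged.
		call = call.IfNoneMatch(previousEtag)
	}
	acl, err := call.Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		// A cached copy is still current; not treated as an error here.
		return nil, nil
	}
	return acl, err
}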
// method id "storage.bucketAccessControls.insert":

type BucketAccessControlsInsertCall struct {
	s                   *Service
	bucket              string
	bucketaccesscontrol *BucketAccessControl
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Insert: Creates a new ACL entry on the specified bucket.
func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {
	c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.bucketaccesscontrol = bucketaccesscontrol
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsInsertCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("POST", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.bucketAccessControls.insert" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &BucketAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new ACL entry on the specified bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.bucketAccessControls.insert",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl",
	//   "request": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }
}

// method id "storage.bucketAccessControls.list":

type BucketAccessControlsListCall struct {
	s            *Service
	bucket       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Retrieves ACL entries on the specified bucket.
func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
	c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.list" call. // Exactly one of *BucketAccessControls or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *BucketAccessControls.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves ACL entries on the specified bucket.", // "httpMethod": "GET", // "id": "storage.bucketAccessControls.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl", // "response": { // "$ref": "BucketAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.patch": type BucketAccessControlsPatchCall struct { s *Service bucket string entity string bucketaccesscontrol *BucketAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Patch: Updates an ACL entry on the specified bucket. This method // supports patch semantics. func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.bucketaccesscontrol = bucketaccesscontrol return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.patch" call. // Exactly one of *BucketAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *BucketAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.bucketAccessControls.patch", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl/{entity}", // "request": { // "$ref": "BucketAccessControl" // }, // "response": { // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.update": type BucketAccessControlsUpdateCall struct { s *Service bucket string entity string bucketaccesscontrol *BucketAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates an ACL entry on the specified bucket. func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.bucketaccesscontrol = bucketaccesscontrol return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.update" call. // Exactly one of *BucketAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *BucketAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) 
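// NOTE: the remainder of Do is the standard generated pipeline: issue the
// HTTP request, surface a 304 as a *googleapi.Error, run CheckResponse,
// and JSON-decode the body. A minimal caller-side sketch (assuming an
// authenticated *Service named svc; the bucket and entity values are
// illustrative):
//
//	acl, err := svc.BucketAccessControls.Update("example-bucket", "allUsers",
//		&BucketAccessControl{Role: "READER"}).Do()
//	if err != nil {
//		// handle the error, e.g. inspect it as a *googleapi.Error
//	}
//	_ = acl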
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified bucket.", // "httpMethod": "PUT", // "id": "storage.bucketAccessControls.update", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl/{entity}", // "request": { // "$ref": "BucketAccessControl" // }, // "response": { // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.buckets.delete": type BucketsDeleteCall struct { s *Service bucket string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Permanently deletes an empty bucket. func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": If set, only deletes the bucket if its // metageneration matches this value. func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": If set, only deletes the bucket if its // metageneration does not match this value. func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *BucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.delete" call. func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes an empty bucket.", // "httpMethod": "DELETE", // "id": "storage.buckets.delete", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "If set, only deletes the bucket if its metageneration matches this value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "If set, only deletes the bucket if its metageneration does not match this value.", // "format": "int64", // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.get": type BucketsGetCall struct { s *Service bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Returns metadata for the specified bucket. func (r *BucketsService) Get(bucket string) *BucketsGetCall { c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
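//
// A minimal sketch of requesting the full projection (assuming an
// authenticated *Service named svc; the bucket name is illustrative):
//
//	b, err := svc.Buckets.Get("example-bucket").Projection("full").Do()
//	if err == nil {
//		// b.Acl and b.DefaultObjectAcl are populated only with "full"
//	}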
func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.get" call. // Exactly one of *Bucket or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Bucket.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...)
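// NOTE: the remainder of Do is the standard generated pipeline. A hedged
// sketch of combining IfNoneMatch with googleapi.IsNotModified (svc and
// lastETag are illustrative placeholders, not part of this package):
//
//	b, err := svc.Buckets.Get("example-bucket").IfNoneMatch(lastETag).Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy of the bucket metadata is still current
//	} else if err != nil {
//		// a genuine failure
//	}
//	_ = b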
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Returns metadata for the specified bucket.", // "httpMethod": "GET", // "id": "storage.buckets.get", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.getIamPolicy": type BucketsGetIamPolicyCall struct { s *Service bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // GetIamPolicy: Returns an IAM policy for the specified bucket. func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled.
func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Returns an IAM policy for the specified bucket.", // "httpMethod": "GET", // "id": "storage.buckets.getIamPolicy", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/iam", // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.insert": type BucketsInsertCall struct { s *Service bucket *Bucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Insert: Creates a new bucket. func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("project", projectid) c.bucket = bucket return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // // Possible values: // "authenticatedRead" - Project team owners get OWNER access, and // allAuthenticatedUsers get READER access. 
// "private" - Project team owners get OWNER access. // "projectPrivate" - Project team members get access according to // their roles. // "publicRead" - Project team owners get OWNER access, and allUsers // get READER access. // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // PredefinedDefaultObjectAcl sets the optional parameter // "predefinedDefaultObjectAcl": Apply a predefined set of default // object access controls to this bucket. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the bucket resource // specifies acl or defaultObjectAcl properties, when it defaults to // full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.insert" call. // Exactly one of *Bucket or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either // *Bucket.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new bucket.", // "httpMethod": "POST", // "id": "storage.buckets.insert", // "parameterOrder": [ // "project" // ], // "parameters": { // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ // "authenticatedRead", // "private", // "projectPrivate", // "publicRead", // "publicReadWrite" // ], // "enumDescriptions": [ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", // "Project team owners get OWNER access.", // "Project team members get access according to their roles.", // "Project team owners get OWNER access, and allUsers get READER access.", // "Project team owners get OWNER access, and allUsers get WRITER access." // ], // "location": "query", // "type": "string" // }, // "predefinedDefaultObjectAcl": { // "description": "Apply a predefined set of default object access controls to this bucket.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "project": { // "description": "A valid API project identifier.", // "location": "query", // "required": true, // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit owner, acl and defaultObjectAcl properties." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b", // "request": { // "$ref": "Bucket" // }, // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.list": type BucketsListCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Retrieves a list of buckets for a given project. func (r *BucketsService) List(projectid string) *BucketsListCall { c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("project", projectid) return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of buckets to return in a single response. The service will use this // parameter or 1,000 items, whichever is smaller. func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Prefix sets the optional parameter "prefix": Filter results to // buckets whose names begin with this prefix. func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { c.urlParams_.Set("prefix", prefix) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsListCall) Projection(projection string) *BucketsListCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request.
func (c *BucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.list" call. // Exactly one of *Buckets or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Buckets.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves a list of buckets for a given project.", // "httpMethod": "GET", // "id": "storage.buckets.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "maxResults": { // "default": "1000", // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, // "prefix": { // "description": "Filter results to buckets whose names begin with this prefix.", // "location": "query", // "type": "string" // }, // "project": { // "description": "A valid API project identifier.", // "location": "query", // "required": true, // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" // } // }, // "path": "b", // "response": { // "$ref": "Buckets" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "storage.buckets.patch": type BucketsPatchCall struct { s *Service bucket string bucket2 *Bucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Patch: Updates a bucket. Changes to the bucket will be readable // immediately after writing, but configuration changes may take time to // propagate. This method supports patch semantics. func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.bucket2 = bucket2 return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // // Possible values: // "authenticatedRead" - Project team owners get OWNER access, and // allAuthenticatedUsers get READER access. // "private" - Project team owners get OWNER access. // "projectPrivate" - Project team members get access according to // their roles. // "publicRead" - Project team owners get OWNER access, and allUsers // get READER access. // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // PredefinedDefaultObjectAcl sets the optional parameter // "predefinedDefaultObjectAcl": Apply a predefined set of default // object access controls to this bucket. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. 
func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.patch" call. // Exactly one of *Bucket or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Bucket.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. 
This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.buckets.patch", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ // "authenticatedRead", // "private", // "projectPrivate", // "publicRead", // "publicReadWrite" // ], // "enumDescriptions": [ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", // "Project team owners get OWNER access.", // "Project team members get access according to their roles.", // "Project team owners get OWNER access, and allUsers get READER access.", // "Project team owners get OWNER access, and allUsers get WRITER access." // ], // "location": "query", // "type": "string" // }, // "predefinedDefaultObjectAcl": { // "description": "Apply a predefined set of default object access controls to this bucket.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "request": { // "$ref": "Bucket" // }, // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.buckets.setIamPolicy": type BucketsSetIamPolicyCall struct { s *Service bucket string policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SetIamPolicy: Updates an IAM policy for the specified bucket. func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an IAM policy for the specified bucket.", // "httpMethod": "PUT", // "id": "storage.buckets.setIamPolicy", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/iam", // "request": { // "$ref": "Policy" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.testIamPermissions": type BucketsTestIamPermissionsCall struct { s *Service bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // TestIamPermissions: Tests a set of permissions on the given bucket to // see which, if any, are held by the caller. 
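//
// A minimal sketch (assuming an authenticated *Service named svc; the
// bucket name and permission strings are illustrative):
//
//	resp, err := svc.Buckets.TestIamPermissions("example-bucket",
//		[]string{"storage.buckets.get", "storage.objects.list"}).Do()
//	if err == nil {
//		// resp.Permissions holds the subset the caller actually has
//	}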
func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.testIamPermissions" call. // Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *TestIamPermissionsResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", // "httpMethod": "GET", // "id": "storage.buckets.testIamPermissions", // "parameterOrder": [ // "bucket", // "permissions" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "permissions": { // "description": "Permissions to test.", // "location": "query", // "repeated": true, // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/iam/testPermissions", // "response": { // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.update": type BucketsUpdateCall struct { s *Service bucket string bucket2 *Bucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates a bucket. Changes to the bucket will be readable // immediately after writing, but configuration changes may take time to // propagate. func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.bucket2 = bucket2 return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // // Possible values: // "authenticatedRead" - Project team owners get OWNER access, and // allAuthenticatedUsers get READER access. // "private" - Project team owners get OWNER access. // "projectPrivate" - Project team members get access according to // their roles. // "publicRead" - Project team owners get OWNER access, and allUsers // get READER access. // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. 
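//
// For example, to make a bucket publicly readable as part of the same
// update (sketch; svc and modifiedBucket are illustrative placeholders):
//
//	b, err := svc.Buckets.Update("example-bucket", modifiedBucket).
//		PredefinedAcl("publicRead").Do()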
func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // PredefinedDefaultObjectAcl sets the optional parameter // "predefinedDefaultObjectAcl": Apply a predefined set of default // object access controls to this bucket. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *BucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.update" call. // Exactly one of *Bucket or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Bucket.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", // "httpMethod": "PUT", // "id": "storage.buckets.update", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ // "authenticatedRead", // "private", // "projectPrivate", // "publicRead", // "publicReadWrite" // ], // "enumDescriptions": [ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", // "Project team owners get OWNER access.", // "Project team members get access according to their roles.", // "Project team owners get OWNER access, and allUsers get READER access.", // "Project team owners get OWNER access, and allUsers get WRITER access." // ], // "location": "query", // "type": "string" // }, // "predefinedDefaultObjectAcl": { // "description": "Apply a predefined set of default object access controls to this bucket.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit owner, acl and defaultObjectAcl properties." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "request": { // "$ref": "Bucket" // }, // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.channels.stop": type ChannelsStopCall struct { s *Service channel *Channel urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Stop: Stop watching resources through this channel func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.channel = channel return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ChannelsStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.channels.stop" call. func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Stop watching resources through this channel", // "httpMethod": "POST", // "id": "storage.channels.stop", // "path": "channels/stop", // "request": { // "$ref": "Channel", // "parameterName": "resource" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.defaultObjectAccessControls.delete": type DefaultObjectAccessControlsDeleteCall struct { s *Service bucket string entity string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. 
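// A minimal usage sketch (editorial addition; svc is an assumed *Service and
// the bucket name is hypothetical). This removes whatever default grant the
// allUsers entity currently holds:
//
//	if err := svc.DefaultObjectAccessControls.Delete("my-bucket", "allUsers").Do(); err != nil {
//		// handle the error
//	}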
func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.delete" call. func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", // "httpMethod": "DELETE", // "id": "storage.defaultObjectAccessControls.delete", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.get": type DefaultObjectAccessControlsGetCall struct { s *Service bucket string entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. 
func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.get" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", // "id": "storage.defaultObjectAccessControls.get", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.insert": type DefaultObjectAccessControlsInsertCall struct { s *Service bucket string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Insert: Creates a new default object ACL entry on the specified // bucket. func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.objectaccesscontrol = objectaccesscontrol return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.insert" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new default object ACL entry on the specified bucket.", // "httpMethod": "POST", // "id": "storage.defaultObjectAccessControls.insert", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.list": type DefaultObjectAccessControlsListCall struct { s *Service bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Retrieves default object ACL entries on the specified bucket. func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": If present, only return default ACL listing // if the bucket's current metageneration matches this value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": If present, only return default ACL // listing if the bucket's current metageneration does not match the // given value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *DefaultObjectAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.list" call. // Exactly one of *ObjectAccessControls or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControls.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves default object ACL entries on the specified bucket.", // "httpMethod": "GET", // "id": "storage.defaultObjectAccessControls.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl", // "response": { // "$ref": "ObjectAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.patch": type DefaultObjectAccessControlsPatchCall struct { s *Service bucket string entity string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Patch: Updates a default object ACL entry on the specified bucket. // This method supports patch semantics. func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
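// A minimal usage sketch (editorial addition). Header is the hook for one-off
// request headers that have no dedicated setter; the header name below is
// purely illustrative, and svc is an assumed, previously built *Service:
//
//	call := svc.DefaultObjectAccessControls.Patch("my-bucket", "allUsers",
//		&ObjectAccessControl{Role: "READER"})
//	call.Header().Set("X-Example-Trace-Id", "debug-123")
//	acl, err := call.Do()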
func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.patch" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.defaultObjectAccessControls.patch", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.update": type DefaultObjectAccessControlsUpdateCall struct { s *Service bucket string entity string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates a default object ACL entry on the specified bucket. 
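// A minimal usage sketch (editorial addition; all names are hypothetical).
// Unlike Patch, Update replaces the whole ACL entry, so the body should carry
// every field that must survive the write:
//
//	acl, err := svc.DefaultObjectAccessControls.Update("my-bucket", "allAuthenticatedUsers",
//		&ObjectAccessControl{Entity: "allAuthenticatedUsers", Role: "READER"}).Do()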
func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.update" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates a default object ACL entry on the specified bucket.", // "httpMethod": "PUT", // "id": "storage.defaultObjectAccessControls.update", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.delete": type ObjectAccessControlsDeleteCall struct { s *Service bucket string object string entity string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
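// A minimal usage sketch (editorial addition; svc, the names, and the
// generation number are hypothetical). Generation pins the delete to a single
// object revision instead of the latest one:
//
//	err := svc.ObjectAccessControls.Delete("my-bucket", "my-object", "allUsers").
//		Generation(1234567890).
//		Do()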
func (c *ObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.delete" call. func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", // "httpMethod": "DELETE", // "id": "storage.objectAccessControls.delete", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.get": type ObjectAccessControlsGetCall struct { s *Service bucket string object string entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Returns the ACL entry for the specified entity on the specified // object.<|fim▁hole|> c.bucket = bucket c.object = object c.entity = entity return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
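// A minimal usage sketch (editorial addition; svc is an assumed *Service and
// lastETag holds the Etag saved from an earlier response). Fields trims the
// reply to the named properties, and IfNoneMatch turns an unchanged resource
// into a detectable http.StatusNotModified error:
//
//	acl, err := svc.ObjectAccessControls.Get("my-bucket", "my-object", "allUsers").
//		Fields("entity", "role").
//		IfNoneMatch(lastETag).
//		Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy is still current
//	}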
func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.get" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Returns the ACL entry for the specified entity on the specified object.", // "httpMethod": "GET", // "id": "storage.objectAccessControls.get", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission.
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.insert": type ObjectAccessControlsInsertCall struct { s *Service bucket string object string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Insert: Creates a new ACL entry on the specified object. func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.objectaccesscontrol = objectaccesscontrol return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.insert" call. 
// Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new ACL entry on the specified object.", // "httpMethod": "POST", // "id": "storage.objectAccessControls.insert", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.list": type ObjectAccessControlsListCall struct { s *Service bucket string object string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Retrieves ACL entries on the specified object. func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. 
This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.list" call. // Exactly one of *ObjectAccessControls or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControls.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves ACL entries on the specified object.", // "httpMethod": "GET", // "id": "storage.objectAccessControls.list", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl", // "response": { // "$ref": "ObjectAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.patch": type ObjectAccessControlsPatchCall struct { s *Service bucket string object string entity string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Patch: Updates an ACL entry on the specified object. This method // supports patch semantics. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.patch" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
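// A minimal usage sketch (editorial addition; svc and all names are
// hypothetical). Patch sends only the fields set in the request body:
//
//	acl, err := svc.ObjectAccessControls.Patch("my-bucket", "my-object",
//		"user-jane@example.com", &ObjectAccessControl{Role: "OWNER"}).Do()
//	if err != nil {
//		// handle the error
//	}
//	_ = acl.Etag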
func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.objectAccessControls.patch", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.update": type ObjectAccessControlsUpdateCall struct { s *Service bucket string object string entity string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates an ACL entry on the specified object. func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.update" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified object.", // "httpMethod": "PUT", // "id": "storage.objectAccessControls.update", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objects.compose": type ObjectsComposeCall struct { s *Service destinationBucket string destinationObject string composerequest *ComposeRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Compose: Concatenates a list of existing objects into a new object in // the same bucket. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.composerequest = composerequest return c } // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do and Download // methods. Any pending HTTP request will be aborted if the provided // context is canceled. func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
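//
// Illustrative usage sketch (hypothetical names throughout): composing two
// source objects into a single destination object in the same bucket. The
// IfGenerationMatch(0) precondition makes the compose succeed only if the
// destination does not already exist.
//
//	req := &ComposeRequest{
//		Destination:   &Object{ContentType: "text/plain"},
//		SourceObjects: []*ComposeRequestSourceObjects{{Name: "part-000"}, {Name: "part-001"}},
//	}
//	combined, err := svc.Objects.Compose("example-bucket", "combined.txt", req).
//		IfGenerationMatch(0).
//		Do()
//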
func (c *ObjectsComposeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() return nil, err } return res, nil } // Do executes the "storage.objects.compose" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Concatenates a list of existing objects into a new object in the same bucket.", // "httpMethod": "POST", // "id": "storage.objects.compose", // "parameterOrder": [ // "destinationBucket", // "destinationObject" // ], // "parameters": { // "destinationBucket": { // "description": "Name of the bucket in which to store the new object.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationObject": { // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationPredefinedAcl": { // "description": "Apply a predefined set of access controls to the destination object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // } // }, // "path": "b/{destinationBucket}/o/{destinationObject}/compose", // "request": { // "$ref": "ComposeRequest" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true, // "useMediaDownloadService": true // } } // method id "storage.objects.copy": type ObjectsCopyCall struct { s *Service sourceBucket string sourceObject string destinationBucket string destinationObject string object *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Copy: Copies a source object to a destination object. Optionally // overrides metadata. func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket c.sourceObject = sourceObject c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.object = object return c } // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. 
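//
// Illustrative usage sketch (hypothetical names): copying an object across
// buckets with the Copy call above. The trailing *Object carries metadata
// overrides for the destination; an empty value leaves the copied metadata
// as-is.
//
//	copied, err := svc.Objects.Copy("src-bucket", "report.csv", "dst-bucket", "report.csv", &Object{}).Do()
//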
func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the destination object's // current generation matches the given value. func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the destination object's current generation does not match the given // value. func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the destination object's current metageneration matches the given // value. func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the destination object's current metageneration does not // match the given value. func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // IfSourceGenerationMatch sets the optional parameter // "ifSourceGenerationMatch": Makes the operation conditional on whether // the source object's generation matches the given value. func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } // IfSourceGenerationNotMatch sets the optional parameter // "ifSourceGenerationNotMatch": Makes the operation conditional on // whether the source object's generation does not match the given // value. func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } // IfSourceMetagenerationMatch sets the optional parameter // "ifSourceMetagenerationMatch": Makes the operation conditional on // whether the source object's current metageneration matches the given // value. func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } // IfSourceMetagenerationNotMatch sets the optional parameter // "ifSourceMetagenerationNotMatch": Makes the operation conditional on // whether the source object's current metageneration does not match the // given value. func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } // Projection sets the optional parameter "projection": Set of // properties to return. 
Defaults to noAcl, unless the object resource // specifies the acl property, when it defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { c.urlParams_.Set("projection", projection) return c } // SourceGeneration sets the optional parameter "sourceGeneration": If // present, selects a specific revision of the source object (as opposed // to the latest version, the default). func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do and Download // methods. Any pending HTTP request will be aborted if the provided // context is canceled. func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsCopyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() return nil, err } return res, nil } // Do executes the "storage.objects.copy" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
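// The 304 branch below is part of every generated Do method: a Not Modified
// response is surfaced as a *googleapi.Error so callers can separate
// "unchanged" from a real failure. A caller-side sketch ("call" is a
// hypothetical placeholder):
//
//	obj, err := call.Do()
//	if googleapi.IsNotModified(err) {
//		// the previously fetched copy is still current
//	}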
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Copies a source object to a destination object. Optionally overrides metadata.", // "httpMethod": "POST", // "id": "storage.objects.copy", // "parameterOrder": [ // "sourceBucket", // "sourceObject", // "destinationBucket", // "destinationObject" // ], // "parameters": { // "destinationBucket": { // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationPredefinedAcl": { // "description": "Apply a predefined set of access controls to the destination object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access."
// ], // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationMatch": { // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" // }, // "sourceBucket": { // "description": "Name of the bucket in which to find the source object.", // "location": "path", // "required": true, // "type": "string" // }, // "sourceGeneration": { // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "sourceObject": { // "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true, // "useMediaDownloadService": true // } } // method id "storage.objects.delete": type ObjectsDeleteCall struct { s *Service bucket string object string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Deletes an object and its metadata. Deletions are permanent // if versioning is not enabled for the bucket, or if the generation // parameter is used. func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // permanently deletes a specific revision of this object (as opposed to // the latest version, the default). func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
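//
// Illustrative usage sketch (hypothetical names): deleting an object only if
// it is still at a generation read earlier, so a concurrent overwrite is not
// destroyed by mistake. Note that for deletes, Do returns only an error.
//
//	err := svc.Objects.Delete("example-bucket", "notes.txt").
//		IfGenerationMatch(gen). // gen captured from a prior Get
//		Do()
//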
func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.delete" call. func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", // "httpMethod": "DELETE", // "id": "storage.objects.delete", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.get": type ObjectsGetCall struct { s *Service bucket string object string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Retrieves an object or its metadata. func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's generation // matches the given value. func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's generation does not match the given value. func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do and Download // methods. Any pending HTTP request will be aborted if the provided // context is canceled. func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() return nil, err } return res, nil } // Do executes the "storage.objects.get" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves an object or its metadata.", // "httpMethod": "GET", // "id": "storage.objects.get", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true, // "useMediaDownloadService": true // } } // method id "storage.objects.getIamPolicy": type ObjectsGetIamPolicyCall struct { s *Service bucket string object string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // GetIamPolicy: Returns an IAM policy for the specified object. 
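//
// Illustrative usage sketch (hypothetical names): fetching the IAM policy of
// one object; the returned *Policy holds the role bindings for that object.
//
//	policy, err := svc.Objects.GetIamPolicy("example-bucket", "notes.txt").Do()
//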
func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was // because http.StatusNotModified was returned.
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Returns an IAM policy for the specified object.", // "httpMethod": "GET", // "id": "storage.objects.getIamPolicy", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/iam", // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.insert": type ObjectsInsertCall struct { s *Service bucket string object *Object urlParams_ gensupport.URLParams media_ io.Reader mediaBuffer_ *gensupport.MediaBuffer mediaType_ string mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. progressUpdater_ googleapi.ProgressUpdater ctx_ context.Context header_ http.Header } // Insert: Stores a new object and metadata. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c } // ContentEncoding sets the optional parameter "contentEncoding": If // set, sets the contentEncoding property of the final object to this // value. Setting this parameter is equivalent to setting the // contentEncoding metadata property. This can be useful when uploading // an object with uploadType=media to indicate the encoding of the // content being uploaded. func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { c.urlParams_.Set("contentEncoding", contentEncoding) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. 
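//
// Illustrative usage sketch (hypothetical names): uploading a local file via
// the Media option defined further below; the Content-Type is sniffed from
// the reader unless one is supplied explicitly.
//
//	f, _ := os.Open("notes.txt") // error handling elided
//	defer f.Close()
//	obj, err := svc.Objects.Insert("example-bucket", &Object{Name: "notes.txt"}).
//		Media(f).
//		Do()
//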
func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Name sets the optional parameter "name": Name of the object. Required // when the object metadata is not otherwise provided. Overrides the // object metadata's name value, if any. For information about how to // URL encode object names to be path safe, see Encoding URI Path Parts. func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { c.urlParams_.Set("name", name) return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the object resource // specifies the acl property, when it defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { c.urlParams_.Set("projection", projection) return c } // Media specifies the media to upload in one or more chunks. The chunk // size may be controlled by supplying a MediaOption generated by // googleapi.ChunkSize. The chunk size defaults to // googleapi.DefaultUploadChunkSize. The Content-Type header used in the // upload request will be determined by sniffing the contents of r, // unless a MediaOption generated by googleapi.ContentType is // supplied. // At most one of Media and ResumableMedia may be set. func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { if ct := c.object.ContentType; ct != "" { options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)
} opts := googleapi.ProcessMediaOptions(options) chunkSize := opts.ChunkSize if !opts.ForceEmptyContentType { r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) } c.media_, c.mediaBuffer_ = gensupport.PrepareUpload(r, chunkSize) return c } // ResumableMedia specifies the media to upload in chunks and can be // canceled with ctx. // // Deprecated: use Media instead. // // At most one of Media and ResumableMedia may be set. mediaType // identifies the MIME media type of the upload, such as "image/png". If // mediaType is "", it will be auto-detected. The provided ctx will // supersede any context previously provided to the Context method. func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { c.ctx_ = ctx rdr := gensupport.ReaderAtToReader(r, size) rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) c.mediaBuffer_ = gensupport.NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize) c.media_ = nil c.mediaSize_ = size return c } // ProgressUpdater provides a callback function that will be called // after every chunk. It should be a low-latency function in order to // not slow down the upload operation. This should only be called when // using ResumableMedia (as opposed to Media). func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { c.progressUpdater_ = pu return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. // This context will supersede any context previously provided to the // ResumableMedia method. func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") if c.media_ != nil || c.mediaBuffer_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) protocol := "multipart" if c.mediaBuffer_ != nil { protocol = "resumable" } c.urlParams_.Set("uploadType", protocol) } if body == nil { body = new(bytes.Buffer) reqHeaders.Set("Content-Type", "application/json") } if c.media_ != nil { combined, ctype := gensupport.CombineBodyMedia(body, "application/json", c.media_, c.mediaType_) defer combined.Close() reqHeaders.Set("Content-Type", ctype) body = combined } if c.mediaBuffer_ != nil && c.mediaType_ != "" { reqHeaders.Set("X-Upload-Content-Type", c.mediaType_) } urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } if c.mediaBuffer_ != nil { loc := res.Header.Get("Location") rx := &gensupport.ResumableUpload{ Client: c.s.client, UserAgent: c.s.userAgent(), URI: loc, Media: c.mediaBuffer_, MediaType: c.mediaType_, Callback: func(curr int64) { if c.progressUpdater_ != nil { c.progressUpdater_(curr, c.mediaSize_) } }, } ctx := c.ctx_ if ctx == nil { ctx = context.TODO() } res, err = rx.Upload(ctx) if err != nil { return nil, err } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { return nil, err } } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Stores a new object and metadata.", // "httpMethod": "POST", // "id": "storage.objects.insert", // "mediaUpload": { // "accept": [ // "*/*" // ], // "protocols": { // "resumable": { // "multipart": true, // "path": "/resumable/upload/storage/v1/b/{bucket}/o" // }, // "simple": { // "multipart": true, // "path": "/upload/storage/v1/b/{bucket}/o" // } // } // }, // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "contentEncoding": { // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. 
This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "name": { // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "query", // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true, // "supportsMediaUpload": true, // "useMediaDownloadService": true // } } // method id "storage.objects.list": type ObjectsListCall struct { s *Service bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Retrieves a list of objects matching the criteria. func (r *ObjectsService) List(bucket string) *ObjectsListCall { c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } // Delimiter sets the optional parameter "delimiter": Returns results in // a directory-like mode. 
items will contain only objects whose names, // aside from the prefix, do not contain delimiter. Objects whose names, // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. Duplicate // prefixes are omitted. func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { c.urlParams_.Set("delimiter", delimiter) return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of items plus prefixes to return in a single page of responses. As // duplicate prefixes are omitted, fewer total results may be returned // than requested. The service will use this parameter or 1,000 items, // whichever is smaller. func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Prefix sets the optional parameter "prefix": Filter results to // objects whose names begin with this prefix. func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { c.urlParams_.Set("prefix", prefix) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { c.urlParams_.Set("projection", projection) return c } // Versions sets the optional parameter "versions": If true, lists all // versions of an object as distinct results. The default is false. For // more information, see Object Versioning. func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request.
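// Editor's sketch (not generated code): a minimal, hedged example of the
// upload path for storage.objects.insert described by the Media and
// ResumableMedia docs above. The service value svc, the bucket name, the
// object name, and the reader are illustrative assumptions, not part of
// this file.
//
//	func exampleInsert(ctx context.Context, svc *Service, r io.Reader) (*Object, error) {
//		// Hypothetical object metadata; Name and ContentType are schema fields
//		// the Insert call reads (ContentType seeds the upload's media type).
//		obj := &Object{Name: "notes.txt", ContentType: "text/plain"}
//		return svc.Objects.Insert("example-bucket", obj).
//			Media(r, googleapi.ChunkSize(googleapi.DefaultUploadChunkSize)). // chunked upload
//			Context(ctx).
//			Do()
//	}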
func (c *ObjectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.list" call. // Exactly one of *Objects or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Objects.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Objects{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves a list of objects matching the criteria.", // "httpMethod": "GET", // "id": "storage.objects.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which to look for objects.", // "location": "path", // "required": true, // "type": "string" // }, // "delimiter": { // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "1000", // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, // "prefix": { // "description": "Filter results to objects whose names begin with this prefix.", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." 
// ], // "location": "query", // "type": "string" // }, // "versions": { // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", // "location": "query", // "type": "boolean" // } // }, // "path": "b/{bucket}/o", // "response": { // "$ref": "Objects" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsSubscription": true // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "storage.objects.patch": type ObjectsPatchCall struct { s *Service bucket string object string object2 *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Patch: Updates an object's metadata. This method supports patch // semantics. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.object2 = object2 return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. 
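// Editor's sketch (not generated code): paging through list results with the
// Pages helper defined just above. The bucket, the prefix, and the Items
// field access are illustrative assumptions (Items is taken to be the
// results field on the Objects page type declared earlier in this file).
//
//	func exampleListAll(ctx context.Context, svc *Service) ([]string, error) {
//		var names []string
//		err := svc.Objects.List("example-bucket").Prefix("logs/").Pages(ctx, func(page *Objects) error {
//			for _, o := range page.Items {
//				names = append(names, o.Name)
//			}
//			return nil // a non-nil error here halts the iteration
//		})
//		return names, err
//	}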
func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.patch" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an object's metadata. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.objects.patch", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objects.rewrite": type ObjectsRewriteCall struct { s *Service sourceBucket string sourceObject string destinationBucket string destinationObject string object *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket c.sourceObject = sourceObject c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.object = object return c } // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the destination object's // current generation matches the given value. func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the destination object's current generation does not match the given // value. func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the destination object's current metageneration matches the given // value. func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the destination object's current metageneration does not // match the given value. 
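// Editor's sketch (not generated code): a guarded metadata change using the
// storage.objects.patch builder defined above. The bucket, the object name,
// and the Metadata map on Object are illustrative assumptions based on the
// schema declared earlier in this file.
//
//	func examplePatch(ctx context.Context, svc *Service, metageneration int64) (*Object, error) {
//		update := &Object{Metadata: map[string]string{"reviewed": "true"}}
//		return svc.Objects.Patch("example-bucket", "notes.txt", update).
//			IfMetagenerationMatch(metageneration). // fail rather than clobber a concurrent change
//			Context(ctx).
//			Do()
//	}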
func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // IfSourceGenerationMatch sets the optional parameter // "ifSourceGenerationMatch": Makes the operation conditional on whether // the source object's generation matches the given value. func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } // IfSourceGenerationNotMatch sets the optional parameter // "ifSourceGenerationNotMatch": Makes the operation conditional on // whether the source object's generation does not match the given // value. func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } // IfSourceMetagenerationMatch sets the optional parameter // "ifSourceMetagenerationMatch": Makes the operation conditional on // whether the source object's current metageneration matches the given // value. func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } // IfSourceMetagenerationNotMatch sets the optional parameter // "ifSourceMetagenerationNotMatch": Makes the operation conditional on // whether the source object's current metageneration does not match the // given value. func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } // MaxBytesRewrittenPerCall sets the optional parameter // "maxBytesRewrittenPerCall": The maximum number of bytes that will be // rewritten per rewrite request. Most callers shouldn't need to specify // this parameter - it is primarily in place to support testing. If // specified the value must be an integral multiple of 1 MiB (1048576). // Also, this only applies to requests where the source and destination // span locations and/or storage classes. Finally, this value must not // change across rewrite calls else you'll get an error that the // rewriteToken is invalid. func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the object resource // specifies the acl property, when it defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.urlParams_.Set("projection", projection) return c } // RewriteToken sets the optional parameter "rewriteToken": Include this // field (from the previous rewrite response) on each rewrite request // after the first one, until the rewrite response 'done' flag is true. // Calls that provide a rewriteToken can omit all other request fields, // but if included those fields must match the values provided in the // first rewrite request. 
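// Editor's sketch (not generated code): the resume loop that the rewriteToken
// documentation above describes. The Done, RewriteToken, and Resource fields
// on RewriteResponse are assumptions based on the schema declared earlier in
// this file; bucket and object names are illustrative.
//
//	func exampleRewrite(ctx context.Context, svc *Service) (*Object, error) {
//		call := svc.Objects.Rewrite("src-bucket", "big.bin", "dst-bucket", "big.bin", &Object{})
//		for {
//			resp, err := call.Context(ctx).Do()
//			if err != nil {
//				return nil, err
//			}
//			if resp.Done {
//				return resp.Resource, nil // rewrite finished; Resource is the new object
//			}
//			call.RewriteToken(resp.RewriteToken) // resume from where the last call stopped
//		}
//	}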
func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { c.urlParams_.Set("rewriteToken", rewriteToken) return c } // SourceGeneration sets the optional parameter "sourceGeneration": If // present, selects a specific revision of the source object (as opposed // to the latest version, the default). func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsRewriteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.rewrite" call. // Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *RewriteResponse.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &RewriteResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Rewrites a source object to a destination object. 
Optionally overrides metadata.", // "httpMethod": "POST", // "id": "storage.objects.rewrite", // "parameterOrder": [ // "sourceBucket", // "sourceObject", // "destinationBucket", // "destinationObject" // ], // "parameters": { // "destinationBucket": { // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationPredefinedAcl": { // "description": "Apply a predefined set of access controls to the destination object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationMatch": { // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "maxBytesRewrittenPerCall": { 
// "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" // }, // "rewriteToken": { // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", // "location": "query", // "type": "string" // }, // "sourceBucket": { // "description": "Name of the bucket in which to find the source object.", // "location": "path", // "required": true, // "type": "string" // }, // "sourceGeneration": { // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "sourceObject": { // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "RewriteResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.setIamPolicy": type ObjectsSetIamPolicyCall struct { s *Service bucket string object string policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SetIamPolicy: Updates an IAM policy for the specified object. func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.policy = policy return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an IAM policy for the specified object.", // "httpMethod": "PUT", // "id": "storage.objects.setIamPolicy", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/iam", // "request": { // "$ref": "Policy" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.testIamPermissions": type ObjectsTestIamPermissionsCall struct { s *Service bucket string object string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // TestIamPermissions: Tests a set of permissions on the given object to // see which, if any, are held by the caller. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?"
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.testIamPermissions" call. // Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *TestIamPermissionsResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", // "httpMethod": "GET", // "id": "storage.objects.testIamPermissions", // "parameterOrder": [ // "bucket", // "object", // "permissions" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "permissions": { // "description": "Permissions to test.", // "location": "query", // "repeated": true, // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/iam/testPermissions", // "response": { // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.update": type ObjectsUpdateCall struct { s *Service bucket string object string object2 *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates an object's metadata. 
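// Editor's sketch (not generated code): probing access with the
// storage.objects.testIamPermissions call completed above. The Permissions
// field on TestIamPermissionsResponse is an assumption based on the schema
// declared earlier in this file (the response echoes the subset of requested
// permissions the caller holds); names are illustrative.
//
//	func exampleCanRead(ctx context.Context, svc *Service) (bool, error) {
//		resp, err := svc.Objects.TestIamPermissions("example-bucket", "notes.txt",
//			[]string{"storage.objects.get"}).Context(ctx).Do()
//		if err != nil {
//			return false, err
//		}
//		return len(resp.Permissions) > 0, nil
//	}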
func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.object2 = object2 return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { c.urlParams_.Set("projection", projection) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
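// Editor's sketch (not generated code): a full-metadata update with a
// predefined ACL via the storage.objects.update builder above. Unlike the
// patch call, update is assumed to replace the writable metadata wholesale,
// so obj should carry every field the caller wants to keep; the bucket and
// object names are illustrative assumptions.
//
//	func examplePublish(ctx context.Context, svc *Service, obj *Object) (*Object, error) {
//		return svc.Objects.Update("example-bucket", "notes.txt", obj).
//			PredefinedAcl("publicRead"). // allUsers get READER access
//			Context(ctx).
//			Do()
//	}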
func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do and Download // methods. Any pending HTTP request will be aborted if the provided // context is canceled. func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() return nil, err } return res, nil } // Do executes the "storage.objects.update" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Updates an object's metadata.", // "httpMethod": "PUT", // "id": "storage.objects.update", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", // "required": true, // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ], // "supportsMediaDownload": true, // "useMediaDownloadService": true // } } // method id "storage.objects.watchAll": type ObjectsWatchAllCall struct { s *Service bucket string channel *Channel urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // WatchAll: Watch for changes on all objects in a bucket. func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.channel = channel return c } // Delimiter sets the optional parameter "delimiter": Returns results in // a directory-like mode. items will contain only objects whose names, // aside from the prefix, do not contain delimiter. Objects whose names, // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. Duplicate // prefixes are omitted. func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { c.urlParams_.Set("delimiter", delimiter) return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of items plus prefixes to return in a single page of responses. As // duplicate prefixes are omitted, fewer total results may be returned // than requested. The service will use this parameter or 1,000 items, // whichever is smaller. func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { c.urlParams_.Set("pageToken", pageToken) return c } // Prefix sets the optional parameter "prefix": Filter results to // objects whose names begin with this prefix. func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { c.urlParams_.Set("prefix", prefix) return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { c.urlParams_.Set("projection", projection) return c } // Versions sets the optional parameter "versions": If true, lists all // versions of an object as distinct results. The default is false. For // more information, see Object Versioning. func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ObjectsWatchAllCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.watchAll" call. // Exactly one of *Channel or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Channel.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Channel{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { // "description": "Watch for changes on all objects in a bucket.", // "httpMethod": "POST", // "id": "storage.objects.watchAll", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which to look for objects.", // "location": "path", // "required": true, // "type": "string" // }, // "delimiter": { // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "1000", // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, // "prefix": { // "description": "Filter results to objects whose names begin with this prefix.", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" // }, // "versions": { // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", // "location": "query", // "type": "boolean" // } // }, // "path": "b/{bucket}/o/watch", // "request": { // "$ref": "Channel", // "parameterName": "resource" // }, // "response": { // "$ref": "Channel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsSubscription": true // } }<|fim▁end|>
func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
	c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.object = object
	c.entity = entity
	return c
}
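// Editor's usage sketch (not part of the generated client): how a caller might
// drive the call builders above. The `svc.Objects` accessor, bucket/object
// names, and field list are illustrative assumptions; Fields, Context, and Do
// are the methods defined earlier in this file.
func exampleUpdateObject(ctx context.Context, svc *Service, obj *Object) (*Object, error) {
	call := svc.Objects.Update("my-bucket", "my-object", obj) // svc.Objects is assumed wiring
	call = call.Fields("name", "updated")                     // request a partial response
	return call.Context(ctx).Do()
}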
<|file_name|>char.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license<|fim▁hole|>// except according to those terms. //! Character manipulation (`char` type, Unicode Scalar Value) //! //! This module provides the `CharExt` trait, as well as its //! implementation for the primitive `char` type, in order to allow //! basic character manipulation. //! //! A `char` actually represents a //! *[Unicode Scalar //! Value](http://www.unicode.org/glossary/#unicode_scalar_value)*, as it can //! contain any Unicode code point except high-surrogate and low-surrogate code //! points. //! //! As such, only values in the ranges \[0x0,0xD7FF\] and \[0xE000,0x10FFFF\] //! (inclusive) are allowed. A `char` can always be safely cast to a `u32`; //! however the converse is not always true due to the above range limits //! and, as such, should be performed via the `from_u32` function. #![stable(feature = "rust1", since = "1.0.0")] #![doc(primitive = "char")] use core::char::CharExt as C; use core::option::Option::{self, Some, None}; use core::iter::Iterator; use tables::{derived_property, property, general_category, conversions, charwidth}; // stable reexports pub use core::char::{MAX, from_u32, from_digit, EscapeUnicode, EscapeDefault}; // unstable reexports #[allow(deprecated)] pub use normalize::{decompose_canonical, decompose_compatible, compose}; #[allow(deprecated)] pub use tables::normalization::canonical_combining_class; pub use tables::UNICODE_VERSION; /// An iterator over the lowercase mapping of a given character, returned from /// the [`to_lowercase` method](../primitive.char.html#method.to_lowercase) on /// characters. #[stable(feature = "rust1", since = "1.0.0")] pub struct ToLowercase(CaseMappingIter); #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for ToLowercase { type Item = char; fn next(&mut self) -> Option<char> { self.0.next() } } /// An iterator over the uppercase mapping of a given character, returned from /// the [`to_uppercase` method](../primitive.char.html#method.to_uppercase) on /// characters. #[stable(feature = "rust1", since = "1.0.0")] pub struct ToUppercase(CaseMappingIter); #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for ToUppercase { type Item = char; fn next(&mut self) -> Option<char> { self.0.next() } } /// An iterator over the titlecase mapping of a given character, returned from /// the [`to_titlecase` method](../primitive.char.html#method.to_titlecase) on /// characters. 
#[unstable(feature = "unicode", reason = "recently added")] pub struct ToTitlecase(CaseMappingIter); #[stable(feature = "unicode_case_mapping", since = "1.2.0")] impl Iterator for ToTitlecase { type Item = char; fn next(&mut self) -> Option<char> { self.0.next() } } enum CaseMappingIter { Three(char, char, char), Two(char, char), One(char), Zero } impl CaseMappingIter { fn new(chars: [char; 3]) -> CaseMappingIter { if chars[2] == '\0' { if chars[1] == '\0' { CaseMappingIter::One(chars[0]) // Including if chars[0] == '\0' } else { CaseMappingIter::Two(chars[0], chars[1]) } } else { CaseMappingIter::Three(chars[0], chars[1], chars[2]) } } } impl Iterator for CaseMappingIter { type Item = char; fn next(&mut self) -> Option<char> { match *self { CaseMappingIter::Three(a, b, c) => { *self = CaseMappingIter::Two(b, c); Some(a) } CaseMappingIter::Two(b, c) => { *self = CaseMappingIter::One(c); Some(b) } CaseMappingIter::One(c) => { *self = CaseMappingIter::Zero; Some(c) } CaseMappingIter::Zero => None, } } } #[stable(feature = "rust1", since = "1.0.0")] #[lang = "char"] impl char { /// Checks if a `char` parses as a numeric digit in the given radix. /// /// Compared to `is_numeric()`, this function only recognizes the characters /// `0-9`, `a-z` and `A-Z`. /// /// # Return value /// /// Returns `true` if `c` is a valid digit under `radix`, and `false` /// otherwise. /// /// # Panics /// /// Panics if given a radix > 36. /// /// # Examples /// /// ``` /// let c = '1'; /// /// assert!(c.is_digit(10)); /// /// assert!('f'.is_digit(16)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_digit(self, radix: u32) -> bool { C::is_digit(self, radix) } /// Converts a character to the corresponding digit. /// /// # Return value /// /// If `c` is between '0' and '9', the corresponding value between 0 and /// 9. If `c` is 'a' or 'A', 10. If `c` is 'b' or 'B', 11, etc. Returns /// none if the character does not refer to a digit in the given radix. /// /// # Panics /// /// Panics if given a radix outside the range [0..36]. /// /// # Examples /// /// ``` /// let c = '1'; /// /// assert_eq!(c.to_digit(10), Some(1)); /// /// assert_eq!('f'.to_digit(16), Some(15)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn to_digit(self, radix: u32) -> Option<u32> { C::to_digit(self, radix) } /// Returns an iterator that yields the hexadecimal Unicode escape of a /// character, as `char`s. /// /// All characters are escaped with Rust syntax of the form `\\u{NNNN}` /// where `NNNN` is the shortest hexadecimal representation of the code /// point. /// /// # Examples /// /// ``` /// for i in '❤'.escape_unicode() { /// println!("{}", i); /// } /// ``` /// /// This prints: /// /// ```text /// \ /// u /// { /// 2 /// 7 /// 6 /// 4 /// } /// ``` /// /// Collecting into a `String`: /// /// ``` /// let heart: String = '❤'.escape_unicode().collect(); /// /// assert_eq!(heart, r"\u{2764}"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn escape_unicode(self) -> EscapeUnicode { C::escape_unicode(self) } /// Returns an iterator that yields the 'default' ASCII and /// C++11-like literal escape of a character, as `char`s. /// /// The default is chosen with a bias toward producing literals that are /// legal in a variety of languages, including C++11 and similar C-family /// languages. The exact rules are: /// /// * Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively. /// * Single-quote, double-quote and backslash chars are backslash- /// escaped. 
    /// * Any other chars in the range [0x20,0x7e] are not escaped.
    /// * Any other chars are given hex Unicode escapes; see `escape_unicode`.
    ///
    /// # Examples
    ///
    /// ```
    /// for i in '"'.escape_default() {
    ///     println!("{}", i);
    /// }
    /// ```
    ///
    /// This prints:
    ///
    /// ```text
    /// \
    /// "
    /// ```
    ///
    /// Collecting into a `String`:
    ///
    /// ```
    /// let quote: String = '"'.escape_default().collect();
    ///
    /// assert_eq!(quote, "\\\"");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn escape_default(self) -> EscapeDefault {
        C::escape_default(self)
    }

    /// Returns the number of bytes this character would need if encoded in
    /// UTF-8.
    ///
    /// # Examples
    ///
    /// ```
    /// let n = 'ß'.len_utf8();
    ///
    /// assert_eq!(n, 2);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len_utf8(self) -> usize {
        C::len_utf8(self)
    }

    /// Returns the number of 16-bit code units this character would need if
    /// encoded in UTF-16.
    ///
    /// # Examples
    ///
    /// ```
    /// let n = 'ß'.len_utf16();
    ///
    /// assert_eq!(n, 1);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len_utf16(self) -> usize {
        C::len_utf16(self)
    }

    /// Encodes this character as UTF-8 into the provided byte buffer, and then
    /// returns the number of bytes written.
    ///
    /// If the buffer is not large enough, nothing will be written into it and a
    /// `None` will be returned. A buffer of length four is large enough to
    /// encode any `char`.
    ///
    /// # Examples
    ///
    /// In both of these examples, 'ß' takes two bytes to encode.
    ///
    /// ```
    /// # #![feature(unicode)]
    /// let mut b = [0; 2];
    ///
    /// let result = 'ß'.encode_utf8(&mut b);
    ///
    /// assert_eq!(result, Some(2));
    /// ```
    ///
    /// A buffer that's too small:
    ///
    /// ```
    /// # #![feature(unicode)]
    /// let mut b = [0; 1];
    ///
    /// let result = 'ß'.encode_utf8(&mut b);
    ///
    /// assert_eq!(result, None);
    /// ```
    #[unstable(feature = "unicode", reason = "pending decision about Iterator/Writer/Reader")]
    pub fn encode_utf8(self, dst: &mut [u8]) -> Option<usize> {
        C::encode_utf8(self, dst)
    }

    /// Encodes this character as UTF-16 into the provided `u16` buffer, and
    /// then returns the number of `u16`s written.
    ///
    /// If the buffer is not large enough, nothing will be written into it and a
    /// `None` will be returned. A buffer of length 2 is large enough to encode
    /// any `char`.
    ///
    /// # Examples
    ///
    /// In both of these examples, 'ß' takes one `u16` to encode.
    ///
    /// ```
    /// # #![feature(unicode)]
    /// let mut b = [0; 1];
    ///
    /// let result = 'ß'.encode_utf16(&mut b);
    ///
    /// assert_eq!(result, Some(1));
    /// ```
    ///
    /// A buffer that's too small:
    ///
    /// ```
    /// # #![feature(unicode)]
    /// let mut b = [0; 0];
    ///
    /// let result = 'ß'.encode_utf16(&mut b);
    ///
    /// assert_eq!(result, None);
    /// ```
    #[unstable(feature = "unicode", reason = "pending decision about Iterator/Writer/Reader")]
    pub fn encode_utf16(self, dst: &mut [u16]) -> Option<usize> {
        C::encode_utf16(self, dst)
    }

    /// Returns whether the specified character is considered a Unicode
    /// alphabetic code point.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn is_alphabetic(self) -> bool {
        match self {
            'a' ... 'z' | 'A' ... 'Z' => true,
            c if c > '\x7f' => derived_property::Alphabetic(c),
            _ => false
        }
    }

    /// Returns whether the specified character satisfies the 'XID_Start'
    /// Unicode property.
    ///
    /// 'XID_Start' is a Unicode Derived Property specified in
    /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
    /// mostly similar to ID_Start but modified for closure under NFKx.
#[unstable(feature = "unicode", reason = "mainly needed for compiler internals")] #[inline] pub fn is_xid_start(self) -> bool { derived_property::XID_Start(self) } /// Returns whether the specified `char` satisfies the 'XID_Continue' /// Unicode property. /// /// 'XID_Continue' is a Unicode Derived Property specified in /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications), /// mostly similar to 'ID_Continue' but modified for closure under NFKx. #[unstable(feature = "unicode", reason = "mainly needed for compiler internals")] #[inline] pub fn is_xid_continue(self) -> bool { derived_property::XID_Continue(self) } /// Indicates whether a character is in lowercase. /// /// This is defined according to the terms of the Unicode Derived Core /// Property `Lowercase`. #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_lowercase(self) -> bool { match self { 'a' ... 'z' => true, c if c > '\x7f' => derived_property::Lowercase(c), _ => false } } /// Indicates whether a character is in uppercase. /// /// This is defined according to the terms of the Unicode Derived Core /// Property `Uppercase`. #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_uppercase(self) -> bool { match self { 'A' ... 'Z' => true, c if c > '\x7f' => derived_property::Uppercase(c), _ => false } } /// Indicates whether a character is whitespace. /// /// Whitespace is defined in terms of the Unicode Property `White_Space`. #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_whitespace(self) -> bool { match self { ' ' | '\x09' ... '\x0d' => true, c if c > '\x7f' => property::White_Space(c), _ => false } } /// Indicates whether a character is alphanumeric. /// /// Alphanumericness is defined in terms of the Unicode General Categories /// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'. #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_alphanumeric(self) -> bool { self.is_alphabetic() || self.is_numeric() } /// Indicates whether a character is a control code point. /// /// Control code points are defined in terms of the Unicode General /// Category `Cc`. #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_control(self) -> bool { general_category::Cc(self) } /// Indicates whether the character is numeric (Nd, Nl, or No). #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_numeric(self) -> bool { match self { '0' ... '9' => true, c if c > '\x7f' => general_category::N(c), _ => false } } /// Converts a character to its lowercase equivalent. /// /// This performs complex unconditional mappings with no tailoring. /// See `to_uppercase()` for references and more information. /// /// # Return value /// /// Returns an iterator which yields the characters corresponding to the /// lowercase equivalent of the character. If no conversion is possible then /// an iterator with just the input character is returned. #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn to_lowercase(self) -> ToLowercase { ToLowercase(CaseMappingIter::new(conversions::to_lower(self))) } /// Converts a character to its titlecase equivalent. /// /// This performs complex unconditional mappings with no tailoring. /// See `to_uppercase()` for references and more information. /// /// This differs from `to_uppercase()` since Unicode contains /// digraphs and ligature characters. /// For example, U+01F3 “dz” and U+FB01 “fi” /// map to U+01F1 “DZ” and U+0046 U+0069 “Fi”, respectively. 
    ///
    /// # Return value
    ///
    /// Returns an iterator which yields the characters corresponding to the
    /// titlecase equivalent of the character. If no conversion is possible then
    /// an iterator with just the input character is returned.
    #[unstable(feature = "unicode", reason = "recently added")]
    #[inline]
    pub fn to_titlecase(self) -> ToTitlecase {
        ToTitlecase(CaseMappingIter::new(conversions::to_title(self)))
    }

    /// Converts a character to its uppercase equivalent.
    ///
    /// This performs complex unconditional mappings with no tailoring:
    /// it maps one Unicode character to its uppercase equivalent
    /// according to the Unicode database [1]
    /// and the additional complex mappings [`SpecialCasing.txt`].
    /// Conditional mappings (based on context or language) are not considered here.
    ///
    /// A full reference can be found here [2].
    ///
    /// # Return value
    ///
    /// Returns an iterator which yields the characters corresponding to the
    /// uppercase equivalent of the character. If no conversion is possible then
    /// an iterator with just the input character is returned.
    ///
    /// [1]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
    ///
    /// [`SpecialCasing.txt`]: ftp://ftp.unicode.org/Public/UNIDATA/SpecialCasing.txt
    ///
    /// [2]: http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_uppercase(self) -> ToUppercase {
        ToUppercase(CaseMappingIter::new(conversions::to_upper(self)))
    }

    /// Returns this character's displayed width in columns, or `None` if it is a
    /// control character other than `'\x00'`.
    ///
    /// `is_cjk` determines behavior for characters in the Ambiguous category:
    /// if `is_cjk` is `true`, these are 2 columns wide; otherwise, they are 1.
    /// In CJK contexts, `is_cjk` should be `true`, else it should be `false`.
    /// [Unicode Standard Annex #11](http://www.unicode.org/reports/tr11/)
    /// recommends that these characters be treated as 1 column (i.e.,
    /// `is_cjk` = `false`) if the context cannot be reliably determined.
    #[deprecated(reason = "use the crates.io `unicode-width` library instead",
                 since = "1.0.0")]
    #[unstable(feature = "unicode",
               reason = "needs expert opinion. is_cjk flag stands out as ugly")]
    pub fn width(self, is_cjk: bool) -> Option<usize> {
        charwidth::width(self, is_cjk)
    }
}<|fim▁end|>
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed
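// Editor's sketch (not part of the module above): exercising the case-mapping
// iterators it defines. The expected strings follow the Unicode mappings cited
// in the `to_uppercase` docs; 'ß' expands to two characters via SpecialCasing.txt,
// which is why the methods return iterators instead of a single `char`.
fn case_mapping_sketch() {
    let upper: String = 'ß'.to_uppercase().collect();
    assert_eq!(upper, "SS"); // one char can map to several
    let lower: String = 'A'.to_lowercase().collect();
    assert_eq!(lower, "a");
}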
<|file_name|>0091_auto_20180727_1844.py<|end_file_name|><|fim▁begin|># Generated by Django 1.11.11 on 2018-07-27 18:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('course_metadata', '0090_degree_curriculum_reset'), ] operations = [ migrations.AddField( model_name='degree', name='campus_image_desktop', field=models.ImageField(blank=True, help_text='Provide a campus image to display on desktop displays', null=True, upload_to='media/degree_marketing/campus_images/'), ), migrations.AddField( model_name='degree', name='campus_image_mobile', field=models.ImageField(blank=True, help_text='Provide a campus image to display on mobile displays', null=True, upload_to='media/degree_marketing/campus_images/'), ), migrations.AddField( model_name='degree', name='campus_image_tablet', field=models.ImageField(blank=True, help_text='Provide a campus image to display on tablet displays', null=True, upload_to='media/degree_marketing/campus_images/'), ),<|fim▁hole|> migrations.AddField( model_name='degree', name='overall_ranking', field=models.CharField(blank=True, help_text='Overall program ranking (e.g. "#1 in the U.S.")', max_length=255), ), migrations.AlterModelOptions( name='degree', options={'verbose_name_plural': 'Degrees'}, ), ]<|fim▁end|>
<|file_name|>PAYBYCREDITCARDPROJECTION.java<|end_file_name|><|fim▁begin|>/** * This class is generated by jOOQ */ package com.aviafix.db.generated.tables.pojos; import java.io.Serializable; import java.time.LocalDate; import javax.annotation.Generated; /** * This class is generated by jOOQ. */ @Generated( value = { "http://www.jooq.org", "jOOQ version:3.8.5" }, comments = "This class is generated by jOOQ" ) @SuppressWarnings({ "all", "unchecked", "rawtypes" }) public class PAYBYCREDITCARDPROJECTION implements Serializable { private static final long serialVersionUID = 1444342293; private Integer ETID; private Integer CREDITCARDNUM; private LocalDate EXPDATE; private Integer CODE; private String CARDHOLDERNAME; private Double AMOUNT; public PAYBYCREDITCARDPROJECTION() {} public PAYBYCREDITCARDPROJECTION(PAYBYCREDITCARDPROJECTION value) { this.ETID = value.ETID; this.CREDITCARDNUM = value.CREDITCARDNUM; this.EXPDATE = value.EXPDATE; this.CODE = value.CODE; this.CARDHOLDERNAME = value.CARDHOLDERNAME; this.AMOUNT = value.AMOUNT; } public PAYBYCREDITCARDPROJECTION( Integer ETID, Integer CREDITCARDNUM, LocalDate EXPDATE, Integer CODE, String CARDHOLDERNAME,<|fim▁hole|> Double AMOUNT ) { this.ETID = ETID; this.CREDITCARDNUM = CREDITCARDNUM; this.EXPDATE = EXPDATE; this.CODE = CODE; this.CARDHOLDERNAME = CARDHOLDERNAME; this.AMOUNT = AMOUNT; } public Integer ETID() { return this.ETID; } public void ETID(Integer ETID) { this.ETID = ETID; } public Integer CREDITCARDNUM() { return this.CREDITCARDNUM; } public void CREDITCARDNUM(Integer CREDITCARDNUM) { this.CREDITCARDNUM = CREDITCARDNUM; } public LocalDate EXPDATE() { return this.EXPDATE; } public void EXPDATE(LocalDate EXPDATE) { this.EXPDATE = EXPDATE; } public Integer CODE() { return this.CODE; } public void CODE(Integer CODE) { this.CODE = CODE; } public String CARDHOLDERNAME() { return this.CARDHOLDERNAME; } public void CARDHOLDERNAME(String CARDHOLDERNAME) { this.CARDHOLDERNAME = CARDHOLDERNAME; } public Double AMOUNT() { return this.AMOUNT; } public void AMOUNT(Double AMOUNT) { this.AMOUNT = AMOUNT; } @Override public String toString() { StringBuilder sb = new StringBuilder("PAYBYCREDITCARDPROJECTION ("); sb.append(ETID); sb.append(", ").append(CREDITCARDNUM); sb.append(", ").append(EXPDATE); sb.append(", ").append(CODE); sb.append(", ").append(CARDHOLDERNAME); sb.append(", ").append(AMOUNT); sb.append(")"); return sb.toString(); } }<|fim▁end|>
<|file_name|>fixhtml.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2 """ Quick helper to add HTML5 DOCTYPE and <title> to every testcase. """ import os import re import sys def fixhtml(folder): changed = 0 for dirpath, _, filenames in os.walk(folder): for file in filenames: name, ext = os.path.splitext(file) if ext != '.html': continue path = '%s/%s' % (dirpath, file) title = ' '.join(name.split('-')) shouldbe = '<!DOCTYPE html>\n<title>%s</title>\n' % title with open(path, 'r') as f: content = f.read() if content.startswith(shouldbe): continue changed += 1 content = re.sub('\s*<!DOCTYPE[^>]*>\s*<title>[^<]*</title>\s*', '', content) with open(path, 'w') as f:<|fim▁hole|> if __name__ == '__main__': folder = '.' if len(sys.argv) < 2 else sys.argv[1] changed = fixhtml(folder) print('Fixed %d files.' % changed)<|fim▁end|>
f.write(shouldbe + content) return changed
<|file_name|>doc.go<|end_file_name|><|fim▁begin|>// Package bingo is a pastebin-like where the server has zero knowledge of pasted data. // Data is encrypted/decrypted in the browser using AES.<|fim▁hole|><|fim▁end|>
package bingo
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" Tests for Dynamo3 """ import sys import unittest from decimal import Decimal from pickle import dumps, loads from urllib.parse import urlparse from botocore.exceptions import ClientError from mock import ANY, MagicMock, patch from dynamo3 import ( Binary, Dynamizer, DynamoDBConnection, DynamoDBError, DynamoKey, GlobalIndex, Limit, Table, ThroughputException, ) from dynamo3.constants import STRING from dynamo3.result import Capacity, ConsumedCapacity, Count, ResultSet, add_dicts class BaseSystemTest(unittest.TestCase): """Base class for system tests""" dynamo: DynamoDBConnection = None # type: ignore def setUp(self): super(BaseSystemTest, self).setUp() # Clear out any pre-existing tables for tablename in self.dynamo.list_tables(): self.dynamo.delete_table(tablename) def tearDown(self): super(BaseSystemTest, self).tearDown() for tablename in self.dynamo.list_tables(): self.dynamo.delete_table(tablename) self.dynamo.clear_hooks() class TestMisc(BaseSystemTest): """Tests that don't fit anywhere else""" def tearDown(self): super(TestMisc, self).tearDown() self.dynamo.default_return_capacity = False def test_connection_host(self): """Connection can access host of endpoint""" urlparse(self.dynamo.host) def test_connection_region(self): """Connection can access name of connected region""" self.assertTrue(isinstance(self.dynamo.region, str)) def test_connect_to_region(self): """Can connect to a dynamo region""" conn = DynamoDBConnection.connect("us-west-1") self.assertIsNotNone(conn.host) def test_connect_to_region_creds(self): """Can connect to a dynamo region with credentials""" conn = DynamoDBConnection.connect( "us-west-1", access_key="abc", secret_key="12345" ) self.assertIsNotNone(conn.host) def test_connect_to_host_without_session(self): """Can connect to a dynamo host without passing in a session""" conn = DynamoDBConnection.connect("us-west-1", host="localhost") self.assertIsNotNone(conn.host) @patch("dynamo3.connection.time") def test_retry_on_throughput_error(self, time): """Throughput exceptions trigger a retry of the request""" def call(*_, **__): """Dummy service call""" response = { "ResponseMetadata": { "HTTPStatusCode": 400, }, "Error": { "Code": "ProvisionedThroughputExceededException", "Message": "Does not matter", }, } raise ClientError(response, "list_tables") with patch.object(self.dynamo, "client") as client: client.list_tables.side_effect = call with self.assertRaises(ThroughputException): self.dynamo.call("list_tables") self.assertEqual(len(time.sleep.mock_calls), self.dynamo.request_retries - 1) self.assertTrue(time.sleep.called) def test_describe_missing(self): """Describing a missing table returns None""" ret = self.dynamo.describe_table("foobar") self.assertIsNone(ret) def test_magic_table_props(self): """Table can look up properties on response object""" hash_key = DynamoKey("id") self.dynamo.create_table("foobar", hash_key=hash_key) ret = self.dynamo.describe_table("foobar") assert ret is not None self.assertEqual(ret.item_count, ret["ItemCount"]) with self.assertRaises(KeyError): self.assertIsNotNone(ret["Missing"]) def test_magic_index_props(self): """Index can look up properties on response object""" index = GlobalIndex.all("idx-name", DynamoKey("id")) index.response = {"FooBar": 2} self.assertEqual(index["FooBar"], 2) with self.assertRaises(KeyError): self.assertIsNotNone(index["Missing"]) def test_describe_during_delete(self): """Describing a table during a delete operation should not crash""" response = { 
"ItemCount": 0, "ProvisionedThroughput": { "NumberOfDecreasesToday": 0, "ReadCapacityUnits": 5, "WriteCapacityUnits": 5, }, "TableName": "myTableName", "TableSizeBytes": 0, "TableStatus": "DELETING", } table = Table.from_response(response) self.assertEqual(table.status, "DELETING") def test_delete_missing(self): """Deleting a missing table returns False""" ret = self.dynamo.delete_table("foobar") self.assertTrue(not ret) def test_re_raise_passthrough(self): """DynamoDBError can re-raise itself if missing original exception""" err = DynamoDBError(400, Code="ErrCode", Message="Ouch", args={}) caught = False try: err.re_raise() except DynamoDBError as e: caught = True self.assertEqual(err, e) self.assertTrue(caught) def test_re_raise(self): """DynamoDBError can re-raise itself with stacktrace of original exc""" caught = False try: try: raise Exception("Hello") except Exception as e1: err = DynamoDBError( 400, Code="ErrCode", Message="Ouch", args={}, exc_info=sys.exc_info(), ) err.re_raise() except DynamoDBError as e: caught = True import traceback tb = traceback.format_tb(e.__traceback__) self.assertIn("Hello", tb[-1]) self.assertEqual(e.status_code, 400) self.assertTrue(caught) def test_default_return_capacity(self): """When default_return_capacity=True, always return capacity""" self.dynamo.default_return_capacity = True with patch.object(self.dynamo, "call") as call: call().get.return_value = None rs = self.dynamo.scan("foobar") list(rs) call.assert_called_with( "scan", TableName="foobar", ReturnConsumedCapacity="INDEXES", ConsistentRead=False, ) def test_list_tables_page(self): """Call to ListTables should page results""" hash_key = DynamoKey("id") for i in range(120): self.dynamo.create_table("table%d" % i, hash_key=hash_key) tables = list(self.dynamo.list_tables(110)) self.assertEqual(len(tables), 110) def test_limit_complete(self): """A limit with item_capacity = 0 is 'complete'""" limit = Limit(item_limit=0) self.assertTrue(limit.complete) def test_wait_create_table(self): """Create table shall wait for the table to come online.""" tablename = "foobar_wait" hash_key = DynamoKey("id") self.dynamo.create_table(tablename, hash_key=hash_key, wait=True) self.assertIsNotNone(self.dynamo.describe_table(tablename)) def test_wait_delete_table(self): """Delete table shall wait for the table to go offline.""" tablename = "foobar_wait" hash_key = DynamoKey("id") self.dynamo.create_table(tablename, hash_key=hash_key, wait=True) result = self.dynamo.delete_table(tablename, wait=True) self.assertTrue(result) class TestDataTypes(BaseSystemTest): """Tests for Dynamo data types""" def make_table(self): """Convenience method for making a table""" hash_key = DynamoKey("id") self.dynamo.create_table("foobar", hash_key=hash_key) def test_string(self): """Store and retrieve a string""" self.make_table() self.dynamo.put_item("foobar", {"id": "abc"}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["id"], "abc") self.assertTrue(isinstance(item["id"], str)) def test_int(self): """Store and retrieve an int""" self.make_table() self.dynamo.put_item("foobar", {"id": "a", "num": 1}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["num"], 1) def test_float(self): """Store and retrieve a float""" self.make_table() self.dynamo.put_item("foobar", {"id": "a", "num": 1.1}) item = list(self.dynamo.scan("foobar"))[0] self.assertAlmostEqual(float(item["num"]), 1.1) def test_decimal(self): """Store and retrieve a Decimal""" self.make_table() self.dynamo.put_item("foobar", {"id": "a", "num": 
Decimal("1.1")}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["num"], Decimal("1.1")) def test_binary(self): """Store and retrieve a binary""" self.make_table() self.dynamo.put_item("foobar", {"id": "a", "data": Binary("abc")}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["data"].value, b"abc") def test_binary_bytes(self): """Store and retrieve bytes as a binary""" self.make_table() data = {"a": 1, "b": 2} self.dynamo.put_item("foobar", {"id": "a", "data": Binary(dumps(data))}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(loads(item["data"].value), data) def test_string_set(self): """Store and retrieve a string set""" self.make_table() item = { "id": "a", "datas": set(["a", "b"]), } self.dynamo.put_item("foobar", item) ret = list(self.dynamo.scan("foobar"))[0] self.assertEqual(ret, item) def test_number_set(self): """Store and retrieve a number set""" self.make_table() item = { "id": "a", "datas": set([1, 2, 3]), } self.dynamo.put_item("foobar", item) ret = list(self.dynamo.scan("foobar"))[0] self.assertEqual(ret, item) def test_binary_set(self): """Store and retrieve a binary set""" self.make_table() item = { "id": "a", "datas": set([Binary("a"), Binary("b")]), } self.dynamo.put_item("foobar", item) ret = list(self.dynamo.scan("foobar"))[0] self.assertEqual(ret, item) def test_binary_equal(self): """Binary should eq other Binaries and also raw bytestrings""" self.assertEqual(Binary("a"), Binary("a")) self.assertEqual(Binary("a"), b"a") self.assertFalse(Binary("a") != Binary("a")) def test_binary_repr(self): """Binary repr should wrap the contained value""" self.assertEqual(repr(Binary("a")), "Binary(%r)" % b"a") def test_binary_converts_unicode(self): """Binary will convert unicode to bytes""" b = Binary("a") self.assertTrue(isinstance(b.value, bytes)) def test_binary_force_string(self): """Binary must wrap a string type""" with self.assertRaises(TypeError): Binary(2) # type: ignore <|fim▁hole|> item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["b"], True) self.assertTrue(isinstance(item["b"], bool)) def test_list(self): """Store and retrieve a list""" self.make_table() self.dynamo.put_item("foobar", {"id": "abc", "l": ["a", 1, False]}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["l"], ["a", 1, False]) def test_dict(self): """Store and retrieve a dict""" self.make_table() data = { "i": 1, "s": "abc", "n": None, "l": ["a", 1, True], "b": False, } self.dynamo.put_item("foobar", {"id": "abc", "d": data}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["d"], data) def test_nested_dict(self): """Store and retrieve a nested dict""" self.make_table() data = { "s": "abc", "d": { "i": 42, }, } self.dynamo.put_item("foobar", {"id": "abc", "d": data}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["d"], data) def test_nested_list(self): """Store and retrieve a nested list""" self.make_table() data = [ 1, [ True, None, "abc", ], ] self.dynamo.put_item("foobar", {"id": "abc", "l": data}) item = list(self.dynamo.scan("foobar"))[0] self.assertEqual(item["l"], data) def test_unrecognized_type(self): """Dynamizer throws error on unrecognized type""" value = { "ASDF": "abc", } with self.assertRaises(TypeError): self.dynamo.dynamizer.decode(value) class TestDynamizer(unittest.TestCase): """Tests for the Dynamizer""" def test_register_encoder(self): """Can register a custom encoder""" from datetime import datetime dynamizer = Dynamizer() dynamizer.register_encoder(datetime, lambda d, 
v: (STRING, v.isoformat())) now = datetime.utcnow() self.assertEqual(dynamizer.raw_encode(now), (STRING, now.isoformat())) def test_encoder_missing(self): """If no encoder is found, raise ValueError""" from datetime import datetime dynamizer = Dynamizer() with self.assertRaises(ValueError): dynamizer.encode(datetime.utcnow()) class TestResultModels(unittest.TestCase): """Tests for the model classes in results.py""" def test_add_dicts_base_case(self): """add_dict where one argument is None returns the other""" f = object() self.assertEqual(add_dicts(f, None), f) self.assertEqual(add_dicts(None, f), f) def test_add_dicts(self): """Merge two dicts of values together""" a = { "a": 1, "b": 2, } b = { "a": 3, "c": 4, } ret = add_dicts(a, b) self.assertEqual( ret, { "a": 4, "b": 2, "c": 4, }, ) def test_count_repr(self): """Count repr""" count = Count(0, 0) self.assertEqual(repr(count), "Count(0)") def test_count_addition(self): """Count addition""" count = Count(4, 2) self.assertEqual(count + 5, 9) def test_count_subtraction(self): """Count subtraction""" count = Count(4, 2) self.assertEqual(count - 2, 2) def test_count_multiplication(self): """Count multiplication""" count = Count(4, 2) self.assertEqual(2 * count, 8) def test_count_division(self): """Count division""" count = Count(4, 2) self.assertEqual(count / 2, 2) def test_count_add_none_capacity(self): """Count addition with one None consumed_capacity""" cap = Capacity(3, 0) count = Count(4, 2) count2 = Count(5, 3, cap) ret = count + count2 self.assertEqual(ret, 9) self.assertEqual(ret.scanned_count, 5) self.assertEqual(ret.consumed_capacity, cap) def test_count_add_capacity(self): """Count addition with consumed_capacity""" count = Count(4, 2, Capacity(3, 0)) count2 = Count(5, 3, Capacity(2, 0)) ret = count + count2 self.assertEqual(ret, 9) self.assertEqual(ret.scanned_count, 5) self.assertEqual(ret.consumed_capacity.read, 5) def test_capacity_math(self): """Capacity addition and equality""" cap = Capacity(2, 4) s = set([cap]) self.assertIn(Capacity(2, 4), s) self.assertNotEqual(Capacity(1, 4), cap) self.assertEqual(Capacity(1, 1) + Capacity(2, 2), Capacity(3, 3)) def test_capacity_format(self): """String formatting for Capacity""" c = Capacity(1, 3) self.assertEqual(str(c), "R:1.0 W:3.0") c = Capacity(0, 0) self.assertEqual(str(c), "0") def test_total_consumed_capacity(self): """ConsumedCapacity can parse results with only Total""" response = { "TableName": "foobar", "ReadCapacityUnits": 4, "WriteCapacityUnits": 5, } cap = ConsumedCapacity.from_response(response) self.assertEqual(cap.total, (4, 5)) self.assertIsNone(cap.table_capacity) def test_consumed_capacity_equality(self): """ConsumedCapacity addition and equality""" cap = ConsumedCapacity( "foobar", Capacity(0, 10), Capacity(0, 2), { "l-index": Capacity(0, 4), }, { "g-index": Capacity(0, 3), }, ) c2 = ConsumedCapacity( "foobar", Capacity(0, 10), Capacity(0, 2), { "l-index": Capacity(0, 4), "l-index2": Capacity(0, 7), }, ) self.assertNotEqual(cap, c2) c3 = ConsumedCapacity( "foobar", Capacity(0, 10), Capacity(0, 2), { "l-index": Capacity(0, 4), }, { "g-index": Capacity(0, 3), }, ) self.assertIn(cap, set([c3])) combined = cap + c2 self.assertEqual( cap + c2, ConsumedCapacity( "foobar", Capacity(0, 20), Capacity(0, 4), { "l-index": Capacity(0, 8), "l-index2": Capacity(0, 7), }, { "g-index": Capacity(0, 3), }, ), ) self.assertIn(str(Capacity(0, 3)), str(combined)) def test_add_different_tables(self): """Cannot add ConsumedCapacity of two different tables""" c1 = 
ConsumedCapacity("foobar", Capacity(1, 28)) c2 = ConsumedCapacity("boofar", Capacity(3, 0)) with self.assertRaises(TypeError): c1 += c2 def test_always_continue_query(self): """Regression test. If result has no items but does have LastEvaluatedKey, keep querying. """ conn = MagicMock() conn.dynamizer.decode_keys.side_effect = lambda x: x items = ["a", "b"] results = [ {"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}}, {"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}}, {"Items": items}, ] conn.call.side_effect = lambda *_, **__: results.pop(0) rs = ResultSet(conn, Limit()) results = list(rs) self.assertEqual(results, items) class TestHooks(BaseSystemTest): """Tests for connection callback hooks""" def tearDown(self): super(TestHooks, self).tearDown() for hooks in self.dynamo._hooks.values(): while hooks: hooks.pop() def test_precall(self): """precall hooks are called before an API call""" hook = MagicMock() self.dynamo.subscribe("precall", hook) def throw(**_): """Throw an exception to terminate the request""" raise Exception() with patch.object(self.dynamo, "client") as client: client.describe_table.side_effect = throw with self.assertRaises(Exception): self.dynamo.describe_table("foobar") hook.assert_called_with(self.dynamo, "describe_table", {"TableName": "foobar"}) def test_postcall(self): """postcall hooks are called after API call""" hash_key = DynamoKey("id") self.dynamo.create_table("foobar", hash_key=hash_key) calls = [] def hook(*args): """Log the call into a list""" calls.append(args) self.dynamo.subscribe("postcall", hook) self.dynamo.describe_table("foobar") self.assertEqual(len(calls), 1) args = calls[0] self.assertEqual(len(args), 4) conn, command, kwargs, response = args self.assertEqual(conn, self.dynamo) self.assertEqual(command, "describe_table") self.assertEqual(kwargs["TableName"], "foobar") self.assertEqual(response["Table"]["TableName"], "foobar") def test_capacity(self): """capacity hooks are called whenever response has ConsumedCapacity""" hash_key = DynamoKey("id") self.dynamo.create_table("foobar", hash_key=hash_key) hook = MagicMock() self.dynamo.subscribe("capacity", hook) with patch.object(self.dynamo, "client") as client: client.scan.return_value = { "Items": [], "ConsumedCapacity": { "TableName": "foobar", "ReadCapacityUnits": 4, }, } rs = self.dynamo.scan("foobar") list(rs) cap = ConsumedCapacity("foobar", Capacity(4, 0)) hook.assert_called_with(self.dynamo, "scan", ANY, ANY, cap) def test_subscribe(self): """Can subscribe and unsubscribe from hooks""" hook = lambda: None self.dynamo.subscribe("precall", hook) self.assertEqual(len(self.dynamo._hooks["precall"]), 1) self.dynamo.unsubscribe("precall", hook) self.assertEqual(len(self.dynamo._hooks["precall"]), 0)<|fim▁end|>
def test_bool(self): """Store and retrieve a boolean""" self.make_table() self.dynamo.put_item("foobar", {"id": "abc", "b": True})
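# Editor's sketch (not part of the test suite): the dynamo3 calls these tests
# exercise, gathered in one place. Every call appears verbatim in the tests
# above; the region/host endpoint is an illustrative assumption.
from dynamo3 import Binary, DynamoDBConnection, DynamoKey

def _usage_sketch():
    conn = DynamoDBConnection.connect('us-west-1', host='localhost')
    conn.create_table('foobar', hash_key=DynamoKey('id'), wait=True)
    conn.put_item('foobar', {'id': 'a', 'data': Binary(b'abc'), 'nums': {1, 2, 3}})
    items = list(conn.scan('foobar'))   # round-trips the types tested above
    conn.delete_table('foobar', wait=True)
    return items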
<|file_name|>autobind.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed<|fim▁hole|> fn f<T>(x: Vec<T>) -> T { return x.into_iter().next().unwrap(); } fn g(act: |Vec<int> | -> int) -> int { return act(vec!(1, 2, 3)); } pub fn main() { assert_eq!(g(f), 1); let f1: |Vec<String>| -> String = f; assert_eq!(f1(vec!["x".to_string(), "y".to_string(), "z".to_string()]), "x".to_string()); }<|fim▁end|>
// except according to those terms.
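// Editor's note (sketch): the test above predates Rust 1.0, so it uses the old
// `|Vec<int>| -> int` closure-type syntax and the `int` type. A rough modern
// equivalent of the same function-to-closure auto-binding check:
fn f<T>(x: Vec<T>) -> T { x.into_iter().next().unwrap() }
fn g(act: impl Fn(Vec<i32>) -> i32) -> i32 { act(vec![1, 2, 3]) }

fn main() {
    assert_eq!(g(f), 1);
    let f1: fn(Vec<String>) -> String = f;
    assert_eq!(f1(vec!["x".to_string(), "y".to_string(), "z".to_string()]), "x");
}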
<|file_name|>0006_eventcalander.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-09-25 12:24 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('venue', '0005_auto_20170916_0701'), ] operations = [ migrations.CreateModel( name='EventCalander', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='Default Event', max_length=200)), ('calander_id', models.TextField()), ('active', models.BooleanField(default=True)), ], ),<|fim▁hole|><|fim▁end|>
]
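# Editor's sketch (not part of the migration): the CreateModel above implies a
# model roughly like this; the real app's models.py may differ. The original
# spelling "EventCalander" is kept, since it is the actual model name.
from django.db import models

class EventCalander(models.Model):
    name = models.CharField(max_length=200, default='Default Event')
    calander_id = models.TextField()
    active = models.BooleanField(default=True)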
<|file_name|>qgswmsgetcapabilities.cpp<|end_file_name|><|fim▁begin|>/*************************************************************************** qgswmsgetmap.h ------------------------- begin : December 20 , 2016 copyright : (C) 2007 by Marco Hugentobler (original code) (C) 2014 by Alessandro Pasotti (original code) (C) 2016 by David Marteau email : marco dot hugentobler at karto dot baug dot ethz dot ch a dot pasotti at itopen dot it david dot marteau at 3liz dot com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgswmsutils.h" #include "qgswmsgetcapabilities.h" #include "qgsserverprojectutils.h" #include "qgslayoutmanager.h" #include "qgsprintlayout.h" #include "qgslayoutitemmap.h" #include "qgslayoutitemlabel.h" #include "qgslayoutitemhtml.h" #include "qgslayoutframe.h" #include "qgslayoutpagecollection.h" #include "qgslayertreenode.h" #include "qgslayertreegroup.h" #include "qgslayertreelayer.h" #include "qgslayertreemodel.h" #include "qgslayertree.h" #include "qgsmaplayerstylemanager.h" #include "qgsexception.h" #include "qgsexpressionnodeimpl.h" #include "qgsvectorlayer.h" namespace QgsWms { namespace { void appendLayerProjectSettings( QDomDocument &doc, QDomElement &layerElem, QgsMapLayer *currentLayer ); void appendDrawingOrder( QDomDocument &doc, QDomElement &parentElem, QgsServerInterface *serverIface, const QgsProject *project ); void combineExtentAndCrsOfGroupChildren( QDomDocument &doc, QDomElement &groupElem, const QgsProject *project, bool considerMapExtent = false ); bool crsSetFromLayerElement( const QDomElement &layerElement, QSet<QString> &crsSet ); QgsRectangle layerBoundingBoxInProjectCrs( const QDomDocument &doc, const QDomElement &layerElem, const QgsProject *project ); void appendLayerBoundingBox( QDomDocument &doc, QDomElement &layerElem, const QgsRectangle &layerExtent, const QgsCoordinateReferenceSystem &layerCRS, const QString &crsText, const QgsProject *project ); void appendLayerBoundingBoxes( QDomDocument &doc, QDomElement &layerElem, const QgsRectangle &lExtent, const QgsCoordinateReferenceSystem &layerCRS, const QStringList &crsList, const QStringList &constrainedCrsList, const QgsProject *project ); void appendCrsElementToLayer( QDomDocument &doc, QDomElement &layerElement, const QDomElement &precedingElement, const QString &crsText ); void appendCrsElementsToLayer( QDomDocument &doc, QDomElement &layerElement, const QStringList &crsList, const QStringList &constrainedCrsList ); void appendLayerStyles( QDomDocument &doc, QDomElement &layerElem, QgsMapLayer *currentLayer, const QgsProject *project, const QString &version, const QgsServerRequest &request ); void appendLayersFromTreeGroup( QDomDocument &doc, QDomElement &parentLayer, QgsServerInterface *serverIface, const QgsProject *project, const QString &version, const QgsServerRequest &request, const QgsLayerTreeGroup *layerTreeGroup, bool projectSettings ); void addKeywordListElement( const QgsProject *project, QDomDocument &doc, QDomElement &parent ); } void writeGetCapabilities( QgsServerInterface *serverIface, const QgsProject *project, const QString 
&version, const QgsServerRequest &request, QgsServerResponse &response, bool projectSettings ) { QgsAccessControl *accessControl = serverIface->accessControls(); QDomDocument doc; const QDomDocument *capabilitiesDocument = nullptr; // Data for WMS capabilities server memory cache QString configFilePath = serverIface->configFilePath(); QgsCapabilitiesCache *capabilitiesCache = serverIface->capabilitiesCache(); QStringList cacheKeyList; cacheKeyList << ( projectSettings ? QStringLiteral( "projectSettings" ) : version ); cacheKeyList << request.url().host(); bool cache = true; if ( accessControl ) cache = accessControl->fillCacheKey( cacheKeyList ); QString cacheKey = cacheKeyList.join( '-' ); QgsServerCacheManager *cacheManager = serverIface->cacheManager(); if ( cacheManager && cacheManager->getCachedDocument( &doc, project, request, accessControl ) ) { capabilitiesDocument = &doc; } if ( !capabilitiesDocument && cache ) //capabilities xml not in cache plugins { capabilitiesDocument = capabilitiesCache->searchCapabilitiesDocument( configFilePath, cacheKey ); } if ( !capabilitiesDocument ) //capabilities xml not in cache. Create a new one { QgsMessageLog::logMessage( QStringLiteral( "WMS capabilities document not found in cache" ) ); doc = getCapabilities( serverIface, project, version, request, projectSettings ); if ( cacheManager && cacheManager->setCachedDocument( &doc, project, request, accessControl ) ) { capabilitiesDocument = &doc; } else if ( cache ) { capabilitiesCache->insertCapabilitiesDocument( configFilePath, cacheKey, &doc ); capabilitiesDocument = capabilitiesCache->searchCapabilitiesDocument( configFilePath, cacheKey ); } if ( !capabilitiesDocument ) { capabilitiesDocument = &doc; } else { QgsMessageLog::logMessage( QStringLiteral( "Set WMS capabilities document in cache" ) ); } } else { QgsMessageLog::logMessage( QStringLiteral( "Found WMS capabilities document in cache" ) ); } response.setHeader( QStringLiteral( "Content-Type" ), QStringLiteral( "text/xml; charset=utf-8" ) ); response.write( capabilitiesDocument->toByteArray() ); } QDomDocument getCapabilities( QgsServerInterface *serverIface, const QgsProject *project, const QString &version, const QgsServerRequest &request, bool projectSettings ) { QDomDocument doc; QDomElement wmsCapabilitiesElement; QgsServerRequest::Parameters parameters = request.parameters(); // Get service URL QUrl href = serviceUrl( request, project ); //href needs to be a prefix QString hrefString = href.toString(); hrefString.append( href.hasQuery() ? "&" : "?" 
); // XML declaration QDomProcessingInstruction xmlDeclaration = doc.createProcessingInstruction( QStringLiteral( "xml" ), QStringLiteral( "version=\"1.0\" encoding=\"utf-8\"" ) ); // Append format helper std::function < void ( QDomElement &, const QString & ) > appendFormat = [&doc]( QDomElement & elem, const QString & format ) { QDomElement formatElem = doc.createElement( QStringLiteral( "Format" )/*wms:Format*/ ); formatElem.appendChild( doc.createTextNode( format ) ); elem.appendChild( formatElem ); }; if ( version == QLatin1String( "1.1.1" ) ) { doc = QDomDocument( QStringLiteral( "WMT_MS_Capabilities SYSTEM 'http://schemas.opengis.net/wms/1.1.1/WMS_MS_Capabilities.dtd'" ) ); //WMS 1.1.1 needs DOCTYPE "SYSTEM http://schemas.opengis.net/wms/1.1.1/WMS_MS_Capabilities.dtd" doc.appendChild( xmlDeclaration ); wmsCapabilitiesElement = doc.createElement( QStringLiteral( "WMT_MS_Capabilities" )/*wms:WMS_Capabilities*/ ); } else // 1.3.0 as default { doc.appendChild( xmlDeclaration ); wmsCapabilitiesElement = doc.createElement( QStringLiteral( "WMS_Capabilities" )/*wms:WMS_Capabilities*/ ); wmsCapabilitiesElement.setAttribute( QStringLiteral( "xmlns" ), QStringLiteral( "http://www.opengis.net/wms" ) ); wmsCapabilitiesElement.setAttribute( QStringLiteral( "xmlns:sld" ), QStringLiteral( "http://www.opengis.net/sld" ) ); wmsCapabilitiesElement.setAttribute( QStringLiteral( "xmlns:qgs" ), QStringLiteral( "http://www.qgis.org/wms" ) ); wmsCapabilitiesElement.setAttribute( QStringLiteral( "xmlns:xsi" ), QStringLiteral( "http://www.w3.org/2001/XMLSchema-instance" ) ); QString schemaLocation = QStringLiteral( "http://www.opengis.net/wms" ); schemaLocation += QLatin1String( " http://schemas.opengis.net/wms/1.3.0/capabilities_1_3_0.xsd" ); schemaLocation += QLatin1String( " http://www.opengis.net/sld" ); schemaLocation += QLatin1String( " http://schemas.opengis.net/sld/1.1.0/sld_capabilities.xsd" ); schemaLocation += QLatin1String( " http://www.qgis.org/wms" ); if ( QgsServerProjectUtils::wmsInspireActivate( *project ) ) { wmsCapabilitiesElement.setAttribute( QStringLiteral( "xmlns:inspire_common" ), QStringLiteral( "http://inspire.ec.europa.eu/schemas/common/1.0" ) ); wmsCapabilitiesElement.setAttribute( QStringLiteral( "xmlns:inspire_vs" ), QStringLiteral( "http://inspire.ec.europa.eu/schemas/inspire_vs/1.0" ) ); schemaLocation += QLatin1String( " http://inspire.ec.europa.eu/schemas/inspire_vs/1.0" ); schemaLocation += QLatin1String( " http://inspire.ec.europa.eu/schemas/inspire_vs/1.0/inspire_vs.xsd" ); } schemaLocation += " " + hrefString + "SERVICE=WMS&REQUEST=GetSchemaExtension"; wmsCapabilitiesElement.setAttribute( QStringLiteral( "xsi:schemaLocation" ), schemaLocation ); } wmsCapabilitiesElement.setAttribute( QStringLiteral( "version" ), version ); doc.appendChild( wmsCapabilitiesElement ); //INSERT Service wmsCapabilitiesElement.appendChild( getServiceElement( doc, project, version, request ) ); //wms:Capability element QDomElement capabilityElement = getCapabilityElement( doc, project, version, request, projectSettings ); wmsCapabilitiesElement.appendChild( capabilityElement ); if ( projectSettings ) { //Insert <ComposerTemplate> elements derived from wms:_ExtendedCapabilities capabilityElement.appendChild( getComposerTemplatesElement( doc, project ) ); //WFS layers capabilityElement.appendChild( getWFSLayersElement( doc, project ) ); } capabilityElement.appendChild( getLayersAndStylesCapabilitiesElement( doc, serverIface, project, version, request, projectSettings ) ); if ( projectSettings ) 
{ appendDrawingOrder( doc, capabilityElement, serverIface, project ); } return doc; } QDomElement getServiceElement( QDomDocument &doc, const QgsProject *project, const QString &version, const QgsServerRequest &request ) { //Service element QDomElement serviceElem = doc.createElement( QStringLiteral( "Service" ) ); //Service name QDomElement nameElem = doc.createElement( QStringLiteral( "Name" ) ); QDomText nameText = doc.createTextNode( QStringLiteral( "WMS" ) ); nameElem.appendChild( nameText ); serviceElem.appendChild( nameElem ); QString title = QgsServerProjectUtils::owsServiceTitle( *project ); if ( !title.isEmpty() ) { QDomElement titleElem = doc.createElement( QStringLiteral( "Title" ) ); QDomText titleText = doc.createTextNode( title ); titleElem.appendChild( titleText ); serviceElem.appendChild( titleElem ); } QString abstract = QgsServerProjectUtils::owsServiceAbstract( *project ); if ( !abstract.isEmpty() ) { QDomElement abstractElem = doc.createElement( QStringLiteral( "Abstract" ) ); QDomText abstractText = doc.createCDATASection( abstract ); abstractElem.appendChild( abstractText ); serviceElem.appendChild( abstractElem ); } addKeywordListElement( project, doc, serviceElem ); QString onlineResource = QgsServerProjectUtils::owsServiceOnlineResource( *project ); if ( onlineResource.isEmpty() ) { onlineResource = serviceUrl( request, project ).toString(); } QDomElement onlineResourceElem = doc.createElement( QStringLiteral( "OnlineResource" ) ); onlineResourceElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( "http://www.w3.org/1999/xlink" ) ); onlineResourceElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); onlineResourceElem.setAttribute( QStringLiteral( "xlink:href" ), onlineResource ); serviceElem.appendChild( onlineResourceElem ); QString contactPerson = QgsServerProjectUtils::owsServiceContactPerson( *project ); QString contactOrganization = QgsServerProjectUtils::owsServiceContactOrganization( *project ); QString contactPosition = QgsServerProjectUtils::owsServiceContactPosition( *project ); QString contactMail = QgsServerProjectUtils::owsServiceContactMail( *project ); QString contactPhone = QgsServerProjectUtils::owsServiceContactPhone( *project ); if ( !contactPerson.isEmpty() || !contactOrganization.isEmpty() || !contactPosition.isEmpty() || !contactMail.isEmpty() || !contactPhone.isEmpty() ) { //Contact information QDomElement contactInfoElem = doc.createElement( QStringLiteral( "ContactInformation" ) ); //Contact person primary if ( !contactPerson.isEmpty() || !contactOrganization.isEmpty() || !contactPosition.isEmpty() ) { QDomElement contactPersonPrimaryElem = doc.createElement( QStringLiteral( "ContactPersonPrimary" ) ); if ( !contactPerson.isEmpty() ) { QDomElement contactPersonElem = doc.createElement( QStringLiteral( "ContactPerson" ) ); QDomText contactPersonText = doc.createTextNode( contactPerson ); contactPersonElem.appendChild( contactPersonText ); contactPersonPrimaryElem.appendChild( contactPersonElem ); } if ( !contactOrganization.isEmpty() ) { QDomElement contactOrganizationElem = doc.createElement( QStringLiteral( "ContactOrganization" ) ); QDomText contactOrganizationText = doc.createTextNode( contactOrganization ); contactOrganizationElem.appendChild( contactOrganizationText ); contactPersonPrimaryElem.appendChild( contactOrganizationElem ); } if ( !contactPosition.isEmpty() ) { QDomElement contactPositionElem = doc.createElement( QStringLiteral( "ContactPosition" ) ); QDomText contactPositionText = 
doc.createTextNode( contactPosition ); contactPositionElem.appendChild( contactPositionText ); contactPersonPrimaryElem.appendChild( contactPositionElem ); } contactInfoElem.appendChild( contactPersonPrimaryElem ); } if ( !contactPhone.isEmpty() ) { QDomElement phoneElem = doc.createElement( QStringLiteral( "ContactVoiceTelephone" ) ); QDomText phoneText = doc.createTextNode( contactPhone ); phoneElem.appendChild( phoneText ); contactInfoElem.appendChild( phoneElem ); } if ( !contactMail.isEmpty() ) { QDomElement mailElem = doc.createElement( QStringLiteral( "ContactElectronicMailAddress" ) ); QDomText mailText = doc.createTextNode( contactMail ); mailElem.appendChild( mailText ); contactInfoElem.appendChild( mailElem ); } serviceElem.appendChild( contactInfoElem ); } QDomElement feesElem = doc.createElement( QStringLiteral( "Fees" ) ); QDomText feesText = doc.createTextNode( QStringLiteral( "None" ) ); // default value if fees are unknown QString fees = QgsServerProjectUtils::owsServiceFees( *project ); if ( !fees.isEmpty() ) { feesText = doc.createTextNode( fees ); } feesElem.appendChild( feesText ); serviceElem.appendChild( feesElem ); QDomElement accessConstraintsElem = doc.createElement( QStringLiteral( "AccessConstraints" ) ); QDomText accessConstraintsText = doc.createTextNode( QStringLiteral( "None" ) ); // default value if access constraints are unknown QString accessConstraints = QgsServerProjectUtils::owsServiceAccessConstraints( *project ); if ( !accessConstraints.isEmpty() ) { accessConstraintsText = doc.createTextNode( accessConstraints ); } accessConstraintsElem.appendChild( accessConstraintsText ); serviceElem.appendChild( accessConstraintsElem ); if ( version == QLatin1String( "1.3.0" ) ) { int maxWidth = QgsServerProjectUtils::wmsMaxWidth( *project ); if ( maxWidth > 0 ) { QDomElement maxWidthElem = doc.createElement( QStringLiteral( "MaxWidth" ) ); QDomText maxWidthText = doc.createTextNode( QString::number( maxWidth ) ); maxWidthElem.appendChild( maxWidthText ); serviceElem.appendChild( maxWidthElem ); } int maxHeight = QgsServerProjectUtils::wmsMaxHeight( *project ); if ( maxHeight > 0 ) { QDomElement maxHeightElem = doc.createElement( QStringLiteral( "MaxHeight" ) ); QDomText maxHeightText = doc.createTextNode( QString::number( maxHeight ) ); maxHeightElem.appendChild( maxHeightText ); serviceElem.appendChild( maxHeightElem ); } } return serviceElem; } QDomElement getCapabilityElement( QDomDocument &doc, const QgsProject *project, const QString &version, const QgsServerRequest &request, bool projectSettings ) { QgsServerRequest::Parameters parameters = request.parameters(); // Get service URL QUrl href = serviceUrl( request, project ); //href needs to be a prefix QString hrefString = href.toString(); hrefString.append( href.hasQuery() ? "&" : "?" 
); QDomElement capabilityElem = doc.createElement( QStringLiteral( "Capability" )/*wms:Capability*/ ); //wms:Request element QDomElement requestElem = doc.createElement( QStringLiteral( "Request" )/*wms:Request*/ ); capabilityElem.appendChild( requestElem ); QDomElement dcpTypeElem = doc.createElement( QStringLiteral( "DCPType" )/*wms:DCPType*/ ); QDomElement httpElem = doc.createElement( QStringLiteral( "HTTP" )/*wms:HTTP*/ ); dcpTypeElem.appendChild( httpElem ); // Append format helper std::function < void ( QDomElement &, const QString & ) > appendFormat = [&doc]( QDomElement & elem, const QString & format ) { QDomElement formatElem = doc.createElement( QStringLiteral( "Format" )/*wms:Format*/ ); formatElem.appendChild( doc.createTextNode( format ) ); elem.appendChild( formatElem ); }; QDomElement elem; //wms:GetCapabilities elem = doc.createElement( QStringLiteral( "GetCapabilities" )/*wms:GetCapabilities*/ ); appendFormat( elem, ( version == QLatin1String( "1.1.1" ) ? "application/vnd.ogc.wms_xml" : "text/xml" ) ); elem.appendChild( dcpTypeElem ); requestElem.appendChild( elem ); // SOAP platform //only give this information if it is not a WMS request, to be in sync with the WMS capabilities schema // XXX Not even sure that can ever be true if ( parameters.value( QStringLiteral( "SERVICE" ) ).compare( QLatin1String( "WMS" ), Qt::CaseInsensitive ) != 0 ) { QDomElement soapElem = doc.createElement( QStringLiteral( "SOAP" )/*wms:SOAP*/ ); httpElem.appendChild( soapElem ); QDomElement soapResourceElem = doc.createElement( QStringLiteral( "OnlineResource" )/*wms:OnlineResource*/ ); soapResourceElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( "http://www.w3.org/1999/xlink" ) ); soapResourceElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); soapResourceElem.setAttribute( QStringLiteral( "xlink:href" ), hrefString ); soapElem.appendChild( soapResourceElem ); } //only Get supported for the moment QDomElement getElem = doc.createElement( QStringLiteral( "Get" )/*wms:Get*/ ); httpElem.appendChild( getElem ); QDomElement olResourceElem = doc.createElement( QStringLiteral( "OnlineResource" )/*wms:OnlineResource*/ ); olResourceElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( "http://www.w3.org/1999/xlink" ) ); olResourceElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); olResourceElem.setAttribute( QStringLiteral( "xlink:href" ), hrefString ); getElem.appendChild( olResourceElem ); //wms:GetMap elem = doc.createElement( QStringLiteral( "GetMap" )/*wms:GetMap*/ ); appendFormat( elem, QStringLiteral( "image/jpeg" ) ); appendFormat( elem, QStringLiteral( "image/png" ) ); appendFormat( elem, QStringLiteral( "image/png; mode=16bit" ) ); appendFormat( elem, QStringLiteral( "image/png; mode=8bit" ) ); appendFormat( elem, QStringLiteral( "image/png; mode=1bit" ) ); appendFormat( elem, QStringLiteral( "application/dxf" ) ); elem.appendChild( dcpTypeElem.cloneNode().toElement() ); //this is the same as for 'GetCapabilities' requestElem.appendChild( elem ); //wms:GetFeatureInfo elem = doc.createElement( QStringLiteral( "GetFeatureInfo" ) ); appendFormat( elem, QStringLiteral( "text/plain" ) ); appendFormat( elem, QStringLiteral( "text/html" ) ); appendFormat( elem, QStringLiteral( "text/xml" ) ); appendFormat( elem, QStringLiteral( "application/vnd.ogc.gml" ) ); appendFormat( elem, QStringLiteral( "application/vnd.ogc.gml/3.1.1" ) ); elem.appendChild( dcpTypeElem.cloneNode().toElement() ); //this is the same as
for 'GetCapabilities' requestElem.appendChild( elem ); //wms:GetLegendGraphic elem = doc.createElement( ( version == QLatin1String( "1.1.1" ) ? "GetLegendGraphic" : "sld:GetLegendGraphic" )/*wms:GetLegendGraphic*/ ); appendFormat( elem, QStringLiteral( "image/jpeg" ) ); appendFormat( elem, QStringLiteral( "image/png" ) ); elem.appendChild( dcpTypeElem.cloneNode().toElement() ); //this is the same as for 'GetCapabilities' requestElem.appendChild( elem ); //wms:DescribeLayer elem = doc.createElement( ( version == QLatin1String( "1.1.1" ) ? "DescribeLayer" : "sld:DescribeLayer" )/*wms:DescribeLayer*/ ); appendFormat( elem, QStringLiteral( "text/xml" ) ); elem.appendChild( dcpTypeElem.cloneNode().toElement() ); //this is the same as for 'GetCapabilities' requestElem.appendChild( elem ); //wms:GetStyles elem = doc.createElement( ( version == QLatin1String( "1.1.1" ) ? "GetStyles" : "qgs:GetStyles" )/*wms:GetStyles*/ ); appendFormat( elem, QStringLiteral( "text/xml" ) ); elem.appendChild( dcpTypeElem.cloneNode().toElement() ); //this is the same as for 'GetCapabilities' requestElem.appendChild( elem ); if ( projectSettings ) //remove composer templates from GetCapabilities in the long term { //wms:GetPrint elem = doc.createElement( QStringLiteral( "GetPrint" ) /*wms:GetPrint*/ ); appendFormat( elem, QStringLiteral( "svg" ) ); appendFormat( elem, QStringLiteral( "png" ) ); appendFormat( elem, QStringLiteral( "pdf" ) ); elem.appendChild( dcpTypeElem.cloneNode().toElement() ); //this is the same as for 'GetCapabilities' requestElem.appendChild( elem ); } //Exception element is mandatory elem = doc.createElement( QStringLiteral( "Exception" ) ); appendFormat( elem, ( version == QLatin1String( "1.1.1" ) ? "application/vnd.ogc.se_xml" : "XML" ) ); capabilityElem.appendChild( elem ); //UserDefinedSymbolization element if ( version == QLatin1String( "1.3.0" ) ) { elem = doc.createElement( QStringLiteral( "sld:UserDefinedSymbolization" ) ); elem.setAttribute( QStringLiteral( "SupportSLD" ), QStringLiteral( "1" ) ); elem.setAttribute( QStringLiteral( "UserLayer" ), QStringLiteral( "0" ) ); elem.setAttribute( QStringLiteral( "UserStyle" ), QStringLiteral( "1" ) ); elem.setAttribute( QStringLiteral( "RemoteWFS" ), QStringLiteral( "0" ) ); elem.setAttribute( QStringLiteral( "InlineFeature" ), QStringLiteral( "0" ) ); elem.setAttribute( QStringLiteral( "RemoteWCS" ), QStringLiteral( "0" ) ); capabilityElem.appendChild( elem ); if ( QgsServerProjectUtils::wmsInspireActivate( *project ) ) { capabilityElem.appendChild( getInspireCapabilitiesElement( doc, project ) ); } } return capabilityElem; } QDomElement getInspireCapabilitiesElement( QDomDocument &doc, const QgsProject *project ) { QDomElement inspireCapabilitiesElem; if ( !QgsServerProjectUtils::wmsInspireActivate( *project ) ) return inspireCapabilitiesElem; inspireCapabilitiesElem = doc.createElement( QStringLiteral( "inspire_vs:ExtendedCapabilities" ) ); QString inspireMetadataUrl = QgsServerProjectUtils::wmsInspireMetadataUrl( *project ); // inspire scenario 1 if ( !inspireMetadataUrl.isEmpty() ) { QDomElement inspireCommonMetadataUrlElem = doc.createElement( QStringLiteral( "inspire_common:MetadataUrl" ) ); inspireCommonMetadataUrlElem.setAttribute( QStringLiteral( "xsi:type" ), QStringLiteral( "inspire_common:resourceLocatorType" ) ); QDomElement inspireCommonMetadataUrlUrlElem = doc.createElement( QStringLiteral( "inspire_common:URL" ) ); inspireCommonMetadataUrlUrlElem.appendChild( doc.createTextNode( inspireMetadataUrl ) );
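// INSPIRE scenario 1 references an external metadata document by URL (with an optional media type); the else branch below covers scenario 2, where the metadata is embedded inline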
inspireCommonMetadataUrlElem.appendChild( inspireCommonMetadataUrlUrlElem ); QString inspireMetadataUrlType = QgsServerProjectUtils::wmsInspireMetadataUrlType( *project ); if ( !inspireMetadataUrlType.isNull() ) { QDomElement inspireCommonMetadataUrlMediaTypeElem = doc.createElement( QStringLiteral( "inspire_common:MediaType" ) ); inspireCommonMetadataUrlMediaTypeElem.appendChild( doc.createTextNode( inspireMetadataUrlType ) ); inspireCommonMetadataUrlElem.appendChild( inspireCommonMetadataUrlMediaTypeElem ); } inspireCapabilitiesElem.appendChild( inspireCommonMetadataUrlElem ); } else { QDomElement inspireCommonResourceTypeElem = doc.createElement( QStringLiteral( "inspire_common:ResourceType" ) ); inspireCommonResourceTypeElem.appendChild( doc.createTextNode( QStringLiteral( "service" ) ) ); inspireCapabilitiesElem.appendChild( inspireCommonResourceTypeElem ); QDomElement inspireCommonSpatialDataServiceTypeElem = doc.createElement( QStringLiteral( "inspire_common:SpatialDataServiceType" ) ); inspireCommonSpatialDataServiceTypeElem.appendChild( doc.createTextNode( QStringLiteral( "view" ) ) ); inspireCapabilitiesElem.appendChild( inspireCommonSpatialDataServiceTypeElem ); QString inspireTemporalReference = QgsServerProjectUtils::wmsInspireTemporalReference( *project ); if ( !inspireTemporalReference.isNull() ) { QDomElement inspireCommonTemporalReferenceElem = doc.createElement( QStringLiteral( "inspire_common:TemporalReference" ) ); QDomElement inspireCommonDateOfLastRevisionElem = doc.createElement( QStringLiteral( "inspire_common:DateOfLastRevision" ) ); inspireCommonDateOfLastRevisionElem.appendChild( doc.createTextNode( inspireTemporalReference ) ); inspireCommonTemporalReferenceElem.appendChild( inspireCommonDateOfLastRevisionElem ); inspireCapabilitiesElem.appendChild( inspireCommonTemporalReferenceElem ); } QDomElement inspireCommonMetadataPointOfContactElem = doc.createElement( QStringLiteral( "inspire_common:MetadataPointOfContact" ) ); QString contactOrganization = QgsServerProjectUtils::owsServiceContactOrganization( *project ); QDomElement inspireCommonOrganisationNameElem = doc.createElement( QStringLiteral( "inspire_common:OrganisationName" ) ); if ( !contactOrganization.isNull() ) { inspireCommonOrganisationNameElem.appendChild( doc.createTextNode( contactOrganization ) ); } inspireCommonMetadataPointOfContactElem.appendChild( inspireCommonOrganisationNameElem ); QString contactMail = QgsServerProjectUtils::owsServiceContactMail( *project ); QDomElement inspireCommonEmailAddressElem = doc.createElement( QStringLiteral( "inspire_common:EmailAddress" ) ); if ( !contactMail.isNull() ) { inspireCommonEmailAddressElem.appendChild( doc.createTextNode( contactMail ) ); } inspireCommonMetadataPointOfContactElem.appendChild( inspireCommonEmailAddressElem ); inspireCapabilitiesElem.appendChild( inspireCommonMetadataPointOfContactElem ); QString inspireMetadataDate = QgsServerProjectUtils::wmsInspireMetadataDate( *project ); if ( !inspireMetadataDate.isNull() ) { QDomElement inspireCommonMetadataDateElem = doc.createElement( QStringLiteral( "inspire_common:MetadataDate" ) ); inspireCommonMetadataDateElem.appendChild( doc.createTextNode( inspireMetadataDate ) ); inspireCapabilitiesElem.appendChild( inspireCommonMetadataDateElem ); } } // Supported languages QDomElement inspireCommonSupportedLanguagesElem = doc.createElement( QStringLiteral( "inspire_common:SupportedLanguages" ) ); inspireCommonSupportedLanguagesElem.setAttribute( QStringLiteral( "xsi:type" ), QStringLiteral( 
"inspire_common:supportedLanguagesType" ) ); QDomElement inspireCommonLanguageElem = doc.createElement( QStringLiteral( "inspire_common:Language" ) ); inspireCommonLanguageElem.appendChild( doc.createTextNode( QgsServerProjectUtils::wmsInspireLanguage( *project ) ) ); QDomElement inspireCommonDefaultLanguageElem = doc.createElement( QStringLiteral( "inspire_common:DefaultLanguage" ) ); inspireCommonDefaultLanguageElem.appendChild( inspireCommonLanguageElem ); inspireCommonSupportedLanguagesElem.appendChild( inspireCommonDefaultLanguageElem ); #if 0 /* Supported language has to be different from default one */ QDomElement inspireCommonSupportedLanguageElem = doc.createElement( "inspire_common:SupportedLanguage" ); inspireCommonSupportedLanguageElem.appendChild( inspireCommonLanguageElem.cloneNode().toElement() ); inspireCommonSupportedLanguagesElem.appendChild( inspireCommonSupportedLanguageElem ); #endif inspireCapabilitiesElem.appendChild( inspireCommonSupportedLanguagesElem ); QDomElement inspireCommonResponseLanguageElem = doc.createElement( QStringLiteral( "inspire_common:ResponseLanguage" ) ); inspireCommonResponseLanguageElem.appendChild( inspireCommonLanguageElem.cloneNode().toElement() ); inspireCapabilitiesElem.appendChild( inspireCommonResponseLanguageElem ); return inspireCapabilitiesElem; } QDomElement getComposerTemplatesElement( QDomDocument &doc, const QgsProject *project ) { QList< QgsPrintLayout * > projectComposers = project->layoutManager()->printLayouts(); if ( projectComposers.size() == 0 ) return QDomElement(); QStringList restrictedComposers = QgsServerProjectUtils::wmsRestrictedComposers( *project ); QDomElement composerTemplatesElem = doc.createElement( QStringLiteral( "ComposerTemplates" ) ); QList<QgsPrintLayout *>::const_iterator cIt = projectComposers.constBegin(); for ( ; cIt != projectComposers.constEnd(); ++cIt ) { QgsPrintLayout *layout = *cIt; if ( restrictedComposers.contains( layout->name() ) ) continue; // Check that we have at least one page if ( layout->pageCollection()->pageCount() < 1 ) continue; // Get width and height from first page of the collection QgsLayoutSize layoutSize( layout->pageCollection()->page( 0 )->sizeWithUnits() ); QgsLayoutMeasurement width( layout->convertFromLayoutUnits( layoutSize.width(), QgsUnitTypes::LayoutUnit::LayoutMillimeters ) ); QgsLayoutMeasurement height( layout->convertFromLayoutUnits( layoutSize.height(), QgsUnitTypes::LayoutUnit::LayoutMillimeters ) ); QDomElement composerTemplateElem = doc.createElement( QStringLiteral( "ComposerTemplate" ) ); composerTemplateElem.setAttribute( QStringLiteral( "name" ), layout->name() ); //get paper width and height in mm from composition composerTemplateElem.setAttribute( QStringLiteral( "width" ), width.length() ); composerTemplateElem.setAttribute( QStringLiteral( "height" ), height.length() ); //add available composer maps and their size in mm QList<QgsLayoutItemMap *> layoutMapList; layout->layoutItems<QgsLayoutItemMap>( layoutMapList ); QList<QgsLayoutItemMap *>::const_iterator cmIt = layoutMapList.constBegin(); // Add map id int mapId = 0; for ( ; cmIt != layoutMapList.constEnd(); ++cmIt ) { const QgsLayoutItemMap *composerMap = *cmIt; QDomElement composerMapElem = doc.createElement( QStringLiteral( "ComposerMap" ) ); composerMapElem.setAttribute( QStringLiteral( "name" ), QStringLiteral( "map%1" ).arg( mapId ) ); mapId++; composerMapElem.setAttribute( QStringLiteral( "width" ), composerMap->rect().width() ); composerMapElem.setAttribute( QStringLiteral( "height" ), 
composerMap->rect().height() ); composerTemplateElem.appendChild( composerMapElem ); } //add available composer labels QList<QgsLayoutItemLabel *> composerLabelList; layout->layoutItems<QgsLayoutItemLabel>( composerLabelList ); QList<QgsLayoutItemLabel *>::const_iterator clIt = composerLabelList.constBegin(); for ( ; clIt != composerLabelList.constEnd(); ++clIt ) { QgsLayoutItemLabel *composerLabel = *clIt; QString id = composerLabel->id(); if ( id.isEmpty() ) continue; QDomElement composerLabelElem = doc.createElement( QStringLiteral( "ComposerLabel" ) ); composerLabelElem.setAttribute( QStringLiteral( "name" ), id ); composerTemplateElem.appendChild( composerLabelElem ); } //add available composer HTML QList<QgsLayoutItemHtml *> composerHtmlList; layout->layoutObjects<QgsLayoutItemHtml>( composerHtmlList ); QList<QgsLayoutItemHtml *>::const_iterator chIt = composerHtmlList.constBegin(); for ( ; chIt != composerHtmlList.constEnd(); ++chIt ) { QgsLayoutItemHtml *composerHtml = *chIt; if ( composerHtml->frameCount() == 0 ) continue; QString id = composerHtml->frame( 0 )->id(); if ( id.isEmpty() ) continue; QDomElement composerHtmlElem = doc.createElement( QStringLiteral( "ComposerHtml" ) ); composerHtmlElem.setAttribute( QStringLiteral( "name" ), id ); composerTemplateElem.appendChild( composerHtmlElem ); } composerTemplatesElem.appendChild( composerTemplateElem ); } if ( composerTemplatesElem.childNodes().size() == 0 ) return QDomElement(); return composerTemplatesElem; } QDomElement getWFSLayersElement( QDomDocument &doc, const QgsProject *project ) { QStringList wfsLayerIds = QgsServerProjectUtils::wfsLayerIds( *project ); if ( wfsLayerIds.size() == 0 ) return QDomElement(); QDomElement wfsLayersElem = doc.createElement( QStringLiteral( "WFSLayers" ) ); for ( int i = 0; i < wfsLayerIds.size(); ++i ) { QgsMapLayer *layer = project->mapLayer( wfsLayerIds.at( i ) ); if ( !layer || layer->type() != QgsMapLayer::LayerType::VectorLayer ) //guard against stale layer ids { continue; } QDomElement wfsLayerElem = doc.createElement( QStringLiteral( "WFSLayer" ) ); if ( QgsServerProjectUtils::wmsUseLayerIds( *project ) ) { wfsLayerElem.setAttribute( QStringLiteral( "name" ), layer->id() ); } else { wfsLayerElem.setAttribute( QStringLiteral( "name" ), layer->name() ); } wfsLayersElem.appendChild( wfsLayerElem ); } return wfsLayersElem; } QDomElement getLayersAndStylesCapabilitiesElement( QDomDocument &doc, QgsServerInterface *serverIface, const QgsProject *project, const QString &version, const QgsServerRequest &request, bool projectSettings ) { QStringList nonIdentifiableLayers = project->nonIdentifiableLayers(); const QgsLayerTree *projectLayerTreeRoot = project->layerTreeRoot(); QDomElement layerParentElem = doc.createElement( QStringLiteral( "Layer" ) ); if ( !project->title().isEmpty() ) { // Root Layer title QDomElement layerParentTitleElem = doc.createElement( QStringLiteral( "Title" ) ); QDomText layerParentTitleText = doc.createTextNode( project->title() ); layerParentTitleElem.appendChild( layerParentTitleText ); layerParentElem.appendChild( layerParentTitleElem ); // Root Layer abstract QDomElement layerParentAbstElem = doc.createElement( QStringLiteral( "Abstract" ) ); QDomText layerParentAbstText = doc.createTextNode( project->title() ); layerParentAbstElem.appendChild( layerParentAbstText ); layerParentElem.appendChild( layerParentAbstElem ); } // Root Layer name QString rootLayerName = QgsServerProjectUtils::wmsRootName( *project ); if ( rootLayerName.isEmpty() && !project->title().isEmpty() ) { rootLayerName = 
project->title(); } if ( !rootLayerName.isEmpty() ) { QDomElement layerParentNameElem = doc.createElement( QStringLiteral( "Name" ) ); QDomText layerParentNameText = doc.createTextNode( rootLayerName ); layerParentNameElem.appendChild( layerParentNameText ); layerParentElem.appendChild( layerParentNameElem ); } // Keyword list addKeywordListElement( project, doc, layerParentElem ); // Root Layer tree name if ( projectSettings ) { QDomElement treeNameElem = doc.createElement( QStringLiteral( "TreeName" ) ); QDomText treeNameText = doc.createTextNode( project->title() ); treeNameElem.appendChild( treeNameText ); layerParentElem.appendChild( treeNameElem ); } appendLayersFromTreeGroup( doc, layerParentElem, serverIface, project, version, request, projectLayerTreeRoot, projectSettings ); combineExtentAndCrsOfGroupChildren( doc, layerParentElem, project, true ); return layerParentElem; } namespace { void appendLayersFromTreeGroup( QDomDocument &doc, QDomElement &parentLayer, QgsServerInterface *serverIface, const QgsProject *project, const QString &version, const QgsServerRequest &request, const QgsLayerTreeGroup *layerTreeGroup, bool projectSettings ) { bool useLayerIds = QgsServerProjectUtils::wmsUseLayerIds( *project ); bool siaFormat = QgsServerProjectUtils::wmsInfoFormatSia2045( *project ); QStringList restrictedLayers = QgsServerProjectUtils::wmsRestrictedLayers( *project ); QList< QgsLayerTreeNode * > layerTreeGroupChildren = layerTreeGroup->children(); for ( int i = 0; i < layerTreeGroupChildren.size(); ++i ) { QgsLayerTreeNode *treeNode = layerTreeGroupChildren.at( i ); QDomElement layerElem = doc.createElement( QStringLiteral( "Layer" ) ); if ( projectSettings ) { layerElem.setAttribute( QStringLiteral( "visible" ), treeNode->isVisible() ); } if ( treeNode->nodeType() == QgsLayerTreeNode::NodeGroup ) { QgsLayerTreeGroup *treeGroupChild = static_cast<QgsLayerTreeGroup *>( treeNode ); QString name = treeGroupChild->name(); if ( restrictedLayers.contains( name ) ) //unpublished group { continue; } if ( projectSettings ) { layerElem.setAttribute( QStringLiteral( "mutuallyExclusive" ), treeGroupChild->isMutuallyExclusive() ); } QString shortName = treeGroupChild->customProperty( QStringLiteral( "wmsShortName" ) ).toString(); QString title = treeGroupChild->customProperty( QStringLiteral( "wmsTitle" ) ).toString(); QDomElement nameElem = doc.createElement( QStringLiteral( "Name" ) ); QDomText nameText; if ( !shortName.isEmpty() ) nameText = doc.createTextNode( shortName ); else nameText = doc.createTextNode( name ); nameElem.appendChild( nameText ); layerElem.appendChild( nameElem ); QDomElement titleElem = doc.createElement( QStringLiteral( "Title" ) ); QDomText titleText; if ( !title.isEmpty() ) titleText = doc.createTextNode( title ); else titleText = doc.createTextNode( name ); titleElem.appendChild( titleText ); layerElem.appendChild( titleElem ); QString abstract = treeGroupChild->customProperty( QStringLiteral( "wmsAbstract" ) ).toString(); if ( !abstract.isEmpty() ) { QDomElement abstractElem = doc.createElement( QStringLiteral( "Abstract" ) ); QDomText abstractText = doc.createTextNode( abstract ); abstractElem.appendChild( abstractText ); layerElem.appendChild( abstractElem ); } // Layer tree name if ( projectSettings ) { QDomElement treeNameElem = doc.createElement( QStringLiteral( "TreeName" ) ); QDomText treeNameText = doc.createTextNode( name ); treeNameElem.appendChild( treeNameText ); layerElem.appendChild( treeNameElem ); } appendLayersFromTreeGroup( doc, layerElem, 
serverIface, project, version, request, treeGroupChild, projectSettings ); combineExtentAndCrsOfGroupChildren( doc, layerElem, project ); } else { QgsLayerTreeLayer *treeLayer = static_cast<QgsLayerTreeLayer *>( treeNode ); QgsMapLayer *l = treeLayer->layer(); if ( !l || restrictedLayers.contains( l->name() ) ) //unloaded or unpublished layer { continue; } QgsAccessControl *accessControl = serverIface->accessControls(); if ( accessControl && !accessControl->layerReadPermission( l ) ) { continue; } QString wmsName = l->name(); if ( useLayerIds ) { wmsName = l->id(); } else if ( !l->shortName().isEmpty() ) { wmsName = l->shortName(); } // queryable layer if ( project->nonIdentifiableLayers().contains( l->id() ) ) { layerElem.setAttribute( QStringLiteral( "queryable" ), QStringLiteral( "0" ) ); } else { layerElem.setAttribute( QStringLiteral( "queryable" ), QStringLiteral( "1" ) ); } QDomElement nameElem = doc.createElement( QStringLiteral( "Name" ) ); QDomText nameText = doc.createTextNode( wmsName ); nameElem.appendChild( nameText ); layerElem.appendChild( nameElem ); QDomElement titleElem = doc.createElement( QStringLiteral( "Title" ) ); QString title = l->title(); if ( title.isEmpty() ) { title = l->name(); } QDomText titleText = doc.createTextNode( title ); titleElem.appendChild( titleText ); layerElem.appendChild( titleElem ); QString abstract = l->abstract(); if ( !abstract.isEmpty() ) { QDomElement abstractElem = doc.createElement( QStringLiteral( "Abstract" ) ); QDomText abstractText = doc.createTextNode( abstract ); abstractElem.appendChild( abstractText ); layerElem.appendChild( abstractElem ); } //keyword list if ( !l->keywordList().isEmpty() ) { QStringList keywordStringList = l->keywordList().split( ',' ); QDomElement keywordListElem = doc.createElement( QStringLiteral( "KeywordList" ) ); for ( int i = 0; i < keywordStringList.size(); ++i ) { QDomElement keywordElem = doc.createElement( QStringLiteral( "Keyword" ) ); QDomText keywordText = doc.createTextNode( keywordStringList.at( i ).trimmed() ); keywordElem.appendChild( keywordText ); if ( siaFormat ) { keywordElem.setAttribute( QStringLiteral( "vocabulary" ), QStringLiteral( "SIA_Geo405" ) ); } keywordListElem.appendChild( keywordElem ); } layerElem.appendChild( keywordListElem ); } //vector layer without geometry bool geometryLayer = true; if ( l->type() == QgsMapLayer::VectorLayer ) { QgsVectorLayer *vLayer = qobject_cast<QgsVectorLayer *>( l ); if ( vLayer ) { if ( vLayer->wkbType() == QgsWkbTypes::NoGeometry ) { geometryLayer = false; } } } //CRS if ( geometryLayer ) { QStringList crsList; crsList << l->crs().authid(); QStringList outputCrsList = QgsServerProjectUtils::wmsOutputCrsList( *project ); appendCrsElementsToLayer( doc, layerElem, crsList, outputCrsList ); //Ex_GeographicBoundingBox appendLayerBoundingBoxes( doc, layerElem, l->extent(), l->crs(), crsList, outputCrsList, project ); } // add details about supported styles of the layer appendLayerStyles( doc, layerElem, l, project, version, request ); //min/max scale denominators (scale-based visibility) if ( l->hasScaleBasedVisibility() ) { if ( version == QLatin1String( "1.1.1" ) ) { double OGC_PX_M = 0.00028; // OGC reference pixel size in metres, also used by qgis double SCALE_TO_SCALEHINT = OGC_PX_M * M_SQRT2; QDomElement scaleHintElem = doc.createElement( QStringLiteral( "ScaleHint" ) ); scaleHintElem.setAttribute( QStringLiteral( "min" ), QString::number( l->maximumScale() * SCALE_TO_SCALEHINT ) ); scaleHintElem.setAttribute( QStringLiteral( "max" ), QString::number( 
l->minimumScale() * SCALE_TO_SCALEHINT ) ); layerElem.appendChild( scaleHintElem ); } else { QString minScaleString = QString::number( l->maximumScale() ); QDomElement minScaleElem = doc.createElement( QStringLiteral( "MinScaleDenominator" ) ); QDomText minScaleText = doc.createTextNode( minScaleString ); minScaleElem.appendChild( minScaleText ); layerElem.appendChild( minScaleElem ); QString maxScaleString = QString::number( l->minimumScale() ); QDomElement maxScaleElem = doc.createElement( QStringLiteral( "MaxScaleDenominator" ) ); QDomText maxScaleText = doc.createTextNode( maxScaleString ); maxScaleElem.appendChild( maxScaleText ); layerElem.appendChild( maxScaleElem ); } } // layer data URL QString dataUrl = l->dataUrl(); if ( !dataUrl.isEmpty() ) { QDomElement dataUrlElem = doc.createElement( QStringLiteral( "DataURL" ) ); QDomElement dataUrlFormatElem = doc.createElement( QStringLiteral( "Format" ) ); QString dataUrlFormat = l->dataUrlFormat(); QDomText dataUrlFormatText = doc.createTextNode( dataUrlFormat ); dataUrlFormatElem.appendChild( dataUrlFormatText ); dataUrlElem.appendChild( dataUrlFormatElem ); QDomElement dataORElem = doc.createElement( QStringLiteral( "OnlineResource" ) ); dataORElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( "http://www.w3.org/1999/xlink" ) ); dataORElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); dataORElem.setAttribute( QStringLiteral( "xlink:href" ), dataUrl ); dataUrlElem.appendChild( dataORElem ); layerElem.appendChild( dataUrlElem ); } // layer attribution QString attribution = l->attribution(); if ( !attribution.isEmpty() ) { QDomElement attribElem = doc.createElement( QStringLiteral( "Attribution" ) ); QDomElement attribTitleElem = doc.createElement( QStringLiteral( "Title" ) ); QDomText attribText = doc.createTextNode( attribution ); attribTitleElem.appendChild( attribText ); attribElem.appendChild( attribTitleElem ); QString attributionUrl = l->attributionUrl(); if ( !attributionUrl.isEmpty() ) { QDomElement attribORElem = doc.createElement( QStringLiteral( "OnlineResource" ) ); attribORElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( "http://www.w3.org/1999/xlink" ) ); attribORElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); attribORElem.setAttribute( QStringLiteral( "xlink:href" ), attributionUrl ); attribElem.appendChild( attribORElem ); } layerElem.appendChild( attribElem ); } // layer metadata URL QString metadataUrl = l->metadataUrl(); if ( !metadataUrl.isEmpty() ) { QDomElement metaUrlElem = doc.createElement( QStringLiteral( "MetadataURL" ) ); QString metadataUrlType = l->metadataUrlType(); if ( version == QLatin1String( "1.1.1" ) ) { metaUrlElem.setAttribute( QStringLiteral( "type" ), metadataUrlType ); } else if ( metadataUrlType == QLatin1String( "FGDC" ) ) { metaUrlElem.setAttribute( QStringLiteral( "type" ), QStringLiteral( "FGDC:1998" ) ); } else if ( metadataUrlType == QLatin1String( "TC211" ) ) { metaUrlElem.setAttribute( QStringLiteral( "type" ), QStringLiteral( "ISO19115:2003" ) ); } else { metaUrlElem.setAttribute( QStringLiteral( "type" ), metadataUrlType ); } QString metadataUrlFormat = l->metadataUrlFormat(); if ( !metadataUrlFormat.isEmpty() ) { QDomElement metaUrlFormatElem = doc.createElement( QStringLiteral( "Format" ) ); QDomText metaUrlFormatText = doc.createTextNode( metadataUrlFormat ); metaUrlFormatElem.appendChild( metaUrlFormatText ); metaUrlElem.appendChild( metaUrlFormatElem ); } QDomElement metaUrlORElem = 
doc.createElement( QStringLiteral( "OnlineResource" ) ); metaUrlORElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( "http://www.w3.org/1999/xlink" ) ); metaUrlORElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); metaUrlORElem.setAttribute( QStringLiteral( "xlink:href" ), metadataUrl ); metaUrlElem.appendChild( metaUrlORElem ); layerElem.appendChild( metaUrlElem ); } if ( projectSettings ) { appendLayerProjectSettings( doc, layerElem, l ); } } parentLayer.appendChild( layerElem ); } } void appendLayerStyles( QDomDocument &doc, QDomElement &layerElem, QgsMapLayer *currentLayer, const QgsProject *project, const QString &version, const QgsServerRequest &request ) { // Get service URL QUrl href = serviceUrl( request, project ); //href needs to be a prefix QString hrefString = href.toString(); hrefString.append( href.hasQuery() ? "&" : "?" ); for ( const QString &styleName : currentLayer->styleManager()->styles() ) { QDomElement styleElem = doc.createElement( QStringLiteral( "Style" ) );<|fim▁hole|>
 QDomText styleNameText = doc.createTextNode( styleName ); styleNameElem.appendChild( styleNameText ); QDomElement styleTitleElem = doc.createElement( QStringLiteral( "Title" ) ); QDomText styleTitleText = doc.createTextNode( styleName ); styleTitleElem.appendChild( styleTitleText ); styleElem.appendChild( styleNameElem ); styleElem.appendChild( styleTitleElem ); // LegendURL for an explicit layer-based GetLegendGraphic request QDomElement getLayerLegendGraphicElem = doc.createElement( QStringLiteral( "LegendURL" ) ); QString customHrefString = currentLayer->legendUrl(); QStringList getLayerLegendGraphicFormats; if ( !customHrefString.isEmpty() ) { getLayerLegendGraphicFormats << currentLayer->legendUrlFormat(); } else { getLayerLegendGraphicFormats << QStringLiteral( "image/png" ); // << "jpeg" << "image/jpeg" } for ( int i = 0; i < getLayerLegendGraphicFormats.size(); ++i ) { QDomElement getLayerLegendGraphicFormatElem = doc.createElement( QStringLiteral( "Format" ) ); QString getLayerLegendGraphicFormat = getLayerLegendGraphicFormats[i]; QDomText getLayerLegendGraphicFormatText = doc.createTextNode( getLayerLegendGraphicFormat ); getLayerLegendGraphicFormatElem.appendChild( getLayerLegendGraphicFormatText ); getLayerLegendGraphicElem.appendChild( getLayerLegendGraphicFormatElem ); } // no parameters on a custom href URL, because it should link directly to the graphic if ( customHrefString.isEmpty() ) { QString layerName = currentLayer->name(); if ( QgsServerProjectUtils::wmsUseLayerIds( *project ) ) layerName = currentLayer->id(); else if ( !currentLayer->shortName().isEmpty() ) layerName = currentLayer->shortName(); QUrlQuery mapUrl( hrefString ); mapUrl.addQueryItem( QStringLiteral( "SERVICE" ), QStringLiteral( "WMS" ) ); mapUrl.addQueryItem( QStringLiteral( "VERSION" ), version ); mapUrl.addQueryItem( QStringLiteral( "REQUEST" ), QStringLiteral( "GetLegendGraphic" ) ); mapUrl.addQueryItem( QStringLiteral( "LAYER" ), layerName ); mapUrl.addQueryItem( QStringLiteral( "FORMAT" ), QStringLiteral( "image/png" ) ); mapUrl.addQueryItem( QStringLiteral( "STYLE" ), styleNameText.data() ); if ( version == QLatin1String( "1.3.0" ) ) { mapUrl.addQueryItem( QStringLiteral( "SLD_VERSION" ), QStringLiteral( "1.1.0" ) ); } customHrefString = mapUrl.toString(); } QDomElement getLayerLegendGraphicORElem = doc.createElement( QStringLiteral( "OnlineResource" ) ); getLayerLegendGraphicORElem.setAttribute( QStringLiteral( "xmlns:xlink" ), QStringLiteral( 
"http://www.w3.org/1999/xlink" ) ); getLayerLegendGraphicORElem.setAttribute( QStringLiteral( "xlink:type" ), QStringLiteral( "simple" ) ); getLayerLegendGraphicORElem.setAttribute( QStringLiteral( "xlink:href" ), customHrefString ); getLayerLegendGraphicElem.appendChild( getLayerLegendGraphicORElem ); styleElem.appendChild( getLayerLegendGraphicElem ); layerElem.appendChild( styleElem ); } } void appendCrsElementsToLayer( QDomDocument &doc, QDomElement &layerElement, const QStringList &crsList, const QStringList &constrainedCrsList ) { if ( layerElement.isNull() ) { return; } //insert the CRS elements after the title element to be in accordance with the WMS 1.3 specification QDomElement titleElement = layerElement.firstChildElement( QStringLiteral( "Title" ) ); QDomElement abstractElement = layerElement.firstChildElement( QStringLiteral( "Abstract" ) ); QDomElement CRSPrecedingElement = abstractElement.isNull() ? titleElement : abstractElement; //last element before the CRS elements if ( CRSPrecedingElement.isNull() ) { // keyword list element is never empty const QDomElement keyElement = layerElement.firstChildElement( QStringLiteral( "KeywordList" ) ); CRSPrecedingElement = keyElement; } //In case the number of advertised CRS is constrained if ( !constrainedCrsList.isEmpty() ) { for ( int i = constrainedCrsList.size() - 1; i >= 0; --i ) { appendCrsElementToLayer( doc, layerElement, CRSPrecedingElement, constrainedCrsList.at( i ) ); } } else //no crs constraint { for ( const QString &crs : crsList ) { appendCrsElementToLayer( doc, layerElement, CRSPrecedingElement, crs ); } } //Support for CRS:84 is mandatory (equals EPSG:4326 with reversed axis) appendCrsElementToLayer( doc, layerElement, CRSPrecedingElement, QString( "CRS:84" ) ); } void appendCrsElementToLayer( QDomDocument &doc, QDomElement &layerElement, const QDomElement &precedingElement, const QString &crsText ) { if ( crsText.isEmpty() ) return; QString version = doc.documentElement().attribute( QStringLiteral( "version" ) ); QDomElement crsElement = doc.createElement( version == QLatin1String( "1.1.1" ) ? 
"SRS" : "CRS" ); QDomText crsTextNode = doc.createTextNode( crsText ); crsElement.appendChild( crsTextNode ); layerElement.insertAfter( crsElement, precedingElement ); } void appendLayerBoundingBoxes( QDomDocument &doc, QDomElement &layerElem, const QgsRectangle &lExtent, const QgsCoordinateReferenceSystem &layerCRS, const QStringList &crsList, const QStringList &constrainedCrsList, const QgsProject *project ) { if ( layerElem.isNull() ) { return; } QgsRectangle layerExtent = lExtent; if ( qgsDoubleNear( layerExtent.xMinimum(), layerExtent.xMaximum() ) || qgsDoubleNear( layerExtent.yMinimum(), layerExtent.yMaximum() ) ) { //layer bbox cannot be empty layerExtent.grow( 0.000001 ); } QgsCoordinateReferenceSystem wgs84 = QgsCoordinateReferenceSystem::fromOgcWmsCrs( GEO_EPSG_CRS_AUTHID ); QString version = doc.documentElement().attribute( QStringLiteral( "version" ) ); //Ex_GeographicBoundingBox QDomElement ExGeoBBoxElement; //transform the layers native CRS into WGS84 QgsRectangle wgs84BoundingRect; if ( !layerExtent.isNull() ) { QgsCoordinateTransform exGeoTransform( layerCRS, wgs84, project ); try { wgs84BoundingRect = exGeoTransform.transformBoundingBox( layerExtent ); } catch ( const QgsCsException & ) { wgs84BoundingRect = QgsRectangle(); } } if ( version == QLatin1String( "1.1.1" ) ) // WMS Version 1.1.1 { ExGeoBBoxElement = doc.createElement( QStringLiteral( "LatLonBoundingBox" ) ); ExGeoBBoxElement.setAttribute( QStringLiteral( "minx" ), QString::number( wgs84BoundingRect.xMinimum() ) ); ExGeoBBoxElement.setAttribute( QStringLiteral( "maxx" ), QString::number( wgs84BoundingRect.xMaximum() ) ); ExGeoBBoxElement.setAttribute( QStringLiteral( "miny" ), QString::number( wgs84BoundingRect.yMinimum() ) ); ExGeoBBoxElement.setAttribute( QStringLiteral( "maxy" ), QString::number( wgs84BoundingRect.yMaximum() ) ); } else // WMS Version 1.3.0 { ExGeoBBoxElement = doc.createElement( QStringLiteral( "EX_GeographicBoundingBox" ) ); QDomElement wBoundLongitudeElement = doc.createElement( QStringLiteral( "westBoundLongitude" ) ); QDomText wBoundLongitudeText = doc.createTextNode( QString::number( wgs84BoundingRect.xMinimum() ) ); wBoundLongitudeElement.appendChild( wBoundLongitudeText ); ExGeoBBoxElement.appendChild( wBoundLongitudeElement ); QDomElement eBoundLongitudeElement = doc.createElement( QStringLiteral( "eastBoundLongitude" ) ); QDomText eBoundLongitudeText = doc.createTextNode( QString::number( wgs84BoundingRect.xMaximum() ) ); eBoundLongitudeElement.appendChild( eBoundLongitudeText ); ExGeoBBoxElement.appendChild( eBoundLongitudeElement ); QDomElement sBoundLatitudeElement = doc.createElement( QStringLiteral( "southBoundLatitude" ) ); QDomText sBoundLatitudeText = doc.createTextNode( QString::number( wgs84BoundingRect.yMinimum() ) ); sBoundLatitudeElement.appendChild( sBoundLatitudeText ); ExGeoBBoxElement.appendChild( sBoundLatitudeElement ); QDomElement nBoundLatitudeElement = doc.createElement( QStringLiteral( "northBoundLatitude" ) ); QDomText nBoundLatitudeText = doc.createTextNode( QString::number( wgs84BoundingRect.yMaximum() ) ); nBoundLatitudeElement.appendChild( nBoundLatitudeText ); ExGeoBBoxElement.appendChild( nBoundLatitudeElement ); } if ( !wgs84BoundingRect.isNull() ) //LatLonBoundingBox / Ex_GeographicBounding box is optional { QDomElement lastCRSElem = layerElem.lastChildElement( version == QLatin1String( "1.1.1" ) ? 
"SRS" : "CRS" ); if ( !lastCRSElem.isNull() ) { layerElem.insertAfter( ExGeoBBoxElement, lastCRSElem ); } else { layerElem.appendChild( ExGeoBBoxElement ); } } //In case the number of advertised CRS is constrained if ( !constrainedCrsList.isEmpty() ) { for ( int i = constrainedCrsList.size() - 1; i >= 0; --i ) { appendLayerBoundingBox( doc, layerElem, layerExtent, layerCRS, constrainedCrsList.at( i ), project ); } } else //no crs constraint { for ( const QString &crs : crsList ) { appendLayerBoundingBox( doc, layerElem, layerExtent, layerCRS, crs, project ); } } } void appendLayerBoundingBox( QDomDocument &doc, QDomElement &layerElem, const QgsRectangle &layerExtent, const QgsCoordinateReferenceSystem &layerCRS, const QString &crsText, const QgsProject *project ) { if ( layerElem.isNull() ) { return; } if ( crsText.isEmpty() ) { return; } QString version = doc.documentElement().attribute( QStringLiteral( "version" ) ); QgsCoordinateReferenceSystem crs = QgsCoordinateReferenceSystem::fromOgcWmsCrs( crsText ); //transform the layers native CRS into CRS QgsRectangle crsExtent; if ( !layerExtent.isNull() ) { QgsCoordinateTransform crsTransform( layerCRS, crs, project ); try { crsExtent = crsTransform.transformBoundingBox( layerExtent ); } catch ( QgsCsException &cse ) { Q_UNUSED( cse ); return; } } if ( crsExtent.isNull() ) { return; } //BoundingBox element QDomElement bBoxElement = doc.createElement( QStringLiteral( "BoundingBox" ) ); if ( crs.isValid() ) { bBoxElement.setAttribute( version == QLatin1String( "1.1.1" ) ? "SRS" : "CRS", crs.authid() ); } if ( version != QLatin1String( "1.1.1" ) && crs.hasAxisInverted() ) { crsExtent.invert(); } bBoxElement.setAttribute( QStringLiteral( "minx" ), QString::number( crsExtent.xMinimum() ) ); bBoxElement.setAttribute( QStringLiteral( "miny" ), QString::number( crsExtent.yMinimum() ) ); bBoxElement.setAttribute( QStringLiteral( "maxx" ), QString::number( crsExtent.xMaximum() ) ); bBoxElement.setAttribute( QStringLiteral( "maxy" ), QString::number( crsExtent.yMaximum() ) ); QDomElement lastBBoxElem = layerElem.lastChildElement( QStringLiteral( "BoundingBox" ) ); if ( !lastBBoxElem.isNull() ) { layerElem.insertAfter( bBoxElement, lastBBoxElem ); } else { lastBBoxElem = layerElem.lastChildElement( version == QLatin1String( "1.1.1" ) ? "LatLonBoundingBox" : "EX_GeographicBoundingBox" ); if ( !lastBBoxElem.isNull() ) { layerElem.insertAfter( bBoxElement, lastBBoxElem ); } else { layerElem.appendChild( bBoxElement ); } } } QgsRectangle layerBoundingBoxInProjectCrs( const QDomDocument &doc, const QDomElement &layerElem, const QgsProject *project ) { QgsRectangle BBox; if ( layerElem.isNull() ) { return BBox; } //read box coordinates and layer auth. 
id QDomElement boundingBoxElem = layerElem.firstChildElement( QStringLiteral( "BoundingBox" ) ); if ( boundingBoxElem.isNull() ) { return BBox; } double minx, miny, maxx, maxy; bool conversionOk; minx = boundingBoxElem.attribute( QStringLiteral( "minx" ) ).toDouble( &conversionOk ); if ( !conversionOk ) { return BBox; } miny = boundingBoxElem.attribute( QStringLiteral( "miny" ) ).toDouble( &conversionOk ); if ( !conversionOk ) { return BBox; } maxx = boundingBoxElem.attribute( QStringLiteral( "maxx" ) ).toDouble( &conversionOk ); if ( !conversionOk ) { return BBox; } maxy = boundingBoxElem.attribute( QStringLiteral( "maxy" ) ).toDouble( &conversionOk ); if ( !conversionOk ) { return BBox; } QString version = doc.documentElement().attribute( QStringLiteral( "version" ) ); //create layer crs QgsCoordinateReferenceSystem layerCrs = QgsCoordinateReferenceSystem::fromOgcWmsCrs( boundingBoxElem.attribute( version == QLatin1String( "1.1.1" ) ? "SRS" : "CRS" ) ); if ( !layerCrs.isValid() ) { return BBox; } BBox.setXMinimum( minx ); BBox.setXMaximum( maxx ); BBox.setYMinimum( miny ); BBox.setYMaximum( maxy ); if ( version != QLatin1String( "1.1.1" ) && layerCrs.hasAxisInverted() ) { BBox.invert(); } //get project crs QgsCoordinateTransform t( layerCrs, project->crs(), project ); //transform try { BBox = t.transformBoundingBox( BBox ); } catch ( const QgsCsException & ) { BBox = QgsRectangle(); } return BBox; } bool crsSetFromLayerElement( const QDomElement &layerElement, QSet<QString> &crsSet ) { if ( layerElement.isNull() ) { return false; } crsSet.clear(); QDomNodeList crsNodeList; crsNodeList = layerElement.elementsByTagName( QStringLiteral( "CRS" ) ); // WMS 1.3.0 for ( int i = 0; i < crsNodeList.size(); ++i ) { crsSet.insert( crsNodeList.at( i ).toElement().text() ); } crsNodeList = layerElement.elementsByTagName( QStringLiteral( "SRS" ) ); // WMS 1.1.1 for ( int i = 0; i < crsNodeList.size(); ++i ) { crsSet.insert( crsNodeList.at( i ).toElement().text() ); } return true; } void combineExtentAndCrsOfGroupChildren( QDomDocument &doc, QDomElement &groupElem, const QgsProject *project, bool considerMapExtent ) { QgsRectangle combinedBBox; QSet<QString> combinedCRSSet; bool firstBBox = true; bool firstCRSSet = true; QDomNodeList layerChildren = groupElem.childNodes(); for ( int j = 0; j < layerChildren.size(); ++j ) { QDomElement childElem = layerChildren.at( j ).toElement(); if ( childElem.tagName() != QLatin1String( "Layer" ) ) continue; QgsRectangle bbox = layerBoundingBoxInProjectCrs( doc, childElem, project ); if ( bbox.isNull() ) { continue; } if ( !bbox.isEmpty() ) { if ( firstBBox ) { combinedBBox = bbox; firstBBox = false; } else { combinedBBox.combineExtentWith( bbox ); } } //combine crs set QSet<QString> crsSet; if ( crsSetFromLayerElement( childElem, crsSet ) ) { if ( firstCRSSet ) { combinedCRSSet = crsSet; firstCRSSet = false; } else { combinedCRSSet.intersect( crsSet ); } } } QStringList outputCrsList = QgsServerProjectUtils::wmsOutputCrsList( *project ); appendCrsElementsToLayer( doc, groupElem, combinedCRSSet.toList(), outputCrsList ); QgsCoordinateReferenceSystem groupCRS = project->crs(); if ( considerMapExtent ) { QgsRectangle mapRect = QgsServerProjectUtils::wmsExtent( *project ); if ( !mapRect.isEmpty() ) { combinedBBox = mapRect; } } appendLayerBoundingBoxes( doc, groupElem, combinedBBox, groupCRS, combinedCRSSet.toList(), outputCrsList, project ); } void appendDrawingOrder( QDomDocument &doc, QDomElement &parentElem, QgsServerInterface *serverIface, const QgsProject 
*project ) { QgsAccessControl *accessControl = serverIface->accessControls(); bool useLayerIds = QgsServerProjectUtils::wmsUseLayerIds( *project ); QStringList restrictedLayers = QgsServerProjectUtils::wmsRestrictedLayers( *project ); QStringList layerList; const QgsLayerTree *projectLayerTreeRoot = project->layerTreeRoot(); QList< QgsMapLayer * > projectLayerOrder = projectLayerTreeRoot->layerOrder(); for ( int i = 0; i < projectLayerOrder.size(); ++i ) { QgsMapLayer *l = projectLayerOrder.at( i ); if ( restrictedLayers.contains( l->name() ) ) //unpublished layer { continue; } if ( accessControl && !accessControl->layerReadPermission( l ) ) { continue; } QString wmsName = l->name(); if ( useLayerIds ) { wmsName = l->id(); } else if ( !l->shortName().isEmpty() ) { wmsName = l->shortName(); } layerList << wmsName; } if ( !layerList.isEmpty() ) { QStringList reversedList; reversedList.reserve( layerList.size() ); for ( int i = layerList.size() - 1; i >= 0; --i ) reversedList << layerList[ i ]; QDomElement layerDrawingOrderElem = doc.createElement( QStringLiteral( "LayerDrawingOrder" ) ); QDomText drawingOrderText = doc.createTextNode( reversedList.join( ',' ) ); layerDrawingOrderElem.appendChild( drawingOrderText ); parentElem.appendChild( layerDrawingOrderElem ); } } void appendLayerProjectSettings( QDomDocument &doc, QDomElement &layerElem, QgsMapLayer *currentLayer ) { if ( !currentLayer ) { return; } // Layer tree name QDomElement treeNameElem = doc.createElement( QStringLiteral( "TreeName" ) ); QDomText treeNameText = doc.createTextNode( currentLayer->name() ); treeNameElem.appendChild( treeNameText ); layerElem.appendChild( treeNameElem ); if ( currentLayer->type() == QgsMapLayer::VectorLayer ) { QgsVectorLayer *vLayer = static_cast<QgsVectorLayer *>( currentLayer ); const QSet<QString> &excludedAttributes = vLayer->excludeAttributesWms(); int displayFieldIdx = -1; QString displayField = QStringLiteral( "maptip" ); QgsExpression exp( vLayer->displayExpression() ); if ( exp.isField() ) { displayField = static_cast<const QgsExpressionNodeColumnRef *>( exp.rootNode() )->name(); displayFieldIdx = vLayer->fields().lookupField( displayField ); } //attributes QDomElement attributesElem = doc.createElement( QStringLiteral( "Attributes" ) ); const QgsFields layerFields = vLayer->fields(); for ( int idx = 0; idx < layerFields.count(); ++idx ) { QgsField field = layerFields.at( idx ); if ( excludedAttributes.contains( field.name() ) ) { continue; } // field alias in case of displayField if ( idx == displayFieldIdx ) { displayField = vLayer->attributeDisplayName( idx ); } QDomElement attributeElem = doc.createElement( QStringLiteral( "Attribute" ) ); attributeElem.setAttribute( QStringLiteral( "name" ), field.name() ); attributeElem.setAttribute( QStringLiteral( "type" ), QVariant::typeToName( field.type() ) ); attributeElem.setAttribute( QStringLiteral( "typeName" ), field.typeName() ); QString alias = field.alias(); if ( !alias.isEmpty() ) { attributeElem.setAttribute( QStringLiteral( "alias" ), alias ); } //edit type to text attributeElem.setAttribute( QStringLiteral( "editType" ), vLayer->editorWidgetSetup( idx ).type() ); attributeElem.setAttribute( QStringLiteral( "comment" ), field.comment() ); attributeElem.setAttribute( QStringLiteral( "length" ), field.length() ); attributeElem.setAttribute( QStringLiteral( "precision" ), field.precision() ); attributesElem.appendChild( attributeElem ); } //displayfield layerElem.setAttribute( QStringLiteral( "displayField" ), displayField ); //geometry 
type layerElem.setAttribute( QStringLiteral( "geometryType" ), QgsWkbTypes::displayString( vLayer->wkbType() ) ); layerElem.appendChild( attributesElem ); } else if ( currentLayer->type() == QgsMapLayer::RasterLayer ) { const QgsDataProvider *provider = currentLayer->dataProvider(); if ( provider && provider->name() == "wms" ) { //advertise as web map background layer QVariant wmsBackgroundLayer = currentLayer->customProperty( QStringLiteral( "WMSBackgroundLayer" ), false ); QDomElement wmsBackgroundLayerElem = doc.createElement( QStringLiteral( "WMSBackgroundLayer" ) ); QDomText wmsBackgroundLayerText = doc.createTextNode( wmsBackgroundLayer.toBool() ? QStringLiteral( "1" ) : QStringLiteral( "0" ) ); wmsBackgroundLayerElem.appendChild( wmsBackgroundLayerText ); layerElem.appendChild( wmsBackgroundLayerElem ); //publish datasource QVariant wmsPublishDataSourceUrl = currentLayer->customProperty( QStringLiteral( "WMSPublishDataSourceUrl" ), false ); if ( wmsPublishDataSourceUrl.toBool() ) { QList< QVariant > resolutionList = provider->property( "resolutions" ).toList(); bool tiled = resolutionList.size() > 0; QDomElement dataSourceElem = doc.createElement( tiled ? QStringLiteral( "WMTSDataSource" ) : QStringLiteral( "WMSDataSource" ) ); QDomText dataSourceUri = doc.createTextNode( provider->dataSourceUri() ); dataSourceElem.appendChild( dataSourceUri ); layerElem.appendChild( dataSourceElem ); } } QVariant wmsPrintLayer = currentLayer->customProperty( QStringLiteral( "WMSPrintLayer" ) ); if ( wmsPrintLayer.isValid() ) { QDomElement wmsPrintLayerElem = doc.createElement( QStringLiteral( "WMSPrintLayer" ) ); QDomText wmsPrintLayerText = doc.createTextNode( wmsPrintLayer.toString() ); wmsPrintLayerElem.appendChild( wmsPrintLayerText ); layerElem.appendChild( wmsPrintLayerElem ); } } } void addKeywordListElement( const QgsProject *project, QDomDocument &doc, QDomElement &parent ) { bool sia2045 = QgsServerProjectUtils::wmsInfoFormatSia2045( *project ); QDomElement keywordsElem = doc.createElement( QStringLiteral( "KeywordList" ) ); //add default keyword QDomElement keywordElem = doc.createElement( QStringLiteral( "Keyword" ) ); keywordElem.setAttribute( QStringLiteral( "vocabulary" ), QStringLiteral( "ISO" ) ); QDomText keywordText = doc.createTextNode( QStringLiteral( "infoMapAccessService" ) ); keywordElem.appendChild( keywordText ); keywordsElem.appendChild( keywordElem ); QStringList keywords = QgsServerProjectUtils::owsServiceKeywords( *project ); for ( const QString &keyword : qgis::as_const( keywords ) ) { if ( !keyword.isEmpty() ) { keywordElem = doc.createElement( QStringLiteral( "Keyword" ) ); keywordText = doc.createTextNode( keyword ); keywordElem.appendChild( keywordText ); if ( sia2045 ) { keywordElem.setAttribute( QStringLiteral( "vocabulary" ), QStringLiteral( "SIA_Geo405" ) ); } keywordsElem.appendChild( keywordElem ); } } parent.appendChild( keywordsElem ); } } } // namespace QgsWms<|fim▁end|>
QDomElement styleNameElem = doc.createElement( QStringLiteral( "Name" ) );
<|file_name|>TSRRobot.cpp<|end_file_name|><|fim▁begin|>/*********************************************************************** Copyright (c) 2014, Carnegie Mellon University All rights reserved. Authors: Jennifer King <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT<|fim▁hole|>DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *************************************************************************/ #include <boost/make_shared.hpp> #include <or_ompl/TSRRobot.h> #include <or_ompl/or_conversions.h> using namespace or_ompl; TSRRobot::TSRRobot(const std::vector<TSR::Ptr> &tsrs, const OpenRAVE::EnvironmentBasePtr &penv) : _tsrs(tsrs), _penv(penv), _initialized(false), _solver("GeneralIK") { } bool TSRRobot::construct() { if(_initialized){ RAVELOG_ERROR("[TSRRobot] Already initialized. TSRRobot::construct cannot be called twice."); throw OpenRAVE::openrave_exception( "TSRRobot::construct cannot be called twice", OpenRAVE::ORE_Failed ); } _initialized = false; // Create an empty robot of the correct type _probot = RaveCreateRobot(_penv, "GenericRobot"); if( _probot.get() == NULL ){ RAVELOG_ERROR("[TSRRobot] Failed to create robot of type GenericRobot"); return _initialized; } // TODO: mimic body // Build the robot std::vector<OpenRAVE::KinBody::LinkInfoConstPtr> link_infos; std::vector<OpenRAVE::KinBody::JointInfoConstPtr> joint_infos; std::vector<OpenRAVE::RobotBase::ManipulatorInfoConstPtr> manip_infos; std::vector<OpenRAVE::RobotBase::AttachedSensorInfoConstPtr> sensor_infos; const std::string bodyprefix = "Body"; int bodynumber = 1; Eigen::Affine3d Tw0_e = Eigen::Affine3d::Identity(); for(unsigned int i=0; i < _tsrs.size(); i++){ TSR::Ptr tsr = _tsrs[i]; Eigen::Matrix<double, 6, 2> Bw = tsr->getBounds(); if (tsr->relative_body_name() != "NULL") { RAVELOG_ERROR("[TSRRobot] ERROR: TSRs relative to a body are not supported.\n"); return _initialized; } for(int j=0; j < 6; j++){ // Don't add a body if there is no freedom in this dimension if(Bw(j,0) == 0.0 && Bw(j,1) == 0.0){ continue; } // If the bounds are equal and non-zero, we should do something reasonable // For now, this isn't supported if(Bw(j,0) == Bw(j,1)){ RAVELOG_ERROR("[TSRRobot] ERROR: TSR Chains are currently unable to deal with cases where two bounds are equal but non-zero, cannot robotize.\n"); return _initialized; } // Check for axis flip, marked by Bw values being backwards bool bFlipAxis = false; if(Bw(j,0) > Bw(j,1)){ Bw(j,0) = -Bw(j,0); Bw(j,1) = -Bw(j,1); bFlipAxis = true; } // TODO: Handle mimic joints // Store joint limits _lowerlimits.push_back(Bw(j,0)); _upperlimits.push_back(Bw(j,1)); // Create a 
Link std::string prev_bodyname = (boost::format("%s%d") % bodyprefix % (bodynumber-1)).str(); std::string bodyname = (boost::format("%s%d") % bodyprefix % bodynumber).str(); OpenRAVE::KinBody::LinkInfoPtr link_info = boost::make_shared<OpenRAVE::KinBody::LinkInfo>(); link_info->_name = bodyname; link_info->_t = toOR<double>(Tw0_e); // transform OpenRAVE::KinBody::GeometryInfoPtr geom_info = boost::make_shared<OpenRAVE::KinBody::GeometryInfo>(); if(j < 3){ geom_info->_type = OpenRAVE::GT_Box; }else{ geom_info->_type = OpenRAVE::GT_Cylinder; geom_info->_vGeomData = OpenRAVE::Vector(0.03, 0.07, 0.0); //cylinder radius, height, ignored } if(j == 0){ geom_info->_vGeomData = OpenRAVE::Vector(0.04, 0.02, 0.02); // box extents }else if(j == 1){ geom_info->_vGeomData = OpenRAVE::Vector(0.02, 0.04, 0.02); // box extents }else if(j == 2){ geom_info->_vGeomData = OpenRAVE::Vector(0.02, 0.02, 0.04); // box extents }else if(j == 3){ OpenRAVE::RaveTransformMatrix<OpenRAVE::dReal> t = OpenRAVE::geometry::matrixFromAxisAngle(OpenRAVE::Vector(0, 0, 1), 90.); geom_info->_t = t; }else if(j == 4){ OpenRAVE::RaveTransformMatrix<OpenRAVE::dReal> t = OpenRAVE::geometry::matrixFromAxisAngle(OpenRAVE::Vector(0, 1, 0), 90.); geom_info->_t = t; }else if(j == 5){ OpenRAVE::RaveTransformMatrix<OpenRAVE::dReal> t = OpenRAVE::geometry::matrixFromAxisAngle(OpenRAVE::Vector(1, 0, 0), 90.); geom_info->_t = t; } geom_info->_vDiffuseColor = OpenRAVE::Vector(0.3, 0.7, 0.3); link_info->_vgeometryinfos.push_back(geom_info); link_infos.push_back(link_info); // Now create a joint OpenRAVE::KinBody::JointInfoPtr joint_info = boost::make_shared<OpenRAVE::KinBody::JointInfo>(); std::string joint_name = (boost::format("J%d") % bodynumber).str(); joint_info->_name = joint_name; if(j < 3){ joint_info->_type = OpenRAVE::KinBody::JointSlider; }else{ joint_info->_type = OpenRAVE::KinBody::JointHinge; } joint_info->_linkname0 = prev_bodyname; joint_info->_linkname1 = bodyname; joint_info->_vweights[0] = 1.; joint_info->_vmaxvel[0] = 1.; joint_info->_vresolution[0] = 1.; joint_info->_vlowerlimit[0] = Bw(j,0); joint_info->_vupperlimit[0] = Bw(j,1); joint_info->_vaxes[0] = OpenRAVE::Vector(0., 0., 0.); unsigned int aidx = (j % 3); if(j > 3 && bFlipAxis){ joint_info->_vaxes[0][aidx] = -1.; }else{ joint_info->_vaxes[0][aidx] = 1.; } joint_infos.push_back(joint_info); bodynumber++; } Tw0_e = Tw0_e * tsr->getEndEffectorOffsetTransform(); } _num_dof = bodynumber - 1; // now add a geometry to the last body with the offset of the last TSR, this will be the target for the manipulator TSR::Ptr last_tsr = _tsrs.back(); Tw0_e = last_tsr->getEndEffectorOffsetTransform(); OpenRAVE::KinBody::LinkInfoPtr link_info = boost::make_shared<OpenRAVE::KinBody::LinkInfo>(); std::string bodyname = (boost::format("%s%d") % bodyprefix % (bodynumber-1)).str(); link_info->_name = bodyname; link_info->_bStatic = false; OpenRAVE::KinBody::GeometryInfoPtr geom_info = boost::make_shared<OpenRAVE::KinBody::GeometryInfo>(); geom_info->_t = toOR<double>(Tw0_e); geom_info->_type = OpenRAVE::GT_Sphere; geom_info->_vGeomData = OpenRAVE::Vector(0.03, 0., 0.); //radius, ignored, ignored geom_info->_vDiffuseColor = OpenRAVE::Vector(0.3, 0.7, 0.3); link_info->_vgeometryinfos.push_back(geom_info); link_infos.push_back(link_info); if(bodynumber > 1){ _point_tsr = false; OpenRAVE::RobotBase::ManipulatorInfoPtr manip_info = boost::make_shared<OpenRAVE::RobotBase::ManipulatorInfo>(); manip_info->_name = "dummy"; manip_info->_sBaseLinkName = (boost::format("%s0") % bodyprefix).str(); 
manip_info->_sEffectorLinkName = bodyname; manip_infos.push_back(manip_info); }else{ _point_tsr = true; RAVELOG_DEBUG("[TSRRobot] This is a point TSR, no robotized TSR needed."); _initialized = true; return _initialized; } if(_point_tsr && _tsrs.size() != 1){ RAVELOG_ERROR("[TSRRobot] Can't yet handle case where the TSRChain has no freedom but multiple TSRs, try making it a chain of length 1.\n"); _initialized = false; return _initialized; } // If we made it this far, then we can build the robot. _probot->Init(link_infos, joint_infos, manip_infos, sensor_infos); // Set the name properly std::string robotname = (boost::format("TSRChain%lu") % (unsigned long int) this).str(); _probot->SetName(robotname); // Add it to the environment _penv->Add(_probot, true); // Set the pose // TODO: mimic joint stuff _probot->SetTransform(toOR<double>(_tsrs[0]->getOriginTransform())); // Create an IK Solver _ik_solver = OpenRAVE::RaveCreateIkSolver(_penv, _solver); if(_ik_solver.get() == NULL){ RAVELOG_ERROR("[TSRRobot] Cannot create IK solver, make sure you have the GeneralIK plugin loadable by OpenRAVE\n"); _initialized = false; return _initialized; } // Grab the active manipulator on our newly created robot OpenRAVE::RobotBase::ManipulatorPtr pmanip = _probot->GetActiveManipulator(); _ik_solver->Init(pmanip); // Finally, disable the robot so we don't do collision checking against it _probot->Enable(false); _initialized = true; return _initialized; } Eigen::Affine3d TSRRobot::findNearestFeasibleTransform(const Eigen::Affine3d &Ttarget) { OpenRAVE::Transform or_target = toOR<double>(Ttarget); if(_solver.compare("GeneralIK") != 0){ RAVELOG_ERROR("[TSRRobot] Only GeneralIK solver supported."); throw OpenRAVE::openrave_exception( "Only GeneralIK solver supported.", OpenRAVE::ORE_Failed ); } // Setup the free parameters - the format and meaning of these is defined directly by // the IK solver - in our case GeneralIK std::vector<OpenRAVE::dReal> ikfreeparams; ikfreeparams.resize(12); ikfreeparams[0] = 1; // The number of targets - in this case always 1 ikfreeparams[1] = 0; // The manipulator associated the target - only one manipulator to always 0 // Pose of target ikfreeparams[2] = or_target.rot.x; ikfreeparams[3] = or_target.rot.y; ikfreeparams[4] = or_target.rot.z; ikfreeparams[5] = or_target.rot.w; ikfreeparams[6] = or_target.trans.x; ikfreeparams[7] = or_target.trans.y; ikfreeparams[8] = or_target.trans.z; ikfreeparams[9] = 0; // no balancing ikfreeparams[10] = 0; // junk parameters - mode in previous versions of GeneralIK ikfreeparams[11] = 0; // not translation only - aka do rotation std::vector<OpenRAVE::dReal> q0; boost::shared_ptr<std::vector<OpenRAVE::dReal> > solution = boost::make_shared<std::vector<OpenRAVE::dReal> >(); // solve ik _ik_solver->Solve(OpenRAVE::IkParameterization(), q0, ikfreeparams, OpenRAVE::IKFO_IgnoreSelfCollisions, solution); // Set the dof values to the solution and grab the end-effector transform in world coordinates _probot->SetDOFValues(*solution); Eigen::Affine3d ee_pose = toEigen(_probot->GetActiveManipulator()->GetEndEffectorTransform()); // Convert to proper frame Eigen::Affine3d closest = ee_pose * _tsrs.back()->getEndEffectorOffsetTransform(); return closest; }<|fim▁end|>
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
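The bounds-handling loop in TSRRobot::construct above skips dimensions with no freedom and mirrors reversed bounds before creating a joint. A minimal Python sketch of that normalization; the 6x2 bounds rows here are hypothetical, not taken from any real TSR:

# Sketch of the TSR bounds normalization used above: skip dimensions with
# zero freedom, mirror rows whose bounds are reversed (the "flip axis" case).
def normalize_bounds(bw):
    """bw is a list of six (lo, hi) pairs; returns (index, lo, hi, flipped)."""
    joints = []
    for j, (lo, hi) in enumerate(bw):
        if lo == 0.0 and hi == 0.0:
            continue  # no freedom in this dimension -> no joint created
        flipped = lo > hi
        if flipped:
            lo, hi = -lo, -hi  # mirror the interval so lo <= hi again
        joints.append((j, lo, hi, flipped))
    return joints

print(normalize_bounds([(0, 0), (-0.1, 0.1), (0.2, -0.2), (0, 0), (0, 0), (-3.14, 3.14)]))
# -> [(1, -0.1, 0.1, False), (2, -0.2, 0.2, True), (5, -3.14, 3.14, False)]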
<|file_name|>Constants.java<|end_file_name|><|fim▁begin|>package io.omengye.common.utils.constants; public class Constants { private Constants(){} public static final String RESULT_FLAG = "flag";<|fim▁hole|><|fim▁end|>
}
<|file_name|>deferred.js<|end_file_name|><|fim▁begin|>/* * aem-sling-contrib * https://github.com/dherges/aem-sling-contrib * * Copyright (c) 2016 David Herges * Licensed under the MIT license. */ define([ "./core", "./var/slice", "./callbacks" ], function( jQuery, slice ) { jQuery.extend({ Deferred: function( func ) { var tuples = [ // action, add listener, listener list, final state [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], [ "notify", "progress", jQuery.Callbacks("memory") ] ], state = "pending", promise = { state: function() { return state; }, always: function() { deferred.done( arguments ).fail( arguments ); return this; }, then: function( /* fnDone, fnFail, fnProgress */ ) { var fns = arguments; return jQuery.Deferred(function( newDefer ) { jQuery.each( tuples, function( i, tuple ) { var fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; // deferred[ done | fail | progress ] for forwarding actions to newDefer deferred[ tuple[1] ](function() { var returned = fn && fn.apply( this, arguments ); if ( returned && jQuery.isFunction( returned.promise ) ) { returned.promise() .done( newDefer.resolve ) .fail( newDefer.reject ) .progress( newDefer.notify ); } else { newDefer[ tuple[ 0 ] + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments ); } }); }); fns = null; }).promise(); }, // Get a promise for this deferred // If obj is provided, the promise aspect is added to the object promise: function( obj ) { return obj != null ? jQuery.extend( obj, promise ) : promise; } }, deferred = {}; // Keep pipe for back-compat promise.pipe = promise.then; // Add list-specific methods jQuery.each( tuples, function( i, tuple ) { var list = tuple[ 2 ], stateString = tuple[ 3 ]; // promise[ done | fail | progress ] = list.add promise[ tuple[1] ] = list.add; // Handle state if ( stateString ) { list.add(function() { // state = [ resolved | rejected ] state = stateString; // [ reject_list | resolve_list ].disable; progress_list.lock }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); } // deferred[ resolve | reject | notify ] deferred[ tuple[0] ] = function() { deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); return this; }; deferred[ tuple[0] + "With" ] = list.fireWith; }); // Make the deferred a promise promise.promise( deferred ); // Call given func if any if ( func ) { func.call( deferred, deferred ); } // All done! return deferred; }, // Deferred helper when: function( subordinate /* , ..., subordinateN */ ) { var i = 0, resolveValues = slice.call( arguments ), length = resolveValues.length, // the count of uncompleted subordinates remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, // the master Deferred. If resolveValues consist of only a single Deferred, just use that. deferred = remaining === 1 ? subordinate : jQuery.Deferred(), // Update function for both resolve and progress values updateFunc = function( i, contexts, values ) { return function( value ) { contexts[ i ] = this; values[ i ] = arguments.length > 1 ? 
slice.call( arguments ) : value; if ( values === progressValues ) { deferred.notifyWith( contexts, values ); } else if ( !( --remaining ) ) { deferred.resolveWith( contexts, values ); } }; }, progressValues, progressContexts, resolveContexts; // Add listeners to Deferred subordinates; treat others as resolved if ( length > 1 ) { progressValues = new Array( length ); progressContexts = new Array( length ); resolveContexts = new Array( length );<|fim▁hole|> .fail( deferred.reject ) .progress( updateFunc( i, progressContexts, progressValues ) ); } else { --remaining; } } } // If we're not waiting on anything, resolve the master if ( !remaining ) { deferred.resolveWith( resolveContexts, resolveValues ); } return deferred.promise(); } }); return jQuery; });<|fim▁end|>
for ( ; i < length; i++ ) { if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { resolveValues[ i ].promise() .done( updateFunc( i, resolveContexts, resolveValues ) )
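jQuery.when above resolves the master Deferred by counting down the number of uncompleted subordinates. A rough Python sketch of the same bookkeeping, assuming plain callbacks rather than real Deferred objects:

# Rough Python analogue of the jQuery.when counting idiom: the master
# callback fires once every subordinate has reported a result.
def when(subordinates, on_all_done):
    remaining = len(subordinates)
    results = [None] * remaining

    def make_update(i):
        def update(value):
            nonlocal remaining
            results[i] = value
            remaining -= 1
            if remaining == 0:
                on_all_done(results)
        return update

    if remaining == 0:
        on_all_done(results)  # nothing to wait on: resolve immediately
        return
    for i, sub in enumerate(subordinates):
        sub(make_update(i))  # each subordinate calls back with its value

# usage: each "subordinate" is just a function taking a done-callback
when([lambda done: done(1), lambda done: done(2)],
     lambda results: print("all done:", results))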
<|file_name|>visualization.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*- # Modified from https://github.com/tylerneylon/explacy import io from collections import defaultdict from pprint import pprint from phrasetree.tree import Tree def make_table(rows, insert_header=False): col_widths = [max(len(s) for s in col) for col in zip(*rows[1:])] rows[0] = [x[:l] for x, l in zip(rows[0], col_widths)] fmt = '\t'.join('%%-%ds' % width for width in col_widths) if insert_header: rows.insert(1, ['─' * width for width in col_widths]) return '\n'.join(fmt % tuple(row) for row in rows) def _start_end(arrow): start, end = arrow['from'], arrow['to'] mn = min(start, end) mx = max(start, end) return start, end, mn, mx def pretty_tree_horizontal(arrows, _do_print_debug_info=False): """Print the dependency tree horizontally Args: arrows: _do_print_debug_info: (Default value = False) Returns: """ # Set the base height; these may increase to allow room for arrowheads after this. arrows_with_deps = defaultdict(set) for i, arrow in enumerate(arrows): arrow['underset'] = set() if _do_print_debug_info: print('Arrow %d: "%s" -> "%s"' % (i, arrow['from'], arrow['to'])) num_deps = 0 start, end, mn, mx = _start_end(arrow) for j, other in enumerate(arrows): if arrow is other: continue o_start, o_end, o_mn, o_mx = _start_end(other) if ((start == o_start and mn <= o_end <= mx) or (start != o_start and mn <= o_start <= mx)): num_deps += 1 if _do_print_debug_info: print('%d is over %d' % (i, j)) arrow['underset'].add(j) arrow['num_deps_left'] = arrow['num_deps'] = num_deps arrows_with_deps[num_deps].add(i) if _do_print_debug_info: print('') print('arrows:') pprint(arrows) print('') print('arrows_with_deps:') pprint(arrows_with_deps) # Render the arrows in characters. Some heights will be raised to make room for arrowheads. sent_len = (max([max(arrow['from'], arrow['to']) for arrow in arrows]) if arrows else 0) + 1 lines = [[] for i in range(sent_len)] num_arrows_left = len(arrows) while num_arrows_left > 0: assert len(arrows_with_deps[0]) arrow_index = arrows_with_deps[0].pop() arrow = arrows[arrow_index] src, dst, mn, mx = _start_end(arrow) # Check the height needed. height = 3 if arrow['underset']: height = max(arrows[i]['height'] for i in arrow['underset']) + 1 height = max(height, 3, len(lines[dst]) + 3) arrow['height'] = height if _do_print_debug_info: print('') print('Rendering arrow %d: "%s" -> "%s"' % (arrow_index, arrow['from'], arrow['to'])) print(' height = %d' % height) goes_up = src > dst # Draw the outgoing src line. if lines[src] and len(lines[src]) < height: lines[src][-1].add('w') while len(lines[src]) < height - 1: lines[src].append(set(['e', 'w'])) if len(lines[src]) < height: lines[src].append({'e'}) lines[src][height - 1].add('n' if goes_up else 's') # Draw the incoming dst line. lines[dst].append(u'►') while len(lines[dst]) < height: lines[dst].append(set(['e', 'w'])) lines[dst][-1] = set(['e', 's']) if goes_up else set(['e', 'n']) # Draw the adjoining vertical line. for i in range(mn + 1, mx): while len(lines[i]) < height - 1: lines[i].append(' ') lines[i].append(set(['n', 's'])) # Update arrows_with_deps. 
for arr_i, arr in enumerate(arrows): if arrow_index in arr['underset']: arrows_with_deps[arr['num_deps_left']].remove(arr_i) arr['num_deps_left'] -= 1 arrows_with_deps[arr['num_deps_left']].add(arr_i) num_arrows_left -= 1 return render_arrows(lines) def render_arrows(lines): arr_chars = {'ew': u'─', 'ns': u'│', 'en': u'└', 'es': u'┌', 'enw': u'┴', 'ensw': u'┼', 'ens': u'├', 'esw': u'┬'} # Convert the character lists into strings. max_len = max(len(line) for line in lines) for i in range(len(lines)): lines[i] = [arr_chars[''.join(sorted(ch))] if type(ch) is set else ch for ch in lines[i]] lines[i] = ''.join(reversed(lines[i])) lines[i] = ' ' * (max_len - len(lines[i])) + lines[i] return lines def render_span(begin, end, unidirectional=False): if end - begin == 1: return ['───►'] elif end - begin == 2: return [ '──┐', '──┴►', ] if unidirectional else [ '◄─┐', '◄─┴►', ] rows = [] for i in range(begin, end): if i == (end - begin) // 2 + begin: rows.append(' ├►') elif i == begin: rows.append('──┐' if unidirectional else '◄─┐') elif i == end - 1: rows.append('──┘' if unidirectional else '◄─┘') else: rows.append(' │') return rows def tree_to_list(T): return [T.label(), [tree_to_list(t) if isinstance(t, Tree) else t for t in T]] def list_to_tree(L): if isinstance(L, str): return L return Tree(L[0], [list_to_tree(child) for child in L[1]]) def render_labeled_span(b, e, spans, labels, label, offset, unidirectional=False): spans.extend([''] * (b - offset)) spans.extend(render_span(b, e, unidirectional)) center = b + (e - b) // 2 labels.extend([''] * (center - offset)) labels.append(label) labels.extend([''] * (e - center - 1)) def main(): # arrows = [{'from': 1, 'to': 0}, {'from': 2, 'to': 1}, {'from': 2, 'to': 4}, {'from': 2, 'to': 5}, # {'from': 4, 'to': 3}] # lines = pretty_tree_horizontal(arrows) # print('\n'.join(lines)) # print('\n'.join([ # '◄─┐', # ' │', # ' ├►', # ' │', # '◄─┘', # ])) print('\n'.join(render_span(7, 12))) if __name__ == '__main__': main() left_rule = {'<': ':', '^': ':', '>': '-'}<|fim▁hole|> def evalute_field(record, field_spec): """Evalute a field of a record using the type of the field_spec as a guide. Args: record: field_spec: Returns: """ if type(field_spec) is int: return str(record[field_spec]) elif type(field_spec) is str: return str(getattr(record, field_spec)) else: return str(field_spec(record)) def markdown_table(headings, records, fields=None, alignment=None, file=None): """Generate a Doxygen-flavor Markdown table from records. See https://stackoverflow.com/questions/13394140/generate-markdown-tables file -- Any object with a 'write' method that takes a single string parameter. records -- Iterable. Rows will be generated from this. fields -- List of fields for each row. Each entry may be an integer, string or a function. If the entry is an integer, it is assumed to be an index of each record. If the entry is a string, it is assumed to be a field of each record. If the entry is a function, it is called with the record and its return value is taken as the value of the field. headings -- List of column headings. alignment - List of pairs alignment characters. The first of the pair specifies the alignment of the header, (Doxygen won't respect this, but it might look good, the second specifies the alignment of the cells in the column. 
Possible alignment characters are: '<' = Left align '>' = Right align (default for cells) '^' = Center (default for column headings) Args: headings: records: fields: (Default value = None) alignment: (Default value = None) file: (Default value = None) Returns: """ if not file: file = io.StringIO() num_columns = len(headings) if not fields: fields = list(range(num_columns)) assert len(headings) == num_columns # Compute the table cell data columns = [[] for i in range(num_columns)] for record in records: for i, field in enumerate(fields): columns[i].append(evalute_field(record, field)) # Fill out any missing alignment characters. extended_align = alignment if alignment is not None else [('^', '<')] if len(extended_align) > num_columns: extended_align = extended_align[0:num_columns] elif len(extended_align) < num_columns: extended_align += [('^', '>') for i in range(num_columns - len(extended_align))] heading_align, cell_align = [x for x in zip(*extended_align)] field_widths = [len(max(column, key=len)) if len(column) > 0 else 0 for column in columns] heading_widths = [max(len(head), 2) for head in headings] column_widths = [max(x) for x in zip(field_widths, heading_widths)] _ = ' | '.join(['{:' + a + str(w) + '}' for a, w in zip(heading_align, column_widths)]) heading_template = '| ' + _ + ' |' _ = ' | '.join(['{:' + a + str(w) + '}' for a, w in zip(cell_align, column_widths)]) row_template = '| ' + _ + ' |' _ = ' | '.join([left_rule[a] + '-' * (w - 2) + right_rule[a] for a, w in zip(cell_align, column_widths)]) ruling = '| ' + _ + ' |' file.write(heading_template.format(*headings).rstrip() + '\n') file.write(ruling.rstrip() + '\n') for row in zip(*columns): file.write(row_template.format(*row).rstrip() + '\n') if isinstance(file, io.StringIO): text = file.getvalue() file.close() return text<|fim▁end|>
right_rule = {'<': '-', '^': ':', '>': ':'}
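A short usage sketch for the markdown_table helper above, assuming the function is importable from this module; the rendered shape follows from the width and alignment logic in the code:

# Assumed usage of markdown_table; column widths and the alignment ruling
# are computed from the data as in the function above.
records = [("alice", 3), ("bob", 12)]
text = markdown_table(headings=["name", "count"], records=records, fields=[0, 1])
print(text)
# | name  | count |
# | :---- | ----: |
# | alice |     3 |
# | bob   |    12 |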
<|file_name|>insertVsColonZero.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python from time import time REPS = 17500 def insert(): m = [None] i = 0 now = time() while i < REPS: m.insert(0, i) i += 1 print 'Elapsed (insert):', time() - now def colonZero(): m = [None]<|fim▁hole|> i = 0 now = time() while i < REPS: m[:0] = [i] i += 1 print 'Elapsed (colon-0):', time() - now def main(): insert() colonZero() if __name__ == '__main__': main() raw_input()<|fim▁end|>
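The benchmark above is Python 2; an equivalent Python 3 measurement with timeit (a sketch, numbers vary by machine) would be:

# Python 3 re-statement of the same micro-benchmark using timeit.
# As in the original, the list grows across iterations of one timing run.
import timeit

REPS = 17500
t_insert = timeit.timeit("m.insert(0, 1)", setup="m = [None]", number=REPS)
t_slice = timeit.timeit("m[:0] = [1]", setup="m = [None]", number=REPS)
print("insert(0, x):", t_insert)
print("m[:0] = [x]:", t_slice)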
<|file_name|>images.py<|end_file_name|><|fim▁begin|>from rewpapi.common.http import Request from rewpapi.listings.listing import ListingResidential class RemoteListingImages(Request): def __init__(self, base_site, auth, listing_type, listing_uuid): super(RemoteListingImages, self).__init__(auth) self._base_site = base_site self._auth = auth self._listing_type = listing_type self._listing_uuid = listing_uuid self._endpoint = base_site + "/api/listings/%s/%s/images/" % ( listing_type, listing_uuid) def get_all(self): """ Returns a list of Listing images """ remote_listing_images = self.execute() listing_images = [] if remote_listing_images: for a in remote_listing_images: new_listing_images = ListingImages(self._base_site, self._auth, self._listing_type, self._listing_uuid) new_listing_images.FIELDS = [] for k, v in a.items(): setattr(new_listing_images, k, v) new_listing_images.FIELDS.append(k) listing_images.append(new_listing_images) return listing_images return None def get(self, uuid): """ Returns a single ListingImage instance, matching uuid. Raises a DoesNotExist exception if the object does not exist. """ b = ListingResidential() b.branch_name = "Foo" return b class ListingImages(RemoteListingImages): """ A ListingImages object represents a Listing's images. Once instantiated, you can: - Change its values and send an update() - Create it if it doesn't exist """ def set_fields(self, images): self.images = images def update(self): """ Update this listing's images. """ self._endpoint = self._base_site + "/api/listings/%s/%s/images/" % (<|fim▁hole|> self._listing_type, self._listing_uuid) images = [] for image in self.images: image_dict = {} image_dict['image'] = image.image image_dict['caption'] = image.caption image_dict['sha1'] = image.sha1 images.append(image_dict) self.execute("PUT", images)<|fim▁end|>
<|file_name|>linkintegrity.py<|end_file_name|><|fim▁begin|>from castle.cms.interfaces import IReferenceNamedImage from plone.app.uuid.utils import uuidToObject from persistent.mapping import PersistentMapping from persistent.dict import PersistentDict from lxml.html import fromstring from lxml.html import tostring from plone import api from plone.app.blocks.layoutbehavior import ILayoutAware from plone.app.linkintegrity import handlers as li from plone.app.linkintegrity.parser import extractLinks from plone.tiles.data import ANNOTATIONS_KEY_PREFIX from z3c.relationfield import RelationValue from zope.annotation.interfaces import IAnnotations from zope.component import getUtility from zope.intid.interfaces import IIntIds from zope.keyreference.interfaces import NotYet def scan(obj): """ a dexterity based object was modified """ if not li.check_linkintegrity_dependencies(obj): return refs = get_content_links(obj)<|fim▁hole|> def get_ref(obj, intids=None): if intids is None: intids = getUtility(IIntIds) try: objid = intids.getId(obj) except KeyError: try: intids.register(obj) objid = intids.getId(obj) except NotYet: # if we get a NotYet error, the object is not # attached yet and we will need to get links # at a later time when the object has an intid pass return objid def get_content_links(obj): refs = set() if ILayoutAware.providedBy(obj): behavior_data = ILayoutAware(obj) # get data from tile data annotations = IAnnotations(obj) for key in annotations.keys(): if key.startswith(ANNOTATIONS_KEY_PREFIX): data = annotations[key] refs |= get_tile_data_links(obj, data) if not behavior_data.contentLayout and behavior_data.content: dom = fromstring(behavior_data.content) for el in dom.cssselect('.mosaic-text-tile .mosaic-tile-content'): links = extractLinks(tostring(el)) refs |= li.getObjectsFromLinks(obj, links) try: # scan more than just this we probably should... value = obj.text.raw links = extractLinks(value) refs |= li.getObjectsFromLinks(obj, links) except AttributeError: pass if getattr(obj, 'image', None): if IReferenceNamedImage.providedBy(obj.image): sub_obj = uuidToObject(obj.image.reference) if sub_obj: objid = get_ref(obj) if objid: refs.add(RelationValue(objid)) return refs def get_tile_data_links(obj, data): refs = set() if type(data) in (dict, PersistentMapping, PersistentDict): for field_name in ('content', 'video', 'image', 'images', 'audio'): if field_name not in data: continue val = data.get(field_name) if isinstance(val, basestring): links = extractLinks(val) refs |= li.getObjectsFromLinks(obj, links) elif isinstance(val, list): # could be list of uids refs |= get_refs_from_uids(val) return refs def get_refs_from_uids(uids): intids = getUtility(IIntIds) objects = set() catalog = api.portal.get_tool('portal_catalog') for brain in catalog(UID=uids): obj = brain.getObject() objid = get_ref(obj, intids) if objid: relation = RelationValue(objid) objects.add(relation) return objects<|fim▁end|>
li.updateReferences(obj, refs)
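The tile scanning above ultimately depends on pulling link targets out of HTML fragments via plone.app.linkintegrity's extractLinks. A stripped-down, framework-free approximation of that step (not the real parser):

# Approximation of the href/src extraction step; the real extractLinks
# lives in plone.app.linkintegrity.parser.
from lxml.html import fromstring

def extract_links(html_fragment):
    dom = fromstring(html_fragment)
    links = [el.get("href") for el in dom.cssselect("a[href]")]
    links += [el.get("src") for el in dom.cssselect("img[src]")]
    return links

print(extract_links('<p><a href="resolveuid/abc">x</a> <img src="pic.png"/></p>'))
# -> ['resolveuid/abc', 'pic.png']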
<|file_name|>id_test.go<|end_file_name|><|fim▁begin|>// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package id_test import ( "encoding/json" "fmt" "testing" "github.com/google/gapid/core/assert" "github.com/google/gapid/core/data/id" ) var ( sampleID = id.ID{ 0x00, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x00, 0x00, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x00, } sampleIDString = "000123456789abcdef00" + "000123456789abcdef00" quotedSampleID = `"` + sampleIDString + `"` ) func TestIDToString(t *testing.T) { assert := assert.To(t) str := sampleID.String() assert.For("str").That(str).Equals(sampleIDString) } func TestIDFormat(t *testing.T) { assert := assert.To(t) str := fmt.Sprint(sampleID)<|fim▁hole|> assert := assert.To(t) id, err := id.Parse(sampleIDString) assert.For("err").ThatError(err).Succeeded() assert.For("id").That(id).Equals(sampleID) } func TestParseTooLongID(t *testing.T) { assert := assert.To(t) _, err := id.Parse(sampleIDString + "00") assert.For("err").ThatError(err).Failed() } func TestParseTruncatedID(t *testing.T) { assert := assert.To(t) _, err := id.Parse(sampleIDString[:len(sampleIDString)-2]) assert.For("err").ThatError(err).Failed() } func TestParseInvalidID(t *testing.T) { assert := assert.To(t) _, err := id.Parse("abcdefghijklmnopqrs") assert.For("err").ThatError(err).Failed() } func TestValid(t *testing.T) { assert := assert.To(t) assert.For("ID{}").That(id.ID{}.IsValid()).Equals(false) assert.For("sampleID").That(sampleID.IsValid()).Equals(true) } func TestOfBytes(t *testing.T) { assert := assert.To(t) id := id.OfBytes([]byte{0x00, 0x01, 0x02, 0x03}) assert.For("id").ThatString(id).Equals("a02a05b025b928c039cf1ae7e8ee04e7c190c0db") } func TestOfString(t *testing.T) { assert := assert.To(t) id := id.OfString("Test\n") assert.For("id").ThatString(id).Equals("1c68ea370b40c06fcaf7f26c8b1dba9d9caf5dea") } func TestMarshalJSON(t *testing.T) { assert := assert.To(t) data, err := json.Marshal(sampleID) assert.For("err").ThatError(err).Succeeded() assert.For("data").ThatString(data).Equals(quotedSampleID) } func TestUnarshalJSON(t *testing.T) { assert := assert.To(t) id := id.ID{} err := json.Unmarshal([]byte(quotedSampleID), &id) assert.For("err").ThatError(err).Succeeded() assert.For("id").That(id).Equals(sampleID) } func TestInvalidUnarshalJSON(t *testing.T) { assert := assert.To(t) id := id.ID{} err := json.Unmarshal([]byte("0"), &id) assert.For("err").ThatError(err).Failed() } func TestUnique(t *testing.T) { assert := assert.To(t) id1 := id.Unique() id2 := id.Unique() assert.For("id1").That(id1.IsValid()).Equals(true) assert.For("id2").That(id2.IsValid()).Equals(true) assert.For("not-eq").That(id1).DeepNotEquals(id2) }<|fim▁end|>
assert.For("id").That(str).Equals(sampleIDString) } func TestParseID(t *testing.T) {
<|file_name|>test_pool_upgrade_no_loop_reinstall.py<|end_file_name|><|fim▁begin|>from copy import deepcopy import pytest from sovrin_node.test import waits from stp_core.loop.eventually import eventually from plenum.common.constants import VERSION from sovrin_common.constants import REINSTALL from sovrin_node.test.upgrade.helper import bumpedVersion, checkUpgradeScheduled, \ ensureUpgradeSent, check_no_loop from sovrin_node.server.upgrade_log import UpgradeLog<|fim▁hole|>def test_upgrade_does_not_get_into_loop_if_reinstall( looper, tconf, nodeSet, validUpgrade, trustee, trusteeWallet, monkeypatch): new_version = bumpedVersion() upgr1 = deepcopy(validUpgrade) upgr1[VERSION] = new_version upgr1[REINSTALL] = True # An upgrade scheduled, it should pass ensureUpgradeSent(looper, trustee, trusteeWallet, upgr1) looper.run( eventually( checkUpgradeScheduled, nodeSet, upgr1[VERSION], retryWait=1, timeout=waits.expectedUpgradeScheduled())) # here we make nodes think they have upgraded successfully monkeypatch.setattr(sovrin_node.__metadata__, '__version__', new_version) check_no_loop(nodeSet, UpgradeLog.UPGRADE_SUCCEEDED)<|fim▁end|>
import sovrin_node
<|file_name|>cb_balance_grabber.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
import requests, json
from os.path import expanduser<|fim▁hole|>
accounts = client.get_accounts()
print accounts['data'][0]['balance']<|fim▁end|>
from coinbase.wallet.client import Client home = expanduser('~') client = Client('YOUR_API_KEY', 'YOUR_API_SECRET')
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>from django.conf import settings BACKEND_CLASS = getattr( settings, "COURRIERS_BACKEND_CLASS", "courriers.backends.simple.SimpleBackend" ) MAILCHIMP_API_KEY = getattr(settings, "COURRIERS_MAILCHIMP_API_KEY", "") MAILJET_API_KEY = getattr(settings, "COURRIERS_MAILJET_API_KEY", "") MAILJET_CONTACTSLIST_LIMIT = getattr( settings, "COURRIERS_MAILJET_CONTACTSLIST_LIMIT", 1000 ) MAILJET_CONTACTFILTER_LIMIT = getattr( settings, "COURRIERS_MAILJET_CONTACTFILTER_LIMIT", 1000 ) MAILJET_API_SECRET_KEY = getattr(settings, "COURRIERS_MAILJET_API_SECRET_KEY", "") DEFAULT_FROM_EMAIL = getattr( settings, "COURRIERS_DEFAULT_FROM_EMAIL", settings.DEFAULT_FROM_EMAIL ) DEFAULT_FROM_NAME = getattr(settings, "COURRIERS_DEFAULT_FROM_NAME", "") ALLOWED_LANGUAGES = getattr(settings, "COURRIERS_ALLOWED_LANGUAGES", settings.LANGUAGES) PRE_PROCESSORS = getattr(settings, "COURRIERS_PRE_PROCESSORS", ())<|fim▁hole|> FAIL_SILENTLY = getattr(settings, "COURRIERS_FAIL_SILENTLY", False) NEWSLETTERLIST_MODEL = getattr( settings, "COURRIERS_NEWSLETTERLIST_MODEL", "courriers.models.newsletterlist.NewsletterList", ) NEWSLETTER_MODEL = getattr( settings, "COURRIERS_NEWSLETTER_MODEL", "courriers.models.newsletter.Newsletter" ) NEWSLETTERITEM_MODEL = getattr( settings, "COURRIERS_NEWSLETTERITEM_MODEL", "courriers.models.newsletteritem.NewsletterItem", ) NEWSLETTERSEGMENT_MODEL = getattr( settings, "COURRIERS_NEWSLETTERSEGMENT_MODEL", "courriers.models.newslettersegment.NewsletterSegment", )<|fim▁end|>
PAGINATE_BY = getattr(settings, "COURRIERS_PAGINATE_BY", 9)
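Every line in the entry above uses the same getattr-with-default override idiom; a self-contained sketch of the pattern with a stand-in settings object:

# Minimal sketch of the settings-override idiom used throughout the entry
# above: every option falls back to a default unless the project defines
# a COURRIERS_* name in its Django settings module.
class FakeSettings:  # stand-in for django.conf.settings
    COURRIERS_PAGINATE_BY = 20

settings = FakeSettings()

PAGINATE_BY = getattr(settings, "COURRIERS_PAGINATE_BY", 9)          # -> 20
FAIL_SILENTLY = getattr(settings, "COURRIERS_FAIL_SILENTLY", False)  # -> False
print(PAGINATE_BY, FAIL_SILENTLY)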
<|file_name|>eggwriter.py<|end_file_name|><|fim▁begin|>""" Writes Python egg files. Supports what's needed for saving and loading components/simulations. """ import copy import os.path import re import sys import zipfile import pkg_resources from openmdao.util import eggobserver __all__ = ('egg_filename', 'write') # Legal egg strings. _EGG_NAME_RE = re.compile('[a-zA-Z][_a-zA-Z0-9]*') _EGG_VERSION_RE = \ re.compile('([a-zA-Z0-9][_a-zA-Z0-9]*)+(\.[_a-zA-Z0-9][_a-zA-Z0-9]*)*')<|fim▁hole|>def egg_filename(name, version): """ Returns name for egg file as generated by :mod:`setuptools`. name: string Must be alphanumeric. version: string Must be alphanumeric. """ assert name and isinstance(name, basestring) match = _EGG_NAME_RE.search(name) if match is None or match.group() != name: raise ValueError('Egg name must be alphanumeric') assert version and isinstance(version, basestring) match = _EGG_VERSION_RE.search(version) if match is None or match.group() != version: raise ValueError('Egg version must be alphanumeric') name = pkg_resources.to_filename(pkg_resources.safe_name(name)) version = pkg_resources.to_filename(pkg_resources.safe_version(version)) return '%s-%s-py%s.egg' % (name, version, sys.version[:3]) def write(name, version, doc, entry_map, src_files, distributions, modules, dst_dir, logger, observer=None, compress=True): """ Write egg in the manner of :mod:`setuptools`, with some differences: - Writes directly to the zip file, avoiding some intermediate copies. - Doesn't compile any Python modules. name: string Must be an alphanumeric string. version: string Must be an alphanumeric string. doc: string Used for the `Summary` and `Description` entries in the egg's metadata. entry_map: dict A :mod:`pkg_resources` :class:`EntryPoint` map: a dictionary mapping group names to dictionaries mapping entry point names to :class:`EntryPoint` objects. src_files: list List of non-Python files to include. distributions: list List of Distributions this egg depends on. It is used for the `Requires` entry in the egg's metadata. modules: list List of module names not found in a distribution that this egg depends on. It is used for the `Requires` entry in the egg's metadata and is also recorded in the 'openmdao_orphans.txt' resource. dst_dir: string The directory to write the egg to. logger: Logger Used for recording progress, etc. observer: callable Will be called via an :class:`EggObserver` intermediary. Returns the egg's filename. """ observer = eggobserver.EggObserver(observer, logger) egg_name = egg_filename(name, version) egg_path = os.path.join(dst_dir, egg_name) distributions = sorted(distributions, key=lambda dist: dist.project_name) modules = sorted(modules) sources = [] files = [] size = 0 # Approximate (uncompressed) size. Used to set allowZip64 flag. # Collect src_files. for path in src_files: path = os.path.join(name, path) files.append(path) size += os.path.getsize(path) # Collect Python modules. 
for dirpath, dirnames, filenames in os.walk('.', followlinks=True): dirs = copy.copy(dirnames) for path in dirs: if not os.path.exists(os.path.join(dirpath, path, '__init__.py')): dirnames.remove(path) for path in filenames: if path.endswith('.py'): path = os.path.join(dirpath[2:], path) # Skip leading './' files.append(path) size += os.path.getsize(path) sources.append(path) # Package info -> EGG-INFO/PKG-INFO pkg_info = [] pkg_info.append('Metadata-Version: 1.1') pkg_info.append('Name: %s' % pkg_resources.safe_name(name)) pkg_info.append('Version: %s' % pkg_resources.safe_version(version)) pkg_info.append('Summary: %s' % doc.strip().split('\n')[0]) pkg_info.append('Description: %s' % doc.strip()) pkg_info.append('Author-email: UNKNOWN') pkg_info.append('License: UNKNOWN') pkg_info.append('Platform: UNKNOWN') for dist in distributions: pkg_info.append('Requires: %s (%s)' % (dist.project_name, dist.version)) for module in modules: pkg_info.append('Requires: %s' % module) pkg_info = '\n'.join(pkg_info) + '\n' sources.append(name + '.egg-info/PKG-INFO') size += len(pkg_info) # Dependency links -> EGG-INFO/dependency_links.txt dependency_links = '\n' sources.append(name + '.egg-info/dependency_links.txt') size += len(dependency_links) # Entry points -> EGG-INFO/entry_points.txt entry_points = [] for entry_group in sorted(entry_map.keys()): entry_points.append('[%s]' % entry_group) for entry_name in sorted(entry_map[entry_group].keys()): entry_points.append('%s' % entry_map[entry_group][entry_name]) entry_points.append('') entry_points = '\n'.join(entry_points) + '\n' sources.append(name + '.egg-info/entry_points.txt') size += len(entry_points) # Unsafe -> EGG-INFO/not-zip-safe not_zip_safe = '\n' sources.append(name + '.egg-info/not-zip-safe') size += len(not_zip_safe) # Requirements -> EGG-INFO/requires.txt requirements = [str(dist.as_requirement()) for dist in distributions] requirements = '\n'.join(requirements) + '\n' sources.append(name + '.egg-info/requires.txt') size += len(requirements) # Modules not part of a distribution -> EGG-INFO/openmdao_orphans.txt orphans = '\n'.join(modules) + '\n' sources.append(name + '.egg-info/openmdao_orphans.txt') size += len(orphans) # Top-level names -> EGG-INFO/top_level.txt top_level = '%s\n' % name sources.append(name + '.egg-info/top_level.txt') size += len(top_level) # Manifest -> EGG-INFO/SOURCES.txt sources.append(name + '.egg-info/SOURCES.txt') sources = '\n'.join(sorted(sources)) + '\n' size += len(sources) # Open zipfile. logger.debug('Creating %s', egg_path) zip64 = size > zipfile.ZIP64_LIMIT compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED egg = zipfile.ZipFile(egg_path, 'w', compression, zip64) stats = {'completed_files': 0., 'total_files': float(8 + len(files)), 'completed_bytes': 0., 'total_bytes': float(size)} # Write egg info. _write_info(egg, 'PKG-INFO', pkg_info, observer, stats) _write_info(egg, 'dependency_links.txt', dependency_links, observer, stats) _write_info(egg, 'entry_points.txt', entry_points, observer, stats) _write_info(egg, 'not-zip-safe', not_zip_safe, observer, stats) _write_info(egg, 'requires.txt', requirements, observer, stats) _write_info(egg, 'openmdao_orphans.txt', orphans, observer, stats) _write_info(egg, 'top_level.txt', top_level, observer, stats) _write_info(egg, 'SOURCES.txt', sources, observer, stats) # Write collected files. 
for path in sorted(files): _write_file(egg, path, observer, stats) observer.complete(egg_name) egg.close() if os.path.getsize(egg_path) > zipfile.ZIP64_LIMIT: logger.warning('Egg zipfile requires Zip64 support to unzip.') return egg_name def _write_info(egg, name, info, observer, stats): """ Write info string to egg. """ path = os.path.join('EGG-INFO', name) observer.add(path, stats['completed_files'] / stats['total_files'], stats['completed_bytes'] / stats['total_bytes']) egg.writestr(path, info) stats['completed_files'] += 1 stats['completed_bytes'] += len(info) def _write_file(egg, path, observer, stats): """ Write file to egg. """ observer.add(path, stats['completed_files'] / stats['total_files'], stats['completed_bytes'] / stats['total_bytes']) egg.write(path) stats['completed_files'] += 1 stats['completed_bytes'] += os.path.getsize(path)<|fim▁end|>
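A quick usage sketch for egg_filename from the eggwriter entry above; the import path is assumed, and the exact result depends on the running interpreter version:

# Hypothetical usage; egg_filename validates name/version and formats them
# the way setuptools would, appending the interpreter's version tag.
import sys
from eggwriter import egg_filename  # assumed import path

print(egg_filename("MyComp", "1.2.3"))
# -> "MyComp-1.2.3-py%s.egg" % sys.version[:3], e.g. "MyComp-1.2.3-py2.7.egg"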
<|file_name|>reducer.js<|end_file_name|><|fim▁begin|>import {makeInstanceAction} from '#/main/app/store/actions' import {makeReducer} from '#/main/app/store/reducer' import {makeFormReducer} from '#/main/app/content/form/store/reducer' import {RESOURCE_LOAD} from '#/main/core/resource/store/actions' import {selectors as rssSelectors} from '#/plugin/rss/resources/rss-feed/store/selectors' import {selectors} from '#/plugin/rss/resources/rss-feed/editor/store/selectors' <|fim▁hole|> rssFeedForm: makeFormReducer(selectors.FORM_NAME, {}, { originalData: makeReducer({}, { [makeInstanceAction(RESOURCE_LOAD, rssSelectors.STORE_NAME)]: (state, action) => action.resourceData.slideshow || state }), data: makeReducer({}, { [makeInstanceAction(RESOURCE_LOAD, rssSelectors.STORE_NAME)]: (state, action) => action.resourceData.slideshow || state }) }) } export { reducer }<|fim▁end|>
const reducer = {
<|file_name|>io_aggfile.py<|end_file_name|><|fim▁begin|>""" Module which groups all the aggregated precomputed information in order to save computational power. """ import pandas as pd<|fim▁hole|> def read_agg(filepath): "Read file of aggregated info." table = pd.read_csv(filepath, sep=';') table = cp2str(table) return table def read_aggregation(filepath, typevars): ## TODO aggtable = read_agg(filepath) aggfeatures = aggtable[typevars['feat_vars']] agglocs = aggtable[typevars['loc_vars']] return agglocs, aggfeatures<|fim▁end|>
from FirmsLocations.Preprocess.preprocess_cols import cp2str
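read_aggregation above simply slices one aggregated table into location and feature frames; a self-contained pandas sketch of that split, with a hypothetical typevars layout:

# Self-contained sketch of the split performed by read_aggregation above;
# the typevars dict layout ('feat_vars' / 'loc_vars') is assumed.
import pandas as pd

table = pd.DataFrame({"x": [0.1, 0.2], "y": [1.0, 2.0], "f1": [3, 4]})
typevars = {"loc_vars": ["x", "y"], "feat_vars": ["f1"]}

agglocs = table[typevars["loc_vars"]]
aggfeatures = table[typevars["feat_vars"]]
print(agglocs.shape, aggfeatures.shape)  # (2, 2) (2, 1)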
<|file_name|>key_tests.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2012-2013 The Bitcoin Core developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "key.h" #include "base58.h" #include "script/script.h" #include "uint256.h" #include "util.h" #include "utilstrencodings.h" #include <string> #include <vector> #include <boost/test/unit_test.hpp> using namespace std; <<<<<<< HEAD static const string strSecret1 ("6uu5bsZLA2Lm6yCxgwxDxHyZmhYeqBMLQT83Fyq738YhYucQPQf"); static const string strSecret2 ("6vZDRwYgTNidWzmKs9x8QzQGeWCqbdUtNRpEKZMaP67ZSn8XMjb"); static const string strSecret1C ("T6UsJv9hYpvDfM5noKYkB3vfeHxhyegkeWJ4y7qKeQJuyXMK11XX"); static const string strSecret2C ("T9PBs5kq9QrkBPxeGNWKitMi4XuFVr25jaXTnuopLVZxCUAJbixA"); static const CBitcoinAddress addr1 ("LWaFezDtucfCA4xcVEfs3R3xfgGWjSwcZr"); static const CBitcoinAddress addr2 ("LXwHM6mRd432EzLJYwuKQMPhTzrgr7ur9K"); static const CBitcoinAddress addr1C("LZWK8h7C166niP6GmpUmiGrvn4oxPqQgFV"); static const CBitcoinAddress addr2C("Lgb6tdqmdW3n5E12johSuEAqRMt4kAr7yu"); static const string strAddressBad("LRjyUS2uuieEPkhZNdQz8hE5YycxVEqSXA"); ======= static const string strSecret1 ("6uGFQ4DSW7zh1viHZi6iiVT17CncvoaV4MHvGvJKPDaLCdymj87"); static const string strSecret2 ("6vVo7sPkeLTwVdAntrv4Gbnsyr75H8ChD3P5iyHziwaqe8mCYR5"); static const string strSecret1C ("T3gJYmBuZXsdd65E7NQF88ZmUP2MaUanqnZg9GFS94W7kND4Ebjq"); static const string strSecret2C ("T986ZKRRdnuuXLeDZuKBRrZW1ujotAncU9WTrFU1n7vMgRW75ZtF"); static const CBitcoinAddress addr1 ("LiUo6Zn39joYJBzPUhssbDwAywhjFcoHE3"); static const CBitcoinAddress addr2 ("LZJvLSP5SGKcFS13MHgdrVhpFUbEMB5XVC"); static const CBitcoinAddress addr1C("Lh2G82Bi33RNuzz4UfSMZbh54jnWHVnmw8"); static const CBitcoinAddress addr2C("LWegHWHB5rmaF5rgWYt1YN3StapRdnGJfU"); static const string strAddressBad("Lbi6bpMhSwp2CXkivEeUK9wzyQEFzHDfSr"); >>>>>>> d1691e599121d643db2c1f2b5f5529eb64f2a771 #ifdef KEY_TESTS_DUMPINFO void dumpKeyInfo(uint256 privkey) { CKey key; key.resize(32); memcpy(&secret[0], &privkey, 32); vector<unsigned char> sec; sec.resize(32); memcpy(&sec[0], &secret[0], 32); printf(" * secret (hex): %s\n", HexStr(sec).c_str()); for (int nCompressed=0; nCompressed<2; nCompressed++) { bool fCompressed = nCompressed == 1; printf(" * %s:\n", fCompressed ? "compressed" : "uncompressed"); CBitcoinSecret bsecret; bsecret.SetSecret(secret, fCompressed); printf(" * secret (base58): %s\n", bsecret.ToString().c_str()); CKey key; key.SetSecret(secret, fCompressed); vector<unsigned char> vchPubKey = key.GetPubKey(); printf(" * pubkey (hex): %s\n", HexStr(vchPubKey).c_str()); printf(" * address (base58): %s\n", CBitcoinAddress(vchPubKey).ToString().c_str()); } } #endif BOOST_AUTO_TEST_SUITE(key_tests) BOOST_AUTO_TEST_CASE(key_test1) { CBitcoinSecret bsecret1, bsecret2, bsecret1C, bsecret2C, baddress1; BOOST_CHECK( bsecret1.SetString (strSecret1)); BOOST_CHECK( bsecret2.SetString (strSecret2)); BOOST_CHECK( bsecret1C.SetString(strSecret1C)); BOOST_CHECK( bsecret2C.SetString(strSecret2C)); BOOST_CHECK(!baddress1.SetString(strAddressBad)); CKey key1 = bsecret1.GetKey(); BOOST_CHECK(key1.IsCompressed() == false);<|fim▁hole|> CKey key1C = bsecret1C.GetKey(); BOOST_CHECK(key1C.IsCompressed() == true); CKey key2C = bsecret2C.GetKey(); <<<<<<< HEAD BOOST_CHECK(key1C.IsCompressed() == true); ======= BOOST_CHECK(key2C.IsCompressed() == true); >>>>>>> d1691e599121d643db2c1f2b5f5529eb64f2a771 CPubKey pubkey1 = key1. 
GetPubKey(); CPubKey pubkey2 = key2. GetPubKey(); CPubKey pubkey1C = key1C.GetPubKey(); CPubKey pubkey2C = key2C.GetPubKey(); <<<<<<< HEAD ======= BOOST_CHECK(key1.VerifyPubKey(pubkey1)); BOOST_CHECK(!key1.VerifyPubKey(pubkey1C)); BOOST_CHECK(!key1.VerifyPubKey(pubkey2)); BOOST_CHECK(!key1.VerifyPubKey(pubkey2C)); BOOST_CHECK(!key1C.VerifyPubKey(pubkey1)); BOOST_CHECK(key1C.VerifyPubKey(pubkey1C)); BOOST_CHECK(!key1C.VerifyPubKey(pubkey2)); BOOST_CHECK(!key1C.VerifyPubKey(pubkey2C)); BOOST_CHECK(!key2.VerifyPubKey(pubkey1)); BOOST_CHECK(!key2.VerifyPubKey(pubkey1C)); BOOST_CHECK(key2.VerifyPubKey(pubkey2)); BOOST_CHECK(!key2.VerifyPubKey(pubkey2C)); BOOST_CHECK(!key2C.VerifyPubKey(pubkey1)); BOOST_CHECK(!key2C.VerifyPubKey(pubkey1C)); BOOST_CHECK(!key2C.VerifyPubKey(pubkey2)); BOOST_CHECK(key2C.VerifyPubKey(pubkey2C)); >>>>>>> d1691e599121d643db2c1f2b5f5529eb64f2a771 BOOST_CHECK(addr1.Get() == CTxDestination(pubkey1.GetID())); BOOST_CHECK(addr2.Get() == CTxDestination(pubkey2.GetID())); BOOST_CHECK(addr1C.Get() == CTxDestination(pubkey1C.GetID())); BOOST_CHECK(addr2C.Get() == CTxDestination(pubkey2C.GetID())); for (int n=0; n<16; n++) { string strMsg = strprintf("Very secret message %i: 11", n); uint256 hashMsg = Hash(strMsg.begin(), strMsg.end()); // normal signatures vector<unsigned char> sign1, sign2, sign1C, sign2C; BOOST_CHECK(key1.Sign (hashMsg, sign1)); BOOST_CHECK(key2.Sign (hashMsg, sign2)); BOOST_CHECK(key1C.Sign(hashMsg, sign1C)); BOOST_CHECK(key2C.Sign(hashMsg, sign2C)); BOOST_CHECK( pubkey1.Verify(hashMsg, sign1)); BOOST_CHECK(!pubkey1.Verify(hashMsg, sign2)); BOOST_CHECK( pubkey1.Verify(hashMsg, sign1C)); BOOST_CHECK(!pubkey1.Verify(hashMsg, sign2C)); BOOST_CHECK(!pubkey2.Verify(hashMsg, sign1)); BOOST_CHECK( pubkey2.Verify(hashMsg, sign2)); BOOST_CHECK(!pubkey2.Verify(hashMsg, sign1C)); BOOST_CHECK( pubkey2.Verify(hashMsg, sign2C)); BOOST_CHECK( pubkey1C.Verify(hashMsg, sign1)); BOOST_CHECK(!pubkey1C.Verify(hashMsg, sign2)); BOOST_CHECK( pubkey1C.Verify(hashMsg, sign1C)); BOOST_CHECK(!pubkey1C.Verify(hashMsg, sign2C)); BOOST_CHECK(!pubkey2C.Verify(hashMsg, sign1)); BOOST_CHECK( pubkey2C.Verify(hashMsg, sign2)); BOOST_CHECK(!pubkey2C.Verify(hashMsg, sign1C)); BOOST_CHECK( pubkey2C.Verify(hashMsg, sign2C)); // compact signatures (with key recovery) vector<unsigned char> csign1, csign2, csign1C, csign2C; BOOST_CHECK(key1.SignCompact (hashMsg, csign1)); BOOST_CHECK(key2.SignCompact (hashMsg, csign2)); BOOST_CHECK(key1C.SignCompact(hashMsg, csign1C)); BOOST_CHECK(key2C.SignCompact(hashMsg, csign2C)); CPubKey rkey1, rkey2, rkey1C, rkey2C; <<<<<<< HEAD BOOST_CHECK(rkey1.RecoverCompact (hashMsg, csign1)); BOOST_CHECK(rkey2.RecoverCompact (hashMsg, csign2)); BOOST_CHECK(rkey1C.RecoverCompact(hashMsg, csign1C)); BOOST_CHECK(rkey2C.RecoverCompact(hashMsg, csign2C)); ======= BOOST_CHECK(rkey1.RecoverCompact (hashMsg, csign1)); BOOST_CHECK(rkey2.RecoverCompact (hashMsg, csign2)); BOOST_CHECK(rkey1C.RecoverCompact(hashMsg, csign1C)); BOOST_CHECK(rkey2C.RecoverCompact(hashMsg, csign2C)); >>>>>>> d1691e599121d643db2c1f2b5f5529eb64f2a771 BOOST_CHECK(rkey1 == pubkey1); BOOST_CHECK(rkey2 == pubkey2); BOOST_CHECK(rkey1C == pubkey1C); BOOST_CHECK(rkey2C == pubkey2C); } // test deterministic signing std::vector<unsigned char> detsig, detsigc; string strMsg = "Very deterministic message"; uint256 hashMsg = Hash(strMsg.begin(), strMsg.end()); BOOST_CHECK(key1.Sign(hashMsg, detsig)); BOOST_CHECK(key1C.Sign(hashMsg, detsigc)); BOOST_CHECK(detsig == detsigc); BOOST_CHECK(detsig == 
ParseHex("304402205dbbddda71772d95ce91cd2d14b592cfbc1dd0aabd6a394b6c2d377bbe59d31d022014ddda21494a4e221f0824f0b8b924c43fa43c0ad57dccdaa11f81a6bd4582f6")); BOOST_CHECK(key2.Sign(hashMsg, detsig)); BOOST_CHECK(key2C.Sign(hashMsg, detsigc)); BOOST_CHECK(detsig == detsigc); BOOST_CHECK(detsig == ParseHex("3044022052d8a32079c11e79db95af63bb9600c5b04f21a9ca33dc129c2bfa8ac9dc1cd5022061d8ae5e0f6c1a16bde3719c64c2fd70e404b6428ab9a69566962e8771b5944d")); BOOST_CHECK(key1.SignCompact(hashMsg, detsig)); BOOST_CHECK(key1C.SignCompact(hashMsg, detsigc)); BOOST_CHECK(detsig == ParseHex("1c5dbbddda71772d95ce91cd2d14b592cfbc1dd0aabd6a394b6c2d377bbe59d31d14ddda21494a4e221f0824f0b8b924c43fa43c0ad57dccdaa11f81a6bd4582f6")); BOOST_CHECK(detsigc == ParseHex("205dbbddda71772d95ce91cd2d14b592cfbc1dd0aabd6a394b6c2d377bbe59d31d14ddda21494a4e221f0824f0b8b924c43fa43c0ad57dccdaa11f81a6bd4582f6")); BOOST_CHECK(key2.SignCompact(hashMsg, detsig)); BOOST_CHECK(key2C.SignCompact(hashMsg, detsigc)); BOOST_CHECK(detsig == ParseHex("1c52d8a32079c11e79db95af63bb9600c5b04f21a9ca33dc129c2bfa8ac9dc1cd561d8ae5e0f6c1a16bde3719c64c2fd70e404b6428ab9a69566962e8771b5944d")); BOOST_CHECK(detsigc == ParseHex("2052d8a32079c11e79db95af63bb9600c5b04f21a9ca33dc129c2bfa8ac9dc1cd561d8ae5e0f6c1a16bde3719c64c2fd70e404b6428ab9a69566962e8771b5944d")); } BOOST_AUTO_TEST_SUITE_END()<|fim▁end|>
CKey key2 = bsecret2.GetKey(); BOOST_CHECK(key2.IsCompressed() == false);
<|file_name|>coord_bounds.py<|end_file_name|><|fim▁begin|>'''
	Work of Cameron Palk
'''

import sys
import pandas as pd

def main( argv ):
	try:
		csv_filepath    = argv[ 0 ]
		output_filepath = argv[ 1 ]
	except IndexError:
		print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
<|fim▁hole|>
	training_data = pd.read_csv( csv_filepath )

	training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
	training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
	training_data = training_data.dropna()

	print( training_data[ 'clean_Latitude' ] )

	for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
		print( "{:16} min: {:16} max: {:16}".format( axis, min( training_data[ axis ] ), max( training_data[ axis ] ) ) )

	#

if __name__=='__main__':
	main( sys.argv[ 1: ] )<|fim▁end|>
return
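A self-contained pandas sketch of the latitude/longitude bounding filter in the entry above; note that dropna() returns a new DataFrame, so its result must be kept:

# Self-contained sketch of the bounding-box filter; data is made up.
import pandas as pd

df = pd.DataFrame({"Latitude": [47.6, 10.0], "Longitude": [-122.3, 0.0]})
clean = df[(df.Latitude > 47) & (df.Longitude < -122)].dropna()
print(clean.Latitude.min(), clean.Latitude.max())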
<|file_name|>diskmetricstore.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "encoding/gob" "errors" "fmt" "io/ioutil" "os" "path" "sort" "strings" "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" //nolint:staticcheck // Ignore SA1019. Dependencies use the deprecated package, so we have to, too. "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) const ( pushMetricName = "push_time_seconds" pushMetricHelp = "Last Unix time when changing this group in the Pushgateway succeeded." pushFailedMetricName = "push_failure_time_seconds" pushFailedMetricHelp = "Last Unix time when changing this group in the Pushgateway failed." writeQueueCapacity = 1000 ) var errTimestamp = errors.New("pushed metrics must not have timestamps") // DiskMetricStore is an implementation of MetricStore that persists metrics to // disk. type DiskMetricStore struct { lock sync.RWMutex // Protects metricFamilies. writeQueue chan WriteRequest drain chan struct{} done chan error metricGroups GroupingKeyToMetricGroup persistenceFile string predefinedHelp map[string]string logger log.Logger } type mfStat struct { pos int // Where in the result slice is the MetricFamily? copied bool // Has the MetricFamily already been copied? } // NewDiskMetricStore returns a DiskMetricStore ready to use. To cleanly shut it // down and free resources, the Shutdown() method has to be called. // // If persistenceFile is the empty string, no persisting to disk will // happen. Otherwise, a file of that name is used for persisting metrics to // disk. If the file already exists, metrics are read from it as part of the // start-up. Persisting is happening upon shutdown and after every write action, // but the latter will only happen persistenceDuration after the previous // persisting. // // If a non-nil Gatherer is provided, the help strings of metrics gathered by it // will be used as standard. Pushed metrics with deviating help strings will be // adjusted to avoid inconsistent expositions. func NewDiskMetricStore( persistenceFile string, persistenceInterval time.Duration, gatherPredefinedHelpFrom prometheus.Gatherer, logger log.Logger, ) *DiskMetricStore { // TODO: Do that outside of the constructor to allow the HTTP server to // serve /-/healthy and /-/ready earlier. 
dms := &DiskMetricStore{ writeQueue: make(chan WriteRequest, writeQueueCapacity), drain: make(chan struct{}), done: make(chan error), metricGroups: GroupingKeyToMetricGroup{}, persistenceFile: persistenceFile, logger: logger, } if err := dms.restore(); err != nil { level.Error(logger).Log("msg", "could not load persisted metrics", "err", err) } if helpStrings, err := extractPredefinedHelpStrings(gatherPredefinedHelpFrom); err == nil { dms.predefinedHelp = helpStrings } else { level.Error(logger).Log("msg", "could not gather metrics for predefined help strings", "err", err) } go dms.loop(persistenceInterval) return dms } // SubmitWriteRequest implements the MetricStore interface. func (dms *DiskMetricStore) SubmitWriteRequest(req WriteRequest) { dms.writeQueue <- req } // Shutdown implements the MetricStore interface. func (dms *DiskMetricStore) Shutdown() error { close(dms.drain) return <-dms.done } // Healthy implements the MetricStore interface. func (dms *DiskMetricStore) Healthy() error { // By taking the lock we check that there is no deadlock. dms.lock.Lock() defer dms.lock.Unlock() // A pushgateway that cannot be written to should not be // considered as healthy. if len(dms.writeQueue) == cap(dms.writeQueue) { return fmt.Errorf("write queue is full") } return nil } // Ready implements the MetricStore interface. func (dms *DiskMetricStore) Ready() error { return dms.Healthy() } // GetMetricFamilies implements the MetricStore interface. func (dms *DiskMetricStore) GetMetricFamilies() []*dto.MetricFamily { dms.lock.RLock() defer dms.lock.RUnlock() result := []*dto.MetricFamily{} mfStatByName := map[string]mfStat{} for _, group := range dms.metricGroups { for name, tmf := range group.Metrics { mf := tmf.GetMetricFamily() if mf == nil { level.Warn(dms.logger).Log("msg", "storage corruption detected, consider wiping the persistence file") continue } stat, exists := mfStatByName[name] if exists { existingMF := result[stat.pos] if !stat.copied { mfStatByName[name] = mfStat{ pos: stat.pos, copied: true, } existingMF = copyMetricFamily(existingMF) result[stat.pos] = existingMF } if mf.GetHelp() != existingMF.GetHelp() { level.Info(dms.logger).Log("msg", "metric families inconsistent help strings", "err", "Metric families have inconsistent help strings. The latter will have priority. This is bad. Fix your pushed metrics!", "new", mf, "old", existingMF) } // Type inconsistency cannot be fixed here. We will detect it during // gathering anyway, so no reason to log anything here. existingMF.Metric = append(existingMF.Metric, mf.Metric...) } else { copied := false if help, ok := dms.predefinedHelp[name]; ok && mf.GetHelp() != help { level.Info(dms.logger).Log("msg", "metric families overlap", "err", "Metric family has the same name as a metric family used by the Pushgateway itself but it has a different help string. Changing it to the standard help string. This is bad. Fix your pushed metrics!", "metric_family", mf, "standard_help", help) mf = copyMetricFamily(mf) copied = true mf.Help = proto.String(help) } mfStatByName[name] = mfStat{ pos: len(result), copied: copied, } result = append(result, mf) } } } return result } // GetMetricFamiliesMap implements the MetricStore interface. 
func (dms *DiskMetricStore) GetMetricFamiliesMap() GroupingKeyToMetricGroup { dms.lock.RLock() defer dms.lock.RUnlock() groupsCopy := make(GroupingKeyToMetricGroup, len(dms.metricGroups)) for k, g := range dms.metricGroups { metricsCopy := make(NameToTimestampedMetricFamilyMap, len(g.Metrics)) groupsCopy[k] = MetricGroup{Labels: g.Labels, Metrics: metricsCopy} for n, tmf := range g.Metrics { metricsCopy[n] = tmf<|fim▁hole|> } return groupsCopy } func (dms *DiskMetricStore) loop(persistenceInterval time.Duration) { lastPersist := time.Now() persistScheduled := false lastWrite := time.Time{} persistDone := make(chan time.Time) var persistTimer *time.Timer checkPersist := func() { if dms.persistenceFile != "" && !persistScheduled && lastWrite.After(lastPersist) { persistTimer = time.AfterFunc( persistenceInterval-lastWrite.Sub(lastPersist), func() { persistStarted := time.Now() if err := dms.persist(); err != nil { level.Error(dms.logger).Log("msg", "error persisting metrics", "err", err) } else { level.Info(dms.logger).Log("msg", "metrics persisted", "file", dms.persistenceFile) } persistDone <- persistStarted }, ) persistScheduled = true } } for { select { case wr := <-dms.writeQueue: lastWrite = time.Now() if dms.checkWriteRequest(wr) { dms.processWriteRequest(wr) } else { dms.setPushFailedTimestamp(wr) } if wr.Done != nil { close(wr.Done) } checkPersist() case lastPersist = <-persistDone: persistScheduled = false checkPersist() // In case something has been written in the meantime. case <-dms.drain: // Prevent a scheduled persist from firing later. if persistTimer != nil { persistTimer.Stop() } // Now draining... for { select { case wr := <-dms.writeQueue: if dms.checkWriteRequest(wr) { dms.processWriteRequest(wr) } else { dms.setPushFailedTimestamp(wr) } default: dms.done <- dms.persist() return } } } } } func (dms *DiskMetricStore) processWriteRequest(wr WriteRequest) { dms.lock.Lock() defer dms.lock.Unlock() key := groupingKeyFor(wr.Labels) if wr.MetricFamilies == nil { // No MetricFamilies means delete request. Delete the whole // metric group, and we are done here. delete(dms.metricGroups, key) return } // Otherwise, it's an update. group, ok := dms.metricGroups[key] if !ok { group = MetricGroup{ Labels: wr.Labels, Metrics: NameToTimestampedMetricFamilyMap{}, } dms.metricGroups[key] = group } else if wr.Replace { // For replace, we have to delete all metric families in the // group except pre-existing push timestamps. for name := range group.Metrics { if name != pushMetricName && name != pushFailedMetricName { delete(group.Metrics, name) } } } wr.MetricFamilies[pushMetricName] = newPushTimestampGauge(wr.Labels, wr.Timestamp) // Only add a zero push-failed metric if none is there yet, so that a // previously added fail timestamp is retained. 
if _, ok := group.Metrics[pushFailedMetricName]; !ok { wr.MetricFamilies[pushFailedMetricName] = newPushFailedTimestampGauge(wr.Labels, time.Time{}) } for name, mf := range wr.MetricFamilies { group.Metrics[name] = TimestampedMetricFamily{ Timestamp: wr.Timestamp, GobbableMetricFamily: (*GobbableMetricFamily)(mf), } } } func (dms *DiskMetricStore) setPushFailedTimestamp(wr WriteRequest) { dms.lock.Lock() defer dms.lock.Unlock() key := groupingKeyFor(wr.Labels) group, ok := dms.metricGroups[key] if !ok { group = MetricGroup{ Labels: wr.Labels, Metrics: NameToTimestampedMetricFamilyMap{}, } dms.metricGroups[key] = group } group.Metrics[pushFailedMetricName] = TimestampedMetricFamily{ Timestamp: wr.Timestamp, GobbableMetricFamily: (*GobbableMetricFamily)(newPushFailedTimestampGauge(wr.Labels, wr.Timestamp)), } // Only add a zero push metric if none is there yet, so that a // previously added push timestamp is retained. if _, ok := group.Metrics[pushMetricName]; !ok { group.Metrics[pushMetricName] = TimestampedMetricFamily{ Timestamp: wr.Timestamp, GobbableMetricFamily: (*GobbableMetricFamily)(newPushTimestampGauge(wr.Labels, time.Time{})), } } } // checkWriteRequest return if applying the provided WriteRequest will result in // a consistent state of metrics. The dms is not modified by the check. However, // the WriteRequest _will_ be sanitized: the MetricFamilies are ensured to // contain the grouping Labels after the check. If false is returned, the // causing error is written to the Done channel of the WriteRequest. // // Special case: If the WriteRequest has no Done channel set, the (expensive) // consistency check is skipped. The WriteRequest is still sanitized, and the // presence of timestamps still results in returning false. func (dms *DiskMetricStore) checkWriteRequest(wr WriteRequest) bool { if wr.MetricFamilies == nil { // Delete request cannot create inconsistencies, and nothing has // to be sanitized. return true } var err error defer func() { if err != nil && wr.Done != nil { wr.Done <- err } }() if timestampsPresent(wr.MetricFamilies) { err = errTimestamp return false } for _, mf := range wr.MetricFamilies { sanitizeLabels(mf, wr.Labels) } // Without Done channel, don't do the expensive consistency check. if wr.Done == nil { return true } // Construct a test dms, acting on a copy of the metrics, to test the // WriteRequest with. tdms := &DiskMetricStore{ metricGroups: dms.GetMetricFamiliesMap(), predefinedHelp: dms.predefinedHelp, logger: log.NewNopLogger(), } tdms.processWriteRequest(wr) // Construct a test Gatherer to check if consistent gathering is possible. tg := prometheus.Gatherers{ prometheus.DefaultGatherer, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return tdms.GetMetricFamilies(), nil }), } if _, err = tg.Gather(); err != nil { return false } return true } func (dms *DiskMetricStore) persist() error { // Check (again) if persistence is configured because some code paths // will call this method even if it is not. 
if dms.persistenceFile == "" { return nil } f, err := ioutil.TempFile( path.Dir(dms.persistenceFile), path.Base(dms.persistenceFile)+".in_progress.", ) if err != nil { return err } inProgressFileName := f.Name() e := gob.NewEncoder(f) dms.lock.RLock() err = e.Encode(dms.metricGroups) dms.lock.RUnlock() if err != nil { f.Close() os.Remove(inProgressFileName) return err } if err := f.Close(); err != nil { os.Remove(inProgressFileName) return err } return os.Rename(inProgressFileName, dms.persistenceFile) } func (dms *DiskMetricStore) restore() error { if dms.persistenceFile == "" { return nil } f, err := os.Open(dms.persistenceFile) if os.IsNotExist(err) { return nil } if err != nil { return err } defer f.Close() d := gob.NewDecoder(f) if err := d.Decode(&dms.metricGroups); err != nil { return err } return nil } func copyMetricFamily(mf *dto.MetricFamily) *dto.MetricFamily { return &dto.MetricFamily{ Name: mf.Name, Help: mf.Help, Type: mf.Type, Metric: append([]*dto.Metric{}, mf.Metric...), } } // groupingKeyFor creates a grouping key from the provided map of grouping // labels. The grouping key is created by joining all label names and values // together with model.SeparatorByte as a separator. The label names are sorted // lexicographically before joining. In that way, the grouping key is both // reproducible and unique. func groupingKeyFor(labels map[string]string) string { if len(labels) == 0 { // Super fast path. return "" } labelNames := make([]string, 0, len(labels)) for labelName := range labels { labelNames = append(labelNames, labelName) } sort.Strings(labelNames) sb := strings.Builder{} for i, labelName := range labelNames { sb.WriteString(labelName) sb.WriteByte(model.SeparatorByte) sb.WriteString(labels[labelName]) if i+1 < len(labels) { // No separator at the end. sb.WriteByte(model.SeparatorByte) } } return sb.String() } // extractPredefinedHelpStrings extracts all the HELP strings from the provided // gatherer so that the DiskMetricStore can fix deviations in pushed metrics. func extractPredefinedHelpStrings(g prometheus.Gatherer) (map[string]string, error) { if g == nil { return nil, nil } mfs, err := g.Gather() if err != nil { return nil, err } result := map[string]string{} for _, mf := range mfs { result[mf.GetName()] = mf.GetHelp() } return result, nil } func newPushTimestampGauge(groupingLabels map[string]string, t time.Time) *dto.MetricFamily { return newTimestampGauge(pushMetricName, pushMetricHelp, groupingLabels, t) } func newPushFailedTimestampGauge(groupingLabels map[string]string, t time.Time) *dto.MetricFamily { return newTimestampGauge(pushFailedMetricName, pushFailedMetricHelp, groupingLabels, t) } func newTimestampGauge(name, help string, groupingLabels map[string]string, t time.Time) *dto.MetricFamily { var ts float64 if !t.IsZero() { ts = float64(t.UnixNano()) / 1e9 } mf := &dto.MetricFamily{ Name: proto.String(name), Help: proto.String(help), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Gauge: &dto.Gauge{ Value: proto.Float64(ts), }, }, }, } sanitizeLabels(mf, groupingLabels) return mf } // sanitizeLabels ensures that all the labels in groupingLabels and the // `instance` label are present in the MetricFamily. The label values from // groupingLabels are set in each Metric, no matter what. After that, if the // 'instance' label is not present at all in a Metric, it will be created (with // an empty string as value). // // Finally, sanitizeLabels sorts the label pairs of all metrics. 
func sanitizeLabels(mf *dto.MetricFamily, groupingLabels map[string]string) { gLabelsNotYetDone := make(map[string]string, len(groupingLabels)) metric: for _, m := range mf.GetMetric() { for ln, lv := range groupingLabels { gLabelsNotYetDone[ln] = lv } hasInstanceLabel := false for _, lp := range m.GetLabel() { ln := lp.GetName() if lv, ok := gLabelsNotYetDone[ln]; ok { lp.Value = proto.String(lv) delete(gLabelsNotYetDone, ln) } if ln == string(model.InstanceLabel) { hasInstanceLabel = true } if len(gLabelsNotYetDone) == 0 && hasInstanceLabel { sort.Sort(labelPairs(m.Label)) continue metric } } for ln, lv := range gLabelsNotYetDone { m.Label = append(m.Label, &dto.LabelPair{ Name: proto.String(ln), Value: proto.String(lv), }) if ln == string(model.InstanceLabel) { hasInstanceLabel = true } delete(gLabelsNotYetDone, ln) // To prepare map for next metric. } if !hasInstanceLabel { m.Label = append(m.Label, &dto.LabelPair{ Name: proto.String(string(model.InstanceLabel)), Value: proto.String(""), }) } sort.Sort(labelPairs(m.Label)) } } // Checks if any timestamps have been specified. func timestampsPresent(metricFamilies map[string]*dto.MetricFamily) bool { for _, mf := range metricFamilies { for _, m := range mf.GetMetric() { if m.TimestampMs != nil { return true } } } return false } // labelPairs implements sort.Interface. It provides a sortable version of a // slice of dto.LabelPair pointers. type labelPairs []*dto.LabelPair func (s labelPairs) Len() int { return len(s) } func (s labelPairs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s labelPairs) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() }<|fim▁end|>
}
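A short aside on the grouping-key scheme implemented by groupingKeyFor in the row above: label names are sorted, then names and values are joined with Prometheus's model.SeparatorByte (0xff), which makes the key both reproducible and unique. The helper below is a hypothetical Python re-implementation for illustration only; it is not part of any dataset row.

SEPARATOR = "\xff"  # assumption: stands in for Prometheus's model.SeparatorByte

def grouping_key_for(labels):
    # Sorting the label names makes the key reproducible; the 0xff byte can
    # never appear in a valid UTF-8 label name or value, which keeps the key
    # unambiguous.
    if not labels:
        return ""  # "super fast path", mirroring the Go original
    return SEPARATOR.join(
        name + SEPARATOR + labels[name] for name in sorted(labels))

# grouping_key_for({"job": "batch", "instance": "a"})
# -> "instance\xffa\xffjob\xffbatch"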
<|file_name|>enforcer_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for google.cloud.security.enforcer.enforcer.""" import copy import json import httplib2 import mock import testing_constants as constants from tests.unittest_utils import ForsetiTestCase from google.protobuf import text_format from tests.unittest_utils import get_datafile_path from google.cloud.security.enforcer import enforcer_log_pb2 from google.cloud.security.enforcer import enforcer # Used anywhere a real timestamp could be generated to ensure consistent # comparisons in tests MOCK_TIMESTAMP = 1234567890 class EnforcerTest(ForsetiTestCase): """Extended unit tests for BatchFirewallEnforcer class.""" def setUp(self): """Set up.""" self.mock_compute = mock.patch.object(enforcer.batch_enforcer.compute, 'ComputeClient').start() self.gce_service = self.mock_compute().service self.gce_service.networks().list().execute.return_value = ( constants.SAMPLE_TEST_NETWORK_SELFLINK) self.project = constants.TEST_PROJECT self.mock_time = mock.patch.object(enforcer.batch_enforcer.datelib, 'Timestamp').start() self.mock_time.now().AsMicroTimestamp.return_value = MOCK_TIMESTAMP self.mock_time.now().AsSecondsSinceEpoch.return_value = MOCK_TIMESTAMP self.enforcer = enforcer.initialize_batch_enforcer( {}, concurrent_threads=1, max_write_threads=1, max_running_operations=0, dry_run=True) self.expected_summary = ( enforcer_log_pb2.BatchResult( batch_id=MOCK_TIMESTAMP, timestamp_start_msec=MOCK_TIMESTAMP, timestamp_end_msec=MOCK_TIMESTAMP)) self.addCleanup(mock.patch.stopall) def test_enforce_single_project(self): """Verifies enforce_single_project returns the correct results. Setup: * Set API calls to return the different firewall rules from the new policy on the first call, and the expected new firewall rules on the second call. * Load a mock policy file. * Create a temporary directory for writing the dremel recordio table out to. * Send the policy and project to EnforceSingleProject. <|fim▁hole|> Expected Results: * The results proto returned matches the expected results. 
""" self.gce_service.firewalls().list().execute.side_effect = [ constants.DEFAULT_FIREWALL_API_RESPONSE, constants.EXPECTED_FIREWALL_API_RESPONSE] policy_filename = get_datafile_path(__file__, 'sample_policy.json') results = enforcer.enforce_single_project(self.enforcer, self.project, policy_filename) self.expected_summary.projects_total = 1 self.expected_summary.projects_success = 1 self.expected_summary.projects_changed = 1 self.expected_summary.projects_unchanged = 0 self.assertEqual(self.expected_summary, results.summary) expected_results = enforcer_log_pb2.ProjectResult() text_format.Merge(constants.SAMPLE_ENFORCER_PROJECTRESULTS_ASCIIPB, expected_results) expected_results.run_context = enforcer_log_pb2.ENFORCER_ONE_PROJECT expected_results.gce_firewall_enforcement.policy_path = policy_filename project_result = results.results[0] self.assertEqual(expected_results, project_result) def test_enforcer_raises_exception_with_invalid_json_policy(self): """Verifies json parsed correct as a list of dictionaries. Setup: * Load an invalid json file (no list). * Give it to enforcer to parse and load Expected Results: * Enforcer should raise InvalidParsedPolicyFileError """ policy_filename = get_datafile_path(__file__, 'invalid_sample_policy.json') with self.assertRaises(enforcer.InvalidParsedPolicyFileError) as r: enforcer.enforce_single_project( self.enforcer, self.project, policy_filename) if __name__ == '__main__': unittest.main()<|fim▁end|>
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package logging import ( "fmt" "os" "time" "github.com/op/go-logging" ) const ( size = 1024 ) var ( Log *CTopLogger exited bool level = logging.INFO // default level format = logging.MustStringFormatter( `%{color}%{time:15:04:05.000} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`, ) ) type statusMsg struct { Text string IsError bool } type CTopLogger struct { *logging.Logger backend *logging.MemoryBackend logFile *os.File sLog []statusMsg } func (c *CTopLogger) FlushStatus() chan statusMsg { ch := make(chan statusMsg) go func() { for _, sm := range c.sLog { ch <- sm } close(ch) c.sLog = []statusMsg{} }() return ch } func (c *CTopLogger) StatusQueued() bool { return len(c.sLog) > 0 } func (c *CTopLogger) Status(s string) { c.addStatus(statusMsg{s, false}) } func (c *CTopLogger) StatusErr(err error) { c.addStatus(statusMsg{err.Error(), true}) } func (c *CTopLogger) addStatus(sm statusMsg) { c.sLog = append(c.sLog, sm) } func (c *CTopLogger) Statusf(s string, a ...interface{}) { c.Status(fmt.Sprintf(s, a...)) } func Init() *CTopLogger { if Log == nil { logging.SetFormatter(format) // setup default formatter Log = &CTopLogger{ logging.MustGetLogger("ctop"), logging.NewMemoryBackend(size), nil, []statusMsg{}, } debugMode := debugMode() if debugMode { level = logging.DEBUG } backendLvl := logging.AddModuleLevel(Log.backend) backendLvl.SetLevel(level, "") logFilePath := debugModeFile() if logFilePath == "" { logging.SetBackend(backendLvl) } else { logFile, err := os.OpenFile(logFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) if err != nil {<|fim▁hole|> Log.Error("Unable to create log file: %s", err.Error()) } else { backendFile := logging.NewLogBackend(logFile, "", 0) backendFileLvl := logging.AddModuleLevel(backendFile) backendFileLvl.SetLevel(level, "") logging.SetBackend(backendLvl, backendFileLvl) Log.logFile = logFile } } if debugMode { StartServer() } Log.Notice("logger initialized") } return Log } func (log *CTopLogger) tail() chan string { stream := make(chan string) node := log.backend.Head() go func() { for { stream <- node.Record.Formatted(0) for { nnode := node.Next() if nnode != nil { node = nnode break } if exited { close(stream) return } time.Sleep(1 * time.Second) } } }() return stream } func (log *CTopLogger) Exit() { exited = true if log.logFile != nil { _ = log.logFile.Close() } StopServer() } func debugMode() bool { return os.Getenv("CTOP_DEBUG") == "1" } func debugModeTCP() bool { return os.Getenv("CTOP_DEBUG_TCP") == "1" } func debugModeFile() string { return os.Getenv("CTOP_DEBUG_FILE") }<|fim▁end|>
logging.SetBackend(backendLvl)
<|file_name|>package.py<|end_file_name|><|fim▁begin|>############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Wt(CMakePackage): """Wt, C++ Web Toolkit. Wt is a C++ library for developing web applications.""" homepage = "http://www.webtoolkit.eu/wt" url = "https://github.com/emweb/wt/archive/3.3.7.tar.gz" version('3.3.7', '09858901f2dcf5c3d36a9237daba3e3f') version('master', branch='master', git='https://github.com/emweb/wt.git') # wt builds in parallel, but requires more than 5 GByte RAM per -j <njob><|fim▁hole|> variant('openssl', default=True, description='SSL and WebSockets support in the built-in httpd, ' 'the HTTP(S) client, and additional cryptographic ' 'hashes in the authentication module') variant('libharu', default=True, description='painting to PDF') # variant('graphicsmagick', default=True, # description='painting to PNG, GIF') variant('sqlite', default=False, description='create SQLite3 DBO') variant('mariadb', default=False, description='create MariaDB/MySQL DBO') variant('postgresql', default=False, description='create PostgreSQL DBO') # variant('firebird', default=False, description='create Firebird DBO') variant('pango', default=True, description='improved font support in PDF and raster image ' 'painting') variant('zlib', default=True, description='compression in the built-in httpd') # variant('fastcgi', default=False, # description='FastCGI connector via libfcgi++') depends_on('[email protected]:') depends_on('openssl', when='+openssl') depends_on('libharu', when='+libharu') depends_on('sqlite', when='+sqlite') depends_on('mariadb', when='+mariadb') depends_on('postgresql', when='+postgresql') depends_on('pango', when='+pango') depends_on('zlib', when='+zlib') def cmake_args(self): spec = self.spec cmake_args = [ '-DBUILD_EXAMPLES:BOOL=OFF', '-DCONNECTOR_FCGI:BOOL=OFF', '-DENABLE_OPENGL:BOOL=OFF', '-DENABLE_QT4:BOOL=OFF' ] cmake_args.extend([ '-DENABLE_SSL:BOOL={0}'.format(( 'ON' if '+openssl' in spec else 'OFF')), '-DENABLE_HARU:BOOL={0}'.format(( 'ON' if '+libharu' in spec else 'OFF')), '-DENABLE_PANGO:BOOL={0}'.format(( 'ON' if '+pango' in spec else 'OFF')), '-DENABLE_SQLITE:BOOL={0}'.format(( 'ON' if '+sqlite' in spec else 'OFF')), '-DENABLE_MYSQL:BOOL={0}'.format(( 'ON' if '+mariadb' in spec else 'OFF')), '-DENABLE_POSTGRES:BOOL={0}'.format(( 'ON' if '+postgres' in spec else 'OFF')) ]) 
return cmake_args<|fim▁end|>
# which most machines do not provide and crash the build parallel = False
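For context on the completion above: in Spack, setting the class attribute parallel = False makes the build run serially (no -j flag) for that one package. A minimal hypothetical recipe showing the same pattern; the package name, URL, and checksum are made up:

from spack import *

class MemoryHungry(CMakePackage):
    """Hypothetical package whose parallel build exhausts RAM."""

    homepage = "https://example.org/memory-hungry"
    url      = "https://example.org/memory-hungry-1.0.tar.gz"

    version('1.0', '00000000000000000000000000000000')

    # Serial build only: each compile job needs several GByte of RAM,
    # the same situation the Wt recipe above guards against.
    parallel = False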
<|file_name|>Clip.ts<|end_file_name|><|fim▁begin|>import * as uuid from 'uuid' import { AvailableRenderer } from '../Engine/Renderer' import { Branded } from '../helper/Branded' import { safeAssign } from '../helper/safeAssign' import { Animatable } from './Animatable' import { Effect } from './Effect' interface ClipProps { id?: string renderer: string placedFrame: number<|fim▁hole|> durationFrames: number } type ClipId = Branded<string, 'Entity/Clip/Id'> class Clip extends Animatable implements ClipProps { public id: Clip.Id public renderer: AvailableRenderer public placedFrame: number public durationFrames: number public effects: ReadonlyArray<Effect> = [] constructor(props: ClipProps) { super() this.id = uuid.v4() as Clip.Id safeAssign<Clip>(this, props as ClipProps & { id: Clip.Id }) this.normalize() } public patch(props: Partial<ClipProps>) { safeAssign(this, props) } public findEffect(effectId: string): Effect | null { return this.effects.find(effect => effect.id === effectId) || null } public addEffect(effect: Effect, index: number | null = null): void { if (index == null) { this.effects = [...this.effects, effect] return } const clone = [...this.effects] clone.splice(index, 0, effect) this.effects = clone } public removeEffect(effectId: string): boolean { const beforeLength = this.effects.length this.effects = this.effects.filter(effect => effect.id !== effectId) return this.effects.length !== beforeLength } public moveEffectIndex(effectId: string, newIndex: number): boolean { const index = this.effects.findIndex(effect => effect.id === effectId) if (index === -1) return false const clone = [...this.effects] clone.splice(newIndex, 0, ...clone.splice(index, 1)) this.effects = clone return true } private normalize() { this.placedFrame = Math.round(this.placedFrame) this.durationFrames = Math.round(this.durationFrames) } } namespace Clip { export type Id = ClipId } export { Clip }<|fim▁end|>
<|file_name|>tables.py<|end_file_name|><|fim▁begin|># import json # import pandas as pd import numpy as np import os from core.lda_engine import model_files from pandas import DataFrame from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from core.keyword_db import keyword_dbs def db_connect(base, model_name='dss'): try: path = 'sqlite:///' + os.path.join(os.getcwd(), base, keyword_dbs[model_name] + '.sqlite') except KeyError: path = 'sqlite:///' + os.path.join(os.getcwd(), base, model_files[model_name].split(".")[0] + '.sqlite') print("Connecting to: ", path) return create_engine(path) def toDataFrame(sql, session): tmpt = session.execute(sql) return DataFrame(tmpt.fetchall(), columns=tmpt.keys()) def get_database(model_name, return_keyword=False): engine = db_connect("databases", model_name=model_name) Session = sessionmaker(bind=engine) session = Session() doc = "select * from documents" auth = "select * from authors" Author = toDataFrame(auth, session) Author.index = Author.id Document = toDataFrame(doc, session) Document.index = Document.id Key_Auth = ''' select authors_id, keywords_id, keyword, first_name, last_name from keywords k, documents_keywords dk, documents_authors da, authors a, documents d where a.id = da.authors_id and d.id = da.documents_id and d.id = dk.documents_id and k.id = dk.keywords_id ''' Key_Auth_alt = ''' select authors_id, keywords_id, keyword, first_name, last_name from keywords k, documents_keywords dk, documents_authors da, authors a, documents d where a.id = da.authors_id and d.id = da.documents_id and d.id = dk.documents_id and k.id = dk.keywords_id ''' tmpt = session.execute(Key_Auth) KA = DataFrame(tmpt.fetchall(), columns=list(tmpt.keys())) Docu_Auth = ''' select authors_id, documents_id, first_name, last_name, title from authors a, documents b, documents_authors c where a.id=c.authors_id and c.documents_id=b.id; ''' tmpt = session.execute(Docu_Auth)<|fim▁hole|> from (select keywords_id, count(*) freqency from documents_keywords group by keywords_id) a, keywords where keywords.id = a.keywords_id ''' a = session.execute(Key_Freq) Keyword = DataFrame(a.fetchall(), columns=list(a.keys())) Keyword.index = Keyword.id DocNum = session.execute('select count(*) from documents').first()[0] Keyword.loc[:, 'weight'] = np.log(DocNum / Keyword.freqency) if not return_keyword: return Author, Document, KA, DA else: return Author, Document, KA, DA, Keyword def get_top_keywords(model_name, author_id, n): engine = db_connect("databases", model_name=model_name) Session = sessionmaker(bind=engine) session = Session() Key_Auth_ID = ''' select keyword, count(*) as frequency from (select authors_id, keywords_id, keyword from keywords k, documents_keywords dk, documents_authors da, authors a, documents d where a.id = da.authors_id and d.id = da.documents_id and d.id = dk.documents_id and k.id = dk.keywords_id and authors_id = {}) as KA group by keywords_id order by frequency '''.format(author_id) tmpt = session.execute(Key_Auth_ID) return DataFrame(tmpt.fetchall(), columns=list(tmpt.keys()))[:n].values.tolist()<|fim▁end|>
DA = DataFrame(tmpt.fetchall(), columns=list(tmpt.keys())) Key_Freq = ''' select keywords.id, keyword, freqency
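The weight computed in the completion above is a plain inverse document frequency: log(total documents / keyword frequency). A small worked example with made-up numbers:

import numpy as np

doc_num = 1000
frequency = np.array([10, 100, 1000])   # documents containing each keyword
weight = np.log(doc_num / frequency)
# -> [4.61, 2.30, 0.00] (rounded): the rarer the keyword, the higher its weight.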
<|file_name|>http_transport_test_server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding: utf-8

# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A one-shot testing webserver.

When invoked, this server will write a short integer to stdout, indicating on
which port the server is listening. It will then read one integer from stdin,
indicating the response code to be sent in response to a request. It also
reads 16 characters from stdin, which, after having "\r\n" appended, will form
the response body in a successful response (one with code 200). The server will
process one HTTP request, deliver the prearranged response to the client, and
write the entire request to stdout. It will then terminate.

This server is written in Python since it provides a simple HTTP stack, and
because parsing chunked encoding is safer and easier in a memory-safe language.
This could easily have been written in C++ instead.
"""

import BaseHTTPServer
import struct
import sys
import zlib


class BufferedReadFile(object):
  """A File-like object that stores all read contents into a buffer."""

  def __init__(self, real_file):
    self.file = real_file
    self.buffer = ''

  def read(self, size=-1):
    buf = self.file.read(size)
    self.buffer += buf
    return buf

  def readline(self, size=-1):
    buf = self.file.readline(size)
    self.buffer += buf
    return buf

  def flush(self):
    self.file.flush()

  def close(self):
    self.file.close()


class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  # Everything to be written to stdout is collected into this string. It can’t
  # be written to stdout until after the HTTP transaction is complete, because
  # stdout is a pipe being read by a test program that’s also the HTTP client.
  # The test program expects to complete the entire HTTP transaction before it
  # even starts reading this script’s stdout. If the stdout pipe buffer fills up
  # during an HTTP transaction, deadlock would result.
  raw_request = ''

  response_code = 500
  response_body = ''

  def handle_one_request(self):
    # Wrap the rfile in the buffering file object so that the raw header block
    # can be written to stdout after it is parsed.
    self.rfile = BufferedReadFile(self.rfile)
    BaseHTTPServer.BaseHTTPRequestHandler.handle_one_request(self)

  def do_POST(self):
    RequestHandler.raw_request = self.rfile.buffer
    self.rfile.buffer = ''

    if self.headers.get('Transfer-Encoding', '').lower() == 'chunked':
      if 'Content-Length' in self.headers:
        raise AssertionError
      body = self.handle_chunked_encoding()
    else:
      length = int(self.headers.get('Content-Length', -1))
      body = self.rfile.read(length)

    if self.headers.get('Content-Encoding', '').lower() == 'gzip':
      # 15 is the value of |wbits|, which should be at the maximum possible
      # value to ensure that any gzip stream can be decoded. The offset of 16
      # specifies that the stream to decompress will be formatted with a gzip
      # wrapper.
body = zlib.decompress(body, 16 + 15) RequestHandler.raw_request += body self.send_response(self.response_code) self.end_headers() if self.response_code == 200: self.wfile.write(self.response_body) self.wfile.write('\r\n') def handle_chunked_encoding(self): """This parses a "Transfer-Encoding: Chunked" body in accordance with RFC 7230 §4.1. This returns the result as a string. """ body = '' chunk_size = self.read_chunk_size() while chunk_size > 0: # Read the body. data = self.rfile.read(chunk_size) chunk_size -= len(data) body += data # Finished reading this chunk. if chunk_size == 0: # Read through any trailer fields. trailer_line = self.rfile.readline() while trailer_line.strip() != '': trailer_line = self.rfile.readline() # Read the chunk size. chunk_size = self.read_chunk_size() return body def read_chunk_size(self): # Read the whole line, including the \r\n. chunk_size_and_ext_line = self.rfile.readline() # Look for a chunk extension. chunk_size_end = chunk_size_and_ext_line.find(';') if chunk_size_end == -1: # No chunk extensions; just encounter the end of line. chunk_size_end = chunk_size_and_ext_line.find('\r') if chunk_size_end == -1: self.send_response(400) # Bad request. return -1<|fim▁hole|> def log_request(self, code='-', size='-'): # The default implementation logs these to sys.stderr, which is just noise. pass def Main(): if sys.platform == 'win32': import os, msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) # Start the server. server = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), RequestHandler) # Write the port as an unsigned short to the parent process. sys.stdout.write(struct.pack('=H', server.server_address[1])) sys.stdout.flush() # Read the desired test response code as an unsigned short and the desired # response body as a 16-byte string from the parent process. RequestHandler.response_code, RequestHandler.response_body = \ struct.unpack('=H16s', sys.stdin.read(struct.calcsize('=H16s'))) # Handle the request. server.handle_request() # Share the entire request with the test program, which will validate it. sys.stdout.write(RequestHandler.raw_request) sys.stdout.flush() if __name__ == '__main__': Main()<|fim▁end|>
return int(chunk_size_and_ext_line[:chunk_size_end], base=16)
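The chunked-transfer parsing exercised above follows RFC 7230 §4.1: a hexadecimal size line (optionally carrying chunk extensions), that many body bytes, a trailing CRLF, repeated until a zero-size chunk ends the stream. Below is a condensed, self-contained sketch of the same loop; unlike the server above, it skips trailer fields for brevity:

import io

def read_chunked(f):
    body = b""
    while True:
        size_line = f.readline().split(b";")[0].strip()  # drop any chunk extension
        size = int(size_line, 16)
        if size == 0:
            break
        body += f.read(size)
        f.readline()  # consume the CRLF that terminates each chunk
    return body

# read_chunked(io.BytesIO(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")) == b"Wikipedia"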
<|file_name|>clientscriptAPI.js<|end_file_name|><|fim▁begin|>// Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors // MIT License. See license.txt get_server_fields = function(method, arg, table_field, doc, dt, dn, allow_edit, call_back) { frappe.dom.freeze(); if($.isPlainObject(arg)) arg = JSON.stringify(arg); return $c('runserverobj', args={'method': method, 'docs': JSON.stringify(doc), 'arg': arg }, function(r, rt) { frappe.dom.unfreeze(); if (r.message) { var d = locals[dt][dn]; var field_dict = r.message; for(var key in field_dict) { d[key] = field_dict[key]; if (table_field) refresh_field(key, d.name, table_field); else refresh_field(key); } } if(call_back){ doc = locals[doc.doctype][doc.name]; call_back(doc, dt, dn); } } ); } set_multiple = function (dt, dn, dict, table_field) { var d = locals[dt][dn]; for(var key in dict) { d[key] = dict[key]; if (table_field)<|fim▁hole|> refresh_field(key); } } refresh_many = function (flist, dn, table_field) { for(var i in flist) { if (table_field) refresh_field(flist[i], dn, table_field); else refresh_field(flist[i]); } } set_field_tip = function(n,txt) { var df = frappe.meta.get_docfield(cur_frm.doctype, n, cur_frm.docname); if(df)df.description = txt; if(cur_frm && cur_frm.fields_dict) { if(cur_frm.fields_dict[n]) cur_frm.fields_dict[n].comment_area.innerHTML = replace_newlines(txt); else console.log('[set_field_tip] Unable to set field tip: ' + n); } } refresh_field = function(n, docname, table_field) { // multiple if(typeof n==typeof []) refresh_many(n, docname, table_field); if(table_field && cur_frm.fields_dict[table_field].grid.grid_rows_by_docname) { // for table cur_frm.fields_dict[table_field].grid.grid_rows_by_docname[docname].refresh_field(n); } else if(cur_frm) { cur_frm.refresh_field(n) } } set_field_options = function(n, txt) { cur_frm.set_df_property(n, 'options', txt) } set_field_permlevel = function(n, level) { cur_frm.set_df_property(n, 'permlevel', level) } toggle_field = function(n, hidden) { var df = frappe.meta.get_docfield(cur_frm.doctype, n, cur_frm.docname); if(df) { df.hidden = hidden; refresh_field(n); } else { console.log((hidden ? 
"hide_field" : "unhide_field") + " cannot find field " + n); } } hide_field = function(n) { if(cur_frm) { if(n.substr) toggle_field(n, 1); else { for(var i in n) toggle_field(n[i], 1) } } } unhide_field = function(n) { if(cur_frm) { if(n.substr) toggle_field(n, 0); else { for(var i in n) toggle_field(n[i], 0) } } } get_field_obj = function(fn) { return cur_frm.fields_dict[fn]; } // set missing values in given doc set_missing_values = function(doc, dict) { // dict contains fieldname as key and "default value" as value var fields_to_set = {}; $.each(dict, function(i, v) { if (!doc[i]) { fields_to_set[i] = v; } }); if (fields_to_set) { set_multiple(doc.doctype, doc.name, fields_to_set); } } _f.Frm.prototype.get_doc = function() { return locals[this.doctype][this.docname]; } _f.Frm.prototype.field_map = function(fnames, fn) { if(typeof fnames==='string') { if(fnames == '*') { fnames = keys(this.fields_dict); } else { fnames = [fnames]; } } $.each(fnames, function(i,fieldname) { //var field = cur_frm.fields_dict[f]; - much better design var field = frappe.meta.get_docfield(cur_frm.doctype, fieldname, cur_frm.docname); if(field) { fn(field); cur_frm.refresh_field(fieldname); }; }) } _f.Frm.prototype.set_df_property = function(fieldname, property, value) { var field = frappe.meta.get_docfield(cur_frm.doctype, fieldname, cur_frm.docname) if(field) { field[property] = value; cur_frm.refresh_field(fieldname); }; } _f.Frm.prototype.toggle_enable = function(fnames, enable) { cur_frm.field_map(fnames, function(field) { field.read_only = enable ? 0 : 1; }); } _f.Frm.prototype.toggle_reqd = function(fnames, mandatory) { cur_frm.field_map(fnames, function(field) { field.reqd = mandatory ? true : false; }); } _f.Frm.prototype.toggle_display = function(fnames, show) { cur_frm.field_map(fnames, function(field) { field.hidden = show ? 0 : 1; }); } _f.Frm.prototype.call_server = function(method, args, callback) { return $c_obj(cur_frm.doc, method, args, callback); } _f.Frm.prototype.get_files = function() { return cur_frm.attachments ? frappe.utils.sort(cur_frm.attachments.get_attachments(), "file_name", "string") : [] ; } _f.Frm.prototype.set_query = function(fieldname, opt1, opt2) { var func = (typeof opt1=="function") ? opt1 : opt2; if(opt2) { this.fields_dict[opt1].grid.get_field(fieldname).get_query = func; } else { this.fields_dict[fieldname].get_query = func; } } _f.Frm.prototype.set_value_if_missing = function(field, value) { this.set_value(field, value, true); } _f.Frm.prototype.set_value = function(field, value, if_missing) { var me = this; var _set = function(f, v) { var fieldobj = me.fields_dict[f]; if(fieldobj) { if(!if_missing || !frappe.model.has_value(me.doctype, me.doc.name, f)) { if(fieldobj.df.fieldtype==="Table" && $.isArray(v)) { frappe.model.clear_table(me.doc, fieldobj.df.fieldname); $.each(v, function(i, d) { var child = frappe.model.add_child(me.doc, fieldobj.df.options, fieldobj.df.fieldname, i+1); $.extend(child, d); }); me.refresh_field(f); } else { frappe.model.set_value(me.doctype, me.doc.name, f, v); } } } } if(typeof field=="string") { _set(field, value) } else if($.isPlainObject(field)) { $.each(field, function(f, v) { _set(f, v); }) } } _f.Frm.prototype.call = function(opts) { var me = this; if(!opts.doc) { if(opts.method.indexOf(".")===-1) opts.method = frappe.model.get_server_module_name(me.doctype) + "." 
+ opts.method; opts.original_callback = opts.callback; opts.callback = function(r) { if($.isPlainObject(r.message)) { if(opts.child) { // update child doc opts.child = locals[opts.child.doctype][opts.child.name]; $.extend(opts.child, r.message); me.fields_dict[opts.child.parentfield].refresh(); } else { // update parent doc me.set_value(r.message); } } opts.original_callback && opts.original_callback(r); } } else { opts.original_callback = opts.callback; opts.callback = function(r) { if(!r.exc) me.refresh_fields(); opts.original_callback && opts.original_callback(r); } } return frappe.call(opts); } _f.Frm.prototype.get_field = function(field) { return cur_frm.fields_dict[field]; }; _f.Frm.prototype.new_doc = function(doctype, field) { frappe._from_link = field; frappe._from_link_scrollY = scrollY; new_doc(doctype); } _f.Frm.prototype.set_read_only = function() { var perm = []; $.each(frappe.perm.get_perm(cur_frm.doc.doctype), function(i, p) { perm[p.permlevel || 0] = {read:1}; }); cur_frm.perm = perm; } _f.Frm.prototype.get_formatted = function(fieldname) { return frappe.format(this.doc[fieldname], frappe.meta.get_docfield(this.doctype, fieldname, this.docname), {no_icon:true}, this.doc); }<|fim▁end|>
refresh_field(key, d.name, table_field); else
<|file_name|>test_business_quarter.py<|end_file_name|><|fim▁begin|>""" Tests for the following offsets: - BQuarterBegin - BQuarterEnd """ from __future__ import annotations from datetime import datetime import pytest from pandas._libs.tslibs.offsets import QuarterOffset from pandas.tests.tseries.offsets.common import ( Base, assert_is_on_offset, assert_offset_equal, ) from pandas.tseries.offsets import ( BQuarterBegin, BQuarterEnd, ) def test_quarterly_dont_normalize(): date = datetime(2012, 3, 31, 5, 30) offsets = (BQuarterEnd, BQuarterBegin) for klass in offsets: result = date + klass() assert result.time() == date.time() @pytest.mark.parametrize("offset", [BQuarterBegin(), BQuarterEnd()]) def test_on_offset(offset): dates = [ datetime(2016, m, d) for m in [10, 11, 12] for d in [1, 2, 3, 28, 29, 30, 31] if not (m == 11 and d == 31) ] for date in dates: res = offset.is_on_offset(date) slow_version = date == (date + offset) - offset assert res == slow_version class TestBQuarterBegin(Base): _offset: type[QuarterOffset] = BQuarterBegin def test_repr(self): expected = "<BusinessQuarterBegin: startingMonth=3>" assert repr(BQuarterBegin()) == expected expected = "<BusinessQuarterBegin: startingMonth=3>" assert repr(BQuarterBegin(startingMonth=3)) == expected expected = "<BusinessQuarterBegin: startingMonth=1>" assert repr(BQuarterBegin(startingMonth=1)) == expected def test_is_anchored(self): assert BQuarterBegin(startingMonth=1).is_anchored() assert BQuarterBegin().is_anchored() assert not BQuarterBegin(2, startingMonth=1).is_anchored() def test_offset_corner_case(self): # corner offset = BQuarterBegin(n=-1, startingMonth=1) assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2) offset_cases = [] offset_cases.append( ( BQuarterBegin(startingMonth=1), { datetime(2008, 1, 1): datetime(2008, 4, 1), datetime(2008, 1, 31): datetime(2008, 4, 1), datetime(2008, 2, 15): datetime(2008, 4, 1), datetime(2008, 2, 29): datetime(2008, 4, 1), datetime(2008, 3, 15): datetime(2008, 4, 1), datetime(2008, 3, 31): datetime(2008, 4, 1), datetime(2008, 4, 15): datetime(2008, 7, 1), datetime(2007, 3, 15): datetime(2007, 4, 2), datetime(2007, 2, 28): datetime(2007, 4, 2), datetime(2007, 1, 1): datetime(2007, 4, 2), datetime(2007, 4, 15): datetime(2007, 7, 2), datetime(2007, 7, 1): datetime(2007, 7, 2), datetime(2007, 4, 1): datetime(2007, 4, 2), datetime(2007, 4, 2): datetime(2007, 7, 2), datetime(2008, 4, 30): datetime(2008, 7, 1), }, ) ) offset_cases.append( ( BQuarterBegin(startingMonth=2), { datetime(2008, 1, 1): datetime(2008, 2, 1), datetime(2008, 1, 31): datetime(2008, 2, 1), datetime(2008, 1, 15): datetime(2008, 2, 1), datetime(2008, 2, 29): datetime(2008, 5, 1), datetime(2008, 3, 15): datetime(2008, 5, 1), datetime(2008, 3, 31): datetime(2008, 5, 1), datetime(2008, 4, 15): datetime(2008, 5, 1), datetime(2008, 8, 15): datetime(2008, 11, 3), datetime(2008, 9, 15): datetime(2008, 11, 3), datetime(2008, 11, 1): datetime(2008, 11, 3), datetime(2008, 4, 30): datetime(2008, 5, 1), }, ) ) offset_cases.append( ( BQuarterBegin(startingMonth=1, n=0), { datetime(2008, 1, 1): datetime(2008, 1, 1), datetime(2007, 12, 31): datetime(2008, 1, 1), datetime(2008, 2, 15): datetime(2008, 4, 1), datetime(2008, 2, 29): datetime(2008, 4, 1), datetime(2008, 1, 15): datetime(2008, 4, 1), datetime(2008, 2, 27): datetime(2008, 4, 1), datetime(2008, 3, 15): datetime(2008, 4, 1), datetime(2007, 4, 1): datetime(2007, 4, 2), datetime(2007, 4, 2): datetime(2007, 4, 2), datetime(2007, 7, 1): datetime(2007, 7, 2), datetime(2007, 4, 15): 
datetime(2007, 7, 2), datetime(2007, 7, 2): datetime(2007, 7, 2), }, ) ) offset_cases.append( ( BQuarterBegin(startingMonth=1, n=-1), { datetime(2008, 1, 1): datetime(2007, 10, 1), datetime(2008, 1, 31): datetime(2008, 1, 1), datetime(2008, 2, 15): datetime(2008, 1, 1), datetime(2008, 2, 29): datetime(2008, 1, 1), datetime(2008, 3, 15): datetime(2008, 1, 1), datetime(2008, 3, 31): datetime(2008, 1, 1),<|fim▁hole|> datetime(2008, 4, 1): datetime(2008, 1, 1), }, ) ) offset_cases.append( ( BQuarterBegin(startingMonth=1, n=2), { datetime(2008, 1, 1): datetime(2008, 7, 1), datetime(2008, 1, 15): datetime(2008, 7, 1), datetime(2008, 2, 29): datetime(2008, 7, 1), datetime(2008, 3, 15): datetime(2008, 7, 1), datetime(2007, 3, 31): datetime(2007, 7, 2), datetime(2007, 4, 15): datetime(2007, 10, 1), datetime(2008, 4, 30): datetime(2008, 10, 1), }, ) ) @pytest.mark.parametrize("case", offset_cases) def test_offset(self, case): offset, cases = case for base, expected in cases.items(): assert_offset_equal(offset, base, expected) class TestBQuarterEnd(Base): _offset: type[QuarterOffset] = BQuarterEnd def test_repr(self): expected = "<BusinessQuarterEnd: startingMonth=3>" assert repr(BQuarterEnd()) == expected expected = "<BusinessQuarterEnd: startingMonth=3>" assert repr(BQuarterEnd(startingMonth=3)) == expected expected = "<BusinessQuarterEnd: startingMonth=1>" assert repr(BQuarterEnd(startingMonth=1)) == expected def test_is_anchored(self): assert BQuarterEnd(startingMonth=1).is_anchored() assert BQuarterEnd().is_anchored() assert not BQuarterEnd(2, startingMonth=1).is_anchored() def test_offset_corner_case(self): # corner offset = BQuarterEnd(n=-1, startingMonth=1) assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29) offset_cases = [] offset_cases.append( ( BQuarterEnd(startingMonth=1), { datetime(2008, 1, 1): datetime(2008, 1, 31), datetime(2008, 1, 31): datetime(2008, 4, 30), datetime(2008, 2, 15): datetime(2008, 4, 30), datetime(2008, 2, 29): datetime(2008, 4, 30), datetime(2008, 3, 15): datetime(2008, 4, 30), datetime(2008, 3, 31): datetime(2008, 4, 30), datetime(2008, 4, 15): datetime(2008, 4, 30), datetime(2008, 4, 30): datetime(2008, 7, 31), }, ) ) offset_cases.append( ( BQuarterEnd(startingMonth=2), { datetime(2008, 1, 1): datetime(2008, 2, 29), datetime(2008, 1, 31): datetime(2008, 2, 29), datetime(2008, 2, 15): datetime(2008, 2, 29), datetime(2008, 2, 29): datetime(2008, 5, 30), datetime(2008, 3, 15): datetime(2008, 5, 30), datetime(2008, 3, 31): datetime(2008, 5, 30), datetime(2008, 4, 15): datetime(2008, 5, 30), datetime(2008, 4, 30): datetime(2008, 5, 30), }, ) ) offset_cases.append( ( BQuarterEnd(startingMonth=1, n=0), { datetime(2008, 1, 1): datetime(2008, 1, 31), datetime(2008, 1, 31): datetime(2008, 1, 31), datetime(2008, 2, 15): datetime(2008, 4, 30), datetime(2008, 2, 29): datetime(2008, 4, 30), datetime(2008, 3, 15): datetime(2008, 4, 30), datetime(2008, 3, 31): datetime(2008, 4, 30), datetime(2008, 4, 15): datetime(2008, 4, 30), datetime(2008, 4, 30): datetime(2008, 4, 30), }, ) ) offset_cases.append( ( BQuarterEnd(startingMonth=1, n=-1), { datetime(2008, 1, 1): datetime(2007, 10, 31), datetime(2008, 1, 31): datetime(2007, 10, 31), datetime(2008, 2, 15): datetime(2008, 1, 31), datetime(2008, 2, 29): datetime(2008, 1, 31), datetime(2008, 3, 15): datetime(2008, 1, 31), datetime(2008, 3, 31): datetime(2008, 1, 31), datetime(2008, 4, 15): datetime(2008, 1, 31), datetime(2008, 4, 30): datetime(2008, 1, 31), }, ) ) offset_cases.append( ( BQuarterEnd(startingMonth=1, n=2), { 
datetime(2008, 1, 31): datetime(2008, 7, 31), datetime(2008, 2, 15): datetime(2008, 7, 31), datetime(2008, 2, 29): datetime(2008, 7, 31), datetime(2008, 3, 15): datetime(2008, 7, 31), datetime(2008, 3, 31): datetime(2008, 7, 31), datetime(2008, 4, 15): datetime(2008, 7, 31), datetime(2008, 4, 30): datetime(2008, 10, 31), }, ) ) @pytest.mark.parametrize("case", offset_cases) def test_offset(self, case): offset, cases = case for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [ (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True), (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False), (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False), (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False), (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False), (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True), (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False), (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False), (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False), (BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False), (BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False), (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True), (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False), (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False), (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False), (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True), (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False), (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False), (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False), (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True), (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False), (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True), (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False), (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False), (BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False), (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True), (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False), ] @pytest.mark.parametrize("case", on_offset_cases) def test_is_on_offset(self, case): offset, dt, expected = case assert_is_on_offset(offset, dt, expected)<|fim▁end|>
datetime(2008, 4, 15): datetime(2008, 4, 1), datetime(2007, 7, 3): datetime(2007, 7, 2), datetime(2007, 4, 3): datetime(2007, 4, 2), datetime(2007, 7, 2): datetime(2007, 4, 2),
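A quick usage note for the offsets exercised above. The case tables pin down behavior such as rolling 2008-02-15 forward to the first business day of the next quarter:

from datetime import datetime
from pandas.tseries.offsets import BQuarterBegin, BQuarterEnd

print(datetime(2008, 2, 15) + BQuarterBegin(startingMonth=1))  # 2008-04-01
print(datetime(2008, 1, 1) + BQuarterEnd(startingMonth=1))     # 2008-01-31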
<|file_name|>balanced_resource_allocation.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software<|fim▁hole|>distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"fmt"
	"math"

	"k8s.io/api/core/v1"
	priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"

	"github.com/golang/glog"
)

// This is a reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
const (
	mb         int64 = 1024 * 1024
	minImgSize int64 = 23 * mb
	maxImgSize int64 = 1000 * mb
)

// Also used in most/least_requested and metadata.
// TODO: despaghettify it
func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource {
	result := &schedulercache.Resource{}
	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
		result.MilliCPU += cpu
		result.Memory += memory
	}
	return result
}

func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}

	allocatableResources := nodeInfo.AllocatableResource()
	totalResources := *podRequests
	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
	totalResources.Memory += nodeInfo.NonZeroRequest().Memory

	cpuFraction := fractionOfCapacity(totalResources.MilliCPU, allocatableResources.MilliCPU)
	memoryFraction := fractionOfCapacity(totalResources.Memory, allocatableResources.Memory)
	score := int(0)
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// if requested >= capacity, the corresponding host should never be preferred.
		score = 0
	} else {
		// Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1
		// respectively. Multiplying the absolute value of the difference by 10 scales the value to
		// 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from
		// 10 leads to the score which also scales from 0 to 10 with 10 representing well balanced.
		diff := math.Abs(cpuFraction - memoryFraction)
		score = int(10 - diff*10)
	}
	if glog.V(10) {
		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		glog.V(10).Infof(
			"%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
			pod.Name, node.Name,
			allocatableResources.MilliCPU, allocatableResources.Memory,
			totalResources.MilliCPU, totalResources.Memory,
			score,
		)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: score,
	}, nil
}

func fractionOfCapacity(requested, capacity int64) float64 {
	if capacity == 0 {
		return 1
	}
	return float64(requested) / float64(capacity)
}

// BalancedResourceAllocation favors nodes with balanced resource usage rate.
// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocationMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	var nonZeroRequest *schedulercache.Resource
	if priorityMeta, ok := meta.(*priorityMetadata); ok {
		nonZeroRequest = priorityMeta.nonZeroRequest
	} else {
		// We couldn't parse metadata - fall back to computing it.
		nonZeroRequest = getNonZeroRequests(pod)
	}
	return calculateBalancedResourceAllocation(pod, nonZeroRequest, nodeInfo)
}<|fim▁end|>
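A worked example of the scoring rule implemented above, score = 10 - |cpuFraction - memoryFraction| * 10, sketched in Python:

def balanced_score(cpu_fraction, memory_fraction):
    if cpu_fraction >= 1 or memory_fraction >= 1:
        return 0  # requested >= capacity: never prefer this node
    return int(10 - abs(cpu_fraction - memory_fraction) * 10)

# balanced_score(0.5, 0.5) == 10   (perfectly balanced usage)
# balanced_score(0.75, 0.25) == 5  (heavily skewed toward one resource)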
<|file_name|>tslint-json.ts<|end_file_name|><|fim▁begin|>/** * This module defines the `tslint-json` project task. * * @module project-tasks/tslint-json * @internal */ /** (Placeholder comment, see TypeStrong/typedoc#603) */ import { Furi, join as furiJoin } from "furi"; import Undertaker from "undertaker";<|fim▁hole|>import { writeJsonFile } from "../utils/project"; export function generateTask(project: ResolvedProject): Undertaker.TaskFunction { let relativePath: string; if (project.tslint !== undefined && project.tslint.tslintJson !== undefined) { relativePath = project.tslint.tslintJson; } else { relativePath = "tslint.json"; } const absolutePath: Furi = furiJoin(project.absRoot, [relativePath]); return async function () { return writeJsonFile(absolutePath, DEFAULT_UNTYPED_TSLINT_CONFIG); }; } export function getTaskName(): string { return "tslint.json"; } export function registerTask(taker: Undertaker, project: ResolvedProject): Undertaker.TaskFunction { const taskName: string = getTaskName(); const task: Undertaker.TaskFunction = generateTask(project); taker.task(taskName, task); return task; }<|fim▁end|>
import { DEFAULT_UNTYPED_TSLINT_CONFIG } from "../options/tslint"; import { ResolvedProject } from "../project";
<|file_name|>MultiLineString.js<|end_file_name|><|fim▁begin|>/** * @module ol/geom/MultiLineString */ import {extend} from '../array.js'; import {closestSquaredDistanceXY} from '../extent.js'; import GeometryLayout from '../geom/GeometryLayout.js'; import GeometryType from '../geom/GeometryType.js'; import LineString from '../geom/LineString.js'; import SimpleGeometry from '../geom/SimpleGeometry.js'; import {assignClosestArrayPoint, arrayMaxSquaredDelta} from '../geom/flat/closest.js'; import {deflateCoordinatesArray} from '../geom/flat/deflate.js'; import {inflateCoordinatesArray} from '../geom/flat/inflate.js'; import {interpolatePoint, lineStringsCoordinateAtM} from '../geom/flat/interpolate.js'; import {intersectsLineStringArray} from '../geom/flat/intersectsextent.js'; import {douglasPeuckerArray} from '../geom/flat/simplify.js'; /** * @classdesc * Multi-linestring geometry. * * @api */ class MultiLineString extends SimpleGeometry { /** * @param {Array<Array<import("../coordinate.js").Coordinate>|LineString>|Array<number>} coordinates * Coordinates or LineString geometries. (For internal use, flat coordinates in * combination with `opt_layout` and `opt_ends` are also accepted.) * @param {GeometryLayout=} opt_layout Layout. * @param {Array<number>=} opt_ends Flat coordinate ends for internal use. */ constructor(coordinates, opt_layout, opt_ends) { super(); /** * @type {Array<number>} * @private */ this.ends_ = []; /** * @private * @type {number} */ this.maxDelta_ = -1; /** * @private * @type {number} */ this.maxDeltaRevision_ = -1; if (Array.isArray(coordinates[0])) { this.setCoordinates(/** @type {Array<Array<import("../coordinate.js").Coordinate>>} */ (coordinates), opt_layout); } else if (opt_layout !== undefined && opt_ends) { this.setFlatCoordinates(opt_layout, /** @type {Array<number>} */ (coordinates)); this.ends_ = opt_ends; } else { let layout = this.getLayout(); const lineStrings = /** @type {Array<LineString>} */ (coordinates); const flatCoordinates = []; const ends = []; for (let i = 0, ii = lineStrings.length; i < ii; ++i) { const lineString = lineStrings[i]; if (i === 0) { layout = lineString.getLayout(); } extend(flatCoordinates, lineString.getFlatCoordinates()); ends.push(flatCoordinates.length); } this.setFlatCoordinates(layout, flatCoordinates); this.ends_ = ends; } } /** * Append the passed linestring to the multilinestring. * @param {LineString} lineString LineString. * @api */ appendLineString(lineString) { if (!this.flatCoordinates) { this.flatCoordinates = lineString.getFlatCoordinates().slice(); } else { extend(this.flatCoordinates, lineString.getFlatCoordinates().slice()); } this.ends_.push(this.flatCoordinates.length); this.changed(); } /** * Make a complete copy of the geometry. * @return {!MultiLineString} Clone. 
* @override * @api */ clone() { return new MultiLineString(this.flatCoordinates.slice(), this.layout, this.ends_.slice()); } /** * @inheritDoc */ closestPointXY(x, y, closestPoint, minSquaredDistance) { if (minSquaredDistance < closestSquaredDistanceXY(this.getExtent(), x, y)) { return minSquaredDistance; } if (this.maxDeltaRevision_ != this.getRevision()) { this.maxDelta_ = Math.sqrt(arrayMaxSquaredDelta( this.flatCoordinates, 0, this.ends_, this.stride, 0)); this.maxDeltaRevision_ = this.getRevision(); } return assignClosestArrayPoint( this.flatCoordinates, 0, this.ends_, this.stride, this.maxDelta_, false, x, y, closestPoint, minSquaredDistance); } /** * Returns the coordinate at `m` using linear interpolation, or `null` if no * such coordinate exists. * * `opt_extrapolate` controls extrapolation beyond the range of Ms in the * MultiLineString. If `opt_extrapolate` is `true` then Ms less than the first * M will return the first coordinate and Ms greater than the last M will * return the last coordinate. * * `opt_interpolate` controls interpolation between consecutive LineStrings * within the MultiLineString. If `opt_interpolate` is `true` the coordinates * will be linearly interpolated between the last coordinate of one LineString * and the first coordinate of the next LineString. If `opt_interpolate` is * `false` then the function will return `null` for Ms falling between * LineStrings. * * @param {number} m M. * @param {boolean=} opt_extrapolate Extrapolate. Default is `false`. * @param {boolean=} opt_interpolate Interpolate. Default is `false`. * @return {import("../coordinate.js").Coordinate} Coordinate. * @api */ getCoordinateAtM(m, opt_extrapolate, opt_interpolate) { if ((this.layout != GeometryLayout.XYM && this.layout != GeometryLayout.XYZM) || this.flatCoordinates.length === 0) { return null; } const extrapolate = opt_extrapolate !== undefined ? opt_extrapolate : false; const interpolate = opt_interpolate !== undefined ? opt_interpolate : false; return lineStringsCoordinateAtM(this.flatCoordinates, 0, this.ends_, this.stride, m, extrapolate, interpolate); } /** * Return the coordinates of the multilinestring. * @return {Array<Array<import("../coordinate.js").Coordinate>>} Coordinates. * @override * @api */ getCoordinates() { return inflateCoordinatesArray( this.flatCoordinates, 0, this.ends_, this.stride); } /** * @return {Array<number>} Ends. */ getEnds() { return this.ends_; } /** * Return the linestring at the specified index. * @param {number} index Index. * @return {LineString} LineString. * @api */ getLineString(index) { if (index < 0 || this.ends_.length <= index) { return null; } return new LineString(this.flatCoordinates.slice( index === 0 ? 0 : this.ends_[index - 1], this.ends_[index]), this.layout); } /** * Return the linestrings of this multilinestring. * @return {Array<LineString>} LineStrings. * @api */ getLineStrings() { const flatCoordinates = this.flatCoordinates; const ends = this.ends_; const layout = this.layout; /** @type {Array<LineString>} */ const lineStrings = []; let offset = 0; for (let i = 0, ii = ends.length; i < ii; ++i) { const end = ends[i]; const lineString = new LineString(flatCoordinates.slice(offset, end), layout); lineStrings.push(lineString); offset = end; } return lineStrings; } /** * @return {Array<number>} Flat midpoints. 
*/<|fim▁hole|> let offset = 0; const ends = this.ends_; const stride = this.stride; for (let i = 0, ii = ends.length; i < ii; ++i) { const end = ends[i]; const midpoint = interpolatePoint( flatCoordinates, offset, end, stride, 0.5); extend(midpoints, midpoint); offset = end; } return midpoints; } /** * @inheritDoc */ getSimplifiedGeometryInternal(squaredTolerance) { const simplifiedFlatCoordinates = []; const simplifiedEnds = []; simplifiedFlatCoordinates.length = douglasPeuckerArray( this.flatCoordinates, 0, this.ends_, this.stride, squaredTolerance, simplifiedFlatCoordinates, 0, simplifiedEnds); return new MultiLineString(simplifiedFlatCoordinates, GeometryLayout.XY, simplifiedEnds); } /** * @inheritDoc * @api */ getType() { return GeometryType.MULTI_LINE_STRING; } /** * @inheritDoc * @api */ intersectsExtent(extent) { return intersectsLineStringArray( this.flatCoordinates, 0, this.ends_, this.stride, extent); } /** * Set the coordinates of the multilinestring. * @param {!Array<Array<import("../coordinate.js").Coordinate>>} coordinates Coordinates. * @param {GeometryLayout=} opt_layout Layout. * @override * @api */ setCoordinates(coordinates, opt_layout) { this.setLayout(opt_layout, coordinates, 2); if (!this.flatCoordinates) { this.flatCoordinates = []; } const ends = deflateCoordinatesArray( this.flatCoordinates, 0, coordinates, this.stride, this.ends_); this.flatCoordinates.length = ends.length === 0 ? 0 : ends[ends.length - 1]; this.changed(); } } export default MultiLineString;<|fim▁end|>
getFlatMidpoints() { const midpoints = []; const flatCoordinates = this.flatCoordinates;
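The row above exercises OpenLayers' flat-coordinate layout: every member line string lives in one packed array, and ends_ records where each one stops. A minimal Python sketch of that layout and of the midpoint lookup the completion's getFlatMidpoints performs; the helper name and the vertex-averaging shortcut are illustrative assumptions, not the OpenLayers API (ol's interpolatePoint walks cumulative segment lengths instead).

def flat_midpoints(flat_coords, ends, stride=2):
    """Midpoint of each packed line string in a flat XY array (sketch)."""
    midpoints = []
    offset = 0
    for end in ends:
        coords = [flat_coords[i:i + stride] for i in range(offset, end, stride)]
        mid = len(coords) // 2
        # Sketch shortcut: average the two middle vertices instead of
        # interpolating along cumulative segment length as ol does.
        x = (coords[mid - 1][0] + coords[mid][0]) / 2
        y = (coords[mid - 1][1] + coords[mid][1]) / 2
        midpoints.append((x, y))
        offset = end
    return midpoints

flat = [0, 0, 10, 0, 0, 5, 0, 15]    # two XY line strings, ends at 4 and 8
print(flat_midpoints(flat, [4, 8]))  # [(5.0, 0.0), (0.0, 10.0)]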
<|file_name|>application.rs<|end_file_name|><|fim▁begin|>//! This module contains the base elements of an OrbTk application (Application, WindowBuilder and Window). use std::sync::mpsc;<|fim▁hole|>use crate::{ core::{application::WindowAdapter, localization::*, *}, shell::{Shell, ShellRequest}, }; /// The `Application` represents the entry point of an OrbTk based application. pub struct Application { // shells: Vec<Shell<WindowAdapter>>, request_sender: mpsc::Sender<ShellRequest<WindowAdapter>>, shell: Shell<WindowAdapter>, name: Box<str>, theme: Rc<Theme>, localization: Option<Rc<RefCell<Box<dyn Localization>>>>, } impl Default for Application { fn default() -> Self { Application::from_name("orbtk_application") } } impl Application { /// Creates a new application. pub fn new() -> Self { Self::default() } /// Sets the default theme for the application. Could be changed per window. pub fn theme(mut self, theme: Theme) -> Self { self.theme = Rc::new(theme); self } pub fn localization<L>(mut self, localization: L) -> Self where L: Localization + 'static, { self.localization = Some(Rc::new(RefCell::new(Box::new(localization)))); self } /// Create a new application with the given name. pub fn from_name(name: impl Into<Box<str>>) -> Self { let (sender, receiver) = mpsc::channel(); Application { request_sender: sender, name: name.into(), shell: Shell::new(receiver), theme: Rc::new(crate::widgets::themes::theme_orbtk::theme_default()), localization: None, } } /// Creates a new window and add it to the application. pub fn window<F: Fn(&mut BuildContext) -> Entity + 'static>(mut self, create_fn: F) -> Self { let (adapter, settings, receiver) = create_window( self.name.clone(), &self.theme, self.request_sender.clone(), create_fn, self.localization.clone(), ); self.shell .create_window_from_settings(settings, adapter) .request_receiver(receiver) .build(); self } /// Starts the application and run it until quit is requested. pub fn run(mut self) { self.shell.run(); } }<|fim▁end|>
use dces::prelude::Entity;
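Every record in this dump has the same shape: a prompt carrying the file's prefix and suffix around a hole marker, followed by the completion that fills the hole (sometimes empty). A hedged Python sketch of splicing such a pair back into source text; the U+2581 spelling of the marker separator is an assumption based on how the tokens render here.

# Marker spelling is an assumption (DeepSeek-style tokens with U+2581).
BEGIN, HOLE, END = "<|fim\u2581begin|>", "<|fim\u2581hole|>", "<|fim\u2581end|>"

def reassemble(prompt: str, completion: str) -> str:
    """Splice a fill-in-the-middle completion back into its source file."""
    body = prompt.split(BEGIN, 1)[1].rsplit(END, 1)[0]
    prefix, suffix = body.split(HOLE, 1)
    return prefix + completion + suffix

# Toy pair mirroring the Rust row above.
prompt = BEGIN + "use std::sync::mpsc;\n" + HOLE + "\nuse crate::core::*;" + END
print(reassemble(prompt, "use dces::prelude::Entity;"))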
<|file_name|>main.js<|end_file_name|><|fim▁begin|>jQuery(document).ready(function($){ //check if the .cd-image-container is in the viewport //if yes, animate it checkPosition($('.cd-image-container')); $(window).on('scroll', function(){ checkPosition($('.cd-image-container')); }); //make the .cd-handle element draggable and modify .cd-resize-img width according to its position $('.cd-image-container').each(function(){ var actual = $(this); drags(actual.find('.cd-handle'), actual.find('.cd-resize-img'), actual, actual.find('.cd-image-label[data-type="original"]'), actual.find('.cd-image-label[data-type="modified"]')); }); //upadate images label visibility $(window).on('resize', function(){ $('.cd-image-container').each(function(){<|fim▁hole|> updateLabel(actual.find('.cd-image-label[data-type="original"]'), actual.find('.cd-resize-img'), 'right'); }); }); }); function checkPosition(container) { container.each(function(){ var actualContainer = $(this); if( $(window).scrollTop() + $(window).height()*0.5 > actualContainer.offset().top) { actualContainer.addClass('is-visible'); } }); } //draggable funtionality - credits to http://css-tricks.com/snippets/jquery/draggable-without-jquery-ui/ function drags(dragElement, resizeElement, container, labelContainer, labelResizeElement) { dragElement.on("mousedown vmousedown", function(e) { dragElement.addClass('draggable'); resizeElement.addClass('resizable'); var dragWidth = dragElement.outerWidth(), xPosition = dragElement.offset().left + dragWidth - e.pageX, containerOffset = container.offset().left, containerWidth = container.outerWidth(), minLeft = containerOffset + 10, maxLeft = containerOffset + containerWidth - dragWidth - 10; dragElement.parents().on("mousemove vmousemove", function(e) { leftValue = e.pageX + xPosition - dragWidth; //constrain the draggable element to move inside his container if(leftValue < minLeft ) { leftValue = minLeft; } else if ( leftValue > maxLeft) { leftValue = maxLeft; } widthValue = (leftValue + dragWidth/2 - containerOffset)*100/containerWidth+'%'; $('.draggable').css('left', widthValue).on("mouseup vmouseup", function() { $(this).removeClass('draggable'); resizeElement.removeClass('resizable'); }); $('.resizable').css('width', widthValue); updateLabel(labelResizeElement, resizeElement, 'left'); updateLabel(labelContainer, resizeElement, 'right'); }).on("mouseup vmouseup", function(e){ dragElement.removeClass('draggable'); resizeElement.removeClass('resizable'); }); e.preventDefault(); }).on("mouseup vmouseup", function(e) { dragElement.removeClass('draggable'); resizeElement.removeClass('resizable'); }); } function updateLabel(label, resizeElement, position) { if(position == 'left') { ( label.offset().left + label.outerWidth() < resizeElement.offset().left + resizeElement.outerWidth() ) ? label.removeClass('is-hidden') : label.addClass('is-hidden') ; } else { ( label.offset().left > resizeElement.offset().left + resizeElement.outerWidth() ) ? label.removeClass('is-hidden') : label.addClass('is-hidden') ; } }<|fim▁end|>
var actual = $(this); updateLabel(actual.find('.cd-image-label[data-type="modified"]'), actual.find('.cd-resize-img'), 'left');
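The drag handler above converts the handle's pixel position into a percentage that drives both the handle's left offset and the resize image's width: (leftValue + dragWidth/2 - containerOffset) * 100 / containerWidth. A quick worked example of that arithmetic, with made-up pixel values:

container_offset = 100   # container's left edge, px
container_width = 600    # px
drag_width = 40          # handle width, px
left_value = 370         # handle's computed left edge, px

# Centre of the handle, relative to the container, as a percentage.
width_value = (left_value + drag_width / 2 - container_offset) * 100 / container_width
print(f"{width_value:.1f}%")  # 48.3% -> applied to both .cd-handle and .cd-resize-img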
<|file_name|>code2seq_dataset.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for converting the Code2Seq dataset to a PLUR dataset. """ import os import tarfile import apache_beam as beam from plur.stage_1.plur_dataset import Configuration from plur.stage_1.plur_dataset import PlurDataset from plur.utils import constants from plur.utils import util from plur.utils.graph_to_output_example import GraphToOutputExample from plur.utils.graph_to_output_example import GraphToOutputExampleNotValidError import tqdm class Code2SeqDataset(PlurDataset): # pylint: disable=line-too-long """Converting data from code2seq dataset to a PLUR dataset. The dataset is used in: Alon, Uri, et al. 'code2seq: Generating sequences from structured representations of code.' arXiv preprint arXiv:1808.01400 (2018). The task is to predict the function name given the function body. The provided dataset by code2seq are the tokenized function name, and the AST paths. Therefore we have to create our own graph representation of code2seq. We try to mimic the code2seq model by constructing a graph similar to figure 3 in the code2seq paper. An example of such graph is shown in https://drive.google.com/file/d/1-cH0FzYIMikgTkUpzVkEZDGjoiqBB9C1/view?usp=sharing. In short, we build the AST path subtree and connects all AST paths with a code2seq root node to make it a graph. """ <|fim▁hole|> 'java-small-preprocessed.tar.gz': { 'url': 'https://s3.amazonaws.com/code2seq/datasets/java-small-preprocessed.tar.gz', 'sha1sum': '857c2495785f606ab99676c7bbae601ea2160f66', } } _URLS_MED = { 'java-med-preprocessed.tar.gz': { 'url': 'https://s3.amazonaws.com/code2seq/datasets/java-med-preprocessed.tar.gz', 'sha1sum': '219e558ddf46678ef322ff75bf1982faa1b6204d', } } _URLS_LARGE = { 'java-large-preprocessed.tar.gz': { 'url': 'https://s3.amazonaws.com/code2seq/datasets/java-large-preprocessed.tar.gz', 'sha1sum': 'ebc229ba1838a3c8f3a69ab507eb26fa5460152a', } } # pylint: enable=line-too-long _GIT_URL = {} _DATASET_NAME = 'code2seq_dataset' _DATASET_DESCRIPTION = """\ This dataset is used to train the code2seq model. The task is to predict the function name, given the ast paths sampled the function AST. An AST path is a path between two leaf nodes in the AST. """ def __init__(self, stage_1_dir, configuration: Configuration = Configuration(), transformation_funcs=(), filter_funcs=(), user_defined_split_range=(), num_shards=1000, seed=0, dataset_size='small', deduplicate=False): # dataset_size can only be 'small', 'med' or 'large'. 
valid_dataset_size = {'small', 'med', 'large'} if dataset_size not in valid_dataset_size: raise ValueError('{} not in {}'.format(dataset_size, str(valid_dataset_size))) if dataset_size == 'small': urls = self._URLS_SMALL elif dataset_size == 'med': urls = self._URLS_MED else: urls = self._URLS_LARGE self.dataset_size = dataset_size super().__init__(self._DATASET_NAME, urls, self._GIT_URL, self._DATASET_DESCRIPTION, stage_1_dir, transformation_funcs=transformation_funcs, filter_funcs=filter_funcs, user_defined_split_range=user_defined_split_range, num_shards=num_shards, seed=seed, configuration=configuration, deduplicate=deduplicate) def download_dataset(self): """Download the dataset using requests and extract the tarfile.""" super().download_dataset_using_requests() # Extract the tarfile depending on the dataset size. if self.dataset_size == 'small': self.code2seq_extracted_dir = os.path.join( self.raw_data_dir, 'java-small') tarfile_name = 'java-small-preprocessed.tar.gz' elif self.dataset_size == 'med': self.code2seq_extracted_dir = os.path.join( self.raw_data_dir, 'java-med') tarfile_name = 'java-med-preprocessed.tar.gz' else: self.code2seq_extracted_dir = os.path.join( self.raw_data_dir, 'java-large') tarfile_name = 'java-large-preprocessed.tar.gz' tarfiles_to_extract = [] tarfiles_to_extract = util.check_need_to_extract( tarfiles_to_extract, self.code2seq_extracted_dir, tarfile_name) for filename in tarfiles_to_extract: dest = os.path.join(self.raw_data_dir, filename) with tarfile.open(dest, 'r:gz') as tf: for member in tqdm.tqdm( tf.getmembers(), unit='file', desc='Extracting {}'.format(filename)): tf.extract(member, self.raw_data_dir) def get_all_raw_data_paths(self): """Get paths to all raw data.""" # Get the filenames depending on the dataset size. if self.dataset_size == 'small': train_file = os.path.join( self.code2seq_extracted_dir, 'java-small.train.c2s') validation_file = os.path.join( self.code2seq_extracted_dir, 'java-small.val.c2s') test_file = os.path.join( self.code2seq_extracted_dir, 'java-small.test.c2s') elif self.dataset_size == 'med': train_file = os.path.join( self.code2seq_extracted_dir, 'java-med.train.c2s') validation_file = os.path.join( self.code2seq_extracted_dir, 'java-med.val.c2s') test_file = os.path.join( self.code2seq_extracted_dir, 'java-med.test.c2s') else: train_file = os.path.join( self.code2seq_extracted_dir, 'java-large.train.c2s') validation_file = os.path.join( self.code2seq_extracted_dir, 'java-large.val.c2s') test_file = os.path.join( self.code2seq_extracted_dir, 'java-large.test.c2s') return [train_file, validation_file, test_file] def raw_data_paths_to_raw_data_do_fn(self): """Returns a beam.DoFn subclass that reads the raw data.""" return C2SExtractor(super().get_random_split, bool(self.user_defined_split_range)) def _construct_token_subtree(self, graph_to_output_example, token, cur_node_id, token_root_name): # pylint: disable=line-too-long """Construct the token subtree in a AST path. We create a node for each subtoken in the token, all subtokens are connected to the next subtoken via the 'NEXT_SUBTOKEN' edge. All subtokens are connected to the token root node via the 'SUBTOKEN' edge. See the draw.io figure mentioned in the class doc for the visualization. Args: graph_to_output_example: A GraphToOutputExample instance. token: Starting or ending token in the AST path. cur_node_id: Next available node id. token_root_name: Node type and label for the token root node. Returns: A tuple of graph_to_output_example, cur_node_id, token_node_id. 
graph_to_output_example is updated with the token subtree, cur_node_id is the next available node id after all the token subtree nodes are added, and token_node_id is the node id of the root token node. """ subtokens = token.split('|') subtoken_node_ids = [] prev_subtoken_id = -1 # Create a node each subtoken. for subtoken in subtokens: graph_to_output_example.add_node(cur_node_id, 'SUBTOKEN', subtoken) subtoken_node_ids.append(cur_node_id) # Connects to the previous subtoken node if prev_subtoken_id != -1: graph_to_output_example.add_edge(prev_subtoken_id, cur_node_id, 'NEXT_SUBTOKEN') prev_subtoken_id = cur_node_id cur_node_id += 1 # Add a root node for the token subtree. graph_to_output_example.add_node(cur_node_id, token_root_name, token_root_name) token_node_id = cur_node_id cur_node_id += 1 # Connect all subtoken nodes to the token subtree root node. for node_id in subtoken_node_ids: graph_to_output_example.add_edge(token_node_id, node_id, 'SUBTOKEN') return graph_to_output_example, cur_node_id, token_node_id def _construct_ast_nodes_subtree(self, graph_to_output_example, ast_nodes, cur_node_id): """Construct the AST nodes subtree in a AST path. We create a node for each AST node in the AST path. Each AST node are connected to the next AST node via the 'NEXT_AST_NODE' edge. See the draw.io figure mentioned in the class doc for the visualization. Args: graph_to_output_example: A GraphToOutputExample instance. ast_nodes: AST nodes in the AST path. cur_node_id: Current available node id. Returns: A tuple of graph_to_output_example, cur_node_id, ast_node_ids. graph_to_output_example is updated with the ast nodes subtree, cur_node_id is the next available node id after all the ast nodes are added, and ast_node_ids the node ids of all AST nodes. """ ast_nodes = ast_nodes.split('|') ast_node_ids = [] prev_ast_node_id = -1 # Create a node each AST node. for ast_node in ast_nodes: graph_to_output_example.add_node(cur_node_id, 'AST_NODE', ast_node) ast_node_ids.append(cur_node_id) # Connects to the previous AST node. if prev_ast_node_id != -1: graph_to_output_example.add_edge(prev_ast_node_id, cur_node_id, 'NEXT_AST_NODE') prev_ast_node_id = cur_node_id cur_node_id += 1 return graph_to_output_example, cur_node_id, ast_node_ids def raw_data_to_graph_to_output_example(self, raw_data): # pylint: disable=line-too-long """Convert raw data to the unified GraphToOutputExample data structure. The Code2Seq raw data contains the target function name, and the sampled AST paths. Each AST path starts and ends with a token, and a series of AST nodes that connects the two tokens. We use _construct_token_subtree to build the token subtree and _construct_ast_nodes_subtree to build the AST nodes subtree. Then, all AST paths' nodes are connected to a AST root node. All AST root nodes are connected to a single code2seq root node. https://drive.google.com/file/d/1-cH0FzYIMikgTkUpzVkEZDGjoiqBB9C1/view?usp=sharing shows an example of such a graph and the original AST path. Args: raw_data: A dictionary with 'split', 'target_label' and 'ast_paths' as keys. The value of the 'split' field is the split (train/valid/test) that the data belongs to. The value of the 'target_label' field is the function name. The value of the 'ast_paths' field is a list of AST paths. Raises: GraphToOutputExampleNotValidError if the GraphToOutputExample is not valid. Returns: A dictionary with keys 'split' and 'GraphToOutputExample'. Values are the split(train/validation/test) the data belongs to, and the GraphToOutputExample instance. 
""" # pylint: enable=line-too-long split = raw_data['split'] target_label = raw_data['target_label'] ast_paths = raw_data['ast_paths'] graph_to_output_example = GraphToOutputExample() cur_node_id = 0 ast_path_root_node_ids = [] # This is the root node of all AST path nodes. graph_to_output_example.add_node(cur_node_id, 'C2C_ROOT', 'C2C_ROOT') c2c_root_node_id = cur_node_id cur_node_id += 1 for ast_path in ast_paths: # The start_token subtree start_token = ast_path[0] graph_to_output_example, cur_node_id, start_token_node_id = ( self._construct_token_subtree( graph_to_output_example, start_token, cur_node_id, 'START_TOKEN')) # The ast_nodes subtree ast_nodes = ast_path[1] graph_to_output_example, cur_node_id, ast_node_ids = ( self._construct_ast_nodes_subtree( graph_to_output_example, ast_nodes, cur_node_id)) # The end_token subtree end_token = ast_path[2] graph_to_output_example, cur_node_id, end_token_node_id = ( self._construct_token_subtree( graph_to_output_example, end_token, cur_node_id, 'END_TOKEN')) # Connects the start_token root node with the first node in the # ast_nodes subtree. graph_to_output_example.add_edge( start_token_node_id, ast_node_ids[0], 'START_AST_PATH') # Connects the end_token root node with the last node in the # ast_nodes subtree. graph_to_output_example.add_edge( end_token_node_id, ast_node_ids[-1], 'END_AST_PATH') # Add a root AST path node representing the AST path. graph_to_output_example.add_node( cur_node_id, 'ROOT_AST_PATH', 'ROOT_AST_PATH') ast_path_root_node_id = cur_node_id ast_path_root_node_ids.append(ast_path_root_node_id) cur_node_id += 1 # Connects the root AST path node with the start_token and end_token # subtree. graph_to_output_example.add_edge( ast_path_root_node_id, start_token_node_id, 'START_TOKEN') graph_to_output_example.add_edge( ast_path_root_node_id, end_token_node_id, 'END_TOKEN') # Connects the root AST path node with all nodes in the ast_nodes subtree. for node_id in ast_node_ids: graph_to_output_example.add_edge(ast_path_root_node_id, node_id, 'AST_NODE') # Connects the code2seq root node with all AST path root node. for ast_path_root_node_id in ast_path_root_node_ids: graph_to_output_example.add_edge(c2c_root_node_id, ast_path_root_node_id, 'AST_PATH') for subtoken in target_label.split('|'): graph_to_output_example.add_token_output(subtoken) for transformation_fn in self.transformation_funcs: graph_to_output_example = transformation_fn(graph_to_output_example) if not graph_to_output_example.check_if_valid(): raise GraphToOutputExampleNotValidError( 'Invalid GraphToOutputExample found {}'.format( graph_to_output_example)) for filter_fn in self.filter_funcs: if not filter_fn(graph_to_output_example): graph_to_output_example = None break return {'split': split, 'GraphToOutputExample': graph_to_output_example} class C2SExtractor(beam.DoFn): """Class to read the code2seq dataset.""" def __init__(self, random_split_fn, use_random_split): self.random_split_fn = random_split_fn self.use_random_split = use_random_split def _read_data(self, file_path): """Read and parse the code2seq raw data file. Each line in the code2seq raw data file has the following format: '<token> <token>,<node1>,<node2>,<token> <token>,<node3>,<token>' The first token is the function name. The rest are the AST paths, separated with a whitespace. Args: file_path: Path to a code2seq data file. Yields: A tuple of the function name, and a list of AST paths. 
""" with open(file_path) as f: for line in f: fields = line.rstrip().split(' ') # The subtokens are still separated by '|', we handle them # together in self.raw_data_to_graph_to_output_example() target_label = fields[0] ast_paths = [] for field in fields[1:]: if field: # The subtokens are still separated by '|', we handle them # together in self.raw_data_to_graph_to_output_example() ast_paths.append(field.split(',')) yield target_label, ast_paths def _get_split(self, file_path): """Get the data split based on the filename suffix.""" if file_path.endswith('train.c2s'): return constants.TRAIN_SPLIT_NAME elif file_path.endswith('val.c2s'): return constants.VALIDATION_SPLIT_NAME else: return constants.TEST_SPLIT_NAME def process(self, file_path): split = self._get_split(file_path) for target_label, ast_paths in self._read_data(file_path): yield { 'split': self.random_split_fn() if self.use_random_split else split, 'target_label': target_label, 'ast_paths': ast_paths }<|fim▁end|>
_URLS_SMALL = {
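The docstrings above spell out the graph shape per AST path: a subtoken chain plus a root node on each side, a chain of AST nodes, and a path root hung off the shared C2C_ROOT. As a sanity check, a small Python sketch that counts the nodes and edges that scheme yields for one path; it is a simplification of the real builder, which also assigns ids and labels.

def path_graph_size(start_token, ast_nodes, end_token):
    """Node/edge counts for one AST path under the scheme above (sketch)."""
    s = start_token.split('|')   # start subtokens
    a = ast_nodes.split('|')     # inner AST nodes
    e = end_token.split('|')     # end subtokens
    nodes = len(s) + len(e) + len(a) + 3   # plus two token roots and the path root
    edges = ((len(s) - 1) + len(s)         # NEXT_SUBTOKEN + SUBTOKEN, start side
             + (len(e) - 1) + len(e)       # NEXT_SUBTOKEN + SUBTOKEN, end side
             + (len(a) - 1)                # NEXT_AST_NODE chain
             + 2                           # START_AST_PATH, END_AST_PATH
             + 2                           # path root -> the two token roots
             + len(a)                      # path root -> every AST node
             + 1)                          # C2C_ROOT -> path root
    return nodes, edges

print(path_graph_size('my|name', 'Name1|Field|Name2', 'other|token'))  # (10, 16)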
<|file_name|>karma.conf.js<|end_file_name|><|fim▁begin|>module.exports = function(config) { config.set({ basePath: '../../', frameworks: ['jasmine', 'requirejs'], files: [ {pattern: 'test/unit/require.conf.js', included: true}, {pattern: 'test/unit/tests/global.js', included: true}, {pattern: 'src/client/**/*.*', included: false}, {pattern: 'test/unit/tests/**/*.*', included: false}, ], plugins: [ 'karma-jasmine', 'karma-requirejs', 'karma-coverage', 'karma-html-reporter', 'karma-phantomjs-launcher', 'karma-chrome-launcher', 'karma-firefox-launcher', 'karma-safari-launcher', 'karma-ie-launcher' ], reporters: ['coverage', 'html', 'progress'], preprocessors: {<|fim▁hole|> 'src/client/service/**/*.js': ['coverage'] }, coverageReporter: { type: 'html', dir: 'test/unit/coverage/', includeAllSources: true }, htmlReporter: { outputDir: 'results' //it is annoying that this file path isn't from basePath :( }, colors: true, logLevel: config.LOG_INFO, autoWatch: false, browsers: ['Chrome'/*, 'PhantomJS', 'Firefox', 'IE', 'Safari'*/], captureTimeout: 5000, singleRun: true }); };<|fim▁end|>
'src/client/component/**/*.js': ['coverage'],
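The two restored preprocessors entries map glob patterns to Karma's coverage instrumentation. A rough Python illustration of which paths those globs select, using fnmatch as a stand-in for Karma's minimatch; the ** semantics differ slightly between the two, so treat this as an approximation.

import fnmatch

patterns = {
    'src/client/component/**/*.js': ['coverage'],
    'src/client/service/**/*.js': ['coverage'],
}
for path in ['src/client/component/nav/nav.js',
             'src/client/service/api/client.js',
             'test/unit/tests/global.js']:
    hits = [p for pat, p in patterns.items() if fnmatch.fnmatch(path, pat)]
    print(path, '->', hits or 'no preprocessor')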
<|file_name|>deluged_client.py<|end_file_name|><|fim▁begin|># Author: Paul Wollaston # Contributions: Luke Mullan # # This client script allows connection to Deluge Daemon directly, completely # circumventing the requirement to use the WebUI. import json from base64 import b64encode import sickbeard from sickbeard import logger from .generic import GenericClient from synchronousdeluge import DelugeClient class DelugeDAPI(GenericClient): drpc = None def __init__(self, host=None, username=None, password=None): super(DelugeDAPI, self).__init__('DelugeD', host, username, password) def _get_auth(self): if not self.connect(): return None return True def connect(self, reconnect = False): hostname = self.host.replace("/", "").split(':') if not self.drpc or reconnect: self.drpc = DelugeRPC(hostname[1], port = hostname[2], username = self.username, password = self.password) return self.drpc def _add_torrent_uri(self, result): label = sickbeard.TORRENT_LABEL if result.show.is_anime: label = sickbeard.TORRENT_LABEL_ANIME options = { 'add_paused': sickbeard.TORRENT_PAUSED } remote_torrent = self.drpc.add_torrent_magnet(result.url, options, result.hash) if not remote_torrent: return None result.hash = remote_torrent return remote_torrent def _add_torrent_file(self, result): label = sickbeard.TORRENT_LABEL if result.show.is_anime: label = sickbeard.TORRENT_LABEL_ANIME if not result.content: result.content = {} if not result.content: return None options = { 'add_paused': sickbeard.TORRENT_PAUSED } remote_torrent = self.drpc.add_torrent_file(result.name + '.torrent', result.content, options, result.hash) if not remote_torrent: return None result.hash = remote_torrent return remote_torrent def _set_torrent_label(self, result): label = sickbeard.TORRENT_LABEL if result.show.is_anime: label = sickbeard.TORRENT_LABEL_ANIME if ' ' in label: logger.log(self.name + u': Invalid label. Label must not contain a space', logger.ERROR) return False if label: if self.drpc.set_torrent_label(result.hash, label): return True return False def _set_torrent_ratio(self, result): return True def _set_torrent_path(self, result): path = sickbeard.TORRENT_PATH if path: if self.drpc.set_torrent_path(result.hash, path): return True return False def _set_torrent_pause(self, result): if sickbeard.TORRENT_PAUSED: return self.drpc.pause_torrent(result.hash) return True def testAuthentication(self): if self.connect(True) and self.drpc.test(): return True, 'Success: Connected and Authenticated' else: return False, 'Error: Unable to Authenticate! Please check your config!' 
class DelugeRPC(object): host = 'localhost' port = 58846 username = None password = None client = None def __init__(self, host = 'localhost', port = 58846, username = None, password = None): super(DelugeRPC, self).__init__() self.host = host self.port = port self.username = username self.password = password def connect(self): self.client = DelugeClient() self.client.connect(self.host, int(self.port), self.username, self.password) def test(self): try: self.connect() except: return False return True def add_torrent_magnet(self, torrent, options, torrent_hash): torrent_id = False try: self.connect() torrent_id = self.client.core.add_torrent_magnet(torrent, options).get() if not torrent_id: torrent_id = self._check_torrent(torrent_hash) except Exception as err: return False finally: if self.client: self.disconnect() return torrent_id def add_torrent_file(self, filename, torrent, options, torrent_hash): torrent_id = False try: self.connect() torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get() if not torrent_id: torrent_id = self._check_torrent(torrent_hash) except Exception as err: return False finally: if self.client: self.disconnect() return torrent_id def set_torrent_label(self, torrent_id, label): try: self.connect() self.client.label.set_torrent(torrent_id, label).get() except Exception as err: logger.log('DelugeD: Failed to set label for torrent: ' + err + ' ' + traceback.format_exc(), logger.ERROR) return False finally: if self.client: self.disconnect() return True<|fim▁hole|> self.connect() self.client.core.set_torrent_move_completed_path(torrent_id, path).get() self.client.core.set_torrent_move_completed(torrent_id, 1).get() except Exception as err: logger.log('DelugeD: Failed to set path for torrent: ' + err + ' ' + traceback.format_exc(), logger.ERROR) return False finally: if self.client: self.disconnect() return True def pause_torrent(self, torrent_ids): try: self.connect() self.client.core.pause_torrent(torrent_ids).get() except Exception as err: logger.log('DelugeD: Failed to pause torrent: ' + err + ' ' + traceback.format_exc(), logger.ERROR) return False finally: if self.client: self.disconnect() return True def disconnect(self): self.client.disconnect() def _check_torrent(self, torrent_hash): torrent_id = self.client.core.get_torrent_status(torrent_hash, {}).get() if torrent_id['hash']: logger.log('DelugeD: Torrent already exists in Deluge', logger.DEBUG) return torrent_hash return False api = DelugeDAPI()<|fim▁end|>
def set_torrent_path(self, torrent_id, path): try:
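A short sketch of driving the DelugeRPC helper above on its own, outside SickBeard. Host, port, credentials and the torrent hash are placeholders, and the synchronousdeluge package the module imports must be installed for the client to resolve.

# Hypothetical standalone use of the DelugeRPC wrapper defined above.
rpc = DelugeRPC('localhost', port=58846, username='deluge', password='secret')  # placeholders

if rpc.test():  # opens one connection, swallowing errors
    ok = rpc.set_torrent_path('<40-char torrent hash>', '/downloads/tv')  # placeholder hash
    if ok:
        rpc.pause_torrent(['<40-char torrent hash>'])
else:
    print('DelugeD: could not authenticate with the daemon')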
<|file_name|>_load_balancer_network_interfaces_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class LoadBalancerNetworkInterfacesOperations: """LoadBalancerNetworkInterfacesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_12_01.models :param client: Client for service requests.<|fim▁hole|> :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name: str, load_balancer_name: str, **kwargs: Any ) -> AsyncIterable["_models.NetworkInterfaceListResult"]: """Gets associated load balancer network interfaces. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param load_balancer_name: The name of the load balancer. 
:type load_balancer_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-12-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore<|fim▁end|>
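The generated list operation above returns an AsyncItemPaged wired from a get_next/extract_data pair. A self-contained Python sketch of that paging pattern with a fake two-page service standing in for the Azure call, so no SDK is required to run it.

import asyncio

# Fake service: next_link -> (items, following next_link).
PAGES = {None: (['nic-a', 'nic-b'], 'page2'), 'page2': (['nic-c'], None)}

async def fetch(next_link=None):
    """Stand-in for one ListNetworkInterfaces HTTP round trip."""
    values, nxt = PAGES[next_link]
    return {'value': values, 'nextLink': nxt}

async def list_all():
    items, next_link = [], None
    while True:
        page = await fetch(next_link)   # the get_next step
        items.extend(page['value'])     # the extract_data step
        next_link = page['nextLink']
        if not next_link:
            return items

print(asyncio.run(list_all()))  # ['nic-a', 'nic-b', 'nic-c']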
<|file_name|>mockCartData.ts<|end_file_name|><|fim▁begin|>export const data = [ { id: "DBS0238532", name: "γS13", fee: "$30.00", }, { id: "DBS0238534", label: "γS14", fee: "$30.00",<|fim▁hole|> { id: "502", name: "(Myc)2-apm1", fee: "$15.00", }, ]<|fim▁end|>
},
<|file_name|>0.py<|end_file_name|><|fim▁begin|># - Coding UTF8 - # # Networked Decision Making # Site: http://code.google.com/p/global-decision-making-system/ # # License Code: GPL, General Public License v. 2.0 # License Content: Creative Commons Attribution 3.0 # # Also visit: www.web2py.com # or Groups: http://groups.google.com/group/web2py # For details on the web framework used for this development # # Developed by Russ King ([email protected] # Russ also blogs occasionally at proudofyourplanent.blogspot.com # His general thinking on why this project is very important is available at # http://www.scribd.com/doc/98216626/New-Global-Strategy # # This file contains settings for auth policy which are need before setup # of rest of configuration so staying here for now # ######################################################################### from gluon.storage import Storage settings = Storage() #Settings for user logon - lets just uncomment as needed for now - not clear if there is much scope to #allow changes and python social auth will hopefully be added I don't think dual login worked with google but #lets setup again and see #Plan for this for now is that netdecisionmaking will use web2py and Janrain while<|fim▁hole|> #settings.logon_methods = 'web2py' #settings.logon_methods = 'google' #settings.logon_methods = 'janrain' settings.logon_methods = 'web2pyandjanrain' settings.verification = False settings.approval = False<|fim▁end|>
#globaldecisionmaking will use google - for some reason Janrain doesn't seem #to come up with google as a login and google login does not support dual methods #reason for which has not been investigated
<|file_name|>guard.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Machinery to conditionally expose things. use js::jsapi::{HandleObject, JSContext}; use util::prefs::get_pref; /// A container with a condition. pub struct Guard<T: Clone + Copy> {<|fim▁hole|> condition: Condition, value: T, } impl<T: Clone + Copy> Guard<T> { /// Construct a new guarded value. pub const fn new(condition: Condition, value: T) -> Self { Guard { condition: condition, value: value, } } /// Expose the value if the condition is satisfied. /// /// The passed handle is the object on which the value may be exposed. pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> { if self.condition.is_satisfied(cx, obj) { Some(self.value) } else { None } } } /// A condition to expose things. pub enum Condition { /// The condition is satisfied if the function returns true. Func(unsafe fn(*mut JSContext, HandleObject) -> bool), /// The condition is satisfied if the preference is set. Pref(&'static str), /// The condition is always satisfied. Satisfied, } impl Condition { unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool { match *self { Condition::Pref(name) => get_pref(name).as_boolean().unwrap_or(false), Condition::Func(f) => f(cx, obj), Condition::Satisfied => true, } } }<|fim▁end|>
<|file_name|>System.js<|end_file_name|><|fim▁begin|>"use strict"; tutao.provide('tutao.entity.sys.System'); /** * @constructor * @param {Object=} data The json data to store in this entity. */ tutao.entity.sys.System = function(data) { if (data) { this.updateData(data); } else { this.__format = "0"; this.__id = null; this.__permissions = null; this._lastInvoiceNbr = null; this._freeCustomerInfos = null; this._freeGroup = null; this._invoiceStatusIndex = null; this._premiumCustomerInfos = null; this._premiumGroup = null; this._registrationDataList = null; this._starterCustomerInfos = null; this._starterGroup = null; this._streamCustomerInfos = null; this._streamGroup = null; this._systemAdminGroup = null; this._systemCustomer = null; this._systemCustomerInfo = null; this._systemUserGroup = null; } this._entityHelper = new tutao.entity.EntityHelper(this); this.prototype = tutao.entity.sys.System.prototype; }; /** * Updates the data of this entity. * @param {Object=} data The json data to store in this entity. */ tutao.entity.sys.System.prototype.updateData = function(data) { this.__format = data._format; this.__id = data._id; this.__permissions = data._permissions; this._lastInvoiceNbr = data.lastInvoiceNbr; this._freeCustomerInfos = data.freeCustomerInfos; this._freeGroup = data.freeGroup; this._invoiceStatusIndex = data.invoiceStatusIndex; this._premiumCustomerInfos = data.premiumCustomerInfos; this._premiumGroup = data.premiumGroup; this._registrationDataList = data.registrationDataList; this._starterCustomerInfos = data.starterCustomerInfos; this._starterGroup = data.starterGroup; this._streamCustomerInfos = data.streamCustomerInfos; this._streamGroup = data.streamGroup; this._systemAdminGroup = data.systemAdminGroup; this._systemCustomer = data.systemCustomer; this._systemCustomerInfo = data.systemCustomerInfo; this._systemUserGroup = data.systemUserGroup; }; /** * The version of the model this type belongs to. * @const */ tutao.entity.sys.System.MODEL_VERSION = '9'; /** * The url path to the resource. * @const */ tutao.entity.sys.System.PATH = '/rest/sys/system'; /** * The id of the root instance reference. * @const */ tutao.entity.sys.System.ROOT_INSTANCE_ID = 'A3N5cwAAsQ'; /** * The generated id type flag. * @const */ tutao.entity.sys.System.GENERATED_ID = true; /** * The encrypted flag. * @const */ tutao.entity.sys.System.prototype.ENCRYPTED = false; /** * Provides the data of this instances as an object that can be converted to json. * @return {Object} The json object. */ tutao.entity.sys.System.prototype.toJsonData = function() { return { _format: this.__format, _id: this.__id, _permissions: this.__permissions, lastInvoiceNbr: this._lastInvoiceNbr, freeCustomerInfos: this._freeCustomerInfos, freeGroup: this._freeGroup, invoiceStatusIndex: this._invoiceStatusIndex, premiumCustomerInfos: this._premiumCustomerInfos, premiumGroup: this._premiumGroup, registrationDataList: this._registrationDataList, starterCustomerInfos: this._starterCustomerInfos, starterGroup: this._starterGroup, streamCustomerInfos: this._streamCustomerInfos, streamGroup: this._streamGroup, systemAdminGroup: this._systemAdminGroup, systemCustomer: this._systemCustomer, systemCustomerInfo: this._systemCustomerInfo, systemUserGroup: this._systemUserGroup }; }; /** * The id of the System type. */ tutao.entity.sys.System.prototype.TYPE_ID = 177; /** * The id of the lastInvoiceNbr attribute. */ tutao.entity.sys.System.prototype.LASTINVOICENBR_ATTRIBUTE_ID = 591; /** * The id of the freeCustomerInfos attribute. 
*/ tutao.entity.sys.System.prototype.FREECUSTOMERINFOS_ATTRIBUTE_ID = 183; /** * The id of the freeGroup attribute. */ tutao.entity.sys.System.prototype.FREEGROUP_ATTRIBUTE_ID = 191; /** * The id of the invoiceStatusIndex attribute. */ tutao.entity.sys.System.prototype.INVOICESTATUSINDEX_ATTRIBUTE_ID = 829; /** * The id of the premiumCustomerInfos attribute. */ tutao.entity.sys.System.prototype.PREMIUMCUSTOMERINFOS_ATTRIBUTE_ID = 185; /** * The id of the premiumGroup attribute. */ tutao.entity.sys.System.prototype.PREMIUMGROUP_ATTRIBUTE_ID = 190; /** * The id of the registrationDataList attribute. */ tutao.entity.sys.System.prototype.REGISTRATIONDATALIST_ATTRIBUTE_ID = 194; /** * The id of the starterCustomerInfos attribute. */ tutao.entity.sys.System.prototype.STARTERCUSTOMERINFOS_ATTRIBUTE_ID = 184; /** * The id of the starterGroup attribute. */ tutao.entity.sys.System.prototype.STARTERGROUP_ATTRIBUTE_ID = 192; /** * The id of the streamCustomerInfos attribute. */ tutao.entity.sys.System.prototype.STREAMCUSTOMERINFOS_ATTRIBUTE_ID = 186; /** * The id of the streamGroup attribute. */ tutao.entity.sys.System.prototype.STREAMGROUP_ATTRIBUTE_ID = 193; /** * The id of the systemAdminGroup attribute. */ tutao.entity.sys.System.prototype.SYSTEMADMINGROUP_ATTRIBUTE_ID = 189; /** * The id of the systemCustomer attribute. */ tutao.entity.sys.System.prototype.SYSTEMCUSTOMER_ATTRIBUTE_ID = 187; /** * The id of the systemCustomerInfo attribute. */ tutao.entity.sys.System.prototype.SYSTEMCUSTOMERINFO_ATTRIBUTE_ID = 182; /** * The id of the systemUserGroup attribute. */ tutao.entity.sys.System.prototype.SYSTEMUSERGROUP_ATTRIBUTE_ID = 188; /** * Provides the id of this System. * @return {string} The id of this System. */ tutao.entity.sys.System.prototype.getId = function() { return this.__id; }; /** * Sets the format of this System. * @param {string} format The format of this System. */ tutao.entity.sys.System.prototype.setFormat = function(format) { this.__format = format; return this; }; /** * Provides the format of this System. * @return {string} The format of this System. */ tutao.entity.sys.System.prototype.getFormat = function() { return this.__format; }; /** * Sets the permissions of this System. * @param {string} permissions The permissions of this System. */ tutao.entity.sys.System.prototype.setPermissions = function(permissions) { this.__permissions = permissions; return this; }; /** * Provides the permissions of this System. * @return {string} The permissions of this System. */ tutao.entity.sys.System.prototype.getPermissions = function() { return this.__permissions; }; /** * Sets the lastInvoiceNbr of this System. * @param {string} lastInvoiceNbr The lastInvoiceNbr of this System. */ tutao.entity.sys.System.prototype.setLastInvoiceNbr = function(lastInvoiceNbr) { this._lastInvoiceNbr = lastInvoiceNbr; return this; }; /** * Provides the lastInvoiceNbr of this System. * @return {string} The lastInvoiceNbr of this System. */ tutao.entity.sys.System.prototype.getLastInvoiceNbr = function() { return this._lastInvoiceNbr; }; /** * Sets the freeCustomerInfos of this System. * @param {string} freeCustomerInfos The freeCustomerInfos of this System. */ tutao.entity.sys.System.prototype.setFreeCustomerInfos = function(freeCustomerInfos) { this._freeCustomerInfos = freeCustomerInfos; return this; }; /** * Provides the freeCustomerInfos of this System. * @return {string} The freeCustomerInfos of this System. 
*/ tutao.entity.sys.System.prototype.getFreeCustomerInfos = function() { return this._freeCustomerInfos; }; /** * Sets the freeGroup of this System. * @param {string} freeGroup The freeGroup of this System. */ tutao.entity.sys.System.prototype.setFreeGroup = function(freeGroup) { this._freeGroup = freeGroup; return this; }; /** * Provides the freeGroup of this System. * @return {string} The freeGroup of this System. */ tutao.entity.sys.System.prototype.getFreeGroup = function() { return this._freeGroup; }; /** * Loads the freeGroup of this System. * @return {Promise.<tutao.entity.sys.Group>} Resolves to the loaded freeGroup of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadFreeGroup = function() { return tutao.entity.sys.Group.load(this._freeGroup); }; /** * Sets the invoiceStatusIndex of this System. * @param {string} invoiceStatusIndex The invoiceStatusIndex of this System. */ tutao.entity.sys.System.prototype.setInvoiceStatusIndex = function(invoiceStatusIndex) { this._invoiceStatusIndex = invoiceStatusIndex; return this; }; /** * Provides the invoiceStatusIndex of this System. * @return {string} The invoiceStatusIndex of this System. */ tutao.entity.sys.System.prototype.getInvoiceStatusIndex = function() { return this._invoiceStatusIndex; }; /** * Loads the invoiceStatusIndex of this System. * @return {Promise.<tutao.entity.sys.InvoiceStatusIndex>} Resolves to the loaded invoiceStatusIndex of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadInvoiceStatusIndex = function() { return tutao.entity.sys.InvoiceStatusIndex.load(this._invoiceStatusIndex); }; /** * Sets the premiumCustomerInfos of this System. * @param {string} premiumCustomerInfos The premiumCustomerInfos of this System. */ tutao.entity.sys.System.prototype.setPremiumCustomerInfos = function(premiumCustomerInfos) { this._premiumCustomerInfos = premiumCustomerInfos; return this; }; /** * Provides the premiumCustomerInfos of this System. * @return {string} The premiumCustomerInfos of this System. */ tutao.entity.sys.System.prototype.getPremiumCustomerInfos = function() { return this._premiumCustomerInfos; }; /** * Sets the premiumGroup of this System. * @param {string} premiumGroup The premiumGroup of this System. */ tutao.entity.sys.System.prototype.setPremiumGroup = function(premiumGroup) { this._premiumGroup = premiumGroup; return this; }; /** * Provides the premiumGroup of this System. * @return {string} The premiumGroup of this System. */ tutao.entity.sys.System.prototype.getPremiumGroup = function() { return this._premiumGroup; }; /** * Loads the premiumGroup of this System. * @return {Promise.<tutao.entity.sys.Group>} Resolves to the loaded premiumGroup of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadPremiumGroup = function() { return tutao.entity.sys.Group.load(this._premiumGroup); }; /** * Sets the registrationDataList of this System. * @param {string} registrationDataList The registrationDataList of this System. */ tutao.entity.sys.System.prototype.setRegistrationDataList = function(registrationDataList) { this._registrationDataList = registrationDataList; return this; }; /** * Provides the registrationDataList of this System. * @return {string} The registrationDataList of this System. */ tutao.entity.sys.System.prototype.getRegistrationDataList = function() { return this._registrationDataList; }; /** * Sets the starterCustomerInfos of this System. 
* @param {string} starterCustomerInfos The starterCustomerInfos of this System. */ tutao.entity.sys.System.prototype.setStarterCustomerInfos = function(starterCustomerInfos) { this._starterCustomerInfos = starterCustomerInfos; return this; }; /** * Provides the starterCustomerInfos of this System. * @return {string} The starterCustomerInfos of this System. */ tutao.entity.sys.System.prototype.getStarterCustomerInfos = function() { return this._starterCustomerInfos; }; /** * Sets the starterGroup of this System. * @param {string} starterGroup The starterGroup of this System. */ tutao.entity.sys.System.prototype.setStarterGroup = function(starterGroup) { this._starterGroup = starterGroup; return this; }; /** * Provides the starterGroup of this System. * @return {string} The starterGroup of this System. */ tutao.entity.sys.System.prototype.getStarterGroup = function() { return this._starterGroup;<|fim▁hole|> /** * Loads the starterGroup of this System. * @return {Promise.<tutao.entity.sys.Group>} Resolves to the loaded starterGroup of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadStarterGroup = function() { return tutao.entity.sys.Group.load(this._starterGroup); }; /** * Sets the streamCustomerInfos of this System. * @param {string} streamCustomerInfos The streamCustomerInfos of this System. */ tutao.entity.sys.System.prototype.setStreamCustomerInfos = function(streamCustomerInfos) { this._streamCustomerInfos = streamCustomerInfos; return this; }; /** * Provides the streamCustomerInfos of this System. * @return {string} The streamCustomerInfos of this System. */ tutao.entity.sys.System.prototype.getStreamCustomerInfos = function() { return this._streamCustomerInfos; }; /** * Sets the streamGroup of this System. * @param {string} streamGroup The streamGroup of this System. */ tutao.entity.sys.System.prototype.setStreamGroup = function(streamGroup) { this._streamGroup = streamGroup; return this; }; /** * Provides the streamGroup of this System. * @return {string} The streamGroup of this System. */ tutao.entity.sys.System.prototype.getStreamGroup = function() { return this._streamGroup; }; /** * Loads the streamGroup of this System. * @return {Promise.<tutao.entity.sys.Group>} Resolves to the loaded streamGroup of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadStreamGroup = function() { return tutao.entity.sys.Group.load(this._streamGroup); }; /** * Sets the systemAdminGroup of this System. * @param {string} systemAdminGroup The systemAdminGroup of this System. */ tutao.entity.sys.System.prototype.setSystemAdminGroup = function(systemAdminGroup) { this._systemAdminGroup = systemAdminGroup; return this; }; /** * Provides the systemAdminGroup of this System. * @return {string} The systemAdminGroup of this System. */ tutao.entity.sys.System.prototype.getSystemAdminGroup = function() { return this._systemAdminGroup; }; /** * Loads the systemAdminGroup of this System. * @return {Promise.<tutao.entity.sys.Group>} Resolves to the loaded systemAdminGroup of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadSystemAdminGroup = function() { return tutao.entity.sys.Group.load(this._systemAdminGroup); }; /** * Sets the systemCustomer of this System. * @param {string} systemCustomer The systemCustomer of this System. 
*/ tutao.entity.sys.System.prototype.setSystemCustomer = function(systemCustomer) { this._systemCustomer = systemCustomer; return this; }; /** * Provides the systemCustomer of this System. * @return {string} The systemCustomer of this System. */ tutao.entity.sys.System.prototype.getSystemCustomer = function() { return this._systemCustomer; }; /** * Loads the systemCustomer of this System. * @return {Promise.<tutao.entity.sys.Customer>} Resolves to the loaded systemCustomer of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadSystemCustomer = function() { return tutao.entity.sys.Customer.load(this._systemCustomer); }; /** * Sets the systemCustomerInfo of this System. * @param {string} systemCustomerInfo The systemCustomerInfo of this System. */ tutao.entity.sys.System.prototype.setSystemCustomerInfo = function(systemCustomerInfo) { this._systemCustomerInfo = systemCustomerInfo; return this; }; /** * Provides the systemCustomerInfo of this System. * @return {string} The systemCustomerInfo of this System. */ tutao.entity.sys.System.prototype.getSystemCustomerInfo = function() { return this._systemCustomerInfo; }; /** * Sets the systemUserGroup of this System. * @param {string} systemUserGroup The systemUserGroup of this System. */ tutao.entity.sys.System.prototype.setSystemUserGroup = function(systemUserGroup) { this._systemUserGroup = systemUserGroup; return this; }; /** * Provides the systemUserGroup of this System. * @return {string} The systemUserGroup of this System. */ tutao.entity.sys.System.prototype.getSystemUserGroup = function() { return this._systemUserGroup; }; /** * Loads the systemUserGroup of this System. * @return {Promise.<tutao.entity.sys.Group>} Resolves to the loaded systemUserGroup of this System or an exception if the loading failed. */ tutao.entity.sys.System.prototype.loadSystemUserGroup = function() { return tutao.entity.sys.Group.load(this._systemUserGroup); }; /** * Loads a System from the server. * @param {string} id The id of the System. * @return {Promise.<tutao.entity.sys.System>} Resolves to the System or an exception if the loading failed. */ tutao.entity.sys.System.load = function(id) { return tutao.locator.entityRestClient.getElement(tutao.entity.sys.System, tutao.entity.sys.System.PATH, id, null, {"v" : 9}, tutao.entity.EntityHelper.createAuthHeaders()).then(function(entity) { return entity; }); }; /** * Loads multiple Systems from the server. * @param {Array.<string>} ids The ids of the Systems to load. * @return {Promise.<Array.<tutao.entity.sys.System>>} Resolves to an array of System or rejects with an exception if the loading failed. */ tutao.entity.sys.System.loadMultiple = function(ids) { return tutao.locator.entityRestClient.getElements(tutao.entity.sys.System, tutao.entity.sys.System.PATH, ids, {"v": 9}, tutao.entity.EntityHelper.createAuthHeaders()).then(function(entities) { return entities; }); }; /** * Register a function that is called as soon as any attribute of the entity has changed. If this listener * was already registered it is not registered again. * @param {function(Object,*=)} listener. The listener function. When called it gets the entity and the given id as arguments. * @param {*=} id. An optional value that is just passed-through to the listener. */ tutao.entity.sys.System.prototype.registerObserver = function(listener, id) { this._entityHelper.registerObserver(listener, id); }; /** * Removes a registered listener function if it was registered before. * @param {function(Object)} listener. 
The listener to unregister. */ tutao.entity.sys.System.prototype.unregisterObserver = function(listener) { this._entityHelper.unregisterObserver(listener); }; /** * Provides the entity helper of this entity. * @return {tutao.entity.EntityHelper} The entity helper. */ tutao.entity.sys.System.prototype.getEntityHelper = function() { return this._entityHelper; };<|fim▁end|>
};
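Nearly all of System.js is mechanical: per field, one attribute-id constant, a chainable setter, a getter, and sometimes a loader. A compact Python sketch of emitting that boilerplate from a field list, as an entity generator presumably does; the generator shown is illustrative, not Tutanota's actual tooling.

# Illustrative generator; field names and ids copied from the file above.
FIELDS = [('lastInvoiceNbr', 591), ('freeCustomerInfos', 183), ('freeGroup', 191)]

def render_accessors(type_name, fields):
    out = []
    for name, attr_id in fields:
        cap = name[0].upper() + name[1:]
        out.append(f"{type_name}.prototype.{name.upper()}_ATTRIBUTE_ID = {attr_id};")
        out.append(f"{type_name}.prototype.set{cap} = function(v) {{ this._{name} = v; return this; }};")
        out.append(f"{type_name}.prototype.get{cap} = function() {{ return this._{name}; }};")
    return '\n'.join(out)

print(render_accessors('tutao.entity.sys.System', FIELDS))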
<|file_name|>test_streams.py<|end_file_name|><|fim▁begin|># Copyright 2020 ScyllaDB # # This file is part of Scylla. # # Scylla is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Scylla is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Scylla. If not, see <http://www.gnu.org/licenses/>. # Tests for stream operations: ListStreams, DescribeStream, GetShardIterator, # GetRecords. import pytest import time import urllib.request from botocore.exceptions import ClientError from util import list_tables, test_table_name, create_test_table, random_string, freeze from contextlib import contextmanager from urllib.error import URLError from boto3.dynamodb.types import TypeDeserializer stream_types = [ 'OLD_IMAGE', 'NEW_IMAGE', 'KEYS_ONLY', 'NEW_AND_OLD_IMAGES'] def disable_stream(dynamodbstreams, table): while True: try: table.update(StreamSpecification={'StreamEnabled': False}); while True: streams = dynamodbstreams.list_streams(TableName=table.name) if not streams.get('Streams'): return # when running against real dynamodb, modifying stream # state has a delay. need to wait for propagation. print("Stream(s) lingering. Sleep 10s...") time.sleep(10) except ClientError as ce: # again, real dynamo has periods when state cannot yet # be modified. if ce.response['Error']['Code'] == 'ResourceInUseException': print("Stream(s) in use. Sleep 10s...") time.sleep(10) continue raise # # Cannot use fixtures. Because real dynamodb cannot _remove_ a stream # once created. It can only expire 24h later. So reusing test_table for # example works great for scylla/local testing, but once we run against # actual aws instances, we get lingering streams, and worse: cannot # create new ones to replace it, because we have overlapping types etc. # # So we have to create and delete a table per test. And not run this # test to often against aws. @contextmanager def create_stream_test_table(dynamodb, StreamViewType=None): spec = { 'StreamEnabled': False } if StreamViewType != None: spec = {'StreamEnabled': True, 'StreamViewType': StreamViewType} table = create_test_table(dynamodb, StreamSpecification=spec, KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' }, { 'AttributeName': 'c', 'KeyType': 'RANGE' } ], AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' }, { 'AttributeName': 'c', 'AttributeType': 'S' }, ]) yield table while True: try: table.delete() return except ClientError as ce: # if the table has a stream currently being created we cannot # delete the table immediately. Again, only with real dynamo if ce.response['Error']['Code'] == 'ResourceInUseException': print('Could not delete table yet. 
Sleeping 5s.') time.sleep(5) continue; raise def wait_for_active_stream(dynamodbstreams, table, timeout=60): exp = time.process_time() + timeout while time.process_time() < exp: streams = dynamodbstreams.list_streams(TableName=table.name) for stream in streams['Streams']: desc = dynamodbstreams.describe_stream(StreamArn=stream['StreamArn'])['StreamDescription'] if not 'StreamStatus' in desc or desc.get('StreamStatus') == 'ENABLED': arn = stream['StreamArn'] if arn != None: return arn; # real dynamo takes some time until a stream is usable print("Stream not available. Sleep 5s...") time.sleep(5) assert False # Local java dynamodb server version behaves differently from # the "real" one. Most importantly, it does not verify a number of # parameters, and consequently does not throw when called with borked # args. This will try to check if we are in fact running against # this test server, and if so, just raise the error here and be done # with it. All this just so we can run through the tests on # aws, scylla and local. def is_local_java(dynamodbstreams): # no good way to check, but local server runs on a Jetty, # so check for that. url = dynamodbstreams.meta.endpoint_url try: urllib.request.urlopen(url) except URLError as e: return e.info()['Server'].startswith('Jetty') return False def ensure_java_server(dynamodbstreams, error='ValidationException'): # no good way to check, but local server has a "shell" builtin, # so check for that. if is_local_java(dynamodbstreams): if error != None: raise ClientError({'Error': { 'Code' : error }}, '') return assert False def test_list_streams_create(dynamodb, dynamodbstreams): for type in stream_types: with create_stream_test_table(dynamodb, StreamViewType=type) as table: wait_for_active_stream(dynamodbstreams, table) def test_list_streams_alter(dynamodb, dynamodbstreams): for type in stream_types: with create_stream_test_table(dynamodb, StreamViewType=None) as table: res = table.update(StreamSpecification={'StreamEnabled': True, 'StreamViewType': type}); wait_for_active_stream(dynamodbstreams, table) def test_list_streams_paged(dynamodb, dynamodbstreams): for type in stream_types: with create_stream_test_table(dynamodb, StreamViewType=type) as table1: with create_stream_test_table(dynamodb, StreamViewType=type) as table2: wait_for_active_stream(dynamodbstreams, table1) wait_for_active_stream(dynamodbstreams, table2) streams = dynamodbstreams.list_streams(Limit=1) assert streams assert streams.get('Streams') assert streams.get('LastEvaluatedStreamArn') tables = [ table1.name, table2.name ] while True: for s in streams['Streams']: name = s['TableName'] if name in tables: tables.remove(name) if not tables: break streams = dynamodbstreams.list_streams(Limit=1, ExclusiveStartStreamArn=streams['LastEvaluatedStreamArn']) @pytest.mark.skip(reason="Python driver validates Limit, so trying to test it is pointless") def test_list_streams_zero_limit(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: with pytest.raises(ClientError, match='ValidationException'): wait_for_active_stream(dynamodbstreams, table) dynamodbstreams.list_streams(Limit=0) def test_create_streams_wrong_type(dynamodb, dynamodbstreams, test_table): with pytest.raises(ClientError, match='ValidationException'): # should throw test_table.update(StreamSpecification={'StreamEnabled': True, 'StreamViewType': 'Fisk'}); # just in case the test fails, disable stream again test_table.update(StreamSpecification={'StreamEnabled': False}); def 
test_list_streams_empty(dynamodb, dynamodbstreams, test_table): streams = dynamodbstreams.list_streams(TableName=test_table.name) assert 'Streams' in streams assert not streams['Streams'] # empty def test_list_streams_with_nonexistent_last_stream(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: with pytest.raises(ClientError, match='ValidationException'): streams = dynamodbstreams.list_streams(TableName=table.name, ExclusiveStartStreamArn='kossaapaaasdafsdaasdasdasdasasdasfadfadfasdasdas') assert 'Streams' in streams assert not streams['Streams'] # empty # local java dynamodb does _not_ raise a validation error for a # malformed stream arn here. Verify. ensure_java_server(dynamodbstreams) def test_describe_stream(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: streams = dynamodbstreams.list_streams(TableName=table.name) arn = streams['Streams'][0]['StreamArn']; desc = dynamodbstreams.describe_stream(StreamArn=arn) assert desc; assert desc.get('StreamDescription') assert desc['StreamDescription']['StreamArn'] == arn assert desc['StreamDescription']['StreamStatus'] != 'DISABLED' assert desc['StreamDescription']['StreamViewType'] == 'KEYS_ONLY' assert desc['StreamDescription']['TableName'] == table.name assert desc['StreamDescription'].get('Shards') assert desc['StreamDescription']['Shards'][0].get('ShardId') assert desc['StreamDescription']['Shards'][0].get('SequenceNumberRange') assert desc['StreamDescription']['Shards'][0]['SequenceNumberRange'].get('StartingSequenceNumber') @pytest.mark.xfail(reason="alternator does not have creation time or label for streams") def test_describe_stream_label_and_creation_time(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: streams = dynamodbstreams.list_streams(TableName=table.name) arn = streams['Streams'][0]['StreamArn']; desc = dynamodbstreams.describe_stream(StreamArn=arn) assert desc; assert desc.get('StreamDescription') # note these are non-required attributes assert 'StreamLabel' in desc['StreamDescription'] assert 'CreationRequestDateTime' in desc['StreamDescription'] def test_describe_nonexistent_stream(dynamodb, dynamodbstreams): with pytest.raises(ClientError, match='ResourceNotFoundException' if is_local_java(dynamodbstreams) else 'ValidationException'): streams = dynamodbstreams.describe_stream(StreamArn='sdfadfsdfnlfkajakfgjalksfgklasjklasdjfklasdfasdfgasf') def test_describe_stream_with_nonexistent_last_shard(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: streams = dynamodbstreams.list_streams(TableName=table.name) arn = streams['Streams'][0]['StreamArn']; try: desc = dynamodbstreams.describe_stream(StreamArn=arn, ExclusiveStartShardId='zzzzzzzzzzzzzzzzzzzzzzzzsfasdgagadfadfgagkjsdfsfsdjfjks') assert not desc['StreamDescription']['Shards'] except: # local java throws here. real does not. 
ensure_java_server(dynamodbstreams, error=None) def test_get_shard_iterator(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: streams = dynamodbstreams.list_streams(TableName=table.name) arn = streams['Streams'][0]['StreamArn']; desc = dynamodbstreams.describe_stream(StreamArn=arn) shard_id = desc['StreamDescription']['Shards'][0]['ShardId']; seq = desc['StreamDescription']['Shards'][0]['SequenceNumberRange']['StartingSequenceNumber']; for type in ['AT_SEQUENCE_NUMBER', 'AFTER_SEQUENCE_NUMBER']: iter = dynamodbstreams.get_shard_iterator( StreamArn=arn, ShardId=shard_id, ShardIteratorType=type, SequenceNumber=seq ) assert iter.get('ShardIterator') for type in ['TRIM_HORIZON', 'LATEST']: iter = dynamodbstreams.get_shard_iterator( StreamArn=arn, ShardId=shard_id, ShardIteratorType=type ) assert iter.get('ShardIterator') for type in ['AT_SEQUENCE_NUMBER', 'AFTER_SEQUENCE_NUMBER']: # must have seq in these modes with pytest.raises(ClientError, match='ValidationException'): iter = dynamodbstreams.get_shard_iterator( StreamArn=arn, ShardId=shard_id, ShardIteratorType=type ) for type in ['TRIM_HORIZON', 'LATEST']: # should not set "seq" in these modes with pytest.raises(ClientError, match='ValidationException'): dynamodbstreams.get_shard_iterator( StreamArn=arn, ShardId=shard_id, ShardIteratorType=type, SequenceNumber=seq ) # bad arn with pytest.raises(ClientError, match='ValidationException'): iter = dynamodbstreams.get_shard_iterator( StreamArn='sdfadsfsdfsdgdfsgsfdabadfbabdadsfsdfsdfsdfsdfsdfsdfdfdssdffbdfdf', ShardId=shard_id, ShardIteratorType=type, SequenceNumber=seq ) # bad shard id with pytest.raises(ClientError, match='ResourceNotFoundException'): dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId='semprinidfaasdasfsdvacsdcfsdsvsdvsdvsdvsdvsdv', ShardIteratorType='LATEST' ) # bad iter type with pytest.raises(ClientError, match='ValidationException'): dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='bulle', SequenceNumber=seq ) # bad seq with pytest.raises(ClientError, match='ValidationException'): dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='LATEST', SequenceNumber='sdfsafglldfngjdafnasdflgnaldklkafdsgklnasdlv' ) def test_get_shard_iterator_for_nonexistent_stream(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: arn = wait_for_active_stream(dynamodbstreams, table) desc = dynamodbstreams.describe_stream(StreamArn=arn) shards = desc['StreamDescription']['Shards'] with pytest.raises(ClientError, match='ResourceNotFoundException' if is_local_java(dynamodbstreams) else 'ValidationException'): dynamodbstreams.get_shard_iterator( StreamArn='sdfadfsddafgdafsgjnadflkgnalngalsdfnlkasnlkasdfasdfasf', ShardId=shards[0]['ShardId'], ShardIteratorType='LATEST' ) def test_get_shard_iterator_for_nonexistent_shard(dynamodb, dynamodbstreams): with create_stream_test_table(dynamodb, StreamViewType='KEYS_ONLY') as table: streams = dynamodbstreams.list_streams(TableName=table.name) arn = streams['Streams'][0]['StreamArn']; with pytest.raises(ClientError, match='ResourceNotFoundException'): dynamodbstreams.get_shard_iterator( StreamArn=arn, ShardId='adfasdasdasdasdasdasdasdasdasasdasd', ShardIteratorType='LATEST' ) def test_get_records(dynamodb, dynamodbstreams): # TODO: add tests for storage/transactionable variations and global/local index with create_stream_test_table(dynamodb, 
StreamViewType='NEW_AND_OLD_IMAGES') as table: arn = wait_for_active_stream(dynamodbstreams, table) p = 'piglet' c = 'ninja' val = 'lucifers' val2 = 'flowers' table.put_item(Item={'p': p, 'c': c, 'a1': val, 'a2': val2}) nval = 'semprini' nval2 = 'nudge' table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={ 'a1': {'Value': nval, 'Action': 'PUT'}, 'a2': {'Value': nval2, 'Action': 'PUT'} }) has_insert = False # in truth, we should sleep already here, since at least scylla # will not be able to produce any stream content until # ~30s after insert/update (confidence interval) # but it is useful to see a working null-iteration as well, so # let's go already. while True: desc = dynamodbstreams.describe_stream(StreamArn=arn) iterators = [] while True: shards = desc['StreamDescription']['Shards'] for shard in shards: shard_id = shard['ShardId'] start = shard['SequenceNumberRange']['StartingSequenceNumber'] iter = dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='AT_SEQUENCE_NUMBER',SequenceNumber=start)['ShardIterator'] iterators.append(iter) last_shard = desc["StreamDescription"].get("LastEvaluatedShardId") if not last_shard: break desc = dynamodbstreams.describe_stream(StreamArn=arn, ExclusiveStartShardId=last_shard) next_iterators = [] while iterators: iter = iterators.pop(0) response = dynamodbstreams.get_records(ShardIterator=iter, Limit=1000) next = response['NextShardIterator'] if next != '': next_iterators.append(next) records = response.get('Records') # print("Query {} -> {}".format(iter, records)) if records: for record in records: # print("Record: {}".format(record)) type = record['eventName'] dynamodb = record['dynamodb'] keys = dynamodb['Keys'] assert keys.get('p') assert keys.get('c') assert keys['p'].get('S') assert keys['p']['S'] == p assert keys['c'].get('S') assert keys['c']['S'] == c if type == 'MODIFY' or type == 'INSERT': assert dynamodb.get('NewImage') newimage = dynamodb['NewImage']; assert newimage.get('a1') assert newimage.get('a2') if type == 'INSERT' or (type == 'MODIFY' and not has_insert): assert newimage['a1']['S'] == val assert newimage['a2']['S'] == val2 has_insert = True continue if type == 'MODIFY': assert has_insert assert newimage['a1']['S'] == nval assert newimage['a2']['S'] == nval2 assert dynamodb.get('OldImage') oldimage = dynamodb['OldImage']; assert oldimage.get('a1') assert oldimage.get('a2') assert oldimage['a1']['S'] == val assert oldimage['a2']['S'] == val2 return print("Not done. Sleep 10s...") time.sleep(10) iterators = next_iterators def test_get_records_nonexistent_iterator(dynamodbstreams): with pytest.raises(ClientError, match='ValidationException'): dynamodbstreams.get_records(ShardIterator='sdfsdfsgagaddafgagasgasgasdfasdfasdfasdfasdgasdasdgasdg', Limit=1000) ############################################################################## # Fixtures for creating a table with a stream enabled with one of the allowed # StreamViewType settings (KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES). # Unfortunately changing the StreamViewType setting of an existing stream is # not allowed (see test_streams_change_type), and while removing and re-adding # a stream is possible, it is very slow. So we create four # different fixtures, one for each of the four StreamViewType settings. 
# # It turns out that DynamoDB makes reusing the same table in different tests # very difficult, because when we request a "LATEST" iterator we sometimes # miss the immediately following write (this issue doesn't happen in # Alternator, just in DynamoDB - presumably LATEST adds some time slack?) # So all the fixtures we create below have scope="function", meaning that a # separate table is created for each of the tests using these fixtures. This # slows the tests down a bit, but not by much (about 0.05 seconds per test). # It is still worthwhile to use a fixture rather than to create a table # explicitly - it is convenient, safe (the table gets deleted automatically) # and if in the future we can work around the DynamoDB problem, we can return # these fixtures to session scope. def create_table_ss(dynamodb, dynamodbstreams, type): table = create_test_table(dynamodb, KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }, { 'AttributeName': 'c', 'KeyType': 'RANGE' }], AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }, { 'AttributeName': 'c', 'AttributeType': 'S' }], StreamSpecification={ 'StreamEnabled': True, 'StreamViewType': type }) arn = wait_for_active_stream(dynamodbstreams, table, timeout=60) yield table, arn table.delete() @pytest.fixture(scope="function") def test_table_ss_keys_only(dynamodb, dynamodbstreams): yield from create_table_ss(dynamodb, dynamodbstreams, 'KEYS_ONLY') @pytest.fixture(scope="function") def test_table_ss_new_image(dynamodb, dynamodbstreams): yield from create_table_ss(dynamodb, dynamodbstreams, 'NEW_IMAGE') @pytest.fixture(scope="function") def test_table_ss_old_image(dynamodb, dynamodbstreams): yield from create_table_ss(dynamodb, dynamodbstreams, 'OLD_IMAGE') @pytest.fixture(scope="function") def test_table_ss_new_and_old_images(dynamodb, dynamodbstreams): yield from create_table_ss(dynamodb, dynamodbstreams, 'NEW_AND_OLD_IMAGES') # Test that it is, sadly, not allowed to use UpdateTable on a table which # already has a stream enabled to change that stream's StreamViewType. # Currently, Alternator does allow this (see issue #6939), so the test is # marked xfail. @pytest.mark.xfail(reason="Alternator allows changing StreamViewType - see issue #6939") def test_streams_change_type(test_table_ss_keys_only): table, arn = test_table_ss_keys_only with pytest.raises(ClientError, match='ValidationException.*already'): table.update(StreamSpecification={'StreamEnabled': True, 'StreamViewType': 'OLD_IMAGE'}); # If the above change succeeded (because of issue #6939), switch it back :-) table.update(StreamSpecification={'StreamEnabled': True, 'StreamViewType': 'KEYS_ONLY'}); # Utility function for listing all the shards of the given stream arn. # Implemented by multiple calls to DescribeStream, possibly several pages # until all the shards are returned. The return value of this function should be # cached - it is potentially slow, and DynamoDB documentation even states # DescribeStream may only be called at a maximum rate of 10 times per second. # list_shards() only returns the shard IDs, not the information about the # shards' sequence number range, which is also returned by DescribeStream. def list_shards(dynamodbstreams, arn): # By default DescribeStream lists a limit of 100 shards. For faster # tests we reduced the number of shards in the testing setup to # 32 (16 vnodes x 2 cpus), see issue #6979, so to still exercise this # paging feature, let's use a limit of 10. 
limit = 10 response = dynamodbstreams.describe_stream(StreamArn=arn, Limit=limit)['StreamDescription'] assert len(response['Shards']) <= limit shards = [x['ShardId'] for x in response['Shards']] while 'LastEvaluatedShardId' in response: response = dynamodbstreams.describe_stream(StreamArn=arn, Limit=limit, ExclusiveStartShardId=response['LastEvaluatedShardId'])['StreamDescription'] assert len(response['Shards']) <= limit shards.extend([x['ShardId'] for x in response['Shards']]) print('Number of shards in stream: {}'.format(len(shards))) return shards # Utility function for getting shard iterators starting at "LATEST" for # all the shards of the given stream arn. def latest_iterators(dynamodbstreams, arn): iterators = [] for shard_id in list_shards(dynamodbstreams, arn): iterators.append(dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='LATEST')['ShardIterator']) return iterators # Utility function for fetching more content from the stream (given its # array of iterators) into an "output" array. Call repeatedly to get more # content - the function returns a new array of iterators which should be # used to replace the input list of iterators. # Note that the order of the updates is guaranteed for the same partition, # but cannot be trusted for *different* partitions. def fetch_more(dynamodbstreams, iterators, output): new_iterators = [] for iter in iterators: response = dynamodbstreams.get_records(ShardIterator=iter) if 'NextShardIterator' in response: new_iterators.append(response['NextShardIterator']) output.extend(response['Records']) return new_iterators # Utility function for comparing "output" as fetched by fetch_more(), to a list # expected_events, each of which looks like: # [type, keys, old_image, new_image] # where type is REMOVE, INSERT or MODIFY. # The "mode" parameter specifies which StreamViewType mode (KEYS_ONLY, # OLD_IMAGE, NEW_IMAGE, NEW_AND_OLD_IMAGES) was supposedly used to generate # "output". This mode dictates what we can compare - e.g., in KEYS_ONLY mode # the compare_events() function ignores the old and new image in # expected_events. # compare_events() throws an exception immediately if it sees an unexpected # event, but if some of the expected events are just missing in the "output", # it only returns false - suggesting maybe the caller needs to try again # later - maybe more output is coming. # Note that the order of events is only guaranteed (and therefore compared) # inside a single partition. def compare_events(expected_events, output, mode): # The order of expected_events is only meaningful inside a partition, so # let's convert it into a map indexed by partition key. expected_events_map = {} for event in expected_events: expected_type, expected_key, expected_old_image, expected_new_image = event # For simplicity, we actually use the entire key, not just the partition # key. We only lose a bit of testing power we didn't plan to test anyway # (that events for different items in the same partition are ordered). key = freeze(expected_key) if not key in expected_events_map: expected_events_map[key] = [] expected_events_map[key].append(event) # Iterate over the events in output. An event for a certain key needs to # be the *first* remaining event for this key in expected_events_map (and # then we remove this matched event from expected_events_map) for event in output: # In DynamoDB, eventSource is 'aws:dynamodb'. We decided to set it to # a *different* value - 'scylladb:alternator'. Issue #6931. 
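        # (Editor's illustrative aside, not part of the original test: given the
        # two backend values named in the comment above, an explicit check could
        # look like
        #     assert event['eventSource'] in ('aws:dynamodb', 'scylladb:alternator')
        # but the assert below deliberately checks only that the member exists.)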
assert 'eventSource' in event # Alternator is missing "awsRegion", which makes little sense for it # (although maybe we should have provided the DC name). Issue #6931. #assert 'awsRegion' in event # Alternator is also missing the "eventVersion" entry. Issue #6931. #assert 'eventVersion' in event # Check that eventID appears, but can't check much on what it is. assert 'eventID' in event op = event['eventName'] record = event['dynamodb'] # record['Keys'] is "serialized" JSON, ({'S', 'thestring'}), so we # want to deserialize it to match our expected_events content. deserializer = TypeDeserializer() key = {x:deserializer.deserialize(y) for (x,y) in record['Keys'].items()} expected_type, expected_key, expected_old_image, expected_new_image = expected_events_map[freeze(key)].pop(0) if expected_type != '*': # hack to allow a caller to not test op, to bypass issue #6918. assert op == expected_type assert record['StreamViewType'] == mode # Check that all the expected members appear in the record, even if # we don't have anything to compare them to (TODO: we should probably # at least check they have proper format). assert 'ApproximateCreationDateTime' in record assert 'SequenceNumber' in record # Alternator doesn't set the SizeBytes member. Issue #6931. #assert 'SizeBytes' in record if mode == 'KEYS_ONLY': assert not 'NewImage' in record assert not 'OldImage' in record elif mode == 'NEW_IMAGE': assert not 'OldImage' in record if expected_new_image == None: assert not 'NewImage' in record else: new_image = {x:deserializer.deserialize(y) for (x,y) in record['NewImage'].items()} assert expected_new_image == new_image elif mode == 'OLD_IMAGE': assert not 'NewImage' in record if expected_old_image == None: assert not 'OldImage' in record else: old_image = {x:deserializer.deserialize(y) for (x,y) in record['OldImage'].items()} assert expected_old_image == old_image elif mode == 'NEW_AND_OLD_IMAGES': if expected_new_image == None: assert not 'NewImage' in record else: new_image = {x:deserializer.deserialize(y) for (x,y) in record['NewImage'].items()} assert expected_new_image == new_image if expected_old_image == None: assert not 'OldImage' in record else: old_image = {x:deserializer.deserialize(y) for (x,y) in record['OldImage'].items()} assert expected_old_image == old_image else: pytest.fail('cannot happen') # After the above loop, expected_events_map should contain only empty arrays. # If it doesn't, one of the expected events did not yet happen. Return False. for entry in expected_events_map.values(): if len(entry) > 0: return False return True # Convenience function used to implement several tests below. It runs a given # function "updatefunc" which is supposed to do some updates to the table # and also return an expected_events list. do_test() then fetches the streams # data and compares it to the expected_events using compare_events(). def do_test(test_table_ss_stream, dynamodbstreams, updatefunc, mode, p = random_string(), c = random_string()): table, arn = test_table_ss_stream iterators = latest_iterators(dynamodbstreams, arn) expected_events = updatefunc(table, p, c) # Updating the stream is asynchronous. Moreover, it may even be delayed # artificially (see Alternator's alternator_streams_time_window_s config). # So if compare_events() reports the stream data is missing some of the # expected events (by returning False), we need to retry it for some time. 
# Note that compare_events() throws if the stream data contains *wrong* # (unexpected) data, so even failing tests usually finish reasonably # fast - depending on the alternator_streams_time_window_s parameter. # This optimization is important to keep *failing* tests reasonably # fast and not have to wait until the following arbitrary timeout. timeout = time.time() + 20 output = [] while time.time() < timeout: iterators = fetch_more(dynamodbstreams, iterators, output) print("after fetch_more number expected_events={}, output={}".format(len(expected_events), len(output))) if compare_events(expected_events, output, mode): # success! return time.sleep(0.5)<|fim▁hole|># Test a single PutItem of a new item. Should result in a single INSERT # event. Currently fails because in Alternator, PutItem - which generates a # tombstone to *replace* an item - generates REMOVE+MODIFY (issue #6930). @pytest.mark.xfail(reason="Currently fails - see issue #6930") def test_streams_putitem_keys_only(test_table_ss_keys_only, dynamodbstreams): def do_updates(table, p, c): events = [] table.put_item(Item={'p': p, 'c': c, 'x': 2}) events.append(['INSERT', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) return events do_test(test_table_ss_keys_only, dynamodbstreams, do_updates, 'KEYS_ONLY') # Test a single UpdateItem. Should result in a single INSERT event. # Currently fails because Alternator generates a MODIFY event even though # this is a new item (issue #6918). @pytest.mark.xfail(reason="Currently fails - see issue #6918") def test_streams_updateitem_keys_only(test_table_ss_keys_only, dynamodbstreams): def do_updates(table, p, c): events = [] table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 2}) events.append(['INSERT', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) return events do_test(test_table_ss_keys_only, dynamodbstreams, do_updates, 'KEYS_ONLY') # This is exactly the same test as test_streams_updateitem_keys_only except # we don't verify the type of event we find (MODIFY or INSERT). It allows us # to have at least one good GetRecords test until solving issue #6918. # When we do solve that issue, this test should be removed. def test_streams_updateitem_keys_only_2(test_table_ss_keys_only, dynamodbstreams): def do_updates(table, p, c): events = [] table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 2}) events.append(['*', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) return events do_test(test_table_ss_keys_only, dynamodbstreams, do_updates, 'KEYS_ONLY') # Test OLD_IMAGE using UpdateItem. Verify that the OLD_IMAGE indeed includes, # as needed, the entire old item and not just the modified columns. # Reproduces issue #6935 def test_streams_updateitem_old_image(test_table_ss_old_image, dynamodbstreams): def do_updates(table, p, c): events = [] table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 2}) # We use here '*' instead of the really expected 'INSERT' to avoid # checking again the same Alternator bug already checked by # test_streams_updateitem_keys_only (issue #6918). # Note: The "None" as OldImage here tests that the OldImage should be # missing because the item didn't exist. This reproduces issue #6933. 
events.append(['*', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET y = :val1', ExpressionAttributeValues={':val1': 3}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2}, {'p': p, 'c': c, 'x': 2, 'y': 3}]) return events do_test(test_table_ss_old_image, dynamodbstreams, do_updates, 'OLD_IMAGE') # Above we verified that if an item did not previously exist, the OLD_IMAGE # would be missing, but if the item did previously exist, OLD_IMAGE should # be present and must include the key. Here we confirm the special case of the # latter - a pre-existing *empty* item, which has just the # key. In this case, since the item did exist, OLD_IMAGE should be returned - # and include just the key. This is a special case of reproducing #6935 - # the first patch for this issue failed in this special case. def test_streams_updateitem_old_image_empty_item(test_table_ss_old_image, dynamodbstreams): def do_updates(table, p, c): events = [] # Create an *empty* item, with nothing except a key: table.update_item(Key={'p': p, 'c': c}) events.append(['*', {'p': p, 'c': c}, None, {'p': p, 'c': c}]) table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET y = :val1', ExpressionAttributeValues={':val1': 3}) # Note that OLD_IMAGE should be present and be the empty item, # with just a key, not entirely missing. events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c}, {'p': p, 'c': c, 'y': 3}]) return events do_test(test_table_ss_old_image, dynamodbstreams, do_updates, 'OLD_IMAGE') # Test that OLD_IMAGE indeed includes the entire old item and not just the # modified attributes, in the special case of attributes which are a key of # a secondary index. # The unique thing about this case is that as currently implemented, # secondary-index key attributes are real Scylla columns, contrasting with # other attributes which are just elements of a map. And our CDC # implementation treats those cases differently - when a map is modified # the preimage includes the entire content of the map, but for regular # columns they are only included in the preimage if they change. # Currently fails in Alternator because the item's key is missing in # OldImage (#6935) and the LSI key is also missing (#7030). @pytest.fixture(scope="function") def test_table_ss_old_image_and_lsi(dynamodb, dynamodbstreams): table = create_test_table(dynamodb, KeySchema=[ {'AttributeName': 'p', 'KeyType': 'HASH'}, {'AttributeName': 'c', 'KeyType': 'RANGE'}], AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' }, { 'AttributeName': 'c', 'AttributeType': 'S' }, { 'AttributeName': 'k', 'AttributeType': 'S' }], LocalSecondaryIndexes=[{ 'IndexName': 'index', 'KeySchema': [ {'AttributeName': 'p', 'KeyType': 'HASH'}, {'AttributeName': 'k', 'KeyType': 'RANGE'}], 'Projection': { 'ProjectionType': 'ALL' } }], StreamSpecification={ 'StreamEnabled': True, 'StreamViewType': 'OLD_IMAGE' }) arn = wait_for_active_stream(dynamodbstreams, table, timeout=60) yield table, arn table.delete() def test_streams_updateitem_old_image_lsi(test_table_ss_old_image_and_lsi, dynamodbstreams): def do_updates(table, p, c): events = [] table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :x, k = :k', ExpressionAttributeValues={':x': 2, ':k': 'dog'}) # We use here '*' instead of the really expected 'INSERT' to avoid # checking again the same Alternator bug already checked by # test_streams_updateitem_keys_only (issue #6918). 
events.append(['*', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2, 'k': 'dog'}]) table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET y = :y', ExpressionAttributeValues={':y': 3}) # In issue #7030, the 'k' value was missing from the OldImage. events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2, 'k': 'dog'}, {'p': p, 'c': c, 'x': 2, 'k': 'dog', 'y': 3}]) return events do_test(test_table_ss_old_image_and_lsi, dynamodbstreams, do_updates, 'OLD_IMAGE') # Tests similar to the above tests for OLD_IMAGE, just for NEW_IMAGE mode. # Verify that the NEW_IMAGE includes the entire item (including the key), # that deleting the item results in a missing NEW_IMAGE, and that setting the # item to be empty has a different result - a NEW_IMAGE with just a key. # Reproduces issue #7107. def test_streams_new_image(test_table_ss_new_image, dynamodbstreams): def do_updates(table, p, c): events = [] table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 2}) # Confirm that when adding one attribute "x", the NewImage contains both # the new x, and the key columns (p and c). # We use here '*' instead of 'INSERT' to avoid testing issue #6918 here. events.append(['*', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) # Confirm that when adding just attribute "y", the NewImage will contain # all the attributes, including the old "x": table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET y = :val1', ExpressionAttributeValues={':val1': 3}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2}, {'p': p, 'c': c, 'x': 2, 'y': 3}]) # Confirm that when deleting the columns x and y, the NewImage becomes # empty - but still exists and contains the key columns. table.update_item(Key={'p': p, 'c': c}, UpdateExpression='REMOVE x, y') events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2, 'y': 3}, {'p': p, 'c': c}]) # Confirm that deleting the item results in a missing NewImage: table.delete_item(Key={'p': p, 'c': c}) events.append(['REMOVE', {'p': p, 'c': c}, {'p': p, 'c': c}, None]) return events do_test(test_table_ss_new_image, dynamodbstreams, do_updates, 'NEW_IMAGE') # Test similar to the above test for NEW_IMAGE corner cases, but here for # NEW_AND_OLD_IMAGES mode. # Although it is likely that if both OLD_IMAGE and NEW_IMAGE work correctly then # so will the combined NEW_AND_OLD_IMAGES mode, it is still possible that the # implementation of the combined mode has unique bugs, so it is worth testing # it separately. # Reproduces issue #7107. def test_streams_new_and_old_images(test_table_ss_new_and_old_images, dynamodbstreams): def do_updates(table, p, c): events = [] table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 2}) # The item doesn't yet exist, so OldImage is missing. # We use here '*' instead of 'INSERT' to avoid testing issue #6918 here. events.append(['*', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) # Confirm that when adding just attribute "y", the NewImage will contain # all the attributes, including the old "x". 
Also, OldImage contains the # key attributes, not just "x": table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET y = :val1', ExpressionAttributeValues={':val1': 3}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2}, {'p': p, 'c': c, 'x': 2, 'y': 3}]) # Confirm that when deleting the attributes x and y, the NewImage becomes # empty - but still exists and contains the key attributes: table.update_item(Key={'p': p, 'c': c}, UpdateExpression='REMOVE x, y') events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2, 'y': 3}, {'p': p, 'c': c}]) # Confirm that when adding an attribute z to the empty item, OldImage is # not missing - it just contains only the key attributes: table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET z = :val1', ExpressionAttributeValues={':val1': 4}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c}, {'p': p, 'c': c, 'z': 4}]) # Confirm that deleting the item results in a missing NewImage: table.delete_item(Key={'p': p, 'c': c}) events.append(['REMOVE', {'p': p, 'c': c}, {'p': p, 'c': c, 'z': 4}, None]) return events do_test(test_table_ss_new_and_old_images, dynamodbstreams, do_updates, 'NEW_AND_OLD_IMAGES') # Test that when a stream shard has no data to read, GetRecords returns an # empty Records array - not a missing one. Reproduces issue #6926. def test_streams_no_records(test_table_ss_keys_only, dynamodbstreams): table, arn = test_table_ss_keys_only # Get just one shard - any shard - and its LATEST iterator. Because it's # LATEST, there will be no data to read from this iterator. shard_id = dynamodbstreams.describe_stream(StreamArn=arn, Limit=1)['StreamDescription']['Shards'][0]['ShardId'] iter = dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='LATEST')['ShardIterator'] response = dynamodbstreams.get_records(ShardIterator=iter) assert 'NextShardIterator' in response assert 'Records' in response # We expect Records to be empty - there is no data at the LATEST iterator. assert response['Records'] == [] # Test that after fetching the last result from a shard, we don't get it # yet again. Reproduces issue #6942. def test_streams_last_result(test_table_ss_keys_only, dynamodbstreams): table, arn = test_table_ss_keys_only iterators = latest_iterators(dynamodbstreams, arn) # Do an UpdateItem operation that is expected to leave one event in the # stream. table.update_item(Key={'p': random_string(), 'c': random_string()}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 5}) # Eventually (we may need to retry this for a while), *one* of the # stream shards will return one event: timeout = time.time() + 15 while time.time() < timeout: for iter in iterators: response = dynamodbstreams.get_records(ShardIterator=iter) if 'Records' in response and response['Records'] != []: # Found the shard with the data! Test that it only has # one event and that if we try to read again, we don't # get more data (this was issue #6942). assert len(response['Records']) == 1 assert 'NextShardIterator' in response response = dynamodbstreams.get_records(ShardIterator=response['NextShardIterator']) assert response['Records'] == [] return time.sleep(0.5) pytest.fail("timed out") # In test_streams_last_result above we tested that there are no further events # after reading the only one. In this test we verify that if we *do* perform # another change on the same key, we do get another event and it happens on the # *same* shard. 
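# (Hedged editor note: the next test leans on the documented DynamoDB stream
# behaviour that, at any one time, all records for a given partition key land
# in a single shard, so the second event must appear on the same shard that
# produced the first.)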
def test_streams_another_result(test_table_ss_keys_only, dynamodbstreams): table, arn = test_table_ss_keys_only iterators = latest_iterators(dynamodbstreams, arn) # Do an UpdateItem operation that is expected to leave one event in the # stream. p = random_string() c = random_string() table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 5}) # Eventually, *one* of the stream shards will return one event: timeout = time.time() + 15 while time.time() < timeout: for iter in iterators: response = dynamodbstreams.get_records(ShardIterator=iter) if 'Records' in response and response['Records'] != []: # Finally found the shard reporting the changes to this key assert len(response['Records']) == 1 assert response['Records'][0]['dynamodb']['Keys']== {'p': {'S': p}, 'c': {'S': c}} assert 'NextShardIterator' in response iter = response['NextShardIterator'] # Found the shard with the data. It only has one event so if # we try to read again, we find nothing (this is the same as # what test_streams_last_result tests). response = dynamodbstreams.get_records(ShardIterator=iter) assert response['Records'] == [] assert 'NextShardIterator' in response iter = response['NextShardIterator'] # Do another UpdateItem operation to the same key, so it is # expected to write to the *same* shard: table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val2', ExpressionAttributeValues={':val2': 7}) # Again we may need to wait for the event to appear on the stream: timeout = time.time() + 15 while time.time() < timeout: response = dynamodbstreams.get_records(ShardIterator=iter) if 'Records' in response and response['Records'] != []: assert len(response['Records']) == 1 assert response['Records'][0]['dynamodb']['Keys']== {'p': {'S': p}, 'c': {'S': c}} assert 'NextShardIterator' in response # The test is done, successfully. return time.sleep(0.5) pytest.fail("timed out") time.sleep(0.5) pytest.fail("timed out") # Test the SequenceNumber attribute returned for stream events, and the # "AT_SEQUENCE_NUMBER" iterator that can be used to re-read from the same # event again given its saved "sequence number". def test_streams_at_sequence_number(test_table_ss_keys_only, dynamodbstreams): table, arn = test_table_ss_keys_only # List all the shards and their LATEST iterators: shards_and_iterators = [] for shard_id in list_shards(dynamodbstreams, arn): shards_and_iterators.append((shard_id, dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='LATEST')['ShardIterator'])) # Do an UpdateItem operation that is expected to leave one event in the # stream. p = random_string() c = random_string() table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 5}) # Eventually, *one* of the stream shards will return the one event: timeout = time.time() + 15 while time.time() < timeout: for (shard_id, iter) in shards_and_iterators: response = dynamodbstreams.get_records(ShardIterator=iter) if 'Records' in response and response['Records'] != []: # Finally found the shard reporting the changes to this key: assert len(response['Records']) == 1 assert response['Records'][0]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert 'NextShardIterator' in response sequence_number = response['Records'][0]['dynamodb']['SequenceNumber'] # Found the shard with the data. It only has one event so if # we try to read again, we find nothing (this is the same as # what test_streams_last_result tests). 
iter = response['NextShardIterator'] response = dynamodbstreams.get_records(ShardIterator=iter) assert response['Records'] == [] assert 'NextShardIterator' in response # If we use the SequenceNumber of the first event to create an # AT_SEQUENCE_NUMBER iterator, we can read the same event again. # We don't need a loop and a timeout, because this event is already # available. iter = dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='AT_SEQUENCE_NUMBER', SequenceNumber=sequence_number)['ShardIterator'] response = dynamodbstreams.get_records(ShardIterator=iter) assert 'Records' in response assert len(response['Records']) == 1 assert response['Records'][0]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert response['Records'][0]['dynamodb']['SequenceNumber'] == sequence_number return time.sleep(0.5) pytest.fail("timed out") # Test the SequenceNumber attribute returned for stream events, and the # "AFTER_SEQUENCE_NUMBER" iterator that can be used to re-read *after* the same # event again given its saved "sequence number". def test_streams_after_sequence_number(test_table_ss_keys_only, dynamodbstreams): table, arn = test_table_ss_keys_only # List all the shards and their LATEST iterators: shards_and_iterators = [] for shard_id in list_shards(dynamodbstreams, arn): shards_and_iterators.append((shard_id, dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='LATEST')['ShardIterator'])) # Do two UpdateItem operations to the same key, that are expected to leave # two events in the stream. p = random_string() c = random_string() table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 3}) table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 5}) # Eventually, *one* of the stream shards will return the two events: timeout = time.time() + 15 while time.time() < timeout: for (shard_id, iter) in shards_and_iterators: response = dynamodbstreams.get_records(ShardIterator=iter) if 'Records' in response and len(response['Records']) == 2: assert response['Records'][0]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert response['Records'][1]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} sequence_number_1 = response['Records'][0]['dynamodb']['SequenceNumber'] sequence_number_2 = response['Records'][1]['dynamodb']['SequenceNumber'] # If we use the SequenceNumber of the first event to create an # AFTER_SEQUENCE_NUMBER iterator, we can read the second event # (only) again. We don't need a loop and a timeout, because this # event is already available. iter = dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='AFTER_SEQUENCE_NUMBER', SequenceNumber=sequence_number_1)['ShardIterator'] response = dynamodbstreams.get_records(ShardIterator=iter) assert 'Records' in response assert len(response['Records']) == 1 assert response['Records'][0]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert response['Records'][0]['dynamodb']['SequenceNumber'] == sequence_number_2 return time.sleep(0.5) pytest.fail("timed out") # Test the "TRIM_HORIZON" iterator, which can be used to re-read *all* the # previously-read events of the stream shard again. # NOTE: This test relies on the test_table_ss_keys_only fixture giving us a # brand new stream, with no old events saved from other tests. If we ever # change this, we should change this test to use a different fixture. 
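# (Editor's recap, hedged: of the four iterator types this file exercises,
# LATEST and TRIM_HORIZON must be requested without a SequenceNumber, while
# AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER require one - the contract
# verified by test_get_shard_iterator above. A checkpoint-resume sketch, with
# "saved_seq" being a hypothetical previously saved sequence number:
#     it = dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id,
#         ShardIteratorType='AFTER_SEQUENCE_NUMBER',
#         SequenceNumber=saved_seq)['ShardIterator']
#     records = dynamodbstreams.get_records(ShardIterator=it)['Records']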
def test_streams_trim_horizon(test_table_ss_keys_only, dynamodbstreams): table, arn = test_table_ss_keys_only # List all the shards and their LATEST iterators: shards_and_iterators = [] for shard_id in list_shards(dynamodbstreams, arn): shards_and_iterators.append((shard_id, dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='LATEST')['ShardIterator'])) # Do two UpdateItem operations to the same key, that are expected to leave # two events in the stream. p = random_string() c = random_string() table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 3}) table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 5}) # Eventually, *one* of the stream shards will return the two events: timeout = time.time() + 15 while time.time() < timeout: for (shard_id, iter) in shards_and_iterators: response = dynamodbstreams.get_records(ShardIterator=iter) if 'Records' in response and len(response['Records']) == 2: assert response['Records'][0]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert response['Records'][1]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} sequence_number_1 = response['Records'][0]['dynamodb']['SequenceNumber'] sequence_number_2 = response['Records'][1]['dynamodb']['SequenceNumber'] # If we use the TRIM_HORIZON iterator, we should receive the # same two events again, in the same order. # Note that we assume that the fixture gave us a brand new # stream, with no old events saved from other tests. If we # couldn't assume this, this test would need to become much # more complex, and would need to read from this shard until # we find the two events we are looking for. iter = dynamodbstreams.get_shard_iterator(StreamArn=arn, ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')['ShardIterator'] response = dynamodbstreams.get_records(ShardIterator=iter) assert 'Records' in response assert len(response['Records']) == 2 assert response['Records'][0]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert response['Records'][1]['dynamodb']['Keys'] == {'p': {'S': p}, 'c': {'S': c}} assert response['Records'][0]['dynamodb']['SequenceNumber'] == sequence_number_1 assert response['Records'][1]['dynamodb']['SequenceNumber'] == sequence_number_2 return time.sleep(0.5) pytest.fail("timed out") # Above we tested some specific operations in small tests aimed to reproduce # a specific bug; in the following tests we do all the different operations, # PutItem, DeleteItem, BatchWriteItem and UpdateItem, and check the resulting # stream for correctness. # The following tests focus on multiple operations on the *same* item. Those # should appear in the stream in the correct order. def do_updates_1(table, p, c): events = [] # a first put_item appears as an INSERT event. Note also empty old_image. table.put_item(Item={'p': p, 'c': c, 'x': 2}) events.append(['INSERT', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'x': 2}]) # a second put_item of the *same* key and same value, doesn't appear in the log at all! table.put_item(Item={'p': p, 'c': c, 'x': 2}) # a second put_item of the *same* key and different value, appears as a MODIFY event table.put_item(Item={'p': p, 'c': c, 'y': 3}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'x': 2}, {'p': p, 'c': c, 'y': 3}]) # deleting an item appears as a REMOVE event. Note no new_image at all, but there is an old_image. 
table.delete_item(Key={'p': p, 'c': c}) events.append(['REMOVE', {'p': p, 'c': c}, {'p': p, 'c': c, 'y': 3}, None]) # deleting a non-existent item doesn't appear in the log at all. table.delete_item(Key={'p': p, 'c': c}) # If update_item creates an item, the event is INSERT as well. table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET b = :val1', ExpressionAttributeValues={':val1': 4}) events.append(['INSERT', {'p': p, 'c': c}, None, {'p': p, 'c': c, 'b': 4}]) # If update_item modifies the item, note how the old and new images include both old and new columns table.update_item(Key={'p': p, 'c': c}, UpdateExpression='SET x = :val1', ExpressionAttributeValues={':val1': 5}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'b': 4}, {'p': p, 'c': c, 'b': 4, 'x': 5}]) # TODO: incredibly, if we uncomment the "REMOVE b" update below, it will be # completely missing from the DynamoDB stream - the test continues to # pass even though we didn't add another expected event, and even though # the preimage in the following expected event includes this "b" we will # remove. I couldn't reproduce this apparent DynamoDB bug in a smaller test. #table.update_item(Key={'p': p, 'c': c}, UpdateExpression='REMOVE b') # Test BatchWriteItem as well. This modifies the item, so will be a MODIFY event. table.meta.client.batch_write_item(RequestItems = {table.name: [{'PutRequest': {'Item': {'p': p, 'c': c, 'x': 5}}}]}) events.append(['MODIFY', {'p': p, 'c': c}, {'p': p, 'c': c, 'b': 4, 'x': 5}, {'p': p, 'c': c, 'x': 5}]) return events @pytest.mark.xfail(reason="Currently fails - because of multiple issues listed above") def test_streams_1_keys_only(test_table_ss_keys_only, dynamodbstreams): do_test(test_table_ss_keys_only, dynamodbstreams, do_updates_1, 'KEYS_ONLY') @pytest.mark.xfail(reason="Currently fails - because of multiple issues listed above") def test_streams_1_new_image(test_table_ss_new_image, dynamodbstreams): do_test(test_table_ss_new_image, dynamodbstreams, do_updates_1, 'NEW_IMAGE') @pytest.mark.xfail(reason="Currently fails - because of multiple issues listed above") def test_streams_1_old_image(test_table_ss_old_image, dynamodbstreams): do_test(test_table_ss_old_image, dynamodbstreams, do_updates_1, 'OLD_IMAGE') @pytest.mark.xfail(reason="Currently fails - because of multiple issues listed above") def test_streams_1_new_and_old_images(test_table_ss_new_and_old_images, dynamodbstreams): do_test(test_table_ss_new_and_old_images, dynamodbstreams, do_updates_1, 'NEW_AND_OLD_IMAGES') # TODO: tests on multiple partitions # TODO: write a test that disabling the stream and re-enabling it works, but # requires the user to wait for the first stream to become DISABLED before # creating the new one. Then ListStreams should return the two streams, # one DISABLED and one ENABLED? I'm not sure we want or can do this in # Alternator. # TODO: Can we test shard ending, or shard splitting? (shard splitting # requires the user - periodically or following shards ending - to call # DescribeStream again. We don't do this in any of our tests.)<|fim▁end|>
# If we're still here, the last compare_events returned false. pytest.fail('missing events in output: {}'.format(output))
<|file_name|>basic.rs<|end_file_name|><|fim▁begin|>extern crate jade; <|fim▁hole|><|fim▁end|>
#[test] fn test_basic_template() { }
<|file_name|>BrowserMain.cpp<|end_file_name|><|fim▁begin|>// Torc - Copyright 2011-2013 University of Southern California. All Rights Reserved. // $HeadURL$ // $Id$ // This program is free software: you can redistribute it and/or modify it under the terms of the // GNU General Public License as published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; // without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See // the GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along with this program. If // not, see <http://www.gnu.org/licenses/>. /// \file /// \brief Source for the ArchitectureBrowser command line utility. /// \details Opens the device database for the requested device and browses it with /// ArchitectureBrowser. #include <iostream> #include <fstream> #include "ArchitectureBrowser.hpp" #include "torc/common/DirectoryTree.hpp" /// \brief Main entry point for the architecture browser tool. int main(int argc, char *argv[]) { typedef std::string string; torc::common::DirectoryTree directoryTree(argv[0]); if (argc != 2) { std::cout << "Usage: " << argv[0] << " <device>" << std::endl; return 1; } string device_arg = argv[1]; torc::common::DeviceDesignator device(device_arg); torc::architecture::DDB db(device); torc::ArchitectureBrowser ab(db);<|fim▁hole|> ab.browse(); return 0; }<|fim▁end|>
<|file_name|>base.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import clients from tempest.common.utils.data_utils import rand_name import tempest.test class BaseIdentityAdminTest(tempest.test.BaseTestCase): <|fim▁hole|> os = clients.AdminManager(interface=cls._interface) cls.client = os.identity_client cls.token_client = os.token_client cls.endpoints_client = os.endpoints_client cls.v3_client = os.identity_v3_client cls.service_client = os.service_client cls.policy_client = os.policy_client cls.v3_token = os.token_v3_client cls.creds_client = os.credentials_client if not cls.client.has_admin_extensions(): raise cls.skipException("Admin extensions disabled") cls.data = DataGenerator(cls.client) cls.v3data = DataGenerator(cls.v3_client) os = clients.Manager(interface=cls._interface) cls.non_admin_client = os.identity_client cls.v3_non_admin_client = os.identity_v3_client @classmethod def tearDownClass(cls): cls.data.teardown_all() cls.v3data.teardown_all() super(BaseIdentityAdminTest, cls).tearDownClass() def disable_user(self, user_name): user = self.get_user_by_name(user_name) self.client.enable_disable_user(user['id'], False) def disable_tenant(self, tenant_name): tenant = self.get_tenant_by_name(tenant_name) self.client.update_tenant(tenant['id'], enabled=False) def get_user_by_name(self, name): _, users = self.client.get_users() user = [u for u in users if u['name'] == name] if len(user) > 0: return user[0] def get_tenant_by_name(self, name): _, tenants = self.client.list_tenants() tenant = [t for t in tenants if t['name'] == name] if len(tenant) > 0: return tenant[0] def get_role_by_name(self, name): _, roles = self.client.list_roles() role = [r for r in roles if r['name'] == name] if len(role) > 0: return role[0] class DataGenerator(object): def __init__(self, client): self.client = client self.users = [] self.tenants = [] self.roles = [] self.role_name = None self.v3_users = [] self.projects = [] self.v3_roles = [] def setup_test_user(self): """Set up a test user.""" self.setup_test_tenant() self.test_user = rand_name('test_user_') self.test_password = rand_name('pass_') self.test_email = self.test_user + '@testmail.tm' resp, self.user = self.client.create_user(self.test_user, self.test_password, self.tenant['id'], self.test_email) self.users.append(self.user) def setup_test_tenant(self): """Set up a test tenant.""" self.test_tenant = rand_name('test_tenant_') self.test_description = rand_name('desc_') resp, self.tenant = self.client.create_tenant( name=self.test_tenant, description=self.test_description) self.tenants.append(self.tenant) def setup_test_role(self): """Set up a test role.""" self.test_role = rand_name('role') resp, self.role = self.client.create_role(self.test_role) self.roles.append(self.role) def setup_test_v3_user(self): """Set up a test v3 user.""" self.setup_test_project() self.test_user = rand_name('test_user_') 
self.test_password = rand_name('pass_') self.test_email = self.test_user + '@testmail.tm' resp, self.v3_user = self.client.create_user(self.test_user, self.test_password, self.project['id'], self.test_email) self.v3_users.append(self.v3_user) def setup_test_project(self): """Set up a test project.""" self.test_project = rand_name('test_project_') self.test_description = rand_name('desc_') resp, self.project = self.client.create_project( name=self.test_project, description=self.test_description) self.projects.append(self.project) def setup_test_v3_role(self): """Set up a test v3 role.""" self.test_role = rand_name('role') resp, self.v3_role = self.client.create_role(self.test_role) self.v3_roles.append(self.v3_role) def teardown_all(self): for user in self.users: self.client.delete_user(user['id']) for tenant in self.tenants: self.client.delete_tenant(tenant['id']) for role in self.roles: self.client.delete_role(role['id']) for v3_user in self.v3_users: self.client.delete_user(v3_user['id']) for v3_project in self.projects: self.client.delete_project(v3_project['id']) for v3_role in self.v3_roles: self.client.delete_role(v3_role['id'])<|fim▁end|>
@classmethod def setUpClass(cls): super(BaseIdentityAdminTest, cls).setUpClass()
<|file_name|>connection_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <utility> #include "base/bind.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "remoting/base/constants.h" #include "remoting/protocol/fake_session.h" #include "remoting/protocol/fake_video_renderer.h" #include "remoting/protocol/ice_connection_to_client.h" #include "remoting/protocol/ice_connection_to_host.h" #include "remoting/protocol/protocol_mock_objects.h" #include "remoting/protocol/transport_context.h" #include "remoting/protocol/video_stream.h" #include "remoting/protocol/webrtc_connection_to_client.h" #include "remoting/protocol/webrtc_connection_to_host.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/webrtc/modules/desktop_capture/desktop_capturer.h" #include "third_party/webrtc/modules/desktop_capture/desktop_frame.h" using ::testing::_; using ::testing::InvokeWithoutArgs; using ::testing::NotNull; using ::testing::StrictMock; namespace remoting { namespace protocol { namespace { MATCHER_P(EqualsCapabilitiesMessage, message, "") { return arg.capabilities() == message.capabilities(); } MATCHER_P(EqualsKeyEvent, event, "") { return arg.usb_keycode() == event.usb_keycode() && arg.pressed() == event.pressed(); } ACTION_P(QuitRunLoop, run_loop) { run_loop->Quit(); } class MockConnectionToHostEventCallback : public ConnectionToHost::HostEventCallback { public: MockConnectionToHostEventCallback() {} ~MockConnectionToHostEventCallback() override {} MOCK_METHOD2(OnConnectionState, void(ConnectionToHost::State state, ErrorCode error)); MOCK_METHOD1(OnConnectionReady, void(bool ready)); MOCK_METHOD2(OnRouteChanged, void(const std::string& channel_name, const TransportRoute& route)); }; class TestScreenCapturer : public webrtc::DesktopCapturer { public: TestScreenCapturer() {} ~TestScreenCapturer() override {} // webrtc::DesktopCapturer interface. void Start(Callback* callback) override { callback_ = callback; } void Capture(const webrtc::DesktopRegion& region) override { // Return black 10x10 frame. std::unique_ptr<webrtc::DesktopFrame> frame( new webrtc::BasicDesktopFrame(webrtc::DesktopSize(100, 100))); memset(frame->data(), 0, frame->stride() * frame->size().height()); frame->mutable_updated_region()->SetRect( webrtc::DesktopRect::MakeSize(frame->size())); callback_->OnCaptureCompleted(frame.release()); } private: Callback* callback_ = nullptr; }; } // namespace class ConnectionTest : public testing::Test, public testing::WithParamInterface<bool> { public: ConnectionTest() {} protected: bool is_using_webrtc() { return GetParam(); } void SetUp() override { // Create fake sessions. host_session_ = new FakeSession(); owned_client_session_.reset(new FakeSession()); client_session_ = owned_client_session_.get(); // Create Connection objects. 
if (is_using_webrtc()) { host_connection_.reset(new WebrtcConnectionToClient( base::WrapUnique(host_session_), TransportContext::ForTests(protocol::TransportRole::SERVER), message_loop_.task_runner())); client_connection_.reset(new WebrtcConnectionToHost()); } else { host_connection_.reset(new IceConnectionToClient( base::WrapUnique(host_session_), TransportContext::ForTests(protocol::TransportRole::SERVER), message_loop_.task_runner())); client_connection_.reset(new IceConnectionToHost()); } // Setup host side. host_connection_->SetEventHandler(&host_event_handler_); host_connection_->set_clipboard_stub(&host_clipboard_stub_); host_connection_->set_host_stub(&host_stub_); host_connection_->set_input_stub(&host_input_stub_); // Setup client side. client_connection_->set_client_stub(&client_stub_); client_connection_->set_clipboard_stub(&client_clipboard_stub_); client_connection_->set_video_renderer(&client_video_renderer_); } void Connect() { { testing::InSequence sequence; EXPECT_CALL(host_event_handler_,<|fim▁hole|> } EXPECT_CALL(host_event_handler_, OnConnectionChannelsConnected(host_connection_.get())) .WillOnce( InvokeWithoutArgs(this, &ConnectionTest::OnHostConnected)); EXPECT_CALL(host_event_handler_, OnRouteChange(_, _, _)) .Times(testing::AnyNumber()); { testing::InSequence sequence; EXPECT_CALL(client_event_handler_, OnConnectionState(ConnectionToHost::CONNECTING, OK)); EXPECT_CALL(client_event_handler_, OnConnectionState(ConnectionToHost::AUTHENTICATED, OK)); EXPECT_CALL(client_event_handler_, OnConnectionState(ConnectionToHost::CONNECTED, OK)) .WillOnce(InvokeWithoutArgs( this, &ConnectionTest::OnClientConnected)); } EXPECT_CALL(client_event_handler_, OnRouteChanged(_, _)) .Times(testing::AnyNumber()); client_connection_->Connect( std::move(owned_client_session_), TransportContext::ForTests(protocol::TransportRole::CLIENT), &client_event_handler_); client_session_->SimulateConnection(host_session_); run_loop_.reset(new base::RunLoop()); run_loop_->Run(); EXPECT_TRUE(client_connected_); EXPECT_TRUE(host_connected_); } void TearDown() override { client_connection_.reset(); host_connection_.reset(); base::RunLoop().RunUntilIdle(); } void OnHostConnected() { host_connected_ = true; if (client_connected_ && run_loop_) run_loop_->Quit(); } void OnClientConnected() { client_connected_ = true; if (host_connected_ && run_loop_) run_loop_->Quit(); } base::MessageLoopForIO message_loop_; std::unique_ptr<base::RunLoop> run_loop_; MockConnectionToClientEventHandler host_event_handler_; MockClipboardStub host_clipboard_stub_; MockHostStub host_stub_; MockInputStub host_input_stub_; std::unique_ptr<ConnectionToClient> host_connection_; FakeSession* host_session_; // Owned by |host_connection_|. bool host_connected_ = false; MockConnectionToHostEventCallback client_event_handler_; MockClientStub client_stub_; MockClipboardStub client_clipboard_stub_; FakeVideoRenderer client_video_renderer_; std::unique_ptr<ConnectionToHost> client_connection_; FakeSession* client_session_; // Owned by |client_connection_|. 
std::unique_ptr<FakeSession> owned_client_session_; bool client_connected_ = false; private: DISALLOW_COPY_AND_ASSIGN(ConnectionTest); }; INSTANTIATE_TEST_CASE_P(Ice, ConnectionTest, ::testing::Values(false)); INSTANTIATE_TEST_CASE_P(Webrtc, ConnectionTest, ::testing::Values(true)); TEST_P(ConnectionTest, RejectConnection) { EXPECT_CALL(client_event_handler_, OnConnectionState(ConnectionToHost::CONNECTING, OK)); EXPECT_CALL(client_event_handler_, OnConnectionState(ConnectionToHost::CLOSED, OK)); client_connection_->Connect( std::move(owned_client_session_), TransportContext::ForTests(protocol::TransportRole::CLIENT), &client_event_handler_); client_session_->event_handler()->OnSessionStateChange(Session::CLOSED); } TEST_P(ConnectionTest, Disconnect) { Connect(); EXPECT_CALL(client_event_handler_, OnConnectionState(ConnectionToHost::CLOSED, OK)); EXPECT_CALL(host_event_handler_, OnConnectionClosed(host_connection_.get(), OK)); client_session_->Close(OK); base::RunLoop().RunUntilIdle(); } TEST_P(ConnectionTest, Control) { Connect(); Capabilities capabilities_msg; capabilities_msg.set_capabilities("test_capability"); base::RunLoop run_loop; EXPECT_CALL(client_stub_, SetCapabilities(EqualsCapabilitiesMessage(capabilities_msg))) .WillOnce(QuitRunLoop(&run_loop)); // Send capabilities from the host. host_connection_->client_stub()->SetCapabilities(capabilities_msg); run_loop.Run(); } TEST_P(ConnectionTest, Events) { Connect(); KeyEvent event; event.set_usb_keycode(3); event.set_pressed(true); base::RunLoop run_loop; EXPECT_CALL(host_event_handler_, OnInputEventReceived(host_connection_.get(), _)); EXPECT_CALL(host_input_stub_, InjectKeyEvent(EqualsKeyEvent(event))) .WillOnce(QuitRunLoop(&run_loop)); // Send capabilities from the client. client_connection_->input_stub()->InjectKeyEvent(event); run_loop.Run(); } TEST_P(ConnectionTest, Video) { Connect(); std::unique_ptr<VideoStream> video_stream = host_connection_->StartVideoStream( base::WrapUnique(new TestScreenCapturer())); base::RunLoop run_loop; // Expect frames to be passed to FrameConsumer when WebRTC is used, or to // VideoStub otherwise. if (is_using_webrtc()) { client_video_renderer_.GetFrameConsumer()->set_on_frame_callback( base::Bind(&base::RunLoop::Quit, base::Unretained(&run_loop))); } else { client_video_renderer_.GetVideoStub()->set_on_frame_callback( base::Bind(&base::RunLoop::Quit, base::Unretained(&run_loop))); } run_loop.Run(); if (is_using_webrtc()) { EXPECT_EQ( client_video_renderer_.GetFrameConsumer()->received_frames().size(), 1U); EXPECT_EQ(client_video_renderer_.GetVideoStub()->received_packets().size(), 0U); } else { EXPECT_EQ( client_video_renderer_.GetFrameConsumer()->received_frames().size(), 0U); EXPECT_EQ(client_video_renderer_.GetVideoStub()->received_packets().size(), 1U); } } } // namespace protocol } // namespace remoting<|fim▁end|>
OnConnectionAuthenticating(host_connection_.get())); EXPECT_CALL(host_event_handler_, OnConnectionAuthenticated(host_connection_.get()));
<|file_name|>regex_with_defaults.py<|end_file_name|><|fim▁begin|>"""Get a `re.Pattern` instance (as given by re.compile()) with control over defaults of its methods.
Useful to reduce if/else boilerplate when handling the output of search functions (match, search, etc.)

See [regex_search_hack.md](https://gist.github.com/thorwhalen/6c913e9be35873cea6efaf6b962fde07) for more explanations of the use case.

Example:

>>> dflt_result = type('dflt_search_result', (), {'groupdict': lambda x: {}})()
>>> p = re_compile('.*(?P<president>obama|bush|clinton)', search=dflt_result, match=dflt_result)
>>>
>>> p.search('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
'bush'
>>> p.match('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
'bush'
>>>
>>> # if no match is found, will return 'Not found', as requested
>>> p.search('This does not contain a president').groupdict().get('president', 'Not found')
'Not found'
>>>
>>> # see that other non-wrapped re.Pattern methods still work<|fim▁hole|>
>>> p.findall('I am beating around the bush, am I?')
['bush']
"""

import re
from functools import wraps


def add_dflt(func, dflt_if_none):
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        result = func(*args, **kwargs)
        if result is not None:
            return result
        else:
            if callable(dflt_if_none):
                return dflt_if_none()
            else:
                return dflt_if_none

    return wrapped_func


def re_compile(pattern, flags=0, **dflt_if_none):
    """Get a `re.Pattern` instance (as given by re.compile()) with control over defaults of its methods.
    Useful to reduce if/else boilerplate when handling the output of search functions
    (match, search, etc.)

    Example:

    >>> dflt_result = type('dflt_search_result', (), {'groupdict': lambda x: {}})()
    >>> p = re_compile('.*(?P<president>obama|bush|clinton)', search=dflt_result, match=dflt_result)
    >>>
    >>> # trying p.search
    >>> p.search('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
    'bush'
    >>> # trying p.match
    >>> p.match('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
    'bush'
    >>>
    >>> # if no match is found, will return 'Not found', as requested
    >>> p.search('This does not contain a president').groupdict().get('president', 'Not found')
    'Not found'
    >>>
    >>> # see that other non-wrapped re.Pattern methods still work
    >>> p.findall('I am beating around the bush, am I?')
    ['bush']
    """
    compiled_regex = re.compile(pattern, flags=flags)
    intercepted_names = set(dflt_if_none)

    my_regex_compilation = type('MyRegexCompilation', (object,), {})()

    for _name, _dflt in dflt_if_none.items():
        setattr(my_regex_compilation, _name, add_dflt(getattr(compiled_regex, _name), _dflt))

    for _name in filter(lambda x: not x.startswith('__') and x not in intercepted_names, dir(compiled_regex)):
        setattr(my_regex_compilation, _name, getattr(compiled_regex, _name))

    return my_regex_compilation<|fim▁end|>
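A side note on the module above: add_dflt is useful on its own whenever a callable signals "no result" with None, without going through re_compile at all. A minimal sketch, assuming add_dflt from this module is in scope; the DfltMatch name is invented here for illustration, mirroring the dflt_search_result stand-in from the doctest:

import re

# Stand-in object whose groupdict() is always empty.
DfltMatch = type('DfltMatch', (), {'groupdict': lambda self: {}})

guarded_search = add_dflt(re.compile(r'(?P<president>obama|bush|clinton)').search,
                          DfltMatch())

# A real match object on a hit, the stand-in on a miss; no if/else needed.
guarded_search('no president here').groupdict().get('president', 'Not found')  # -> 'Not found'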
<|file_name|>setup_pre_context.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])

from copy import deepcopy

from quex.core_engine.state_machine.core import *
import quex.core_engine.state_machine.nfa_to_dfa as nfa_to_dfa
import quex.core_engine.state_machine.hopcroft_minimization as hopcroft


def do(the_state_machine, pre_context_state_machine):
    """Sets up a pre-condition to the given state machine. This process
       is entirely different from any sequentialization or parallelization
       of state machines. Here, the state machine representing the pre-
       condition is **not** webbed into the original state machine!

       Instead, the following happens:

          -- the pre-condition state machine is inverted, because
             it is to be walked through backwards.
          -- the inverted state machine is marked with the state machine id
             of the_state_machine.
          -- the original state machine will refer to the inverse
             state machine of the pre-condition.
          -- the initial state origins and the origins of the acceptance
             states are marked as 'pre-conditioned' indicating the id
             of the inverted state machine of the pre-condition.
    """
    #___________________________________________________________________________________________
    # (*) do some consistency checking
    assert the_state_machine.__class__.__name__ == "StateMachine"
    assert pre_context_state_machine.__class__.__name__ == "StateMachine"
    # -- state machines with no states are senseless here.
    assert not the_state_machine.is_empty()
    assert not pre_context_state_machine.is_empty()
    # -- trivial pre-conditions should be added last, for simplicity
    assert not the_state_machine.core().pre_context_begin_of_line_f(), \
           "This function was not designed to deal with trivially pre-conditioned state machines. " + \
           "Please, make sure the trivial pre-conditioning happens *after* regular pre-conditions."
    #___________________________________________________________________________________________

    # (*) invert the state machine of the pre-condition
    inverse_pre_context = pre_context_state_machine.get_inverse()
    inverse_pre_context = nfa_to_dfa.do(inverse_pre_context)
    inverse_pre_context = hopcroft.do(inverse_pre_context)

    # (*) let the state machine refer to it
    #     [Is this necessary? Is it not enough that the acceptance origins point to it? <fschaef>]<|fim▁hole|>
the_state_machine.core().set_pre_context_sm(inverse_pre_context) pre_context_sm_id = inverse_pre_context.get_id()
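For readers new to the trick in setup_pre_context.py: matching a pre-condition means walking the input backwards from the current position, which is the same as running the reversed automaton forwards, hence get_inverse() followed by the NFA-to-DFA and Hopcroft passes. A toy sketch of the edge-reversal step only, on a plain dict representation rather than Quex's StateMachine API (all names here are illustrative):

# Toy NFA as {state: {symbol: set(successors)}}. Reversing every edge yields
# an automaton for the reversed language; start and acceptance states swap roles.
def invert(nfa):
    inverse = {}
    for src, edges in nfa.items():
        for symbol, targets in edges.items():
            for dst in targets:
                inverse.setdefault(dst, {}).setdefault(symbol, set()).add(src)
    return inverse

# 0 -a-> 1 -b-> 2 recognizes 'ab'; the inverse recognizes 'ba'.
print(invert({0: {'a': {1}}, 1: {'b': {2}}, 2: {}}))
# {1: {'a': {0}}, 2: {'b': {1}}}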
<|file_name|>offline-saver-admin.js<|end_file_name|><|fim▁begin|>(function( $ ) { 'use strict'; /** * All of the code for your admin-facing JavaScript source * should reside in this file. * * This enables you to define handlers, for when the DOM is ready: *<|fim▁hole|> * When the window is loaded: * * $( window ).load(function() { * * }); */ $(function() { console.log("Hallo Welt"); }); })( jQuery );<|fim▁end|>
* $(function() {
	 *
	 * });
	 *
	 * When the window is loaded:
	 *
	 * $( window ).load(function() {
	 *
	 * });
	 */

	$(function() {
	    console.log("Hello World");
	});

})( jQuery );<|fim▁end|>
<|file_name|>BenderSolution.java<|end_file_name|><|fim▁begin|>package medium_challenges; import java.util.ArrayList; import java.util.List; import java.util.Scanner; class BenderSolution { public static void main(String args[]) { @SuppressWarnings("resource") Scanner in = new Scanner(System.in); int R = in.nextInt(); int C = in.nextInt(); in.nextLine(); List<Node> teleports = new ArrayList<>(); Node start = null; // Create (x,y) map and store starting point char[][] map = new char[C][R]; for (int y = 0; y < R; y++) { String row = in.nextLine(); for (int x = 0; x < C; x++){ char item = row.charAt(x); map[x][y] = item; if (item == '@') { start = new Node(x,y); } if (item == 'T') { teleports.add(new Node(x,y)); } } } // Create new robot with map Bender bender = new Bender(start, map, teleports); // Limit iterations boolean circular = false; final int MAX_ITERATIONS = 200; // Collect all moves. List<String> moves = new ArrayList<>(); while (bender.alive && !circular) { moves.add(bender.move()); circular = moves.size() > MAX_ITERATIONS; } // Output Result if (circular) System.out.println("LOOP"); else { for (String s: moves) System.out.println(s); } } /** Simple object to store coordinate pair */ private static class Node { final int x, y; Node(int x, int y) { this.x = x; this.y = y; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + x; result = prime * result + y; return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; Node other = (Node) obj; if (x != other.x) return false; if (y != other.y) return false; return true; } } /** Object to store state and behavior of Bender */ private static class Bender { Node position; char[][] map; boolean directionToggle; boolean alive; Direction facing; boolean beerToggle; List<Node> teleports;<|fim▁hole|> SOUTH(0, 1), EAST(1, 0), NORTH(0, -1), WEST(-1, 0); private int dx; private int dy; Direction (int dx, int dy) { this.dx = dx; this.dy = dy; } public Node newNode(Node original) { return new Node(original.x + dx, original.y + dy); } public Direction nextDirection(boolean toggle) { if (toggle) { switch (this) { case SOUTH: return EAST; case EAST: return NORTH; default: return WEST; } } else { switch (this) { case WEST: return NORTH; case NORTH: return EAST; default: return SOUTH; } } } } public Bender(Node start, char[][] map, List<Node> teleports) { this.position = start; this.map = map; this.alive = true; this.facing = Direction.SOUTH; this.directionToggle = true; this.beerToggle = false; this.teleports = teleports; } /** Updates the state of bender. Returns direction of the move. */ public String move() { char currentContent = map[position.x][position.y]; // Check for Teleporters if (currentContent == 'T') { position = (teleports.get(0).equals(position)) ? 
teleports.get(1) : teleports.get(0); } // Check for immediate move command if ((""+currentContent).matches("[NESW]")) { switch (currentContent) { case 'N': facing = Direction.NORTH; position = facing.newNode(position); return Direction.NORTH.toString(); case 'W': facing = Direction.WEST; position = facing.newNode(position); return Direction.WEST.toString(); case 'S': facing = Direction.SOUTH; position = facing.newNode(position); return Direction.SOUTH.toString(); default: facing = Direction.EAST; position = facing.newNode(position); return Direction.EAST.toString(); } } // Check for inversion if (currentContent == 'I') { directionToggle = !directionToggle; } // Check for beer if (currentContent == 'B') { beerToggle = !beerToggle; } // Trial next possibility Node trial = facing.newNode(position); char content = map[trial.x][trial.y]; // Check if Bender dies if (content == '$') { alive = false; return facing.toString(); } // Check for beer power to remove X barrier if (beerToggle && content == 'X') { content = ' '; map[trial.x][trial.y] = ' '; } // Check for Obstacles boolean initialCheck = true; while (content == 'X' || content == '#') { // Check for obstacles if (content == 'X' || content == '#') { if (initialCheck) { facing = directionToggle ? Direction.SOUTH : Direction.WEST; initialCheck = false; } else { facing = facing.nextDirection(directionToggle); } } // Update position and facing trial = facing.newNode(position); content = map[trial.x][trial.y]; } // If we made it to this point, it's okay to move bender position = facing.newNode(position); if (content == '$') alive = false; return facing.toString(); } } }<|fim▁end|>
/** Direction enum includes the ability to find next node based on direction Bender is facing. */ private enum Direction {
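One design note on the solution above: the MAX_ITERATIONS cap is a heuristic for the LOOP verdict. Because Bender's behaviour is fully determined by his position, facing, and the two toggles, an exact alternative is to stop at the first repeated state. Sketched here in Python with a hypothetical step function and state object, neither of which exists in the Java code:

def run(step, state):
    """step(state) -> (move, next_state); returns the move list or 'LOOP'."""
    seen = set()
    moves = []
    while state.alive:
        # If the map itself can change (e.g. an X wall removed by beer),
        # the map contents belong in this key as well.
        key = (state.x, state.y, state.facing, state.direction_toggle, state.beer_toggle)
        if key in seen:  # exact state repeat means a guaranteed infinite loop
            return 'LOOP'
        seen.add(key)
        move, state = step(state)
        moves.append(move)
    return moves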
<|file_name|>musicbox.py<|end_file_name|><|fim▁begin|>import sys, struct, array import SocketServer # import StringIO as StringIO # import pygame p = 0x08d682598db70a889ff1bc7e3e00d602e9fe9e812162d4e3d06954b2ff554a4a21d5f0aab3eae5c49ac1aec7117709cba1b88b79ae9805d28ddb99be07ba05ea219654afe0c8dddac7e73165f3dcd851a3c8a3b6515766321420aff177eaaa7b3da39682d7e773aa863a729706d52e83a1d0e34d69b461c837ed239745d6c50f124e34f4d1d00ad15d6ebabda8c189c7b8b35b5bae7a9cbafc5f09bd506a39bd9d2d9245324f02ff7254fab4ab17f7a165d49e318baeb8effc4e1a3f1251d2ea1ab93f767bd6dcf5567406550ea1f194ef7deb1b2fec8b30520b6777fea1b305593db941f9ad8ce1eba6f77c3a104bd97448ec0c11688c5bf82e85c90234abfc5 q = 0x0f67e886d1a0d1e59a53b4aa831c9bcb39a5d0a8f g = 0x27d6a1359821e2a758a93f5c06ebb26382a06a4681e7cf44d71aeff2390c87d20ce7cd885fb01fd84ad9d52839a8ae163bfee5d09820fea1a09f814801cb157b2c5bc4636d042fb2ac1a836f33adafd6735826ae1e96c3bfbd04f7df672a14120f6780e8848ff3b3123004654127c9d25843cd54c68c396a410a2f0496e8ebb35b971993dee0f596388911277fce46ff3c5191e7e76262875bb3368724d3a40c852ccc80be4dc82335fb9267c6ff0e20396ae8bb2d51e35f15fbd07fa1b354944c285367ac88763dd00fe6fe0aab5a49faf7bc10f8e90ba376efdc034e9e1cae7e79ac906aed3b513c5f3452dc33eb307ab3d45efe92a31b1cd9a6f52dd5fb09 y = 0x6bff47f5ea736b03c85885b0bd0f1f7fa2a7efef8812c544ab47f4aa3542235f5a298fc778bb9263223c66d149f88d377b1e70a5715e4554776127ffb874e218d7c75a3c6202cc3e2cfb6a5a4cf34e7e8d5428b90b7aa1dbf9a7e965feab029220266ad0dabade6ae09362f6463eea60e3133bb79fc4af511057e31574f4b0f34b848b180fa20da7d9a6d8adedded9819da20b8923073e35f43ca75eeb9a1ab5451c3a5446306f93ef246759f59e65e498032d48aece56f437b4b7179daf3dfa80d6a36c211ed5acdfeaf91a7e8070a49a521f3c2e411a26eeaf8fab697535914982f1f7cda1e1aa1aac602f9606ea326632b4fbabf6b361fe118637e048c482 def bytesToInt(s): x = 0 for c in s: x = (x << 8) | ord(c) return x def verifySig(r, s, m): #DSA, straight from Wikipedia if not 0 < s < q and 0 < r < q: return False w = pow(s, q-2, q) u1 = m*w % q u2 = r*w % q v = pow(g, u1, p) * pow(y, u2, p) % p return (v % q) == r def superHash(b): b += '0' * (-len(b) % 2) h = (len(b) + 1) * (len(b) ^ 42) x = 88172645463325252 for i, c in enumerate(array.array('H', b)): x ^= (x<<13) & 0xFFFFFFFFFFFFFFFF x ^= (x>>7) & 0xFFFFFFFFFFFFFFFF x ^= (x<<17) & 0xFFFFFFFFFFFFFFFF h += c * (((i % 7) + 9) ** (i % 25)) if i % 2: h *= x | i else: h += x | i h &= 0xFFFFFFFFFFFFFFFF h ^= (len(b) ^ 1) * (len(b) + 42) h &= 0xFFFFFFFFFFFFFFFF return h class HandleCheckin(SocketServer.BaseRequestHandler): def readStr(self): req = self.request prefix = req.recv(2) if prefix != '\x12\xae': req.sendall("Incorrect prefix\n") req.close() return None leng = struct.unpack("<I", req.recv(4))[0] toRead = "" while len(toRead) < leng: toRead += req.recv(leng - len(toRead)) if len(toRead) > leng: req.sendall("Length does not match input data size\n") req.close() return None return toRead <|fim▁hole|> req = self.request req.sendall("""Welcome to the new and improved Music Box! Please provide your signed music file.""") data = self.readStr() if data is None or len(data) < 48: req.sendall("Incomplete header\n") return elif len(data) > 12345678: req.sendall("The data. 
It is too much!\n") return r = bytesToInt(data[:20]) s = bytesToInt(data[20:40]) h = bytesToInt(data[40:48]) sound = data[48:] if not verifySig(r, s, h): req.sendall("Invalid signature\n") return elif h != superHash(sound): req.sendall("Message hash does not match\n") return else: req.sendall("Success!\n") if "Secret backdoor lol GIMME THE FLAG" in sound: with open('flag.txt','r') as f: req.sendall(f.read() + "\n") else: req.sendall("Unfortunately, the musicbox is not available at the moment.\n") req.close() # f = StringIO.StringIO(sound) # pygame.mixer.music.load(f) # pygame.mixer.music.play(loops=-1) class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): pass if __name__ == "__main__": # pygame.mixer.init() HOST, PORT = sys.argv[1], int(sys.argv[2]) print 'Running on port', PORT server = ThreadedServer((HOST, PORT), HandleCheckin) server.allow_reuse_address = True server.serve_forever()<|fim▁end|>
def handle(self):
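Two details in musicbox.py's verifySig are easy to miss. First, w = pow(s, q-2, q) computes the modular inverse of s via Fermat's little theorem (q is prime), so s*w is congruent to 1 mod q; a self-contained check with a small prime standing in for the real 160-bit q:

q = 101                  # small prime stand-in for the real q
s = 42
w = pow(s, q - 2, q)     # Fermat inverse: s**(q-2) mod q
assert (s * w) % q == 1  # so w == s^-1 mod q; Python 3.8+ also accepts pow(s, -1, q)

Second, `not 0 < s < q and 0 < r < q` parses as `(not (0 < s < q)) and (0 < r < q)`, because `not` binds tighter than `and`, so the guard does not range-check both values the way DSA specifies.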
<|file_name|>RefsetDtoShort.java<|end_file_name|><|fim▁begin|>package com.ihtsdo.snomed.model.xml; import java.sql.Date; import javax.xml.bind.annotation.XmlRootElement; import com.google.common.base.Objects; import com.google.common.primitives.Longs; import com.ihtsdo.snomed.dto.refset.RefsetDto; import com.ihtsdo.snomed.model.refset.Refset; @XmlRootElement(name="refset") public class RefsetDtoShort { private long id; private XmlRefsetConcept concept; private String publicId; private String title; private String description; private Date created; private Date lastModified; private int memberSize; private String snomedExtension; private String snomedReleaseDate; private boolean pendingChanges; public RefsetDtoShort(Refset r){ setId(r.getId()); setConcept(new XmlRefsetConcept(r.getRefsetConcept())); setPublicId(r.getPublicId()); setTitle(r.getTitle()); setDescription(r.getDescription()); setCreated(r.getCreationTime()); setLastModified(r.getModificationTime()); setPendingChanges(r.isPendingChanges()); setMemberSize(r.getMemberSize()); setSnomedExtension(r.getOntologyVersion().getFlavour().getPublicId()); setSnomedReleaseDate(RefsetDto.dateFormat.format(r.getOntologyVersion().getTaggedOn())); } public RefsetDtoShort(){} @Override public String toString() { return Objects.toStringHelper(this) .add("id", getId()) .add("concept", getConcept()) .add("publicId", getPublicId()) .add("title", getTitle()) .add("description", getDescription()) .add("created", getCreated()) .add("lastModified", getLastModified()) .add("pendingChanges", isPendingChanges()) .add("memberSize", getMemberSize()) .add("snomedExtension", getSnomedExtension()) .add("snomedReleaseDate", getSnomedReleaseDate()) .toString(); } @Override public int hashCode(){ return Longs.hashCode(getId()); } @Override public boolean equals(Object o){ if (o instanceof RefsetDtoShort){ RefsetDtoShort r = (RefsetDtoShort) o; if (r.getId() == this.getId()){ return true; } } return false; } public boolean isPendingChanges() { return pendingChanges; } public void setPendingChanges(boolean pendingChanges) { this.pendingChanges = pendingChanges; } public long getId() { return id; } public void setId(long id) { this.id = id; } public XmlRefsetConcept getConcept() { return concept; } public void setConcept(XmlRefsetConcept concept) { this.concept = concept; } public String getPublicId() { return publicId; } public void setPublicId(String publicId) { this.publicId = publicId; } public String getTitle() { return title; } public void setTitle(String title) { this.title = title; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public Date getCreated() { return created; } public void setCreated(Date created) { this.created = created; } public Date getLastModified() { return lastModified; } public void setLastModified(Date lastModified) { this.lastModified = lastModified; } public int getMemberSize() { return memberSize; } public void setMemberSize(int memberSize) { this.memberSize = memberSize; } public String getSnomedExtension() { return snomedExtension; } public void setSnomedExtension(String snomedExtension) { this.snomedExtension = snomedExtension; } public String getSnomedReleaseDate() { return snomedReleaseDate; } public void setSnomedReleaseDate(String snomedReleaseDate) { this.snomedReleaseDate = snomedReleaseDate; } public static RefsetDtoShort parse(Refset r){ return getBuilder(new XmlRefsetConcept(r.getRefsetConcept()), r.getPublicId(), r.getTitle(), 
r.getDescription(), r.getCreationTime(), r.getModificationTime(), r.isPendingChanges(), r.getMemberSize(), r.getOntologyVersion().getFlavour().getPublicId(), r.getOntologyVersion().getTaggedOn()).build(); } public static Builder getBuilder(XmlRefsetConcept concept, String publicId, String title, <|fim▁hole|> String description, Date created, Date lastModified, boolean pendingChanges, int memberSize, String snomedExtension, Date snomedReleaseDate) { return new Builder(concept, publicId, title, description, created, lastModified, pendingChanges, memberSize, snomedExtension, snomedReleaseDate); } public static class Builder { private RefsetDtoShort built; Builder(XmlRefsetConcept concept, String publicId, String title, String description, Date created, Date lastModified, boolean pendingChanges, int memberSize, String snomedExtension, Date snomedReleaseDate){ built = new RefsetDtoShort(); built.concept = concept; built.publicId = publicId; built.title = title; built.description = description; built.created = created; built.lastModified = lastModified; built.pendingChanges = pendingChanges; built.memberSize = memberSize; built.setSnomedExtension(snomedExtension); built.setSnomedReleaseDate(RefsetDto.dateFormat.format(snomedReleaseDate)); } public RefsetDtoShort build() { return built; } } }<|fim▁end|>
<|file_name|>wasm-module.wasm.spec.ts<|end_file_name|><|fim▁begin|>import * as path from "path"; import * as fs from "fs"; import {compileWasmSync} from "./utils/utils"; import {WasmModule} from "../src/backends/webassembly/wasm/wasm-module"; test('wasm-module', () => { compileWasmSync(path.join(__dirname, 'wasm-module.tbs')); let wasmBinary = fs.readFileSync(path.resolve(__dirname, "./bin/wasm-module.wasm")); let wasmModule = new WasmModule(wasmBinary); expect(wasmModule).toBeTruthy();<|fim▁hole|><|fim▁end|>
});
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """Init and utils.""" from zope.i18nmessageid import MessageFactory _ = MessageFactory('dakhli.sitecontent')<|fim▁hole|>def initialize(context): """Initializer called when used as a Zope 2 product."""<|fim▁end|>
<|file_name|>GPSSensor.hpp<|end_file_name|><|fim▁begin|>/* Copyright_License { G-Meter INU. Copyright (C) 2013-2015 Peter F Bradshaw A detailed list of copyright holders can be found in the file "AUTHORS". This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. } */ #ifndef GPSSENSORS_HPP #define GPSSENSORS_HPP #include <jni.h> #include "Utility/DeviceInputBuffer.hpp" /** * This class is a singleton. Its purpose is to handle communication to and * from the Android GPS systems. * * This class buffers that data in order to ensure consistancy. */ class GPSSensor : public DeviceInputBuffer { public: /** * Get a reference to the singleton GPSSensor object. If the object does not * yet it exist then it is created. This is the only way that the object * of this class may be created. * @return The reference the object. This reference is always valid at the * time theis function is called. */ static GPSSensor& Instance() { static GPSSensor instance; return instance; } /** * GPS status. These definitions originate in the Android LocationProvider * API documentation. */ enum GPSStatus { OUT_OF_SERVICE = 0, // GPS disabled TEMPORARILY_UNAVAILABLE = 1, // Waiting for fix AVAILABLE = 2 // Position available }; /** * Set the GPS status. * @param env The JNI enviroment. * @param obj The JNI object. * @param connected The connected status: true or false. */ void Connected(JNIEnv *env, jobject obj, jint connected); /** * Give the GPS connection state. * @return The state: OUT_OF_SERVICE - not connected, TEMPORARILY_UNAVAILABLE - waiting for fix, AVAILABLE - fix available. */ GPSStatus Connected() const; /** * Set the GPS state. * @param env The JNI enviroment. * @param obj The JNI object. * @param time GPS time. * @param n_birds Number of birds in view. * @param lambda GPS lambda. * @param phi GPS phi. * @param hasAlt Valid GPS altitude: true or false. * @param alt GPS altitude in meters. * @param hasBearing Valid GPS bearing: true or false. * @param bearing GPS bearing. * @param hasSpeed Valid speed: true or false. * @param speed GPS speed. * @param hasError Valid GPS error: true or false. * @param error GPS error. * @param hasAcc Valid GPS acceleration: true or false. * @param acc GPS acceleration. */ void State(JNIEnv *env, jobject obj, jlong time, jint n_birds, jdouble lambda, jdouble phi, jboolean hasAlt, jdouble alt, jboolean hasBearing, jdouble bearing, jboolean hasSpeed, jdouble speed, jboolean hasError, jdouble error, jboolean hasAcc, jdouble acc); /** * Give the time of the fix. * @return The time of the fix from the GPS unit. */ long Time() const; /** * Give the number of birds. * @return The number of birds used to develope the fix. */ int Birds() const; /** * Give the latitude of the fix. * @return The latitude in radians. */ double Phi() const; /** * Give the longitude of the fix. * @return The longitude in radians. */ double Lambda() const; /** * Give the status of the altitude. 
* @return If the value of Z() is good then true. */ bool ZStatus() const; /** * Give the altitude above the Geod. * @return The altitude in meters. */ double Z() const; /** * Give the status of the argument of the track. * @return If the value of Omega() is good then true. */ bool OmegaStatus() const; /** * Give the argument of the track. * @return The argument in radians. */ double Omega() const; /** * Give the status of the magnitude of the track. * @return If the value of S() is good then true. */ bool SStatus() const; /** * Give the magnitude of the track. * @return The magnitude in meters per second. */ double S() const; /** * Give the status of the error. * @return If the value of Epsilon() is good then true. */ bool EpsilonStatus() const; /** * Give the value of the fix error. * @return The representative error of the fix in meters. */ double Epsilon() const; /** * Give the status of the acceleration value. * @return If the value A() is good then true. */ bool AStatus() const; /** * Give the value of the acceleration.<|fim▁hole|>private: /** * Ctor. Called from GPSSensor::Instance() only. */ GPSSensor(); /** * Do not allow copying by any method! */ GPSSensor(const GPSSensor&); GPSSensor& operator=(const GPSSensor&); /** * State. */ GPSStatus status; // GPS status from Java. int n[2]; // Number of birds. long t[2]; // Time of observation. double phi[2]; // Lat. double lambda[2]; // Lon. double z[2]; // Alt. bool z_good[2]; // Z value are valid. double omega[2]; // Argument to track vector. bool omega_good[2]; // Omega value is valid. double s[2]; // Magnitude of the track vector. bool s_good[2]; // S value is valid. double epsilon[2]; // Error estimate. bool epsilon_good[2]; // Epsilon value is valid. double a[2]; // Magnitude of acceleration. bool a_good[2]; // A value is valid. }; #endif /* GPSSENSORS_HPP */<|fim▁end|>
* @return The acceleration in meters per second per second. */ double A() const;
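The paired fields at the bottom of GPSSensor.hpp (t[2], phi[2], s[2], and so on) read like a two-slot buffer, which would match the header comment that the class buffers data for consistency. The general idea in miniature, as a hedged Python sketch; the real synchronization presumably lives in DeviceInputBuffer, which is not shown here:

class DoubleBuffer(object):
    """Writer fills the hidden slot, then flips the index to publish it."""
    def __init__(self):
        self.slots = [None, None]
        self.read_idx = 0

    def write(self, fix):
        self.slots[1 - self.read_idx] = fix  # never touches the slot readers see
        self.read_idx = 1 - self.read_idx    # publish the freshly written slot

    def read(self):
        return self.slots[self.read_idx]     # always a complete fix, never half-updated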
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn main() {
    for x in 1..10 {
        println!("{}", x);
    }

    // An iterator is something that we can call the .next() method on repeatedly,
    // and it gives us a sequence of things.
    let mut num_range = 0..5;
    loop {
        match num_range.next() {
            Some(x) => { println!("{}", x); },
            None => { break }
        }
    }
    // we make a mutable binding to num_range, which is an iterator
    // then loop with an inner match
    // the match is on the result of num_range.next(), which gives the next
    // value of the iterator; next() returns an Option<i32>, which will become
    // Some(i32) when we have a value and None once we run out.
    // The for loop is just a handy way to write this loop/match/break construct.

    let nums = vec![1, 2, 3];
    for num in &nums {
        println!("{}", num);
    }
    // iterate through items directly, avoid indexing<|fim▁hole|>
// with references, we're just borrowing a reference to the data,
    // and so it's just passing a reference, without needing to do the move.

    // iterators - give you a sequence of values
    // iterator adapters - operate on iterators, producing a new iterator with a different output sequence.
    // consumers - operate on an iterator, producing some final set of values
    consumers();
    iterators();
    iterator_adapters();
    combo();
}

fn consumers() {
    // - .collect()
    let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
    // partial hint
    let one_to_one_hundred2 = (1..101).collect::<Vec<_>>();

    // - .find()
    // takes a closure and works on a reference to each element of an iterator
    let greater_than_forty_two = (0..100).find(|x| *x > 42);

    match greater_than_forty_two {
        Some(_) => println!("We got some numbers!"),
        None => println!("No numbers found :("),
    }

    // - .fold()
    // looks like fold(base, |accumulator, element| ...)
    // Upon each iteration, the closure is called,
    // and the result is the value of the accumulator on the next iteration.
}

fn iterators() {
    // (1..).step_by(5);
    // This iterator counts up from one, adding five each time.
    // It will give you a new integer every time, forever
    // (well, technically, until it reaches the maximum number representable by an i32)
}

fn iterator_adapters() {
    // (1..100).map(|x| x + 1);
    // map is called upon another iterator, and produces a new iterator
    // that calls the closure it's been given on a reference to each element

    // for i in (1..).step_by(5).take(5) {
    //     println!("{}", i);
    // }

    for i in (1..100).filter(|&x| x % 2 == 0) {
        println!("{}", i);
    }
}

fn combo() {
    let nums = (1..1000)
        .filter(|&x| x % 2 == 0)
        .filter(|&x| x % 3 == 0)
        .take(5)
        .collect::<Vec<i32>>();

    for num in &nums {
        println!("num {}", num);
    }
}<|fim▁end|>
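The adapter/consumer vocabulary in main.rs maps directly onto Python's iterator protocol, which can make the laziness point concrete; here is a rough Python analogue of combo(), standard library only, nothing taken from the Rust sample:

from itertools import islice

# Generator expression = lazy adapter chain; list() = the consumer that drives it.
nums = list(islice((x for x in range(1, 1000) if x % 2 == 0 and x % 3 == 0), 5))
print(nums)  # [6, 12, 18, 24, 30], the same values the Rust combo() collects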
<|file_name|>unordered_rules.py<|end_file_name|><|fim▁begin|>""" Sponge Knowledge Base Using unordered rules """ from java.util.concurrent.atomic import AtomicInteger from org.openksavi.sponge.examples import SameSourceJavaUnorderedRule from org.openksavi.sponge.core.library import Deduplication def onInit(): # Variables for assertions only sponge.setVariable("hardwareFailureJavaCount", AtomicInteger(0)) sponge.setVariable("hardwareFailureScriptCount", AtomicInteger(0)) sponge.setVariable("sameSourceFirstFireCount", AtomicInteger(0)) class FirstRule(Rule): def onConfigure(self): self.withEvents(["filesystemFailure", "diskFailure"]).withOrdered(False) <|fim▁hole|> lambda rule, event: rule.firstEvent.get("source") == event.get("source"), lambda rule, event:Duration.between(rule.firstEvent.time, event.time).seconds <= 2 ]) self.withDuration(Duration.ofSeconds(5)) def onRun(self, event): self.logger.debug("Running rule for events: {}", self.eventSequence) sponge.getVariable("sameSourceFirstFireCount").incrementAndGet() sponge.event("alarm").set("source", self.firstEvent.get("source")).send() class SameSourceAllRule(Rule): def onConfigure(self): self.withEvents(["filesystemFailure e1", "diskFailure e2 :all"]).withOrdered(False) self.withCondition("e1", self.severityCondition) self.withConditions("e2", [self.severityCondition, self.diskFailureSourceCondition]) self.withDuration(Duration.ofSeconds(5)) def onRun(self, event): self.logger.info("Monitoring log [{}]: Critical failure in {}! Events: {}", event.time, event.get("source"), self.eventSequence) sponge.getVariable("hardwareFailureScriptCount").incrementAndGet() def severityCondition(self, event): return int(event.get("severity")) > 5 def diskFailureSourceCondition(self, event): # Both events have to have the same source return event.get("source") == self.firstEvent.get("source") and \ Duration.between(self.firstEvent.time, event.time).seconds <= 4 class AlarmFilter(Filter): def onConfigure(self): self.withEvent("alarm") def onInit(self): self.deduplication = Deduplication("source") self.deduplication.cacheBuilder.expireAfterWrite(2, TimeUnit.SECONDS) def onAccept(self, event): return self.deduplication.onAccept(event) class Alarm(Trigger): def onConfigure(self): self.withEvent("alarm") def onRun(self, event): self.logger.debug("Received alarm from {}", event.get("source")) def onLoad(): sponge.enableJava(SameSourceJavaUnorderedRule) def onStartup(): sponge.event("diskFailure").set("severity", 10).set("source", "server1").send() sponge.event("diskFailure").set("severity", 10).set("source", "server2").send() sponge.event("diskFailure").set("severity", 8).set("source", "server1").send() sponge.event("diskFailure").set("severity", 8).set("source", "server1").send() sponge.event("filesystemFailure").set("severity", 8).set("source", "server1").send() sponge.event("filesystemFailure").set("severity", 6).set("source", "server1").send() sponge.event("diskFailure").set("severity", 6).set("source", "server1").send()<|fim▁end|>
self.withAllConditions([
<|file_name|>t.py<|end_file_name|><|fim▁begin|><|fim▁hole|># !/usr/bin/env python # -*-coding:utf-8-*- # by huangjiangbo # 部署服务 # deploy.py from ConfigParser import ConfigParser ConfigFile = r'config.ini' # 读取配置文件 config = ConfigParser() config.read(ConfigFile) de_infos = config.items(r'deploy_server') # 远程部署服务器信息 redeploy_server_info = {} appinfo = {} print de_infos for (key, value) in de_infos: redeploy_server_info[key] = value print redeploy_server_info<|fim▁end|>
<|file_name|>le.rs<|end_file_name|><|fim▁begin|>#![feature(core)] extern crate core; #[cfg(test)] mod tests { // pub trait FixedSizeArray<T> { // /// Converts the array to immutable slice // fn as_slice(&self) -> &[T]; // /// Converts the array to mutable slice // fn as_mut_slice(&mut self) -> &mut [T]; // } // macro_rules! array_impls { // ($($N:expr)+) => { // $( // #[unstable(feature = "core")] // impl<T> FixedSizeArray<T> for [T; $N] { // #[inline] // fn as_slice(&self) -> &[T] { // &self[..] // } // #[inline] // fn as_mut_slice(&mut self) -> &mut [T] { // &mut self[..] // } // } // // #[unstable(feature = "array_as_ref", // reason = "should ideally be implemented for all fixed-sized arrays")] // impl<T> AsRef<[T]> for [T; $N] { // #[inline] // fn as_ref(&self) -> &[T] { // &self[..] // } // }<|fim▁hole|> // #[inline] // fn as_mut(&mut self) -> &mut [T] { // &mut self[..] // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<T:Copy> Clone for [T; $N] { // fn clone(&self) -> [T; $N] { // *self // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<T: Hash> Hash for [T; $N] { // fn hash<H: hash::Hasher>(&self, state: &mut H) { // Hash::hash(&self[..], state) // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<T: fmt::Debug> fmt::Debug for [T; $N] { // fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // fmt::Debug::fmt(&&self[..], f) // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<'a, T> IntoIterator for &'a [T; $N] { // type Item = &'a T; // type IntoIter = Iter<'a, T>; // // fn into_iter(self) -> Iter<'a, T> { // self.iter() // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<'a, T> IntoIterator for &'a mut [T; $N] { // type Item = &'a mut T; // type IntoIter = IterMut<'a, T>; // // fn into_iter(self) -> IterMut<'a, T> { // self.iter_mut() // } // } // // // NOTE: some less important impls are omitted to reduce code bloat // __impl_slice_eq1! { [A; $N], [B; $N] } // __impl_slice_eq2! { [A; $N], [B] } // __impl_slice_eq2! { [A; $N], &'b [B] } // __impl_slice_eq2! { [A; $N], &'b mut [B] } // // __impl_slice_eq2! { [A; $N], &'b [B; $N] } // // __impl_slice_eq2! { [A; $N], &'b mut [B; $N] } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<T:Eq> Eq for [T; $N] { } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<T:PartialOrd> PartialOrd for [T; $N] { // #[inline] // fn partial_cmp(&self, other: &[T; $N]) -> Option<Ordering> { // PartialOrd::partial_cmp(&&self[..], &&other[..]) // } // #[inline] // fn lt(&self, other: &[T; $N]) -> bool { // PartialOrd::lt(&&self[..], &&other[..]) // } // #[inline] // fn le(&self, other: &[T; $N]) -> bool { // PartialOrd::le(&&self[..], &&other[..]) // } // #[inline] // fn ge(&self, other: &[T; $N]) -> bool { // PartialOrd::ge(&&self[..], &&other[..]) // } // #[inline] // fn gt(&self, other: &[T; $N]) -> bool { // PartialOrd::gt(&&self[..], &&other[..]) // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<T:Ord> Ord for [T; $N] { // #[inline] // fn cmp(&self, other: &[T; $N]) -> Ordering { // Ord::cmp(&&self[..], &&other[..]) // } // } // )+ // } // } // array_impls! 
{ // 0 1 2 3 4 5 6 7 8 9 // 10 11 12 13 14 15 16 17 18 19 // 20 21 22 23 24 25 26 27 28 29 // 30 31 32 // } type T = i32; type A = T; type B = T; #[test] fn le_test1() { let array_a: [A; 24] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]; let array_b: [B; 24] = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 ]; assert_eq!(array_a.lt(&array_b), true); assert_eq!(array_a <= array_b, true); } #[test] fn le_test2() { let array_a: [A; 24] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]; let array_b: [B; 24] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]; assert_eq!(array_a.le(&array_b), true); assert_eq!(array_a <= array_b, true); } #[test] fn le_test3() { let array_a: [A; 24] = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 ]; let array_b: [B; 24] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]; assert_eq!(array_a.le(&array_b), false); assert_eq!(array_a <= array_b, false); } }<|fim▁end|>
// // #[unstable(feature = "array_as_ref", // reason = "should ideally be implemented for all fixed-sized arrays")] // impl<T> AsMut<[T]> for [T; $N] {
<|file_name|>simple.cpp<|end_file_name|><|fim▁begin|>#include "t05.h" #include <iostream> int main(int argc, char* argv[]) { RibbonTrail a; <|fim▁hole|><|fim▁end|>
std::cout << "done" << std::endl; }
<|file_name|>ce_file_copy.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ce_file_copy version_added: "2.4" short_description: Copy a file to a remote cloudengine device over SCP on HUAWEI CloudEngine switches. description: - Copy a file to a remote cloudengine device over SCP on HUAWEI CloudEngine switches. author: - Zhou Zhijin (@CloudEngine-Ansible) notes: - The feature must be enabled with feature scp-server. - If the file is already present, no transfer will take place. requirements: - paramiko options: local_file: description: - Path to local file. Local directory must exist. The maximum length of I(local_file) is C(4096). required: true remote_file: description: - Remote file path of the copy. Remote directories must exist. If omitted, the name of the local file will be used. The maximum length of I(remote_file) is C(4096). file_system: description: - The remote file system of the device. If omitted, devices that support a I(file_system) parameter will use their default values. File system indicates the storage medium and can be set to as follows, 1) C(flash) is root directory of the flash memory on the master MPU. 2) C(slave#flash) is root directory of the flash memory on the slave MPU. If no slave MPU exists, this drive is unavailable. 3) C(chassis ID/slot number#flash) is root directory of the flash memory on a device in a stack. For example, C(1/5#flash) indicates the flash memory whose chassis ID is 1 and slot number is 5. default: 'flash:' ''' EXAMPLES = ''' - name: File copy test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: "Copy a local file to remote device" ce_file_copy: local_file: /usr/vrpcfg.cfg remote_file: /vrpcfg.cfg file_system: 'flash:' provider: "{{ cli }}" ''' RETURN = ''' changed: description: check to see if a change was made on the device returned: always type: boolean sample: true transfer_result: description: information about transfer result. returned: always type: string sample: 'The local file has been successfully transferred to the device.' local_file: description: The path of the local file. returned: always type: string sample: '/usr/work/vrpcfg.zip' remote_file: description: The path of the remote file. 
returned: always type: string sample: '/vrpcfg.zip' ''' import re import os import time from xml.etree import ElementTree from ansible.module_utils.basic import get_exception, AnsibleModule from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, run_commands, get_nc_config try: import paramiko HAS_PARAMIKO = True except ImportError: HAS_PARAMIKO = False try: from scp import SCPClient HAS_SCP = True except ImportError: HAS_SCP = False CE_NC_GET_FILE_INFO = """ <filter type="subtree"> <vfm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <dirs> <dir> <fileName>%s</fileName> <dirName>%s</dirName> <DirSize></DirSize> </dir> </dirs> </vfm> </filter> """ CE_NC_GET_SCP_ENABLE = """ <filter type="subtree"> <sshs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <sshServer> <scpEnable></scpEnable> </sshServer> </sshs> </filter> """ def get_cli_exception(exc=None): """Get cli exception message""" msg = list() if not exc: exc = get_exception() if exc: errs = str(exc).split("\r\n") for err in errs: if not err: continue if "matched error in response:" in err: continue if " at '^' position" in err: err = err.replace(" at '^' position", "") if err.replace(" ", "") == "^": continue if len(err) > 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]: continue if err[-1] == ".": err = err[:-1] if err.replace(" ", "") == "": continue msg.append(err) else: msg = ["Error: Fail to get cli exception message."] while msg[-1][-1] == ' ': msg[-1] = msg[-1][:-1] if msg[-1][-1] != ".": msg[-1] += "." return ", ".join(msg).capitalize() class FileCopy(object): """File copy function class""" def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.init_module() # file copy parameters self.local_file = self.module.params['local_file'] self.remote_file = self.module.params['remote_file'] self.file_system = self.module.params['file_system'] # state self.transfer_result = None self.changed = False def init_module(self): """Init module""" self.module = AnsibleModule( argument_spec=self.spec, supports_check_mode=True) def remote_file_exists(self, dst, file_system='flash:'): """Remote file whether exists"""<|fim▁hole|> file_name = os.path.basename(full_path) file_path = os.path.dirname(full_path) file_path = file_path + '/' xml_str = CE_NC_GET_FILE_INFO % (file_name, file_path) ret_xml = get_nc_config(self.module, xml_str) if "<data/>" in ret_xml: return False, 0 xml_str = ret_xml.replace('\r', '').replace('\n', '').\ replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ replace('xmlns="http://www.huawei.com/netconf/vrp"', "") # get file info root = ElementTree.fromstring(xml_str) topo = root.find("data/vfm/dirs/dir") if topo is None: return False, 0 for eles in topo: if eles.tag in ["DirSize"]: return True, int(eles.text.replace(',', '')) return False, 0 def local_file_exists(self): """Local file whether exists""" return os.path.isfile(self.local_file) def enough_space(self): """Whether device has enough space""" commands = list() cmd = 'dir %s' % self.file_system commands.append(cmd) output = run_commands(self.module, commands) if not output: return True match = re.search(r'\((.*) KB free\)', output[0]) kbytes_free = match.group(1) kbytes_free = kbytes_free.replace(',', '') file_size = os.path.getsize(self.local_file) if int(kbytes_free) * 1024 > file_size: return True return False def transfer_file(self, dest): """Begin to transfer file by scp""" if not self.local_file_exists(): 
self.module.fail_json( msg='Could not transfer file. Local file doesn\'t exist.') if not self.enough_space(): self.module.fail_json( msg='Could not transfer file. Not enough space on device.') hostname = self.module.params['provider']['host'] username = self.module.params['provider']['username'] password = self.module.params['provider']['password'] port = self.module.params['provider']['port'] ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=hostname, username=username, password=password, port=port) full_remote_path = '{}{}'.format(self.file_system, dest) scp = SCPClient(ssh.get_transport()) try: scp.put(self.local_file, full_remote_path) except: time.sleep(10) file_exists, temp_size = self.remote_file_exists( dest, self.file_system) file_size = os.path.getsize(self.local_file) if file_exists and int(temp_size) == int(file_size): pass else: scp.close() self.module.fail_json(msg='Could not transfer file. There was an error ' 'during transfer. Please make sure the format of ' 'input parameters is right.') scp.close() return True def get_scp_enable(self): """Get scp enable state""" xml_str = CE_NC_GET_SCP_ENABLE ret_xml = get_nc_config(self.module, xml_str) if "<data/>" in ret_xml: return False xml_str = ret_xml.replace('\r', '').replace('\n', '').\ replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ replace('xmlns="http://www.huawei.com/netconf/vrp"', "") # get file info root = ElementTree.fromstring(xml_str) topo = root.find("data/sshs/sshServer") if topo is None: return False for eles in topo: if eles.tag in ["scpEnable"]: return True, eles.text return False def work(self): """Excute task """ if not HAS_SCP: self.module.fail_json( msg="'Error: No scp package, please install it.'") if not HAS_PARAMIKO: self.module.fail_json( msg="'Error: No paramiko package, please install it.'") if self.local_file and len(self.local_file) > 4096: self.module.fail_json( msg="'Error: The maximum length of local_file is 4096.'") if self.remote_file and len(self.remote_file) > 4096: self.module.fail_json( msg="'Error: The maximum length of remote_file is 4096.'") retcode, cur_state = self.get_scp_enable() if retcode and cur_state == 'Disable': self.module.fail_json( msg="'Error: Please ensure SCP server is enabled.'") if not os.path.isfile(self.local_file): self.module.fail_json( msg="Local file {} not found".format(self.local_file)) dest = self.remote_file or ('/' + os.path.basename(self.local_file)) remote_exists, file_size = self.remote_file_exists( dest, file_system=self.file_system) if remote_exists and (os.path.getsize(self.local_file) != file_size): remote_exists = False if not remote_exists: self.changed = True file_exists = False else: file_exists = True self.transfer_result = 'The local file already exists on the device.' if not file_exists: self.transfer_file(dest) self.transfer_result = 'The local file has been successfully transferred to the device.' if self.remote_file is None: self.remote_file = '/' + os.path.basename(self.local_file) self.module.exit_json( changed=self.changed, transfer_result=self.transfer_result, local_file=self.local_file, remote_file=self.remote_file, file_system=self.file_system) def main(): """Main function entry""" argument_spec = dict( local_file=dict(required=True), remote_file=dict(required=False), file_system=dict(required=False, default='flash:') ) argument_spec.update(ce_argument_spec) filecopy_obj = FileCopy(argument_spec) filecopy_obj.work() if __name__ == '__main__': main()<|fim▁end|>
full_path = file_system + dst
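The completion above restores the one line the hole removed: full_path is simply the file-system prefix concatenated with the destination path, which remote_file_exists then splits back into the file name and directory used in the NETCONF query. The round trip in isolation (paths illustrative):

import os.path

file_system, dst = 'flash:', '/vrpcfg.zip'
full_path = file_system + dst                 # 'flash:/vrpcfg.zip'
file_name = os.path.basename(full_path)       # 'vrpcfg.zip'
file_path = os.path.dirname(full_path) + '/'  # 'flash:/', as CE_NC_GET_FILE_INFO expects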