prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>EditMovieCtrl.js<|end_file_name|><|fim▁begin|><|fim▁hole|>function EditMovieCtrl(MovieService,$stateParams) {
// ViewModel
const edit = this;
edit.title = 'Edit Movies' + $stateParams.id;
edit.back = function(){
window.history.back()
}
edit.checker = function(bool){
if(bool=='true')
return true;
if(bool=='false')
return false;
}
edit.click = function(bool,key){
edit.data[key] = !bool
}
MovieService.getID($stateParams.id).then(function(results){
if(results.status===404)
edit.data.movies='not found'
edit.data = results
})
// MovieService.get().then(function(results){
// edit.movies = results
// })
edit.processForm = function(){
MovieService.put(edit.data).then(function(res){
console.log(res)
})
}
}
EditMovieCtrl.$inject=['MovieService','$stateParams']
export default {
name: 'EditMovieCtrl',
fn: EditMovieCtrl
};<|fim▁end|> | |
<|file_name|>string.go<|end_file_name|><|fim▁begin|>package funcs
import (
"fmt"
"regexp"
"sort"
"strings"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/gocty"
)
// JoinFunc constructs a function that concatenates the string elements of one
// or more lists with a given separator.
var JoinFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "separator",
			Type: cty.String,
		},
	},
	// Accepts any number of lists after the separator.
	VarParam: &function.Parameter{
		Name: "lists",
		Type: cty.List(cty.String),
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		sep := args[0].AsString()
		listVals := args[1:]
		if len(listVals) < 1 {
			return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required")
		}

		// First pass: total the element counts so the result slice can be
		// allocated once. If any list isn't fully known yet, the result
		// can't be computed yet either.
		l := 0
		for _, list := range listVals {
			if !list.IsWhollyKnown() {
				return cty.UnknownVal(cty.String), nil
			}
			l += list.LengthInt()
		}

		items := make([]string, 0, l)
		for ai, list := range listVals {
			ei := 0
			for it := list.ElementIterator(); it.Next(); {
				_, val := it.Element()
				if val.IsNull() {
					// Argument indices are 1-based here because args[0] is the
					// separator; the list number is only reported when there is
					// more than one list.
					if len(listVals) > 1 {
						return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1)
					}
					return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei)
				}
				items = append(items, val.AsString())
				ei++
			}
		}

		return cty.StringVal(strings.Join(items, sep)), nil
	},
})
// SortFunc constructs a function that re-orders the elements of a given list
// of strings into ascending lexicographical order.
var SortFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.List(cty.String),
		},
	},
	Type: function.StaticReturnType(cty.List(cty.String)),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		listVal := args[0]
		if !listVal.IsWhollyKnown() {
			// If some of the element values aren't known yet then we
			// can't yet predict the order of the result.
			return cty.UnknownVal(retType), nil
		}
		if listVal.LengthInt() == 0 { // Easy path
			return listVal, nil
		}

		list := make([]string, 0, listVal.LengthInt())
		for it := listVal.ElementIterator(); it.Next(); {
			// iv is the element's index value, used only in the error message.
			iv, v := it.Element()
			if v.IsNull() {
				return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String())
			}
			list = append(list, v.AsString())
		}
		sort.Strings(list)

		retVals := make([]cty.Value, len(list))
		for i, s := range list {
			retVals[i] = cty.StringVal(s)
		}
		return cty.ListVal(retVals), nil
	},
})
// SplitFunc constructs a function that divides a given string at each
// occurrence of a given separator, returning a list of the pieces.
var SplitFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "separator",
			Type: cty.String,
		},
		{
			Name: "str",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.List(cty.String)),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		parts := strings.Split(args[1].AsString(), args[0].AsString())

		vals := make([]cty.Value, 0, len(parts))
		for _, part := range parts {
			vals = append(vals, cty.StringVal(part))
		}

		// cty.ListVal panics on an empty slice, so the canonical empty list
		// is returned explicitly in that case.
		if len(vals) == 0 {
			return cty.ListValEmpty(cty.String), nil
		}
		return cty.ListVal(vals), nil
	},
})
// chompNewlines matches any run of line-ending characters (\r\n, \r, \n) at
// the very end of a string. Compiled once at package scope instead of on
// every call, which the original did.
var chompNewlines = regexp.MustCompile(`(?:\r\n?|\n)*\z`)

// ChompFunc constructs a function that removes newline characters at the end of a string.
var ChompFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "str",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		return cty.StringVal(chompNewlines.ReplaceAllString(args[0].AsString(), "")), nil
	},
})
// IndentFunc constructs a function that adds a given number of spaces to the
// beginnings of all but the first line in a given multi-line string.
var IndentFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "spaces",
			Type: cty.Number,
		},
		{
			Name: "str",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var spaces int
		if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
			return cty.UnknownVal(cty.String), err
		}
		// strings.Repeat panics on a negative count, so reject negative
		// indentation with a proper error instead of crashing.
		if spaces < 0 {
			return cty.UnknownVal(cty.String), fmt.Errorf("spaces must not be negative")
		}
		data := args[1].AsString()
		pad := strings.Repeat(" ", spaces)
		return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil
	},
})
// ReplaceFunc constructions a function that searches a given string for another
// given substring, and replaces each occurence with a given replacement string.
var ReplaceFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,<|fim▁hole|> Type: cty.String,
},
{
Name: "replace",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
str := args[0].AsString()
substr := args[1].AsString()
replace := args[2].AsString()
// We search/replace using a regexp if the string is surrounded
// in forward slashes.
if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
re, err := regexp.Compile(substr[1 : len(substr)-1])
if err != nil {
return cty.UnknownVal(cty.String), err
}
return cty.StringVal(re.ReplaceAllString(str, replace)), nil
}
return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
},
})
// TitleFunc constructs a function that converts the first letter of each word
// in the given string to uppercase.
//
// NOTE(review): strings.Title is deprecated in modern Go (it does not handle
// Unicode word boundaries correctly); kept as-is for behavioral compatibility.
var TitleFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "str",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		return cty.StringVal(strings.Title(args[0].AsString())), nil
	},
})
// TrimSpaceFunc constructs a function that removes any leading and trailing
// whitespace characters from the given string.
var TrimSpaceFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "str",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		trimmed := strings.TrimSpace(args[0].AsString())
		return cty.StringVal(trimmed), nil
	},
})
// Join concatenates together the string elements of one or more lists with a
// given separator.
func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) {
	// Build the argument vector: separator first, then every list.
	args := append(make([]cty.Value, 0, len(lists)+1), sep)
	args = append(args, lists...)
	return JoinFunc.Call(args)
}
// Sort re-orders the elements of a given list of strings so that they are
// in ascending lexicographical order. Thin wrapper over SortFunc.
func Sort(list cty.Value) (cty.Value, error) {
	return SortFunc.Call([]cty.Value{list})
}

// Split divides a given string by a given separator, returning a list of
// strings containing the characters between the separator sequences.
// Thin wrapper over SplitFunc.
func Split(sep, str cty.Value) (cty.Value, error) {
	return SplitFunc.Call([]cty.Value{sep, str})
}

// Chomp removes newline characters at the end of a string.
// Thin wrapper over ChompFunc.
func Chomp(str cty.Value) (cty.Value, error) {
	return ChompFunc.Call([]cty.Value{str})
}

// Indent adds a given number of spaces to the beginnings of all but the first
// line in a given multi-line string. Thin wrapper over IndentFunc.
func Indent(spaces, str cty.Value) (cty.Value, error) {
	return IndentFunc.Call([]cty.Value{spaces, str})
}

// Replace searches a given string for another given substring,
// and replaces all occurrences with a given replacement string.
// Thin wrapper over ReplaceFunc.
func Replace(str, substr, replace cty.Value) (cty.Value, error) {
	return ReplaceFunc.Call([]cty.Value{str, substr, replace})
}

// Title converts the first letter of each word in the given string to uppercase.
// Thin wrapper over TitleFunc.
func Title(str cty.Value) (cty.Value, error) {
	return TitleFunc.Call([]cty.Value{str})
}
// TrimSpace removes any space characters from the start and end of the given string.
func TrimSpace(str cty.Value) (cty.Value, error) {
return TrimSpaceFunc.Call([]cty.Value{str})
}<|fim▁end|> | },
{
Name: "substr", |
<|file_name|>invoke-function.rs<|end_file_name|><|fim▁begin|>/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_lambda::{Client, Error, Region, PKG_VERSION};
use structopt::StructOpt;
/// Command-line options for this example, parsed by StructOpt.
#[derive(Debug, StructOpt)]
struct Opt {
    /// The AWS Region.
    #[structopt(short, long)]
    region: Option<String>,

    /// The AWS Lambda function's Amazon Resource Name (ARN).
    #[structopt(short, long)]
    arn: String,

    /// Whether to display additional runtime information.
    #[structopt(short, long)]
    verbose: bool,
}
// Runs a Lambda function.
// snippet-start:[lambda.rust.invoke-function]
/// Invokes the Lambda function identified by `arn` and prints a confirmation.
///
/// The invocation's response payload is ignored; only transport/service
/// errors are surfaced via `Error`.
async fn run_function(client: &Client, arn: &str) -> Result<(), Error> {
    client.invoke().function_name(arn).send().await?;
    println!("Invoked function.");
    Ok(())
}
// snippet-end:[lambda.rust.invoke-function]
/// Invokes a Lambda function by its ARN.
/// # Arguments<|fim▁hole|>/// If not supplied, uses the value of the **AWS_REGION** environment variable.
/// If the environment variable is not set, defaults to **us-west-2**.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
let Opt {
arn,
region,
verbose,
} = Opt::from_args();
let region_provider = RegionProviderChain::first_try(region.map(Region::new))
.or_default_provider()
.or_else(Region::new("us-west-2"));
println!();
if verbose {
println!("Lambda client version: {}", PKG_VERSION);
println!(
"Region: {}",
region_provider.region().await.unwrap().as_ref()
);
println!("Lambda function ARN: {}", arn);
println!();
}
let shared_config = aws_config::from_env().region(region_provider).load().await;
let client = Client::new(&shared_config);
run_function(&client, &arn).await
}<|fim▁end|> | ///
/// * `-a ARN` - The ARN of the Lambda function.
/// * `[-r REGION]` - The Region in which the client is created. |
<|file_name|>DrivePopupButton.java<|end_file_name|><|fim▁begin|>/*
* This file is part of muCommander, http://www.mucommander.com
* Copyright (C) 2002-2012 Maxence Bernard
*
* muCommander is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* muCommander is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.mucommander.ui.main;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.net.MalformedURLException;
import java.util.*;
import java.util.List;
import java.util.regex.PatternSyntaxException;
import javax.swing.*;
import javax.swing.filechooser.FileSystemView;
import com.mucommander.adb.AndroidMenu;
import com.mucommander.adb.AdbUtils;
import com.mucommander.bonjour.BonjourDirectory;
import com.mucommander.utils.FileIconsCache;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mucommander.bonjour.BonjourMenu;
import com.mucommander.bonjour.BonjourService;
import com.mucommander.bookmark.Bookmark;
import com.mucommander.bookmark.BookmarkListener;
import com.mucommander.bookmark.BookmarkManager;
import com.mucommander.commons.conf.ConfigurationEvent;
import com.mucommander.commons.conf.ConfigurationListener;
import com.mucommander.commons.file.AbstractFile;
import com.mucommander.commons.file.FileFactory;
import com.mucommander.commons.file.FileProtocols;
import com.mucommander.commons.file.FileURL;
import com.mucommander.commons.file.filter.PathFilter;
import com.mucommander.commons.file.filter.RegexpPathFilter;
import com.mucommander.commons.file.impl.local.LocalFile;
import com.mucommander.commons.runtime.OsFamily;
import com.mucommander.conf.TcConfigurations;
import com.mucommander.conf.TcPreference;
import com.mucommander.conf.TcPreferences;
import com.mucommander.utils.text.Translator;
import com.mucommander.ui.action.TcAction;
import com.mucommander.ui.action.impl.OpenLocationAction;
import com.mucommander.ui.button.PopupButton;
import com.mucommander.ui.dialog.server.FTPPanel;
import com.mucommander.ui.dialog.server.HTTPPanel;
import com.mucommander.ui.dialog.server.NFSPanel;
import com.mucommander.ui.dialog.server.SFTPPanel;
import com.mucommander.ui.dialog.server.SMBPanel;
import com.mucommander.ui.dialog.server.ServerConnectDialog;
import com.mucommander.ui.dialog.server.ServerPanel;
import com.mucommander.ui.event.LocationEvent;
import com.mucommander.ui.event.LocationListener;
import com.mucommander.ui.helper.MnemonicHelper;
import com.mucommander.ui.icon.CustomFileIconProvider;
import com.mucommander.ui.icon.FileIcons;
import com.mucommander.ui.icon.IconManager;
import ru.trolsoft.ui.TMenuSeparator;
/**
* <code>DrivePopupButton</code> is a button which, when clicked, pops up a menu with a list of volumes items that be used
* to change the current folder.
*
* @author Maxence Bernard
*/
public class DrivePopupButton extends PopupButton implements BookmarkListener, ConfigurationListener, LocationListener {
/** Lazily-initialized logger; always access via getLogger(). */
private static Logger logger;

/** FolderPanel instance that contains this button */
private FolderPanel folderPanel;

/** Current volumes */
private static AbstractFile volumes[];

/** static FileSystemView instance, has a (non-null) value only under Windows */
private static FileSystemView fileSystemView;

/** Caches extended drive names, has a (non-null) value only under Windows */
private static Map<AbstractFile, String> extendedNameCache;

/** Caches drive icons */
private static Map<AbstractFile, Icon> iconCache = new HashMap<>();

/** Filters out volumes from the list based on the exclude regexp defined in the configuration, null if the regexp
 * is not defined. */
private static PathFilter volumeFilter;
static {
    // Windows only: FileSystemView supplies extended drive names
    // (e.g. "Local Disk (C:)"); lookups are slow, hence the cache.
    if (OsFamily.WINDOWS.isCurrent()) {
        fileSystemView = FileSystemView.getFileSystemView();
        extendedNameCache = new HashMap<>();
    }
    // Build the optional volume-exclusion filter from the configuration.
    try {
        String excludeRegexp = TcConfigurations.getPreferences().getVariable(TcPreference.VOLUME_EXCLUDE_REGEXP);
        if (excludeRegexp != null) {
            volumeFilter = new RegexpPathFilter(excludeRegexp, true);
            volumeFilter.setInverted(true);
        }
    } catch(PatternSyntaxException e) {
        // NOTE(review): the value is read via TcPreference.VOLUME_EXCLUDE_REGEXP but the log
        // message references TcPreferences.VOLUME_EXCLUDE_REGEXP — confirm both name the same key.
        getLogger().info("Invalid regexp for conf variable " + TcPreferences.VOLUME_EXCLUDE_REGEXP, e);
    }
    // Initialize the volumes list
    volumes = getDisplayableVolumes();
}
/**
* Creates a new <code>DrivePopupButton</code> which is to be added to the given FolderPanel.
*
* @param folderPanel the FolderPanel instance this button will be added to
*/
DrivePopupButton(FolderPanel folderPanel) {
this.folderPanel = folderPanel;
// Listen to location events to update the button when the current folder changes
folderPanel.getLocationManager().addLocationListener(this);
<|fim▁hole|>
// Listen to configuration changes to update the button if the system file icons policy has changed
TcConfigurations.addPreferencesListener(this);
// Use new JButton decorations introduced in Mac OS X 10.5 (Leopard)
//if (OsFamily.MAC_OS_X.isCurrent() && OsVersion.MAC_OS_X_10_5.isCurrentOrHigher()) {
//setMargin(new Insets(6,8,6,8));
//putClientProperty("JComponent.sizeVariant", "small");
//putClientProperty("JComponent.sizeVariant", "large");
//putClientProperty("JButton.buttonType", "textured");
//}
}
/**
* Updates the button's label and icon to reflect the current folder and match one of the current volumes:
* <<ul>
* <li>If the specified folder corresponds to a bookmark, the bookmark's name will be displayed
* <li>If the specified folder corresponds to a local file, the enclosing volume's name will be displayed
* <li>If the specified folder corresponds to a remote file, the protocol's name will be displayed
* </ul>
* The button's icon will be the current folder's one.
*/
/**
 * Updates this button's label and icon to reflect the panel's current folder.
 */
private void updateButton() {
    AbstractFile currentFolder = folderPanel.getCurrentFolder();
    setText(buildLabel(currentFolder));
    // setToolTipText(newToolTip);
    // Set the folder icon based on the current system icons policy
    setIcon(FileIcons.getFileIcon(currentFolder));
}
/**
 * Returns the label to display for the given folder: the name of a matching
 * bookmark if one exists, otherwise the protocol name for remote folders, or
 * the enclosing volume's name for local folders.
 *
 * @param currentFolder the folder to build a label for, may be null
 * @return the label text, never null
 */
private String buildLabel(AbstractFile currentFolder) {
    String currentPath = currentFolder != null ? currentFolder.getAbsolutePath() : null;
    FileURL currentURL = currentFolder != null ? currentFolder.getURL() : null;
    // String newToolTip = null;
    // First tries to find a bookmark matching the specified folder
    List<Bookmark> bookmarks = BookmarkManager.getBookmarks();
    String newLabel = null;
    for (Bookmark b : bookmarks) {
        if (currentPath != null && currentPath.equals(b.getLocation())) {
            // Note: if several bookmarks match current folder, the first one will be used
            newLabel = b.getName();
            break;
        }
    }
    if (newLabel != null) {
        return newLabel;
    }
    // If no bookmark matched current folder
    String protocol = currentURL != null ? currentURL.getScheme() : null;
    if (!FileProtocols.FILE.equals(protocol)) {
        // Remote file, use the protocol's name
        return protocol != null ? protocol.toUpperCase() : "";
    } else {
        // Local file, use volume's name
        // Patch for Windows UNC network paths (weakly characterized by having a host different from 'localhost'):
        // display 'SMB' which is the underlying protocol
        if (OsFamily.WINDOWS.isCurrent() && !FileURL.LOCALHOST.equals(currentURL.getHost())) {
            return "SMB";
        } else {
            // getCanonicalPath() must be avoided under Windows for the following reasons:
            // a) it is not necessary, Windows doesn't have symlinks
            // b) it triggers the dreaded 'No disk in drive' error popup dialog.
            // c) when network drives are present but not mounted (e.g. X:\ mapped onto an SMB share),
            // getCanonicalPath which is I/O bound will take a looooong time to execute
            int bestIndex = getBestIndex(getVolumePath(currentFolder));
            return volumes[bestIndex].getName();
            // Not used because the call to FileSystemView is slow
            // if(fileSystemView!=null)
            // newToolTip = getWindowsExtendedDriveName(volumes[bestIndex]);
        }
    }
}
/**
 * Returns the index (in {@link #volumes}) of the volume whose path is the
 * longest prefix of the given path, or 0 if none matches.
 *
 * <p>Bug fix: each volume path is lowercased before comparison, but the
 * original never lowercased {@code currentPath}, so a mixed-case path could
 * never match any volume prefix. The current path is now lowercased too.
 *
 * @param currentPath the path of the current folder
 * @return index of the best-matching volume, 0 when nothing matches
 */
private int getBestIndex(String currentPath) {
    String currentPathLower = currentPath.toLowerCase();
    int bestLength = -1;
    int bestIndex = 0;
    for (int i = 0; i < volumes.length; i++) {
        String volumePath = getVolumePath(volumes[i]).toLowerCase();
        int len = volumePath.length();
        // Longest matching prefix wins.
        if (currentPathLower.startsWith(volumePath) && len > bestLength) {
            bestIndex = i;
            bestLength = len;
        }
    }
    return bestIndex;
}
/**
 * Returns the path of the given file to use for volume comparisons.
 */
@NotNull
private String getVolumePath(AbstractFile file) {
    // Avoid getCanonicalPath() under Windows (see buildLabel for the rationale).
    return OsFamily.WINDOWS.isCurrent()
            ? file.getAbsolutePath(false)
            : file.getCanonicalPath(false);
}
/**
 * Returns the extended name of the given local file, e.g. "Local Disk (C:)" for C:\. The returned value is
 * interesting only under Windows. This method is I/O bound and very slow so it should not be called from the main
 * event thread.
 *
 * @param localFile the file for which to return the extended name
 * @return the extended name of the given local file, falling back to the plain name
 */
private static String getExtendedDriveName(AbstractFile localFile) {
    // Note: fileSystemView.getSystemDisplayName(java.io.File) is unfortunately very very slow
    String name = fileSystemView.getSystemDisplayName((java.io.File)localFile.getUnderlyingFileObject());
    if (name == null || name.isEmpty()) { // This happens for CD/DVD drives when they don't contain any disc
        return localFile.getName();
    }
    return name;
}
/**
 * Returns the list of volumes to be displayed in the popup menu.
 *
 * <p>The raw list of volumes is fetched using {@link LocalFile#getVolumes()} and then
 * filtered using the regexp defined in the {@link TcPreferences#VOLUME_EXCLUDE_REGEXP} configuration variable
 * (if defined).
 *
 * @return the list of volumes to be displayed in the popup menu
 */
private static AbstractFile[] getDisplayableVolumes() {
    AbstractFile[] volumes = LocalFile.getVolumes();
    // No filter configured: show everything.
    if (volumeFilter != null) {
        return volumeFilter.filter(volumes);
    }
    return volumes;
}
////////////////////////////////
// PopupButton implementation //
////////////////////////////////

/**
 * Builds the popup menu shown when this button is clicked: volumes first,
 * then bookmarks, then network/Bonjour/ADB shortcuts, and finally the
 * "connect to server" entries.
 */
@Override
public JPopupMenu getPopupMenu() {
    JPopupMenu popupMenu = new JPopupMenu();
    // Update the list of volumes in case new ones were mounted
    volumes = getDisplayableVolumes();
    // Add volumes
    final MainFrame mainFrame = folderPanel.getMainFrame();
    MnemonicHelper mnemonicHelper = new MnemonicHelper(); // Provides mnemonics and ensures uniqueness
    addVolumes(popupMenu, mainFrame, mnemonicHelper);
    popupMenu.add(new TMenuSeparator());
    addBookmarks(popupMenu, mainFrame, mnemonicHelper);
    popupMenu.add(new TMenuSeparator());
    // Add 'Network shares' shortcut
    if (FileFactory.isRegisteredProtocol(FileProtocols.SMB)) {
        TcAction action = new CustomOpenLocationAction(mainFrame, new Bookmark(Translator.get("drive_popup.network_shares"), "smb:///", null));
        action.setIcon(IconManager.getIcon(IconManager.IconSet.FILE, CustomFileIconProvider.NETWORK_ICON_NAME));
        setMnemonic(popupMenu.add(action), mnemonicHelper);
    }
    if (BonjourDirectory.isActive()) {
        // Add Bonjour services menu
        setMnemonic(popupMenu.add(new BonjourMenu() {
            @Override
            public TcAction getMenuItemAction(BonjourService bs) {
                return new CustomOpenLocationAction(mainFrame, bs);
            }
        }), mnemonicHelper);
    }
    addAdbDevices(popupMenu, mainFrame, mnemonicHelper);
    popupMenu.add(new TMenuSeparator());
    // Add 'connect to server' shortcuts
    setMnemonic(popupMenu.add(new ServerConnectAction("SMB...", SMBPanel.class)), mnemonicHelper);
    setMnemonic(popupMenu.add(new ServerConnectAction("FTP...", FTPPanel.class)), mnemonicHelper);
    setMnemonic(popupMenu.add(new ServerConnectAction("SFTP...", SFTPPanel.class)), mnemonicHelper);
    setMnemonic(popupMenu.add(new ServerConnectAction("HTTP...", HTTPPanel.class)), mnemonicHelper);
    setMnemonic(popupMenu.add(new ServerConnectAction("NFS...", NFSPanel.class)), mnemonicHelper);
    return popupMenu;
}
/**
 * Adds one menu item per displayable volume. Extended Windows drive names and
 * system icons are filled in asynchronously by RefreshDriveNamesAndIcons
 * because fetching them is slow.
 */
private void addVolumes(JPopupMenu popupMenu, MainFrame mainFrame, MnemonicHelper mnemonicHelper) {
    boolean useExtendedDriveNames = fileSystemView != null;
    List<JMenuItem> itemsV = new ArrayList<>();
    int nbVolumes = volumes.length;
    for (int i = 0; i < nbVolumes; i++) {
        TcAction action = new CustomOpenLocationAction(mainFrame, volumes[i]);
        String volumeName = volumes[i].getName();
        // If several volumes have the same name, use the volume's path for the action's label instead of the
        // volume's name, to disambiguate
        for (int j = 0; j < nbVolumes; j++) {
            if (j != i && volumes[j].getName().equalsIgnoreCase(volumeName)) {
                action.setLabel(volumes[i].getAbsolutePath());
                break;
            }
        }
        JMenuItem item = popupMenu.add(action);
        setMnemonic(item, mnemonicHelper);
        // Set icon from cache
        Icon icon = iconCache.get(volumes[i]);
        if (icon != null) {
            item.setIcon(icon);
        }
        if (useExtendedDriveNames) {
            // Use the last known value (if any) while we update it in a separate thread
            String previousExtendedName = extendedNameCache.get(volumes[i]);
            if (previousExtendedName != null) {
                item.setText(previousExtendedName);
            }
        }
        itemsV.add(item); // JMenu offers no way to retrieve a particular JMenuItem, so we have to keep them
    }
    new RefreshDriveNamesAndIcons(popupMenu, itemsV).start();
}
/**
 * Adds all bookmarks to the popup menu, or a disabled placeholder item when
 * no bookmark exists.
 */
private void addBookmarks(JPopupMenu popupMenu, MainFrame mainFrame, MnemonicHelper mnemonicHelper) {
    List<Bookmark> bookmarks = BookmarkManager.getBookmarks();
    if (bookmarks.isEmpty()) {
        // No bookmark: add a disabled menu item saying there is no bookmark
        popupMenu.add(Translator.get("bookmarks_menu.no_bookmark")).setEnabled(false);
        return;
    }
    addBookmarksGroup(popupMenu, mainFrame, mnemonicHelper, bookmarks, null);
}
/**
 * Recursively adds the bookmarks belonging to the given group (parent name,
 * null for the root level) to the given menu. A bookmark with an empty
 * location is a group; a group named BOOKMARKS_SEPARATOR becomes a separator.
 */
private void addBookmarksGroup(JComponent parentMenu, MainFrame mainFrame, MnemonicHelper mnemonicHelper,
List<Bookmark> bookmarks, String parent) {
    for (Bookmark b : bookmarks) {
        // Only process bookmarks that belong to the current group level.
        if ((b.getParent() == null && parent == null) || (parent != null && parent.equals(b.getParent()))) {
            if (b.getName().equals(BookmarkManager.BOOKMARKS_SEPARATOR) && b.getLocation().isEmpty()) {
                parentMenu.add(new TMenuSeparator());
                continue;
            }
            if (b.getLocation().isEmpty()) {
                // Empty location marks a sub-group: recurse into it.
                JMenu groupMenu = new JMenu(b.getName());
                parentMenu.add(groupMenu);
                addBookmarksGroup(groupMenu, mainFrame, mnemonicHelper, bookmarks, b.getName());
                setMnemonic(groupMenu, mnemonicHelper);
            } else {
                JMenuItem item = createBookmarkMenuItem(parentMenu, mainFrame, b);
                setMnemonic(item, mnemonicHelper);
            }
        }
    }
}
/**
 * Creates and adds a menu item for the given bookmark, with an icon chosen
 * from the bookmark's location (local file icon, network icon for
 * ftp/sftp/http, Android icon for adb).
 */
private JMenuItem createBookmarkMenuItem(JComponent parentMenu, MainFrame mainFrame, Bookmark b) {
    JMenuItem item;
    // JPopupMenu and JMenu share no common add(Action) ancestor, hence the cast.
    if (parentMenu instanceof JPopupMenu) {
        item = ((JPopupMenu)parentMenu).add(new CustomOpenLocationAction(mainFrame, b));
    } else {
        item = ((JMenu)parentMenu).add(new CustomOpenLocationAction(mainFrame, b));
    }
    //JMenuItem item = popupMenu.add(new CustomOpenLocationAction(mainFrame, b));
    String location = b.getLocation();
    // No scheme separator: treat the location as a local path.
    if (!location.contains("://")) {
        AbstractFile file = FileFactory.getFile(location);
        if (file != null) {
            Icon icon = FileIconsCache.getInstance().getIcon(file);
            if (icon != null) {
                item.setIcon(icon);
            }
            // Image image = FileIconsCache.getInstance().getImageIcon(file);
            // if (image != null) {
            // item.setIcon(new ImageIcon(image));
            // }
        }
    } else if (location.startsWith("ftp://") || location.startsWith("sftp://") || location.startsWith("http://")) {
        item.setIcon(IconManager.getIcon(IconManager.IconSet.FILE, CustomFileIconProvider.NETWORK_ICON_NAME));
    } else if (location.startsWith("adb://")) {
        item.setIcon(IconManager.getIcon(IconManager.IconSet.FILE, CustomFileIconProvider.ANDROID_ICON_NAME));
    }
    return item;
}
/**
 * Adds an Android-devices submenu when the adb tool is available.
 */
private void addAdbDevices(JPopupMenu popupMenu, MainFrame mainFrame, MnemonicHelper mnemonicHelper) {
    if (AdbUtils.checkAdb()) {
        setMnemonic(popupMenu.add(new AndroidMenu() {
            @Override
            public TcAction getMenuItemAction(String deviceSerial) {
                FileURL url = getDeviceURL(deviceSerial);
                return new CustomOpenLocationAction(mainFrame, url);
            }

            @Nullable
            private FileURL getDeviceURL(String deviceSerial) {
                try {
                    return FileURL.getFileURL("adb://" + deviceSerial);
                } catch (MalformedURLException e) {
                    // Serial came from adb itself, so this is not expected to happen.
                    e.printStackTrace();
                    return null;
                }
            }
        }), mnemonicHelper);
    }
}
/**
 * Calls to getExtendedDriveName(String) are very slow, so they are performed in a separate thread so as
 * to not lock the main even thread. The popup menu gets first displayed with the short drive names, and
 * then refreshed with the extended names as they are retrieved.
 */
private class RefreshDriveNamesAndIcons extends Thread {

    private JPopupMenu popupMenu;
    /** Menu items in the same order as the volumes array. */
    private List<JMenuItem> items;

    RefreshDriveNamesAndIcons(JPopupMenu popupMenu, List<JMenuItem> items) {
        super("RefreshDriveNamesAndIcons");
        this.popupMenu = popupMenu;
        this.items = items;
    }

    @Override
    public void run() {
        final boolean useExtendedDriveNames = fileSystemView != null;
        for (int i = 0; i < items.size(); i++) {
            final JMenuItem item = items.get(i);
            // Slow, I/O-bound lookups happen on this background thread...
            final String extendedName = getExtendedDriverName(useExtendedDriveNames, volumes[i]);
            final Icon icon = getIcon(volumes[i]);
            // ...while the actual UI mutation is dispatched to the EDT.
            SwingUtilities.invokeLater(() -> {
                if (useExtendedDriveNames) {
                    item.setText(extendedName);
                }
                if (icon != null) {
                    item.setIcon(icon);
                }
            });
        }
        // Re-calculate the popup menu's dimensions
        SwingUtilities.invokeLater(() -> {
            popupMenu.invalidate();
            popupMenu.pack();
        });
    }

    @Nullable
    private Icon getIcon(AbstractFile file) {
        // Set system icon for volumes, only if system icons are available on the current platform
        final Icon icon = FileIcons.hasProperSystemIcons() ? FileIcons.getSystemFileIcon(file) : null;
        if (icon != null) {
            iconCache.put(file, icon);
        }
        return icon;
    }

    @Nullable
    private String getExtendedDriverName(boolean useExtendedDriveNames, AbstractFile file) {
        if (useExtendedDriveNames) {
            // Under Windows, show the extended drive name (e.g. "Local Disk (C:)" instead of just "C:") but use
            // the simple drive name for the mnemonic (i.e. 'C' instead of 'L').
            String extendedName = getExtendedDriveName(file);
            // Keep the extended name for later (see above)
            extendedNameCache.put(file, extendedName);
            return extendedName;
        }
        return null;
    }
}
/**
 * Convenience method that sets a mnemonic to the given JMenuItem, using the specified MnemonicHelper.
 *
 * @param menuItem the menu item for which to set a mnemonic
 * @param mnemonicHelper the MnemonicHelper instance to be used to determine the mnemonic's character.
 */
private void setMnemonic(JMenuItem menuItem, MnemonicHelper mnemonicHelper) {
    menuItem.setMnemonic(mnemonicHelper.getMnemonic(menuItem.getText()));
}

//////////////////////////////
// BookmarkListener methods //
//////////////////////////////

/**
 * Invoked when the bookmark list changes; refreshes the button label in case
 * a bookmark matching the current location was added, edited or removed.
 */
public void bookmarksChanged() {
    // Refresh label in case a bookmark with the current location was changed
    updateButton();
}
///////////////////////////////////
// ConfigurationListener methods //
///////////////////////////////////

/**
 * Listens to certain configuration variables.
 */
public void configurationChanged(ConfigurationEvent event) {
    String var = event.getVariable();
    // Update the button's icon if the system file icons policy has changed
    if (var.equals(TcPreferences.USE_SYSTEM_FILE_ICONS)) {
        updateButton();
    }
}

////////////////////////
// Overridden methods //
////////////////////////

/**
 * Caps the preferred width at 160 pixels so long bookmark names don't starve
 * the adjacent location field.
 */
@Override
public Dimension getPreferredSize() {
    // Limit button's maximum width to something reasonable and leave enough space for location field,
    // as bookmarks name can be as long as users want them to be.
    // Note: would be better to use JButton.setMaximumSize() but it doesn't seem to work
    Dimension d = super.getPreferredSize();
    if (d.width > 160) {
        d.width = 160;
    }
    return d;
}
///////////////////
// Inner classes //
///////////////////

/**
 * This action pops up {@link com.mucommander.ui.dialog.server.ServerConnectDialog} for a specified
 * protocol.
 */
private class ServerConnectAction extends AbstractAction {

    /** Server panel class determining which protocol's dialog to show. */
    private Class<? extends ServerPanel> serverPanelClass;

    private ServerConnectAction(String label, Class<? extends ServerPanel> serverPanelClass) {
        super(label);
        this.serverPanelClass = serverPanelClass;
    }

    public void actionPerformed(ActionEvent actionEvent) {
        new ServerConnectDialog(folderPanel, serverPanelClass).showDialog();
    }
}
/**
 * This modified {@link OpenLocationAction} changes the current folder on the {@link FolderPanel} that contains
 * this button, instead of the currently active {@link FolderPanel}.
 */
private class CustomOpenLocationAction extends OpenLocationAction {

    CustomOpenLocationAction(MainFrame mainFrame, Bookmark bookmark) {
        super(mainFrame, new HashMap<>(), bookmark);
    }

    CustomOpenLocationAction(MainFrame mainFrame, AbstractFile file) {
        super(mainFrame, new HashMap<>(), file);
    }

    CustomOpenLocationAction(MainFrame mainFrame, BonjourService bs) {
        super(mainFrame, new HashMap<>(), bs);
    }

    CustomOpenLocationAction(MainFrame mainFrame, FileURL url) {
        super(mainFrame, new HashMap<>(), url);
    }

    ////////////////////////
    // Overridden methods //
    ////////////////////////

    /**
     * Returns the FolderPanel that owns this button rather than the active one.
     */
    @Override
    protected FolderPanel getFolderPanel() {
        return folderPanel;
    }
}
/**********************************
 * LocationListener Implementation
 **********************************/

public void locationChanged(LocationEvent e) {
    // Update the button's label to reflect the new current folder
    updateButton();
}

// The remaining location events require no UI update here.
public void locationChanging(LocationEvent locationEvent) { }

public void locationCancelled(LocationEvent locationEvent) { }

public void locationFailed(LocationEvent locationEvent) {}

/**
 * Returns the lazily-created logger for this class. The unsynchronized
 * check is a benign race: getLogger(Class) returns the same cached instance.
 */
private static Logger getLogger() {
    if (logger == null) {
        logger = LoggerFactory.getLogger(DrivePopupButton.class);
    }
    return logger;
}
}<|fim▁end|> | // Listen to bookmark changes to update the button if a bookmark corresponding to the current folder
// has been added/edited/removed
BookmarkManager.addBookmarkListener(this); |
<|file_name|>cat.rs<|end_file_name|><|fim▁begin|>#![crate_name = "cat"]
#![feature(collections, rustc_private)]
#![feature(box_syntax, unsafe_destructor)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: cat (GNU coreutils) 8.13 */
extern crate getopts;
extern crate libc;
use std::fs::File;
use std::io::{stdout, stdin, stderr, Write, Read};
use std::io::Result;
use std::intrinsics::{copy_nonoverlapping};
use libc::consts::os::posix88::STDIN_FILENO;
use libc::funcs::posix88::unistd::isatty;
use libc::types::os::arch::c95::c_int;
pub fn uumain(args: Vec<String>) -> i32 {
let program = &args[0];
let opts = [
getopts::optflag("A", "show-all", "equivalent to -vET"),
getopts::optflag("b", "number-nonblank",
"number nonempty output lines, overrides -n"),
getopts::optflag("e", "", "equivalent to -vE"),
getopts::optflag("E", "show-ends", "display $ at end of each line"),
getopts::optflag("n", "number", "number all output lines"),
getopts::optflag("s", "squeeze-blank", "suppress repeated empty output lines"),
getopts::optflag("t", "", "equivalent to -vT"),
getopts::optflag("T", "show-tabs", "display TAB characters as ^I"),
getopts::optflag("v", "show-nonprinting",
"use ^ and M- notation, except for LF (\\n) and TAB (\\t)"),
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), &opts) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
println!("cat 1.0.0");
println!("");
println!("Usage:");
println!(" {0} [OPTION]... [FILE]...", program);
println!("");
print!("{}", &getopts::usage("Concatenate FILE(s), or standard input, to \
standard output.", &opts)[..]);
println!("");<|fim▁hole|> println!("cat 1.0.0");
return 0;
}
let mut number_mode = NumberingMode::NumberNone;
if matches.opt_present("n") {
number_mode = NumberingMode::NumberAll;
}
if matches.opt_present("b") {
number_mode = NumberingMode::NumberNonEmpty;
}
let show_nonprint = matches.opts_present(&["A".to_string(), "e".to_string(),
"t".to_string(), "v".to_string()]);
let show_ends = matches.opts_present(&["E".to_string(), "A".to_string(),
"e".to_string()]);
let show_tabs = matches.opts_present(&["A".to_string(), "T".to_string(),
"t".to_string()]);
let squeeze_blank = matches.opt_present("s");
let mut files = matches.free;
if files.is_empty() {
files.push("-".to_string());
}
exec(files, number_mode, show_nonprint, show_ends, show_tabs, squeeze_blank);
0
}
/// Line-numbering behaviour requested on the command line:
/// `NumberAll` for -n, `NumberNonEmpty` for -b (which overrides -n).
#[derive(Eq, PartialEq)]
enum NumberingMode {
    NumberNone,
    NumberNonEmpty,
    NumberAll,
}
/// Line-oriented output path: handles -n/-b numbering, -s squeezing and
/// -E end-of-line markers without any per-byte character rewriting.
fn write_lines(files: Vec<String>, number: NumberingMode, squeeze_blank: bool,
               show_ends: bool) {
    // Line numbers continue across input files.
    let mut line_counter: usize = 1;
    for (mut reader, interactive) in files.iter().filter_map(|p| open(&p[..])) {
        let mut in_buf = [0; 1024 * 31];
        let mut out_buf = [0; 1024 * 64];
        let mut writer = UnsafeWriter::new(&mut out_buf[..], stdout());
        // True while positioned at the beginning of an output line.
        let mut at_line_start = true;
        while let Ok(n) = reader.read(&mut in_buf) {
            if n == 0 { break }
            let in_buf = &in_buf[..n];
            let mut buf_pos = 0..n;
            loop {
                // Drain the staging buffer periodically so UnsafeWriter's
                // fixed-size buffer never overflows.
                writer.possibly_flush();
                let pos = match buf_pos.next() {
                    Some(p) => p,
                    None => break,
                };
                if in_buf[pos] == '\n' as u8 {
                    // Empty line: emit it unless -s is squeezing repeats.
                    if !at_line_start || !squeeze_blank {
                        if at_line_start && number == NumberingMode::NumberAll {
                            (write!(&mut writer, "{0:6}\t", line_counter)).unwrap();
                            line_counter += 1;
                        }
                        if show_ends {
                            writer.write_all(&['$' as u8]).unwrap();
                        }
                        writer.write_all(&['\n' as u8]).unwrap();
                        if interactive {
                            // Keep a terminal session responsive.
                            writer.flush().unwrap();
                        }
                    }
                    at_line_start = true;
                    continue;
                }
                // Non-empty line: numbered by both -n and -b.
                if at_line_start && number != NumberingMode::NumberNone {
                    (write!(&mut writer, "{0:6}\t", line_counter)).unwrap();
                    line_counter += 1;
                }
                // Copy up to the next newline in one shot if there is one.
                match in_buf[pos..].iter().position(|c| *c == '\n' as u8) {
                    Some(p) => {
                        writer.write_all(&in_buf[pos..pos + p]).unwrap();
                        if show_ends {
                            writer.write_all(&['$' as u8]).unwrap();
                        }
                        writer.write_all(&['\n' as u8]).unwrap();
                        if interactive {
                            writer.flush().unwrap();
                        }
                        buf_pos = pos + p + 1..n;
                        at_line_start = true;
                    },
                    None => {
                        // The line continues into the next read: copy the
                        // tail and remember we are mid-line.
                        writer.write_all(&in_buf[pos..]).unwrap();
                        at_line_start = false;
                        break;
                    }
                };
            }
        }
    }
}
/// Byte-oriented output path used when -v/-T (or -A/-e/-t) require
/// rewriting individual characters; also honours numbering, -s and -E.
fn write_bytes(files: Vec<String>, number: NumberingMode, squeeze_blank: bool,
               show_ends: bool, show_nonprint: bool, show_tabs: bool) {
    // Line numbers continue across input files.
    let mut line_counter: usize = 1;
    for (mut reader, interactive) in files.iter().filter_map(|p| open(&p[..])) {
        // Flush all 1024 iterations.
        let mut flush_counter = 0usize..1024;
        let mut in_buf = [0; 1024 * 32];
        let mut out_buf = [0; 1024 * 64];
        let mut writer = UnsafeWriter::new(&mut out_buf[..], stdout());
        // True while positioned at the beginning of an output line.
        let mut at_line_start = true;
        while let Ok(n) = reader.read(&mut in_buf) {
            if n == 0 { break }
            for &byte in in_buf[..n].iter() {
                if flush_counter.next().is_none() {
                    writer.possibly_flush();
                    flush_counter = 0usize..1024;
                }
                if byte == '\n' as u8 {
                    // Empty line: emit it unless -s is squeezing repeats.
                    if !at_line_start || !squeeze_blank {
                        if at_line_start && number == NumberingMode::NumberAll {
                            (write!(&mut writer, "{0:6}\t", line_counter)).unwrap();
                            line_counter += 1;
                        }
                        if show_ends {
                            writer.write_all(&['$' as u8]).unwrap();
                        }
                        writer.write_all(&['\n' as u8]).unwrap();
                        if interactive {
                            writer.flush().unwrap();
                        }
                    }
                    at_line_start = true;
                    continue;
                }
                if at_line_start && number != NumberingMode::NumberNone {
                    (write!(&mut writer, "{0:6}\t", line_counter)).unwrap();
                    line_counter += 1;
                }
                // BUGFIX: any non-newline byte ends the start-of-line state.
                // Previously this flag was only cleared inside the numbering
                // branch above, so with numbering disabled (e.g. `cat -s -T`)
                // it stayed true after text bytes and the squeeze logic then
                // swallowed the newline terminating a non-empty line.
                at_line_start = false;
                // This code is slow because of the many branches. cat in glibc avoids
                // this by having the whole loop inside show_nonprint.
                if byte == '\t' as u8 {
                    if show_tabs {
                        writer.write_all("^I".as_bytes())
                    } else {
                        writer.write_all(&[byte])
                    }
                } else if show_nonprint {
                    // High-bit (meta) bytes print as M-<low seven bits>.
                    let byte = match byte {
                        128 ... 255 => {
                            writer.write_all("M-".as_bytes()).unwrap();
                            byte - 128
                        },
                        _ => byte,
                    };
                    match byte {
                        // Control characters print caret-style (^A..); DEL is ^?.
                        0 ... 31 => writer.write_all(&['^' as u8, byte + 64]),
                        127 => writer.write_all(&['^' as u8, byte - 64]),
                        _ => writer.write_all(&[byte]),
                    }
                } else {
                    writer.write_all(&[byte])
                }.unwrap();
            }
        }
    }
}
/// Fast path: no per-line processing was requested, so just pump raw
/// bytes from each input straight to stdout in 64 KiB chunks.
fn write_fast(files: Vec<String>) {
    let mut out = stdout();
    let mut chunk = [0; 1024 * 64];
    for (mut source, _) in files.iter().filter_map(|p| open(&p[..])) {
        loop {
            match source.read(&mut chunk) {
                Ok(0) => break,
                Ok(count) => out.write_all(&chunk[..count]).unwrap(),
                // Stop copying this input on a read error, like the
                // `while let Ok(..)` form this replaces.
                Err(_) => break,
            }
        }
    }
}
/// Dispatches to the cheapest writer that can honour the requested flags.
fn exec(files: Vec<String>, number: NumberingMode, show_nonprint: bool,
        show_ends: bool, show_tabs: bool, squeeze_blank: bool) {
    // Byte-level rewriting is required whenever characters get replaced.
    if show_nonprint || show_tabs {
        write_bytes(files, number, squeeze_blank, show_ends, show_nonprint, show_tabs);
        return;
    }
    // Line-level processing covers numbering, squeezing and end markers;
    // otherwise a raw byte copy is sufficient.
    let needs_line_handling =
        number != NumberingMode::NumberNone || squeeze_blank || show_ends;
    if needs_line_handling {
        write_lines(files, number, squeeze_blank, show_ends);
    } else {
        write_fast(files);
    }
}
/// Opens `path` for reading; "-" means stdin. The second tuple element is
/// true when the source is an interactive terminal (output is then flushed
/// per line). An unreadable file prints an error to stderr and yields None
/// so the caller can simply skip it.
/// NOTE: uses the pre-1.0 `box` syntax enabled by this crate's features.
fn open(path: &str) -> Option<(Box<Read>, bool)> {
    if path == "-" {
        let stdin = stdin();
        // Only stdin can be a TTY here; regular files never are.
        let interactive = unsafe { isatty(STDIN_FILENO) } != 0 as c_int;
        return Some((box stdin as Box<Read>, interactive));
    }
    match File::open(path) {
        Ok(f) => Some((box f as Box<Read>, false)),
        Err(e) => {
            (writeln!(&mut stderr(), "cat: {0}: {1}", path, e.to_string())).unwrap();
            None
        },
    }
}
/// A manually bounds-checked buffered writer: `write` copies into `buf`
/// without growing it, so callers must call `possibly_flush` often enough
/// that the staged data never exceeds the buffer (see `threshold`).
struct UnsafeWriter<'a, W: Write> {
    inner: W,             // destination sink (stdout in this program)
    buf: &'a mut [u8],    // caller-provided staging buffer
    pos: usize,           // number of valid bytes currently staged
    threshold: usize,     // flush trigger: half of buf.len()
}
impl<'a, W: Write> UnsafeWriter<'a, W> {
    /// Wraps `inner`, staging writes in `buf`. The flush threshold is half
    /// the buffer so callers of `possibly_flush` always leave headroom.
    fn new(buf: &'a mut [u8], inner: W) -> UnsafeWriter<'a, W> {
        let threshold = buf.len()/2;
        UnsafeWriter {
            inner: inner,
            buf: buf,
            pos: 0,
            threshold: threshold,
        }
    }
    /// Writes all staged bytes to the underlying writer and empties the
    /// buffer.
    /// BUGFIX: uses write_all instead of write -- the latter may perform a
    /// short write, and the old code discarded the written count, silently
    /// dropping the unwritten tail of the buffer.
    fn flush_buf(&mut self) -> Result<()> {
        if self.pos != 0 {
            let ret = self.inner.write_all(&self.buf[..self.pos]);
            self.pos = 0;
            ret
        } else {
            Ok(())
        }
    }
    /// Flushes once the buffer passes the half-full threshold; callers
    /// invoke this periodically so `write` never overruns the buffer.
    fn possibly_flush(&mut self) {
        if self.pos > self.threshold {
            self.inner.write_all(&self.buf[..self.pos]).unwrap();
            self.pos = 0;
        }
    }
}
// Kept out-of-line (and never inlined) so the hot write() path below stays
// small; reaching this means the staging buffer's capacity contract was
// violated by the caller.
#[inline(never)]
fn fail() -> ! {
    panic!("assertion failed");
}
impl<'a, W: Write> Write for UnsafeWriter<'a, W> {
    // Copies `buf` into the staging buffer without flushing. Panics via
    // fail() if the caller let the buffer fill past capacity (i.e. did not
    // call possibly_flush often enough) -- that is the "unsafe" contract.
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        let dst = &mut self.buf[self.pos..];
        let len = buf.len();
        if len > dst.len() {
            fail();
        }
        // The length check above guarantees `dst` has room, and the source
        // and destination slices cannot overlap.
        unsafe {
            copy_nonoverlapping(buf.as_ptr(), dst.as_mut_ptr(), len)
        }
        self.pos += len;
        Ok(len)
    }
    // Drain the staging buffer, then flush the underlying writer.
    fn flush(&mut self) -> Result<()> {
        self.flush_buf().and_then(|()| self.inner.flush())
    }
}
#[unsafe_destructor]
impl<'a, W: Write> Drop for UnsafeWriter<'a, W> {
    // Best-effort flush of any staged bytes on drop; the error is ignored
    // deliberately because panicking inside a destructor would abort.
    fn drop(&mut self) {
        let _ = self.flush_buf();
    }
}
/* vim: set ai ts=4 sw=4 sts=4 et : */<|fim▁end|> | println!("With no FILE, or when FILE is -, read standard input.");
return 0;
}
if matches.opt_present("version") { |
<|file_name|>metering_rpc_agent_api.py<|end_file_name|><|fim▁begin|># Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Sylvain Afchain <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import messaging
from neutron.common import constants
from neutron.common import rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)<|fim▁hole|> """API for plugin to notify L3 metering agent."""
    def __init__(self, topic=topics.METERING_AGENT):
        """Build a versioned RPC client targeting the metering agent topic."""
        super(MeteringAgentNotifyAPI, self).__init__()
        # All notifications are cast through this oslo.messaging client.
        target = messaging.Target(topic=topic, version='1.0')
        self.client = rpc.get_client(target)
def _agent_notification(self, context, method, routers):
"""Notify l3 metering agents hosted by l3 agent hosts."""
adminContext = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_plugin()
l3_routers = {}
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
adminContext, [router['id']],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug(_('Notify metering agent at %(topic)s.%(host)s '
'the message %(method)s'),
{'topic': self.client.target.topic,
'host': l3_agent.host,
'method': method})
l3_router = l3_routers.get(l3_agent.host, [])
l3_router.append(router)
l3_routers[l3_agent.host] = l3_router
for host, routers in l3_routers.iteritems():
topic = '%s.%s' % (self.client.target.topic, host)
cctxt = self.client.prepare(topic=topic)
cctxt.cast(context, method, routers=routers)
    def _notification_fanout(self, context, method, router_id):
        """Fanout-cast `method` for `router_id` to every metering agent."""
        LOG.debug(_('Fanout notify metering agent at %(topic)s the message '
                    '%(method)s on router %(router_id)s'),
                  {'topic': self.client.target.topic,
                   'method': method,
                   'router_id': router_id})
        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, method, router_id=router_id)
    def _notification(self, context, method, routers):
        """Notify all the agents that are hosting the routers."""
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
            plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            # Scheduler extension available: target only the hosting agents.
            self._agent_notification(context, method, routers)
        else:
            # No scheduler extension: fall back to a fanout cast to everyone.
            cctxt = self.client.prepare(fanout=True)
            cctxt.cast(context, method, routers=routers)
    def router_deleted(self, context, router_id):
        # Deletion is fanned out: the router's hosting info is already gone.
        self._notification_fanout(context, 'router_deleted', router_id)

    def routers_updated(self, context, routers):
        # Skip the cast entirely when there is nothing to report.
        if routers:
            self._notification(context, 'routers_updated', routers)

    def update_metering_label_rules(self, context, routers):
        self._notification(context, 'update_metering_label_rules', routers)

    def add_metering_label(self, context, routers):
        self._notification(context, 'add_metering_label', routers)

    def remove_metering_label(self, context, routers):
        self._notification(context, 'remove_metering_label', routers)
class MeteringAgentNotifyAPI(object): |
<|file_name|>SequentialIntegerDataGenerator.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.pherf.rules;
import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
import org.apache.phoenix.pherf.configuration.Column;
import org.apache.phoenix.pherf.configuration.DataSequence;
import org.apache.phoenix.pherf.configuration.DataTypeMapping;
import java.util.concurrent.atomic.AtomicLong;
public class SequentialIntegerDataGenerator implements RuleBasedDataGenerator {
private final Column columnRule;
private final AtomicLong counter;
private final long minValue;
private final long maxValue;
public SequentialIntegerDataGenerator(Column columnRule) {<|fim▁hole|> Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL);
Preconditions.checkArgument(isIntegerType(columnRule.getType()));
this.columnRule = columnRule;
minValue = columnRule.getMinValue();
maxValue = columnRule.getMaxValue();
counter = new AtomicLong(0);
}
/**
* Note that this method rolls over for attempts to get larger than maxValue
* @return new DataValue
*/
@Override
public DataValue getDataValue() {
return new DataValue(columnRule.getType(), String.valueOf((counter.getAndIncrement() % (maxValue - minValue + 1)) + minValue));
}
// Probably could go into a util class in the future
boolean isIntegerType(DataTypeMapping mapping) {
switch (mapping) {
case BIGINT:
case INTEGER:
case TINYINT:
case UNSIGNED_LONG:
return true;
default:
return false;
}
}
}<|fim▁end|> | |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>"""
This file contains the unit tests for the :mod:`communication` app.
Since this app has no models there is model and view tests:
* :class:`~communication.tests.CommunicationModelTests`
* :class:`~communication.tests.CommunicationViewTests`
"""
from lab_website.tests import BasicTests
from communication.models import LabAddress,LabLocation,Post
from personnel.models import Address, Person
from papers.models import Publication
from projects.models import Project
class CommunicationModelTests(BasicTests):
    '''This class tests the models in the :mod:`communication` app.'''

    fixtures = ['test_address',]

    def test_create_new_lab_address(self):
        '''This test creates a :class:`~communication.models.LabAddress` with the required information.'''
        test_address = LabAddress(type='Primary', address=Address.objects.get(pk=1)) #repeat for all required fields
        test_address.save()
        self.assertEqual(test_address.pk, 1) #presumes no models loaded in fixture data

    def test_lab_address_unicode(self):
        '''This tests the unicode representation of a :class:`~communication.models.LabAddress`.'''
        test_address = LabAddress(type='Primary', address=Address.objects.get(pk=1)) #repeat for all required fields
        test_address.save()
        self.assertEqual(test_address.pk, 1) #presumes no models loaded in fixture data
        # The lab address delegates its unicode form to the wrapped Address.
        self.assertEqual(test_address.__unicode__(), Address.objects.get(pk=1).__unicode__())

    def test_create_new_lab_location(self):
        '''This test creates a :class:`~communication.models.LabLocation` with the required information only.'''
        test_location = LabLocation(name = 'Memphis',
            type='City',
            priority=1) #repeat for all required fields
        test_location.save()
        self.assertEqual(test_location.pk, 1) #presumes no models loaded in fixture data

    def test_create_new_lab_location_all(self):
        '''This test creates a :class:`~communication.models.LabLocation` with all fields included.'''
        test_location = LabLocation(name = 'Memphis',
            type='City',
            priority=1,
            address=Address.objects.get(pk=1),
            url = 'www.cityofmemphis.org',
            description = 'some description about the place',
            lattitude = 35.149534,
            longitude = -90.04898,) #repeat for all required fields
        test_location.save()
        self.assertEqual(test_location.pk, 1) #presumes no models loaded in fixture data

    def test_lab_location_unicode(self):
        '''This tests the unicode representation of a :class:`~communication.models.LabLocation`.'''
        test_location = LabLocation(name = 'Memphis',
            type='City',
            priority=1) #repeat for all required fields
        test_location.save()
        self.assertEqual(test_location.pk, 1)
        self.assertEqual(test_location.__unicode__(), 'Memphis')
class CommunicationViewTests(BasicTests):
    '''This class tests the views associated with the :mod:`communication` app.'''

    def test_feed_details_view(self):
        """This tests the feed-details view, ensuring that templates are loaded correctly.
        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/feeds')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'feed_details.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('google_calendar_id' in test_response.context)

    def test_lab_rules_view(self):
        '''This tests the lab-rules view.
        The tests ensure that the correct template is used.
        It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so does not test the permission levels for this view.'''
        test_response = self.client.get('/lab-rules')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'lab_rules.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('lab_rules' in test_response.context)
        self.assertTrue('lab_rules_source' in test_response.context)

    def test_data_sharing_view(self):
        '''This tests the data-resource-sharing view.
        The tests ensure that the correct template is used.
        It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so does not test the permission levels for this view.

        BUGFIX: this method was previously also named test_lab_rules_view,
        which silently overrode the real lab-rules test above so it never ran.
        '''
        test_response = self.client.get('/data-resource-sharing')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'data_sharing_policy.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('data_sharing_policy' in test_response.context)
        self.assertTrue('data_sharing_policy_source' in test_response.context)

    def test_twitter_view(self):
        '''This tests the twitter view.
        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/twitter')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'twitter_timeline.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('timeline' in test_response.context)

    def test_calendar_view(self):
        '''This tests the google-calendar view.
        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/calendar')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'calendar.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('google_calendar_id' in test_response.context)

    #
    # def test_wikipedia_view(self):
    #     '''This tests the wikipedia view.
    #
    #     Currently it just ensures that the template is loading correctly.
    #     '''
    #     test_response = self.client.get('/wikipedia')
    #     self.assertEqual(test_response.status_code, 200)
    #     self.assertTemplateUsed(test_response, 'wikipedia_edits.html')
    #     self.assertTemplateUsed(test_response, 'base.html')
    #     self.assertTemplateUsed(test_response, 'jquery_script.html')
    #     self.assertTrue('pages' in test_response.context)

    def test_news_view(self):
        '''This tests the lab-news view.
        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/news')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'lab_news.html')
        self.assertTemplateUsed(test_response, 'base.html')
        #self.assertTrue('statuses' in test_response.context)
        self.assertTrue('links' in test_response.context)
        #self.assertTrue('milestones' in test_response.context)

    def test_contact_page(self):
        '''This tests the contact-page view.
        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/contact/')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'contact.html')
        self.assertTemplateUsed(test_response, 'base.html')

    def test_location_page(self):
        '''This tests the location view.
        Currently it ensures that the template is loading, and that the lablocation_list context is passed.
        '''
        test_response = self.client.get('/location')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'location.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('lablocation_list' in test_response.context)
class PostModelTests(BasicTests):
'''This class tests various aspects of the :class:`~papers.models.Post` model.'''
fixtures = ['test_publication','test_publication_personnel', 'test_project', 'test_personnel']
def test_create_new_post_minimum(self):
'''This test creates a :class:`~papers.models.Post` with the required information only.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.pk, 1) <|fim▁hole|> test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md',
paper = Publication.objects.get(pk=1),
project = Project.objects.get(pk=1))
test_post.save()
self.assertEqual(test_post.pk, 1)
    def test_post_unicode(self):
        '''This test creates a :class:`~papers.models.Post` and then verifies the unicode representation is correct.'''
        test_post = Post(post_title="Test Post",
            author = Person.objects.get(pk=1),
            markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
        test_post.save()
        # The unicode form of a post is simply its title.
        self.assertEqual(test_post.__unicode__(), "Test Post")
    def test_post_slugify(self):
        '''This test creates a :class:`~papers.models.Post` and then verifies the slug is derived from the title.'''
        test_post = Post(post_title="Test Post",
            author = Person.objects.get(pk=1),
            markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
        test_post.save()
        # Saving should populate post_slug from post_title.
        self.assertEqual(test_post.post_slug, "test-post")
class PostViewTests(BasicTests):
    '''These test the views associated with post objects.'''

    fixtures = ['test_post','test_publication','test_publication_personnel', 'test_project', 'test_personnel']

    def test_post_details_view(self):
        """This tests the post-details view, ensuring that templates are loaded correctly.
        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/posts/fixture-post')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'post_detail.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTemplateUsed(test_response, 'disqus_snippet.html')
        self.assertTemplateUsed(test_response, 'analytics_tracking.html')
        self.assertTrue('post' in test_response.context)
        # An unknown slug should 404.
        test_response = self.client.get('/posts/not-a-fixture-post')
        self.assertEqual(test_response.status_code, 404)

    def test_post_list(self):
        """This tests the post-list view, ensuring that templates are loaded correctly.
        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/posts/')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'post_list.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTemplateUsed(test_response, 'analytics_tracking.html')
        self.assertTrue('post_list' in test_response.context)

    def test_post_new(self):
        """This tests the post-new view, ensuring that templates are loaded correctly.
        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/posts/new')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'post_form.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTemplateUsed(test_response, 'analytics_tracking.html')

    def test_post_edit(self):
        """This tests the post-edit view, ensuring that templates are loaded correctly.
        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/posts/fixture-post/edit')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'post_form.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTemplateUsed(test_response, 'analytics_tracking.html')
        # Editing an unknown post should 404.
        test_response = self.client.get('/posts/not-a-fixture-post/edit')
        self.assertEqual(test_response.status_code, 404)

    def test_post_delete(self):
        """This tests the post-delete view, ensuring that templates are loaded correctly.
        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/posts/fixture-post/delete')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'confirm_delete.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTemplateUsed(test_response, 'analytics_tracking.html')
        # Deleting an unknown post should 404.
        test_response = self.client.get('/posts/not-a-fixture-post/delete')
        self.assertEqual(test_response.status_code, 404)
def test_create_new_post_all(self):
'''This test creates a :class:`~papers.models.Post` with all fields entered.'''
|
<|file_name|>MultiPointDomainType.java<|end_file_name|><|fim▁begin|>/**
*/
package net.opengis.gml311;
/**
* <!-- begin-user-doc -->
* A representation of the model object '<em><b>Multi Point Domain Type</b></em>'.
* <!-- end-user-doc -->
*
* <p>
* The following features are supported:
* </p>
* <ul>
* <li>{@link net.opengis.gml311.MultiPointDomainType#getMultiPoint <em>Multi Point</em>}</li>
* </ul>
*
* @see net.opengis.gml311.Gml311Package#getMultiPointDomainType()
* @model extendedMetaData="name='MultiPointDomainType' kind='elementOnly'"
* @generated
*/
public interface MultiPointDomainType extends DomainSetType {
    /**
     * Returns the value of the '<em><b>Multi Point</b></em>' containment reference.
     * <!-- begin-user-doc -->
     * <p>
     * If the meaning of the '<em>Multi Point</em>' containment reference isn't clear,
     * there really should be more of a description here...
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Multi Point</em>' containment reference.
     * @see #setMultiPoint(MultiPointType)
     * @see net.opengis.gml311.Gml311Package#getMultiPointDomainType_MultiPoint()
     * @model containment="true"
     *        extendedMetaData="kind='element' name='MultiPoint' namespace='##targetNamespace'"
     * @generated
     */
    MultiPointType getMultiPoint();

    /**
     * Sets the value of the '{@link net.opengis.gml311.MultiPointDomainType#getMultiPoint <em>Multi Point</em>}' containment reference.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the new value of the '<em>Multi Point</em>' containment reference.
     * @see #getMultiPoint()
     * @generated
     */
    void setMultiPoint(MultiPointType value);

} // MultiPointDomainType
* <!-- begin-user-doc --> |
<|file_name|>test_import_resolver.py<|end_file_name|><|fim▁begin|># coding=utf-8
from __future__ import print_function, unicode_literals
__author__ = "Sally Wilsak"
import codecs
import os
import sys
import textwrap
import unittest
import import_resolver
# This isn't strictly correct; it will only work properly if your terminal is set to UTF-8.
# However, Linux is usually set to UTF-8 and Windows' English code page 437 is at least ASCII-compatible this will work well enough for our purposes
if sys.stdout.encoding != 'utf8':
sys.stdout = codecs.getwriter('utf8')(sys.stdout, 'strict')
if sys.stderr.encoding != 'utf8':
sys.stderr = codecs.getwriter('utf8')(sys.stderr, 'strict')
def simple_normpath(path):
    """Test stand-in for os.path.normpath.

    On Windows, normpath substitutes backslashes into the file path, which
    breaks the string-matching assertions below. The test cases only use
    simple file paths, so stripping './' segments is a good-enough substitute.
    """
    return path.replace("./", "")
def simple_join(path, *args):
    """Test stand-in for os.path.join that always joins with forward slashes.

    Keeps joined paths identical across Windows and Linux; acceptable here
    because the test cases only use simple paths.
    """
    return "/".join((path,) + args)
class TestImportResolver(unittest.TestCase):
    def setUp(self):
        # Monkey-patch some path manipulations so we can string match with Unix-style paths and Windows won't mess them up
        # (the patches replace os.path functions as seen by import_resolver).
        import_resolver.os.path.normpath = simple_normpath
        import_resolver.os.path.join = simple_join
    def test_line_extraction(self):
        # Empty and non-TypeScript input yields no imports.
        self.assertEqual(import_resolver.extract_import_files(""), [])
        self.assertEqual(import_resolver.extract_import_files("This isn't TypeScript.\nBut it does have multiple lines."), [])
        self.assertEqual(import_resolver.extract_import_files("import thing = require('./thing.ts');"), ["./thing.ts"])
        import_statements = textwrap.dedent("""
            // Comments should get ignored, of course
            import first = require('./lib/first.ts');
            // Different amounts of whitespace should be ok
            import second=require('./second.ts') ; // so should other stuff at the end
            // Double quotes are also ok
            import _THIRD = require("./third.ts")
            // So is something that's not a ts file, but it gets .ts added
            import fourth = require("../fourth/file/path")
            // A Windows-style path doesn't match...
            import fifth = require("C:\\fifth.ts")
            // ...neither does an absolute Unix-style path...
            import sixth = require("/home/user6/sixth.ts")
            // ...but this mixed-up one does
            import seventh = require('./folder\\folder\\seventh.ts')
            // Capitalizing the keywords means it doesn't match
            Import eighth = Require('./eighth.ts')
            // Something that's not a file path doesn't match
            import ninth = require('ninth')
            // If it's not at the start of the line, it doesn't match
            some stuff import tenth = require('./tenth.ts')
            // And for good measure, a non-ASCII file path should work
            import eleventh = require('./одиннадцать.ts')
            """)
        expected_filenames = [
            "./lib/first.ts",
            "./second.ts",
            "./third.ts",
            "../fourth/file/path.ts",
            "./folder\\folder\\seventh.ts",
            "./одиннадцать.ts",
        ]
        self.assertEqual(import_resolver.extract_import_files(import_statements), expected_filenames)
    def test_format(self):
        # Spaces inside importer paths get escaped with a backslash.
        files = ["/badger/badger", "C:\\badger.ts", "/bad ger/snake.ts"]
        self.assertEqual(import_resolver.format_line("/file/name.ts", files), "/file/name.ts <- /badger/badger C:\\badger.ts /bad\\ ger/snake.ts")
    def test_circular_deps(self):
        # a -> b -> d -> a forms a cycle; c is a leaf. Resolution should
        # still terminate and list each file's importers exactly once.
        circular_deps = {
            "/home/badger/a.ts": "import b = require('./b.ts');\nimport c = require('./c.ts');",
            "/home/badger/b.ts": "import d = require('./d.ts');",
            "/home/badger/c.ts": "",
            "/home/badger/d.ts": "import a = require('./a.ts');",
        }
        import_resolver.read_file = lambda x: circular_deps[x]
        expected_string = "\n".join([
            "/home/badger/c.ts <- /home/badger/a.ts",
            "/home/badger/d.ts <- /home/badger/b.ts",
            "/home/badger/a.ts <- /home/badger/d.ts",
            "/home/badger/b.ts <- /home/badger/a.ts",
        ])
        self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)
def test_triangle_deps(self):
triangle_deps = {
"/home/badger/a.ts": "import b = require('./b.ts');\nimport c = require('./c.ts');",
"/home/badger/b.ts": "import c = require('./c.ts');",
"/home/badger/c.ts": "",
}
import_resolver.read_file = lambda x: triangle_deps[x]
expected_string = "\n".join([
"/home/badger/c.ts <- /home/badger/a.ts /home/badger/b.ts",
"/home/badger/a.ts <- ",
<|fim▁hole|> def test_inaccessible_deps(self):
def inaccessible_deps(filename):
if "a.ts" in filename:
return "import b = require('./b.ts');"
elif "b.ts" in filename:
return "import c = require('./c.ts');"
raise IOError
import_resolver.read_file = inaccessible_deps
expected_string = "\n".join([
"/home/badger/c.ts <- /home/badger/b.ts",
"/home/badger/a.ts <- ",
"/home/badger/b.ts <- /home/badger/a.ts",
"Cannot read file '/home/badger/c.ts'",
])
self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)
def test_lists(self):
lists_deps = {
"/home/badger/a.ts": "import b = require('./b.ts');\nimport c = require('./c.ts');\nimport d = require('./d.ts');",
"/home/badger/b.ts": "import c = require('./c.ts');\nimport d = require('./d.ts');",
"/home/badger/c.ts": "import d = require('./d.ts');",
"/home/badger/d.ts": "",
}
import_resolver.read_file = lambda x: lists_deps[x]
expected_string = "\n".join([
"/home/badger/c.ts <- /home/badger/a.ts /home/badger/b.ts",
"/home/badger/d.ts <- /home/badger/a.ts /home/badger/b.ts /home/badger/c.ts",
"/home/badger/a.ts <- ",
"/home/badger/b.ts <- /home/badger/a.ts",
])
self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)<|fim▁end|> | "/home/badger/b.ts <- /home/badger/a.ts",
])
self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)
|
<|file_name|>amo.js<|end_file_name|><|fim▁begin|>module.exports = { domain:"messages",
locale_data:{ messages:{ "":{ domain:"messages",
plural_forms:"nplurals=2; plural=(n!=1);",
lang:"mk" },
"%(addonName)s %(startSpan)sby %(authorList)s%(endSpan)s":[ "" ],
"Extension Metadata":[ "" ],
Screenshots:[ "" ],
"About this extension":[ "" ],
"Rate your experience":[ "" ],
Category:[ "" ],
"Used by":[ "" ],
Sentiment:[ "" ],
Back:[ "" ],
Submit:[ "" ],
"Please enter some text":[ "" ],
"Write a review":[ "" ],
"Tell the world why you think this extension is fantastic!":[ "" ],
"Privacy policy":[ "" ],
"Legal notices":[ "" ],
"View desktop site":[ "" ],
"Browse in your language":[ "" ],
"Firefox Add-ons":[ "" ],
"How are you enjoying your experience with %(addonName)s?":[ "" ],
"screenshot %(imageNumber)s of %(totalImages)s":[ "" ],
"Average rating: %(rating)s out of 5":[ "" ],
"No ratings":[ "" ],
"%(users)s user":[ "",
"%(users)s users" ],
"Log out":[ "" ],
"Log in/Sign up":[ "" ],
"Add-ons for Firefox":[ "" ],
"What do you want Firefox to do?":[ "" ],
"Block ads":[ "" ],
Screenshot:[ "" ],
"Save stuff":[ "" ],
"Shop online":[ "" ],
"Be social":[ "" ],
"Share stuff":[ "" ],
"Browse all extensions":[ "" ],
"How do you want Firefox to look?":[ "" ],
Wild:[ "" ],
Abstract:[ "" ],
Fashionable:[ "" ],
Scenic:[ "" ],
Sporty:[ "" ],
Mystical:[ "" ],
"Browse all themes":[ "" ],
"Downloading %(name)s.":[ "" ],
"Installing %(name)s.":[ "" ],
"%(name)s is installed and enabled. Click to uninstall.":[ "" ],
"%(name)s is disabled. Click to enable.":[ "" ],
"Uninstalling %(name)s.":[ "" ],
"%(name)s is uninstalled. Click to install.":[ "" ],
"Install state for %(name)s is unknown.":[ "" ],
Previous:[ "" ],
Next:[ "" ],
"Page %(currentPage)s of %(totalPages)s":[ "" ],
"Your search for \"%(query)s\" returned %(count)s result.":[ "",
"Your search for \"%(query)s\" returned %(count)s results." ],
"Searching...":[ "" ],
"No results were found for \"%(query)s\".":[ "" ],
"Please supply a valid search":[ "" ] } },
_momentDefineLocale:function anonymous() {
//! moment.js locale configuration
//! locale : Macedonian [mk]
//! author : Borislav Mickov : https://github.com/B0k0
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
var mk = moment.defineLocale('mk', {
months : 'јануари_февруари_март_април_мај_јуни_јули_август_септември_октомври_ноември_декември'.split('_'),
monthsShort : 'јан_фев_мар_апр_мај_јун_јул_авг_сеп_окт_ное_дек'.split('_'),
weekdays : 'недела_понеделник_вторник_среда_четврток_петок_сабота'.split('_'),
weekdaysShort : 'нед_пон_вто_сре_чет_пет_саб'.split('_'),
weekdaysMin : 'нe_пo_вт_ср_че_пе_сa'.split('_'),
longDateFormat : {
LT : 'H:mm',
LTS : 'H:mm:ss',
L : 'D.MM.YYYY',
LL : 'D MMMM YYYY',
LLL : 'D MMMM YYYY H:mm',
LLLL : 'dddd, D MMMM YYYY H:mm'
},
calendar : {
sameDay : '[Денес во] LT',
nextDay : '[Утре во] LT',
nextWeek : '[Во] dddd [во] LT',
lastDay : '[Вчера во] LT',
lastWeek : function () {
switch (this.day()) {
case 0:
case 3:
case 6:
return '[Изминатата] dddd [во] LT';
case 1:
case 2:
case 4:<|fim▁hole|> },
sameElse : 'L'
},
relativeTime : {
future : 'после %s',
past : 'пред %s',
s : 'неколку секунди',
m : 'минута',
mm : '%d минути',
h : 'час',
hh : '%d часа',
d : 'ден',
dd : '%d дена',
M : 'месец',
MM : '%d месеци',
y : 'година',
yy : '%d години'
},
ordinalParse: /\d{1,2}-(ев|ен|ти|ви|ри|ми)/,
ordinal : function (number) {
var lastDigit = number % 10,
last2Digits = number % 100;
if (number === 0) {
return number + '-ев';
} else if (last2Digits === 0) {
return number + '-ен';
} else if (last2Digits > 10 && last2Digits < 20) {
return number + '-ти';
} else if (lastDigit === 1) {
return number + '-ви';
} else if (lastDigit === 2) {
return number + '-ри';
} else if (lastDigit === 7 || lastDigit === 8) {
return number + '-ми';
} else {
return number + '-ти';
}
},
week : {
dow : 1, // Monday is the first day of the week.
doy : 7 // The week that contains Jan 1st is the first week of the year.
}
});
return mk;
})));
} }<|fim▁end|> | case 5:
return '[Изминатиот] dddd [во] LT';
} |
<|file_name|>hao.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from argparse import ArgumentParser
from threading import Thread
import re
class HackAlunoOnline:
    def __init__( self , matricula , full_search = False ):
        """Scrape Aluno Online (UERJ academic system) data for one student.

        By default fetches the summary data: matricula/name/course/status/
        semester count/CRA.  With full_search=True it additionally fetches
        contact data, personal data and the list of completed courses.
        Failed requests/parses degrade to '' attributes, not exceptions.
        """
        # Default display: matricula/nome/curso/situacao/periodo/CRA
        # full_search pulls the remaining personal information as well
        # Base URL of the student portal
        self.aluno_online_url = 'https://www.alunoonline.uerj.br'
        # parameters
        self.matricula = matricula
        self.full_search = full_search
        # Summary page ("SinteseFormacao") — source of the main data below
        self.main_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'SinteseFormacao' } )
        # Main data scraped from the summary page
        self.nome = self._extract_nome()
        self.cra = self._extract_cra()
        self.curso = self._extract_curso()
        self.situacao = self._extract_situacao()
        self.periodo = self._extract_periodo()
        # Fetch and extract personal data only when requested (extra requests)
        if ( self.full_search ):
            # contact data page
            self.dados_contato_html = self._get_aluno_online_html( '/recadastramento_dados_contato/recadastramento_dados_contato.php' )
            self.telefone = self._extract_telefone()
            self.email = self._extract_email()
            self.endereco = self._extract_endereco()
            self.cep = self._extract_cep()
            # personal data page
            self.dados_pessoais_html = self._get_aluno_online_html( '/recadastramento_dados_pessoais/recadastramento_dados_pessoais.php' )
            self.nascimento = self._extract_nascimento()
            self.sexo = self._extract_sexo()
            self.estado_civil = self._extract_estado_civil()
            self.naturalidade = self._extract_naturalidade()
            self.nacionalidade = self._extract_nacionalidade()
            self.pai = self._extract_pai()
            self.mae = self._extract_mae()
            self.cpf = self._extract_cpf()
            self.rg = self._extract_rg() # Number, Issuer, State, Country, Issue Date, Expiry Date
            self.titulo_eleitor = self._extract_titulo_eleitor() # Number, Zone, Section, State, Issue Date
            self.certificado_reservista = self._extract_certificado_reservista() # Number, Serial No., Issuer, Type, Issue Date, State
            self.ensino_medio = self._extract_ensino_medio() # School Name, Country, State, Education Type, Completion Date
            # completed courses page
            self.disciplinas_realizadas_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'DisciplinasRealizadas' } )
            self.disciplinas = self._extract_disciplinas()
def _get_aluno_online_html( self , endpoint , parameters = {} ):
result = None
try:
parameters.update( { 'matricula': self.matricula } )
data = urlencode( parameters )
request = Request( self.aluno_online_url + endpoint , data.encode( 'ascii' ) )
response = urlopen( request )
result = BeautifulSoup( response.read() , 'html.parser' )
except:
pass
return result
def _extract_nome( self ):
try:
nome = self.main_html.find( id = "table_cabecalho_rodape" ).find_all( 'font' )[2].string[15:]
except:
nome = ''
return nome
def _extract_cra( self ):
try:
cra = float( self.main_html.find_all( 'div' )[7].text[16:].replace( ',' , '.' ) )
except:
cra = ''
return cra
def _extract_curso( self ):
try:
curso = self.main_html.find_all( 'div' )[6].text[8:]
except:
curso = ''
return curso
def _extract_situacao( self ):
try:
situacao = self.main_html.find_all( 'div' )[4].text[11:]
except:
situacao = ''
return situacao
def _extract_periodo( self ):
try:
for element in self.main_html.select( 'div > b' ):
if ( element.text == "Períodos Utilizados/Em Uso para Integralização Curricular:" ):
periodo = int( element.parent.text[59:] )
except:
periodo = ''
return periodo
def _format_telefone( self , ddd , tel , ramal ):
return '({0}) {1} [{2}]'.format( ddd , tel[:4] + '-' + tel[4:] , ( 'Sem Ramal' if not ramal else ( 'Ramal ' + ramal ) ) )
def _extract_telefone( self ):
telefone = []
# Tel 1..2
for i in range( 1 , 3 ):
try:
ddd = self.dados_contato_html.find( 'input' , { 'name': 'num_ddd_' + str( i ) + '_pag' } ).get( 'value' )
tel = self.dados_contato_html.find( 'input' , { 'name': 'num_tel_' + str( i ) + '_pag' } ).get( 'value' )
ramal = self.dados_contato_html.find( 'input' , { 'name': 'num_ramal_' + str( i ) + '_pag' } ).get( 'value' )
telefone.append( self._format_telefone( ddd , tel , ramal ) )
except:
pass
return telefone
def _extract_email( self ):
try:
email = self.dados_contato_html.find( 'input' , { 'name': 'dsc_email_pag' } ).get( 'value' )
except:
email = ''
return email
def _extract_endereco( self ):
try:
endereco = self.dados_contato_html.find( 'input' , { 'name': 'txt_end_pag' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.find( 'input' , { 'name': 'cod_bairro_input' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_munic_pag"] option[selected]' )[0].text
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_uf_pag"] option[selected]' )[0].text
except:
endereco = ''
return endereco
def _extract_cep( self ):
try:
cep = self.dados_contato_html.find( 'input' , { 'name': 'num_cep_pag' } ).get( 'value' )
cep = cep[:5] + '-' + cep[5:]
except:
cep = ''
return cep
def _extract_nascimento( self ):
try:
nascimento = self.dados_pessoais_html.find_all( 'div' )[2].text[15:]
except:
nascimento = ''
return nascimento
def _extract_sexo( self ):
try:
sexo = self.dados_pessoais_html.find_all( 'div' )[3].text[6:]
except:
sexo = ''
return sexo
def _extract_estado_civil( self ):
try:
civil = self.dados_pessoais_html.find_all( 'div' )[4].text[12:]
except:
civil = ''
return civil
def _extract_naturalidade( self ):
try:
naturalidade = self.dados_pessoais_html.find_all( 'div' )[5].text[14:]
except:
naturalidade = ''
return naturalidade
def _extract_nacionalidade( self ):
try:
nacionalidade = self.dados_pessoais_html.find_all( 'div' )[6].text[15:]
except:
nacionalidade = ''
return nacionalidade
def _extract_pai( self ):
try:
pai = self.dados_pessoais_html.find_all( 'div' )[7].text[13:]
except:
pai = ''
return pai
def _extract_mae( self ):
try:
mae = self.dados_pessoais_html.find_all( 'div' )[8].text[13:]
except:
mae = ''
return mae
def _extract_cpf( self ):
try:
cpf = self.dados_pessoais_html.find_all( 'font' )[10].text
cpf = cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
except:
cpf = ''
return cpf
def _extract_dados_pessoais_divs( self , start , end , cut ):
arrayReturn = []
try:
array = self.dados_pessoais_html.find_all( 'div' )[start:end]
arrayReturn.append( array[0].text[cut:] )
for data in array[1:]:
text = data.text.strip()
if ( ( not 'Não Informado' in text ) and ( not '__/__/____' in text ) ):
arrayReturn.append( text )
except:
arrayReturn = ''
return arrayReturn
def _extract_rg( self ):
return self._extract_dados_pessoais_divs( 9 , 14 , 8 )<|fim▁hole|> def _extract_titulo_eleitor( self ):
return self._extract_dados_pessoais_divs( 15 , 19 , 8 )
    def _extract_certificado_reservista( self ):
        """Military-service certificate fields (divs 20-24): number, serial no.,
        issuer, type, issue date, state -- or '' on failure."""
        return self._extract_dados_pessoais_divs( 20 , 25 , 8 )
    def _extract_ensino_medio( self ):
        """High-school record fields (divs 26-30): school name, country, state,
        education type, completion date -- or '' on failure."""
        return self._extract_dados_pessoais_divs( 26 , 31 , 25 )
def _extract_disciplinas( self ):
disciplinas = []
try:
for linha in self.disciplinas_realizadas_html.find_all( 'div' , style = re.compile( '^width:100%;font-size=12px;' ) ):
conteudoLinha = []
for coluna in linha.children:
conteudoColuna = coluna.string.strip()
if ( conteudoColuna and not re.match( '\\d{4}/\\d' , conteudoColuna ) ):
conteudoLinha.append( conteudoColuna )
disciplinas.append( ( '{0:60} {1:2} {2:3} {3:15} {4:10}' + ( ' {5:6} {6:15}' if ( len( conteudoLinha ) > 5 ) else '' ) ).format( *conteudoLinha ) )
except:
disciplinas = ''
return disciplinas
def _truncate( self , string , width ):
if ( len( string ) > width ):
string = string[:( width - 3 )] + '...'
return string
    def __str__( self ):
        """Render the record: a multi-line report in full_search mode,
        otherwise a single tab-separated summary row."""
        if self.full_search:
            # Full report: summary, contact, personal data and course list.
            pattern = "\n{0:12} - {1:50}\n\nMatricula: {0}\nNome: {1}\nCurso: {2}\nSituacao: {3}\nPeriodo: {4}\nCRA: {5}\n"
            pattern += "\n-Contato-\n\nTelefone: {6}\nE-mail: {7}\nEndereço: {8}\nCEP: {9}\n"
            pattern += "\n-Informações Pessoais-\n\nData de Nascimento: {10}\nSexo: {11}\nEstado Civil: {12}\nNaturalidade: {13}\nNacionalidade: {14}\nNome do Pai: {15}\nNome da Mãe: {16}\nCPF: {17}\nRG: {18}\nTítulo de Eleitor: {19}\nCertificado de Reservista: {20}\nEnsino Médio: {21}\n"
            pattern += "\n-Disciplinas Realizadas-\n\n{22}\n\n"
            # Order must match the positional indices in `pattern` above.
            parameters = [ self.matricula , self.nome , self.curso , self.situacao , self.periodo , self.cra , ', '.join( self.telefone ) , self.email , self.endereco , self.cep , self.nascimento , self.sexo , self.estado_civil , self.naturalidade , self.nacionalidade , self.pai , self.mae , self.cpf , ', '.join( self.rg ) , ', '.join( self.titulo_eleitor ) , ', '.join( self.certificado_reservista ) , ', '.join( self.ensino_medio ) , '\n'.join( self.disciplinas ) ]
        else:
            # One summary row; long fields are truncated to keep columns aligned.
            pattern = "{0:12}\t{1:30}\t{2:20}\t{3:10}\t{4:3}\t{5:4}"
            parameters = [ self.matricula , self._truncate( self.nome , 30 ) , self._truncate( self.curso , 20 ) , self._truncate( self.situacao , 10 ) , self.periodo , self.cra ]
        return pattern.format( *parameters )
# End class
def get_registry_by_name(name, searchfile):
    """Return the matriculas whose owner's name contains `name` (case-insensitive).

    `searchfile` holds one 'matricula:nome' entry per line.  Splitting on the
    first ':' only (the old split(':') raised ValueError whenever a name
    contained a colon); lines without ':' simply never match.
    """
    needle = name.lower()  # normalize here so all callers get case-insensitive search
    matriculas = []
    with open(searchfile, 'r') as arquivo:
        for line in arquivo:
            matricula, _, nome = line.partition(':')
            if needle in nome.lower():
                matriculas.append(matricula)
    return matriculas
def get_data(matricula, full_search):
    """Thread worker: scrape one student's record and print it."""
    print(HackAlunoOnline(matricula, full_search))
# Programa para recuperar os dados de um aluno da UERJ atraves de sua matricula
def Main():
    """CLI entry point: resolve the target matriculas from the arguments,
    then scrape each one in its own thread (threads are not joined; the
    process exits when all non-daemon threads finish)."""
    parser = ArgumentParser(description="Recupera informacoes de alunos da UERJ atraves de falhas do sistema academico Aluno Online")
    parser.add_argument('matricula', help="Matricula do aluno")
    parser.add_argument('-i', '--inputfile', help="Utilizar um arquivo contendo uma lista de matriculas com uma matricula por linha como entrada", action="store_true")
    parser.add_argument('-r', '--reverse', help="Procura reversa -> busca matricula por nome (para alunos do IPRJ)", action="store_true")
    parser.add_argument('-f', '--fullsearch', help="Busca completa por informações pessoais", action="store_true")
    parser.add_argument('-s', '--searchfile', help="Nome do arquivo contendo matricula:nome que deverá ser usado na busca reversa", default="matricula-nome.txt")
    args = parser.parse_args()
    matriculas = []
    if (args.reverse and args.inputfile):
        # File of names: reverse-search each line.  Lowercase the needle so
        # this path matches the case handling of the single-name path below.
        with open(args.matricula, 'r') as arquivoNomes:
            for nome in arquivoNomes:
                matriculas.extend(get_registry_by_name(nome.strip('\n').lower(), args.searchfile))
    elif args.reverse:
        matriculas = get_registry_by_name(args.matricula.lower(), args.searchfile)
    elif args.inputfile:
        # Context manager closes the handle (the old code leaked it).
        with open(args.matricula, 'r') as arquivo:
            matriculas = arquivo.readlines()
    else:
        matriculas.append(args.matricula)
    if (not matriculas):
        print("Nao foram encontrados dados para esta matricula")
    else:
        if (not args.fullsearch):
            # Header row for the tab-separated summary output.
            print("{0:12}\t{1:30}\t{2:20}\t{3:10}\t{4:2}\t{5:4}".format("Matricula", "Nome", "Curso", "Situacao", "Periodo", "CRA"))
        for matricula in matriculas:
            thread = Thread(target=get_data, args=(matricula.strip('\n'), args.fullsearch))
            thread.start()
# End Main
if __name__ == '__main__':
Main()<|fim▁end|> | |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"bufio"
"fmt"
"os"
"strings"
)
const version = "0.5-beta"
const minNumWords = 10
const minNumHashtag = 2
const minNumUserInteractions = 2
const iterationsCount = 3
func main() {
eye := `
___________<|fim▁hole|> ,:d8888P^//\-\/_\ /_\/^q888/b.
,;d88888/~-/ .-~ _~-. |/-q88888b,
//8888887-\ _/ (#) \\-\/Y88888b\
\8888888|// T Y _/|888888 o
\q88888|- \l !\_/|88888p/
q8888l\-//\ / /\|!8888P
q888\/-| -,___.-^\/-\/888P
=88\./-/|/ |-/!\/-!/88=
^^ ------------- ^
_____ _____ ____ _____
/\ | __ \ / ____|/ __ \ / ____|
/ \ | |__) | | __| | | | (___
/ /\ \ | _ /| | |_ | | | |\___ \
/ ____ \| | \ \| |__| | |__| |____) |
/_/ \_\_| \_\\_____|\____/|_____/
Open source twitter entropic toolkit
`
c.Cyan(eye)
c.DarkGray("--Be half bot and half human, a new generation of cyborgs--")
fmt.Println("---------------")
fmt.Print("source code: ")
c.Purple("https://github.com/arnaucode/argos")
fmt.Print("project page: ")
c.Purple("http://arnaucode/argos")
fmt.Print("version ")
c.Purple(version)
fmt.Println("---------------")
client := readConfigTokensAndConnect()
fmt.Println("Getting user data...")
user := getUserData(client)
printUserData(user)
if user.ScreenName == "" {
c.Red("Can not connect to Twitter API, maybe the file twitterConfig.json is wrong")
os.Exit(3)
}
fmt.Println("")
newcommand := bufio.NewReader(os.Stdin)
fmt.Print("Please select command number")
options := `
1 - Analyze username
2 - Unfollow all
3 - Follow random
4 - Delete Tweets
5 - Delete Favs (Likes)
6 - Tweet Random
7 - Analyze tweet
8 - Analyze User Followers
0 - Exit script
option to select: `
for {
fmt.Print(options)
option, _ := newcommand.ReadString('\n')
option = strings.TrimSpace(option)
switch option {
case "1":
fmt.Println("selected 1 - Analyze username")
username := askForUsername()
optionAnalyzeUserTweets(client, username)
fmt.Println("")
c.Purple("Note: the current hours displaying, are the Twitter servers hours (Coordinated Universal Time (UTC) +0000 UTC)")
break
case "2":
fmt.Println("selected 2 - Unfollow all")
optionUnfollowAll(client)
break
case "3":
fmt.Println("selected 3 - Follow random")
optionFollowRandom(client)
break
case "4":
fmt.Println("selected 4 - Delete Tweets")
optionDeleteTweets(client)
break
case "5":
fmt.Println("selected 5 - Delete Favs (Likes)")
optionDeleteFavs(client)
break
case "6":
fmt.Println("selected 6 - Tweet random")
optionTweetRandom(client)
break
case "7":
fmt.Println("selected 7 - Analyze Tweet")
optionAnalyzeTweet(client)
break
case "8":
fmt.Println("selected 8 - Analyze User Followers")
username := askForUsername()
optionAnalyzeUserFollowers(client, username)
break
case "0":
fmt.Println("selected 0 - exit script")
os.Exit(3)
break
default:
fmt.Println("Invalid option")
break
}
}
}<|fim▁end|> | .-=d88888888888b=-.
.:d8888pr |\|/-\| rq8888b. |
<|file_name|>watch.py<|end_file_name|><|fim▁begin|>import os
import subprocess
from pathlib import Path
from time import sleep
PACKAGES = Path('packages')
class Module:
    """Watches a package's source files and rebuilds it (``npm run build``)
    when any watched file, or any dependency module, changes."""

    def __init__(self, name, path=None, files=None, dependencies=None):
        self.name = name
        if path is None:
            path = PACKAGES / name  # default location under packages/
        self.path = path
        # Entries ending in '/' are walked recursively; others are single files.
        self.files = files or ["src/", "style/"]
        self.dependencies = dependencies or []
        self.old_sum = 0  # sum of mtimes from the previous scan
        #self.check_dir()

    def check_dir(self):
        """Return True when any watched file's mtime changed since the last scan."""
        time_list = []
        for file in self.files:
            file_path = self.path / Path(file)
            if not file.endswith("/"):
                file_list = [file_path]
            else:
                file_list = []
                for root, _, names in os.walk(file_path):
                    root = Path(root)
                    # extend, don't overwrite: the old code kept only the files
                    # of the last directory yielded by os.walk, so changes in
                    # other subdirectories went unnoticed.
                    file_list.extend(root / f for f in names)
            time_list += [os.stat(f).st_mtime for f in file_list]
        new_sum = sum(time_list)
        changed = new_sum != self.old_sum
        self.old_sum = new_sum
        return changed

    def run(self):
        """Run ``npm run build`` in the package directory; raise on failure."""
        print("Building", self.name)
        process = subprocess.Popen(
            "npm run build",
            shell=True,
            cwd=self.path,
        )
        status = process.wait()
        if status:
            raise Exception("NPM run failed")

    def check(self, run=True, visited=None):
        """Return True when the module and its dependencies are unchanged.

        When ``run`` is True, changed modules are rebuilt.  ``visited`` memoizes
        results within one traversal (it was a mutable default argument before,
        so state leaked between top-level calls that omitted it).
        """
        if visited is None:
            visited = {}
        if self in visited:
            return visited[self]
        visited[self] = True
        invalid = False
        for dependency in self.dependencies:
            if not dependency.check(run, visited):
                invalid = True
        invalid |= self.check_dir()
        if run and invalid:
            visited[self] = False
            self.run()
        return not invalid

    def __hash__(self):
        return hash(self.path)

    def __repr__(self):
        return "Module({})".format(self.name)
class NoFileModule(Module):
def check_dir(self):
return False
def run(self):
pass<|fim▁hole|>trial = Module("trial", dependencies=[utils])
nowvis = Module("nowvis", dependencies=[history, trial])
nbextension = Module("nbextension", dependencies=[history, trial])
ALL = NoFileModule("ALL", dependencies=[nowvis, nbextension])
print("Monitoring packages...")
while True:
visited = {}
try:
ALL.check(visited=visited)
except Exception as e:
print("Failed: {}".format(e))
sleep(1.0)<|fim▁end|> |
utils = Module("utils")
history = Module("history", dependencies=[utils]) |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from frappe import _
# --- App registry metadata (read by the Frappe framework) --------------------
app_name = "erpnext"
app_title = "ERPNext"
app_publisher = "Frappe Technologies Pvt. Ltd."
app_description = """ERP made simple"""
app_icon = "fa fa-th"
app_color = "#e74c3c"
app_email = "[email protected]"
app_license = "GNU General Public License (v3)"
source_link = "https://github.com/frappe/erpnext"
app_logo_url = "/assets/erpnext/images/erpnext-logo.svg"
develop_version = '13.x.x-develop'
# Bundled asset URLs injected into desk (app) pages...
app_include_js = "/assets/js/erpnext.min.js"
app_include_css = "/assets/css/erpnext.css"
# ...and into public website pages.
web_include_js = "/assets/js/erpnext-web.min.js"
web_include_css = "/assets/css/erpnext-web.css"
# Per-doctype client scripts loaded alongside the core form for that doctype.
doctype_js = {
	"Address": "public/js/address.js",
	"Communication": "public/js/communication.js",
	"Event": "public/js/event.js",
	"Newsletter": "public/js/newsletter.js"
}
override_doctype_class = {
'Address': 'erpnext.accounts.custom.address.ERPNextAddress'
}
welcome_email = "erpnext.setup.utils.welcome_email"
# setup wizard
setup_wizard_requires = "assets/erpnext/js/setup_wizard.js"
setup_wizard_stages = "erpnext.setup.setup_wizard.setup_wizard.get_setup_stages"
setup_wizard_test = "erpnext.setup.setup_wizard.test_setup_wizard.run_setup_wizard_test"
before_install = "erpnext.setup.install.check_setup_wizard_not_completed"
after_install = "erpnext.setup.install.after_install"
boot_session = "erpnext.startup.boot.boot_session"
notification_config = "erpnext.startup.notifications.get_notification_config"
get_help_messages = "erpnext.utilities.activation.get_help_messages"
leaderboards = "erpnext.startup.leaderboard.get_leaderboards"
filters_config = "erpnext.startup.filters.get_filters_config"
additional_print_settings = "erpnext.controllers.print_settings.get_print_settings"
on_session_creation = [
"erpnext.portal.utils.create_customer_or_supplier",
"erpnext.shopping_cart.utils.set_cart_count"
]
on_logout = "erpnext.shopping_cart.utils.clear_cart_count"
treeviews = ['Account', 'Cost Center', 'Warehouse', 'Item Group', 'Customer Group', 'Sales Person', 'Territory', 'Assessment Group', 'Department']
# website
update_website_context = ["erpnext.shopping_cart.utils.update_website_context", "erpnext.education.doctype.education_settings.education_settings.update_website_context"]
my_account_context = "erpnext.shopping_cart.utils.update_my_account_context"
calendars = ["Task", "Work Order", "Leave Application", "Sales Order", "Holiday List", "Course Schedule"]
domains = {
'Agriculture': 'erpnext.domains.agriculture',
'Distribution': 'erpnext.domains.distribution',
'Education': 'erpnext.domains.education',
'Healthcare': 'erpnext.domains.healthcare',
'Hospitality': 'erpnext.domains.hospitality',
'Manufacturing': 'erpnext.domains.manufacturing',
'Non Profit': 'erpnext.domains.non_profit',
'Retail': 'erpnext.domains.retail',
'Services': 'erpnext.domains.services',
}
website_generators = ["Item Group", "Item", "BOM", "Sales Partner",
"Job Opening", "Student Admission"]
website_context = {
"favicon": "/assets/erpnext/images/erpnext-favicon.svg",
"splash_image": "/assets/erpnext/images/erpnext-logo.svg"
}
website_route_rules = [
{"from_route": "/orders", "to_route": "Sales Order"},
{"from_route": "/orders/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Sales Order",
"parents": [{"label": _("Orders"), "route": "orders"}]
}
},
{"from_route": "/invoices", "to_route": "Sales Invoice"},
{"from_route": "/invoices/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Sales Invoice",
"parents": [{"label": _("Invoices"), "route": "invoices"}]
}
},
{"from_route": "/supplier-quotations", "to_route": "Supplier Quotation"},
{"from_route": "/supplier-quotations/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Supplier Quotation",
"parents": [{"label": _("Supplier Quotation"), "route": "supplier-quotations"}]
}
},
{"from_route": "/purchase-orders", "to_route": "Purchase Order"},
{"from_route": "/purchase-orders/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Purchase Order",
"parents": [{"label": _("Purchase Order"), "route": "purchase-orders"}]
}
},
{"from_route": "/purchase-invoices", "to_route": "Purchase Invoice"},
{"from_route": "/purchase-invoices/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Purchase Invoice",
"parents": [{"label": _("Purchase Invoice"), "route": "purchase-invoices"}]
}
},
{"from_route": "/quotations", "to_route": "Quotation"},
{"from_route": "/quotations/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Quotation",
"parents": [{"label": _("Quotations"), "route": "quotations"}]
}
},
{"from_route": "/shipments", "to_route": "Delivery Note"},
{"from_route": "/shipments/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Delivery Note",
"parents": [{"label": _("Shipments"), "route": "shipments"}]
}
},
{"from_route": "/rfq", "to_route": "Request for Quotation"},
{"from_route": "/rfq/<path:name>", "to_route": "rfq",
"defaults": {
"doctype": "Request for Quotation",
"parents": [{"label": _("Request for Quotation"), "route": "rfq"}]
}
},
{"from_route": "/addresses", "to_route": "Address"},
{"from_route": "/addresses/<path:name>", "to_route": "addresses",
"defaults": {
"doctype": "Address",
"parents": [{"label": _("Addresses"), "route": "addresses"}]
}
},
{"from_route": "/jobs", "to_route": "Job Opening"},
{"from_route": "/admissions", "to_route": "Student Admission"},
{"from_route": "/boms", "to_route": "BOM"},
{"from_route": "/timesheets", "to_route": "Timesheet"},
{"from_route": "/material-requests", "to_route": "Material Request"},
{"from_route": "/material-requests/<path:name>", "to_route": "material_request_info",
"defaults": {
"doctype": "Material Request",
"parents": [{"label": _("Material Request"), "route": "material-requests"}]
}
},
]
standard_portal_menu_items = [
{"title": _("Personal Details"), "route": "/personal-details", "reference_doctype": "Patient", "role": "Patient"},
{"title": _("Projects"), "route": "/project", "reference_doctype": "Project"},
{"title": _("Request for Quotations"), "route": "/rfq", "reference_doctype": "Request for Quotation", "role": "Supplier"},
{"title": _("Supplier Quotation"), "route": "/supplier-quotations", "reference_doctype": "Supplier Quotation", "role": "Supplier"},
{"title": _("Purchase Orders"), "route": "/purchase-orders", "reference_doctype": "Purchase Order", "role": "Supplier"},
{"title": _("Purchase Invoices"), "route": "/purchase-invoices", "reference_doctype": "Purchase Invoice", "role": "Supplier"},
{"title": _("Quotations"), "route": "/quotations", "reference_doctype": "Quotation", "role":"Customer"},
{"title": _("Orders"), "route": "/orders", "reference_doctype": "Sales Order", "role":"Customer"},
{"title": _("Invoices"), "route": "/invoices", "reference_doctype": "Sales Invoice", "role":"Customer"},
{"title": _("Shipments"), "route": "/shipments", "reference_doctype": "Delivery Note", "role":"Customer"},
{"title": _("Issues"), "route": "/issues", "reference_doctype": "Issue", "role":"Customer"},
{"title": _("Addresses"), "route": "/addresses", "reference_doctype": "Address"},
{"title": _("Timesheets"), "route": "/timesheets", "reference_doctype": "Timesheet", "role":"Customer"},
{"title": _("Lab Test"), "route": "/lab-test", "reference_doctype": "Lab Test", "role":"Patient"},
{"title": _("Prescription"), "route": "/prescription", "reference_doctype": "Patient Encounter", "role":"Patient"},
{"title": _("Patient Appointment"), "route": "/patient-appointments", "reference_doctype": "Patient Appointment", "role":"Patient"},
{"title": _("Fees"), "route": "/fees", "reference_doctype": "Fees", "role":"Student"},
{"title": _("Newsletter"), "route": "/newsletters", "reference_doctype": "Newsletter"},
{"title": _("Admission"), "route": "/admissions", "reference_doctype": "Student Admission", "role": "Student"},
{"title": _("Certification"), "route": "/certification", "reference_doctype": "Certification Application", "role": "Non Profit Portal User"},
{"title": _("Material Request"), "route": "/material-requests", "reference_doctype": "Material Request", "role": "Customer"},
{"title": _("Appointment Booking"), "route": "/book_appointment"},
]
default_roles = [
{'role': 'Customer', 'doctype':'Contact', 'email_field': 'email_id'},
{'role': 'Supplier', 'doctype':'Contact', 'email_field': 'email_id'},
{'role': 'Student', 'doctype':'Student', 'email_field': 'student_email_id'},
]
sounds = [
{"name": "incoming-call", "src": "/assets/erpnext/sounds/incoming-call.mp3", "volume": 0.2},
{"name": "call-disconnect", "src": "/assets/erpnext/sounds/call-disconnect.mp3", "volume": 0.2},
]
has_website_permission = {
"Sales Order": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Quotation": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Sales Invoice": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Supplier Quotation": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Purchase Order": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Purchase Invoice": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Material Request": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Delivery Note": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Issue": "erpnext.support.doctype.issue.issue.has_website_permission",
"Timesheet": "erpnext.controllers.website_list_for_contact.has_website_permission",
"Lab Test": "erpnext.healthcare.web_form.lab_test.lab_test.has_website_permission",
"Patient Encounter": "erpnext.healthcare.web_form.prescription.prescription.has_website_permission",
"Patient Appointment": "erpnext.healthcare.web_form.patient_appointments.patient_appointments.has_website_permission",
"Patient": "erpnext.healthcare.web_form.personal_details.personal_details.has_website_permission"
}
dump_report_map = "erpnext.startup.report_data_map.data_map"
before_tests = "erpnext.setup.utils.before_tests"
standard_queries = {
"Customer": "erpnext.selling.doctype.customer.customer.get_customer_list",
"Healthcare Practitioner": "erpnext.healthcare.doctype.healthcare_practitioner.healthcare_practitioner.get_practitioner_list"
}
doc_events = {
"*": {
"on_submit": "erpnext.healthcare.doctype.patient_history_settings.patient_history_settings.create_medical_record",
"on_update_after_submit": "erpnext.healthcare.doctype.patient_history_settings.patient_history_settings.update_medical_record",
"on_cancel": "erpnext.healthcare.doctype.patient_history_settings.patient_history_settings.delete_medical_record"
},
"Stock Entry": {
"on_submit": "erpnext.stock.doctype.material_request.material_request.update_completed_and_requested_qty",
"on_cancel": "erpnext.stock.doctype.material_request.material_request.update_completed_and_requested_qty"
},
"User": {
"after_insert": "frappe.contacts.doctype.contact.contact.update_contact",
"validate": "erpnext.hr.doctype.employee.employee.validate_employee_role",
"on_update": ["erpnext.hr.doctype.employee.employee.update_user_permissions",
"erpnext.portal.utils.set_default_role"]
},
("Sales Taxes and Charges Template", 'Price List'): {
"on_update": "erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings.validate_cart_settings"
},
"Website Settings": {
"validate": "erpnext.portal.doctype.products_settings.products_settings.home_page_is_products"
},
"Tax Category": {
"validate": "erpnext.regional.india.utils.validate_tax_category"
},
"Sales Invoice": {
"on_submit": [
"erpnext.regional.create_transaction_log",
"erpnext.regional.italy.utils.sales_invoice_on_submit",
"erpnext.erpnext_integrations.taxjar_integration.create_transaction"
],
"on_cancel": [
"erpnext.regional.italy.utils.sales_invoice_on_cancel",
"erpnext.erpnext_integrations.taxjar_integration.delete_transaction"
],
"on_trash": "erpnext.regional.check_deletion_permission"
},
"Purchase Invoice": {
"validate": [
"erpnext.regional.india.utils.update_grand_total_for_rcm",
"erpnext.regional.united_arab_emirates.utils.update_grand_total_for_rcm",
"erpnext.regional.united_arab_emirates.utils.validate_returns"
]
},
"Payment Entry": {
"on_submit": ["erpnext.regional.create_transaction_log", "erpnext.accounts.doctype.payment_request.payment_request.update_payment_req_status", "erpnext.accounts.doctype.dunning.dunning.resolve_dunning"],
"on_trash": "erpnext.regional.check_deletion_permission"
},
'Address': {
'validate': ['erpnext.regional.india.utils.validate_gstin_for_india', 'erpnext.regional.italy.utils.set_state_code', 'erpnext.regional.india.utils.update_gst_category'],
'on_update':'erpnext.healthcare.utils.update_address_link'
},
'Supplier': {
		'validate': 'erpnext.regional.india.utils.validate_pan_for_india'
	},
('Sales Invoice', 'Sales Order', 'Delivery Note', 'Purchase Invoice', 'Purchase Order', 'Purchase Receipt'): {
'validate': ['erpnext.regional.india.utils.set_place_of_supply']
},
('Sales Invoice', 'Purchase Invoice'): {
'validate': ['erpnext.regional.india.utils.validate_document_name']
},
"Contact": {
"on_trash": "erpnext.support.doctype.issue.issue.update_issue",
"after_insert": "erpnext.telephony.doctype.call_log.call_log.link_existing_conversations",
"validate": "erpnext.crm.utils.update_lead_phone_numbers"
},
"Email Unsubscribe": {
"after_insert": "erpnext.crm.doctype.email_campaign.email_campaign.unsubscribe_recipient"
},
('Quotation', 'Sales Order', 'Sales Invoice'): {
'validate': ["erpnext.erpnext_integrations.taxjar_integration.set_sales_tax"]
}
}
# On cancel event Payment Entry will be exempted and all linked submittable doctype will get cancelled.
# to maintain data integrity we exempted payment entry. it will un-link when sales invoice get cancelled.
# if payment entry not in auto cancel exempted doctypes it will cancel payment entry.
auto_cancel_exempted_doctypes= [
"Payment Entry",
"Inpatient Medication Entry"
]
scheduler_events = {
"cron": {
"0/30 * * * *": [
"erpnext.utilities.doctype.video.video.update_youtube_data",
]
},
"all": [
"erpnext.projects.doctype.project.project.project_status_update_reminder",
"erpnext.healthcare.doctype.patient_appointment.patient_appointment.send_appointment_reminder",
"erpnext.crm.doctype.social_media_post.social_media_post.process_scheduled_social_media_posts"
],
"hourly": [
'erpnext.hr.doctype.daily_work_summary_group.daily_work_summary_group.trigger_emails',
"erpnext.accounts.doctype.subscription.subscription.process_all",
"erpnext.erpnext_integrations.doctype.amazon_mws_settings.amazon_mws_settings.schedule_get_order_details",
"erpnext.accounts.doctype.gl_entry.gl_entry.rename_gle_sle_docs",
"erpnext.erpnext_integrations.doctype.plaid_settings.plaid_settings.automatic_synchronization",
"erpnext.projects.doctype.project.project.hourly_reminder",
"erpnext.projects.doctype.project.project.collect_project_status",
"erpnext.hr.doctype.shift_type.shift_type.process_auto_attendance_for_all_shifts",
"erpnext.support.doctype.issue.issue.set_service_level_agreement_variance",
"erpnext.erpnext_integrations.connectors.shopify_connection.sync_old_orders",
],
"daily": [
"erpnext.stock.reorder_item.reorder_item",
"erpnext.support.doctype.issue.issue.auto_close_tickets",
"erpnext.crm.doctype.opportunity.opportunity.auto_close_opportunity",
"erpnext.controllers.accounts_controller.update_invoice_status",
"erpnext.accounts.doctype.fiscal_year.fiscal_year.auto_create_fiscal_year",
"erpnext.hr.doctype.employee.employee.send_birthday_reminders",
"erpnext.projects.doctype.task.task.set_tasks_as_overdue",
"erpnext.assets.doctype.asset.depreciation.post_depreciation_entries",
"erpnext.hr.doctype.daily_work_summary_group.daily_work_summary_group.send_summary",
"erpnext.stock.doctype.serial_no.serial_no.update_maintenance_status",
"erpnext.buying.doctype.supplier_scorecard.supplier_scorecard.refresh_scorecards",
"erpnext.setup.doctype.company.company.cache_companies_monthly_sales_history",
"erpnext.assets.doctype.asset.asset.update_maintenance_status",
"erpnext.assets.doctype.asset.asset.make_post_gl_entry",
"erpnext.crm.doctype.contract.contract.update_status_for_contracts",
"erpnext.projects.doctype.project.project.update_project_sales_billing",
"erpnext.projects.doctype.project.project.send_project_status_email_to_users",
"erpnext.quality_management.doctype.quality_review.quality_review.review",
"erpnext.support.doctype.service_level_agreement.service_level_agreement.check_agreement_status",
"erpnext.crm.doctype.email_campaign.email_campaign.send_email_to_leads_or_contacts",
"erpnext.crm.doctype.email_campaign.email_campaign.set_email_campaign_status",
"erpnext.selling.doctype.quotation.quotation.set_expired_status",
"erpnext.healthcare.doctype.patient_appointment.patient_appointment.update_appointment_status",
"erpnext.buying.doctype.supplier_quotation.supplier_quotation.set_expired_status",
"erpnext.accounts.doctype.process_statement_of_accounts.process_statement_of_accounts.send_auto_email",
"erpnext.non_profit.doctype.membership.membership.set_expired_status"
],
"daily_long": [
"erpnext.setup.doctype.email_digest.email_digest.send",
"erpnext.manufacturing.doctype.bom_update_tool.bom_update_tool.update_latest_price_in_all_boms",
"erpnext.hr.doctype.leave_ledger_entry.leave_ledger_entry.process_expired_allocation",
"erpnext.hr.doctype.leave_policy_assignment.leave_policy_assignment.automatically_allocate_leaves_based_on_leave_policy",
"erpnext.hr.utils.generate_leave_encashment",
"erpnext.hr.utils.allocate_earned_leaves",
"erpnext.hr.utils.grant_leaves_automatically",
"erpnext.loan_management.doctype.process_loan_security_shortfall.process_loan_security_shortfall.create_process_loan_security_shortfall",
"erpnext.loan_management.doctype.process_loan_interest_accrual.process_loan_interest_accrual.process_loan_interest_accrual_for_term_loans",
"erpnext.crm.doctype.lead.lead.daily_open_lead"
],
"monthly_long": [
"erpnext.accounts.deferred_revenue.process_deferred_accounting",
"erpnext.loan_management.doctype.process_loan_interest_accrual.process_loan_interest_accrual.process_loan_interest_accrual_for_demand_loans"
]
}
email_brand_image = "assets/erpnext/images/erpnext-logo.jpg"
default_mail_footer = """
<span>
Sent via
<a class="text-muted" href="https://erpnext.com?source=via_email_footer" target="_blank">
ERPNext
</a>
</span>
"""
get_translated_dict = {
("doctype", "Global Defaults"): "frappe.geo.country_info.get_translated_dict"
}
bot_parsers = [
'erpnext.utilities.bot.FindItemBot',
]
get_site_info = 'erpnext.utilities.get_site_info'
payment_gateway_enabled = "erpnext.accounts.utils.create_payment_gateway_account"
communication_doctypes = ["Customer", "Supplier"]
accounting_dimension_doctypes = ["GL Entry", "Sales Invoice", "Purchase Invoice", "Payment Entry", "Asset",
"Expense Claim", "Expense Claim Detail", "Expense Taxes and Charges", "Stock Entry", "Budget", "Payroll Entry", "Delivery Note",
"Sales Invoice Item", "Purchase Invoice Item", "Purchase Order Item", "Journal Entry Account", "Material Request Item", "Delivery Note Item",
"Purchase Receipt Item", "Stock Entry Detail", "Payment Entry Deduction", "Sales Taxes and Charges", "Purchase Taxes and Charges", "Shipping Rule",
"Landed Cost Item", "Asset Value Adjustment", "Loyalty Program", "Fee Schedule", "Fee Structure", "Stock Reconciliation",
"Travel Request", "Fees", "POS Profile", "Opening Invoice Creation Tool", "Opening Invoice Creation Tool Item", "Subscription",
"Subscription Plan"
]
regional_overrides = {
'France': {
'erpnext.tests.test_regional.test_method': 'erpnext.regional.france.utils.test_method'
},
'India': {
'erpnext.tests.test_regional.test_method': 'erpnext.regional.india.utils.test_method',
'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_header': 'erpnext.regional.india.utils.get_itemised_tax_breakup_header',
'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_data': 'erpnext.regional.india.utils.get_itemised_tax_breakup_data',
'erpnext.accounts.party.get_regional_address_details': 'erpnext.regional.india.utils.get_regional_address_details',
'erpnext.controllers.taxes_and_totals.get_regional_round_off_accounts': 'erpnext.regional.india.utils.get_regional_round_off_accounts',
'erpnext.hr.utils.calculate_annual_eligible_hra_exemption': 'erpnext.regional.india.utils.calculate_annual_eligible_hra_exemption',
'erpnext.hr.utils.calculate_hra_exemption_for_period': 'erpnext.regional.india.utils.calculate_hra_exemption_for_period',
'erpnext.accounts.doctype.purchase_invoice.purchase_invoice.make_regional_gl_entries': 'erpnext.regional.india.utils.make_regional_gl_entries',
'erpnext.controllers.accounts_controller.validate_einvoice_fields': 'erpnext.regional.india.e_invoice.utils.validate_einvoice_fields'
},
'United Arab Emirates': {
'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.united_arab_emirates.utils.update_itemised_tax_data',
'erpnext.accounts.doctype.purchase_invoice.purchase_invoice.make_regional_gl_entries': 'erpnext.regional.united_arab_emirates.utils.make_regional_gl_entries',
},
'Saudi Arabia': {
'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.united_arab_emirates.utils.update_itemised_tax_data'
},
'Italy': {
'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.italy.utils.update_itemised_tax_data',
'erpnext.controllers.accounts_controller.validate_regional': 'erpnext.regional.italy.utils.sales_invoice_validate',
}
}
user_privacy_documents = [
{
'doctype': 'Lead',
'match_field': 'email_id',
'personal_fields': ['phone', 'mobile_no', 'fax', 'website', 'lead_name'],
},
{
'doctype': 'Opportunity',
'match_field': 'contact_email',
'personal_fields': ['contact_mobile', 'contact_display', 'customer_name'],
}
]
# ERPNext doctypes for Global Search
global_search_doctypes = {
"Default": [
{"doctype": "Customer", "index": 0},
{"doctype": "Supplier", "index": 1},
{"doctype": "Item", "index": 2},
{"doctype": "Warehouse", "index": 3},
{"doctype": "Account", "index": 4},
{"doctype": "Employee", "index": 5},
{"doctype": "BOM", "index": 6},
{"doctype": "Sales Invoice", "index": 7},
{"doctype": "Sales Order", "index": 8},
{"doctype": "Quotation", "index": 9},
{"doctype": "Work Order", "index": 10},
{"doctype": "Purchase Order", "index": 11},
{"doctype": "Purchase Receipt", "index": 12},
{"doctype": "Purchase Invoice", "index": 13},
{"doctype": "Delivery Note", "index": 14},
{"doctype": "Stock Entry", "index": 15},
{"doctype": "Material Request", "index": 16},
{"doctype": "Delivery Trip", "index": 17},
{"doctype": "Pick List", "index": 18},
{"doctype": "Salary Slip", "index": 19},
{"doctype": "Leave Application", "index": 20},
{"doctype": "Expense Claim", "index": 21},
{"doctype": "Payment Entry", "index": 22},
{"doctype": "Lead", "index": 23},
{"doctype": "Opportunity", "index": 24},
{"doctype": "Item Price", "index": 25},
{"doctype": "Purchase Taxes and Charges Template", "index": 26},
{"doctype": "Sales Taxes and Charges", "index": 27},
{"doctype": "Asset", "index": 28},
{"doctype": "Project", "index": 29},
{"doctype": "Task", "index": 30},
{"doctype": "Timesheet", "index": 31},
{"doctype": "Issue", "index": 32},
{"doctype": "Serial No", "index": 33},
{"doctype": "Batch", "index": 34},
{"doctype": "Branch", "index": 35},
{"doctype": "Department", "index": 36},
{"doctype": "Employee Grade", "index": 37},
{"doctype": "Designation", "index": 38},
{"doctype": "Job Opening", "index": 39},
{"doctype": "Job Applicant", "index": 40},
{"doctype": "Job Offer", "index": 41},
{"doctype": "Salary Structure Assignment", "index": 42},
{"doctype": "Appraisal", "index": 43},
{"doctype": "Loan", "index": 44},
{"doctype": "Maintenance Schedule", "index": 45},
{"doctype": "Maintenance Visit", "index": 46},
{"doctype": "Warranty Claim", "index": 47},
],
"Healthcare": [
{'doctype': 'Patient', 'index': 1},
{'doctype': 'Medical Department', 'index': 2},
{'doctype': 'Vital Signs', 'index': 3},
{'doctype': 'Healthcare Practitioner', 'index': 4},
{'doctype': 'Patient Appointment', 'index': 5},
{'doctype': 'Healthcare Service Unit', 'index': 6},
{'doctype': 'Patient Encounter', 'index': 7},
{'doctype': 'Antibiotic', 'index': 8},
{'doctype': 'Diagnosis', 'index': 9},
{'doctype': 'Lab Test', 'index': 10},
{'doctype': 'Clinical Procedure', 'index': 11},
{'doctype': 'Inpatient Record', 'index': 12},
{'doctype': 'Sample Collection', 'index': 13},
{'doctype': 'Patient Medical Record', 'index': 14},
{'doctype': 'Appointment Type', 'index': 15},
{'doctype': 'Fee Validity', 'index': 16},
{'doctype': 'Practitioner Schedule', 'index': 17},
{'doctype': 'Dosage Form', 'index': 18},
{'doctype': 'Lab Test Sample', 'index': 19},
{'doctype': 'Prescription Duration', 'index': 20},
{'doctype': 'Prescription Dosage', 'index': 21},
{'doctype': 'Sensitivity', 'index': 22},
{'doctype': 'Complaint', 'index': 23},
{'doctype': 'Medical Code', 'index': 24},
],
"Education": [
{'doctype': 'Article', 'index': 1},
{'doctype': 'Video', 'index': 2},
{'doctype': 'Topic', 'index': 3},
{'doctype': 'Course', 'index': 4},
{'doctype': 'Program', 'index': 5},
{'doctype': 'Quiz', 'index': 6},
{'doctype': 'Question', 'index': 7},
{'doctype': 'Fee Schedule', 'index': 8},
{'doctype': 'Fee Structure', 'index': 9},
{'doctype': 'Fees', 'index': 10},
{'doctype': 'Student Group', 'index': 11},
{'doctype': 'Student', 'index': 12},
{'doctype': 'Instructor', 'index': 13},
{'doctype': 'Course Activity', 'index': 14},
{'doctype': 'Quiz Activity', 'index': 15},
{'doctype': 'Course Enrollment', 'index': 16},
{'doctype': 'Program Enrollment', 'index': 17},
{'doctype': 'Student Language', 'index': 18},
{'doctype': 'Student Applicant', 'index': 19},
{'doctype': 'Assessment Result', 'index': 20},
{'doctype': 'Assessment Plan', 'index': 21},
{'doctype': 'Grading Scale', 'index': 22},
{'doctype': 'Guardian', 'index': 23},
{'doctype': 'Student Leave Application', 'index': 24},
{'doctype': 'Student Log', 'index': 25},
{'doctype': 'Room', 'index': 26},
{'doctype': 'Course Schedule', 'index': 27},
{'doctype': 'Student Attendance', 'index': 28},
{'doctype': 'Announcement', 'index': 29},
{'doctype': 'Student Category', 'index': 30},
{'doctype': 'Assessment Group', 'index': 31},
{'doctype': 'Student Batch Name', 'index': 32},
{'doctype': 'Assessment Criteria', 'index': 33},
{'doctype': 'Academic Year', 'index': 34},
{'doctype': 'Academic Term', 'index': 35},
{'doctype': 'School House', 'index': 36},
{'doctype': 'Student Admission', 'index': 37},
{'doctype': 'Fee Category', 'index': 38},
{'doctype': 'Assessment Code', 'index': 39},
{'doctype': 'Discussion', 'index': 40},
],
"Agriculture": [
{'doctype': 'Weather', 'index': 1},
{'doctype': 'Soil Texture', 'index': 2},
{'doctype': 'Water Analysis', 'index': 3},
{'doctype': 'Soil Analysis', 'index': 4},
{'doctype': 'Plant Analysis', 'index': 5},
{'doctype': 'Agriculture Analysis Criteria', 'index': 6},
{'doctype': 'Disease', 'index': 7},
{'doctype': 'Crop', 'index': 8},
{'doctype': 'Fertilizer', 'index': 9},
{'doctype': 'Crop Cycle', 'index': 10}
],
"Non Profit": [
{'doctype': 'Certified Consultant', 'index': 1},
{'doctype': 'Certification Application', 'index': 2},
{'doctype': 'Volunteer', 'index': 3},
{'doctype': 'Membership', 'index': 4},
{'doctype': 'Member', 'index': 5},
{'doctype': 'Donor', 'index': 6},
{'doctype': 'Chapter', 'index': 7},
{'doctype': 'Grant Application', 'index': 8},
{'doctype': 'Volunteer Type', 'index': 9},
{'doctype': 'Donor Type', 'index': 10},
{'doctype': 'Membership Type', 'index': 11}
],
"Hospitality": [
{'doctype': 'Hotel Room', 'index': 0},
{'doctype': 'Hotel Room Reservation', 'index': 1},
{'doctype': 'Hotel Room Pricing', 'index': 2},
{'doctype': 'Hotel Room Package', 'index': 3},
{'doctype': 'Hotel Room Type', 'index': 4}
]
}
additional_timeline_content = {
'*': ['erpnext.telephony.doctype.call_log.call_log.get_linked_call_logs']
}<|fim▁end|> | |
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"net/http"
"time"
"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/endpoints/discovery"
genericregistry "k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/kube-apiextensions-server/pkg/apis/apiextensions"
"k8s.io/kube-apiextensions-server/pkg/apis/apiextensions/install"
"k8s.io/kube-apiextensions-server/pkg/apis/apiextensions/v1alpha1"
"k8s.io/kube-apiextensions-server/pkg/client/clientset/internalclientset"
internalinformers "k8s.io/kube-apiextensions-server/pkg/client/informers/internalversion"
"k8s.io/kube-apiextensions-server/pkg/registry/customresourcedefinition"
// make sure the generated client works
_ "k8s.io/kube-apiextensions-server/pkg/client/clientset/clientset"
_ "k8s.io/kube-apiextensions-server/pkg/client/informers/externalversions"
_ "k8s.io/kube-apiextensions-server/pkg/client/informers/internalversion"
)
var (
	// groupFactoryRegistry records API group announcements prior to install.
	groupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
	// registry tracks which API group/versions are enabled.
	registry = registered.NewOrDie("")
	// Scheme holds the type registrations for this server.
	Scheme = runtime.NewScheme()
	// Codecs provides serializers/codecs for Scheme's registered types.
	Codecs = serializer.NewCodecFactory(Scheme)
)
func init() {
	// Register the apiextensions group into the shared scheme/registry.
	install.Install(groupFactoryRegistry, registry, Scheme)

	// The legacy ""/v1 group-version carries the meta options and the
	// unversioned response types below.
	legacyV1 := schema.GroupVersion{Group: "", Version: "v1"}

	// we need to add the options to empty v1
	metav1.AddToGroupVersion(Scheme, legacyV1)
	Scheme.AddUnversionedTypes(legacyV1,
		&metav1.Status{},
		&metav1.APIVersions{},
		&metav1.APIGroupList{},
		&metav1.APIGroup{},
		&metav1.APIResourceList{},
	)
}
// Config holds the configuration needed to build a CustomResourceDefinitions
// server.
type Config struct {
	GenericConfig *genericapiserver.Config

	// CustomResourceDefinitionRESTOptionsGetter supplies storage options for
	// the custom resources served by this apiserver (the CRD objects
	// themselves use GenericConfig.RESTOptionsGetter — see New).
	CustomResourceDefinitionRESTOptionsGetter genericregistry.RESTOptionsGetter
}
// CustomResourceDefinitions is the apiextensions apiserver: it serves the
// CustomResourceDefinition API and (via its handler chain) the custom
// resources those definitions describe.
type CustomResourceDefinitions struct {
	GenericAPIServer *genericapiserver.GenericAPIServer
}
// completedConfig wraps a Config after completion, letting New require a
// completed configuration at the type level.
type completedConfig struct {
	*Config
}
// Complete fills in any fields not set that are required to have valid data.
// It's mutating the receiver.
func (c *Config) Complete() completedConfig {
	// Generic discovery is disabled; this server installs its own
	// group/version discovery handlers (see New).
	c.GenericConfig.EnableDiscovery = false
	c.GenericConfig.Complete()

	c.GenericConfig.Version = &version.Info{Major: "0", Minor: "1"}

	return completedConfig{c}
}
// SkipComplete provides a way to construct a server instance without config
// completion. Callers are responsible for the config being valid.
func (c *Config) SkipComplete() completedConfig {
	return completedConfig{c}
}
// New returns a new instance of CustomResourceDefinitions from the given config.
// It installs the apiextensions.k8s.io/v1alpha1 group, mounts a handler under
// /apis that serves dynamically registered custom resources, and registers
// post-start hooks that run the informers and the discovery controller.
func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*CustomResourceDefinitions, error) {
	genericServer, err := c.Config.GenericConfig.SkipComplete().New(genericapiserver.EmptyDelegate) // completion is done in Complete, no need for a second time
	if err != nil {
		return nil, err
	}

	s := &CustomResourceDefinitions{
		GenericAPIServer: genericServer,
	}

	// Serve the customresourcedefinitions resource itself under v1alpha1.
	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(apiextensions.GroupName, registry, Scheme, metav1.ParameterCodec, Codecs)
	apiGroupInfo.GroupMeta.GroupVersion = v1alpha1.SchemeGroupVersion
	v1alpha1storage := map[string]rest.Storage{}
	v1alpha1storage["customresourcedefinitions"] = customresourcedefinition.NewREST(Scheme, c.GenericConfig.RESTOptionsGetter)
	apiGroupInfo.VersionedResourcesStorageMap["v1alpha1"] = v1alpha1storage
	if err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {
		return nil, err
	}

	// Client and informers against our own (loopback) server, used to watch CRDs.
	customResourceDefinitionClient, err := internalclientset.NewForConfig(s.GenericAPIServer.LoopbackClientConfig)
	if err != nil {
		return nil, err
	}
	customResourceDefinitionInformers := internalinformers.NewSharedInformerFactory(customResourceDefinitionClient, 5*time.Minute)

	// Requests not handled by any CRD fall through to the delegation target.
	delegateHandler := delegationTarget.UnprotectedHandler()
	if delegateHandler == nil {
		delegateHandler = http.NotFoundHandler()
	}

	versionDiscoveryHandler := &versionDiscoveryHandler{
		discovery: map[schema.GroupVersion]*discovery.APIVersionHandler{},
		delegate:  delegateHandler,
	}
	groupDiscoveryHandler := &groupDiscoveryHandler{
		discovery: map[string]*discovery.APIGroupHandler{},
		delegate:  delegateHandler,
	}
	customResourceDefinitionHandler := NewCustomResourceDefinitionHandler(
		versionDiscoveryHandler,
		groupDiscoveryHandler,
		s.GenericAPIServer.RequestContextMapper(),
		customResourceDefinitionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions().Lister(),
		delegateHandler,
		c.CustomResourceDefinitionRESTOptionsGetter,
		c.GenericConfig.AdmissionControl,
	)
	// Mount the CRD handler on the mux that runs after the go-restful routes,
	// covering /apis and everything below it.
	s.GenericAPIServer.Handler.PostGoRestfulMux.Handle("/apis", customResourceDefinitionHandler)
	s.GenericAPIServer.Handler.PostGoRestfulMux.HandlePrefix("/apis/", customResourceDefinitionHandler)

	customResourceDefinitionController := NewDiscoveryController(customResourceDefinitionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(), versionDiscoveryHandler, groupDiscoveryHandler)

	s.GenericAPIServer.AddPostStartHook("start-apiextensions-informers", func(context genericapiserver.PostStartHookContext) error {
		customResourceDefinitionInformers.Start(context.StopCh)
		return nil
	})
	s.GenericAPIServer.AddPostStartHook("start-apiextensions-controllers", func(context genericapiserver.PostStartHookContext) error {
		go customResourceDefinitionController.Run(context.StopCh)
		return nil
	})

	return s, nil
}
<|file_name|>StringValue.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.export;
import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LongValues;
class StringValue implements SortValue {

  // Doc values this instance was constructed with; used to resolve ordinals
  // across the whole reader.
  private final SortedDocValues globalDocValues;
  // Maps segment-local ordinals to global ordinals; null when globalDocValues
  // is not a MultiDocValues.MultiSortedDocValues (see constructor).
  private final OrdinalMap ordinalMap;
  // Name of the docValues field being sorted on.
  private final String field;
  // Ordinal comparator (also supplies the "reset"/sentinel ordinal).
  private final IntComp comp;

  protected LongValues toGlobal = LongValues.IDENTITY; // this segment to global ordinal. NN;
  // Per-leaf doc values, replaced in setNextReader.
  protected SortedDocValues docValues;

  public int currentOrd;
  protected int lastDocID;
  // Whether the last doc passed to setCurrentValue had a value for the field.
  private boolean present;

  // Caches for the most recently looked-up ordinal's term.
  private BytesRef lastBytes;
  private String lastString;
  private int lastOrd = -1;
  private int leafOrd = -1;
public StringValue(SortedDocValues globalDocValues, String field, IntComp comp) {
this.globalDocValues = globalDocValues;
this.docValues = globalDocValues;
if (globalDocValues instanceof MultiDocValues.MultiSortedDocValues) {
this.ordinalMap = ((MultiDocValues.MultiSortedDocValues) globalDocValues).mapping;
} else {
this.ordinalMap = null;
}
this.field = field;
this.comp = comp;
this.currentOrd = comp.resetValue();
this.present = false;
}
public String getLastString() {<|fim▁hole|> this.lastString = lastString;
}
public StringValue copy() {
StringValue copy = new StringValue(globalDocValues, field, comp);
return copy;
}
  /**
   * Positions this sort value on {@code docId}: records whether the doc has a
   * value for the field and, if so, its segment-local ordinal.
   */
  public void setCurrentValue(int docId) throws IOException {
    // System.out.println(docId +":"+lastDocID);
    /*
    if (docId < lastDocID) {
      throw new AssertionError("docs were sent out-of-order: lastDocID=" + lastDocID + " vs doc=" + docId);
    }
    lastDocID = docId;
    */
    // Advance the iterator only when it is behind the target document.
    if (docId > docValues.docID()) {
      docValues.advance(docId);
    }
    if (docId == docValues.docID()) {
      present = true;
      currentOrd = docValues.ordValue();
    } else {
      // The document has no value for this field.
      present = false;
      currentOrd = -1;
    }
  }
  @Override
  public boolean isPresent() {
    // True when the doc last passed to setCurrentValue had a value for the field.
    return present;
  }
public void setCurrentValue(SortValue sv) {
StringValue v = (StringValue) sv;
this.currentOrd = v.currentOrd;
this.present = v.present;
this.leafOrd = v.leafOrd;
this.lastOrd = v.lastOrd;
this.toGlobal = v.toGlobal;
}
public Object getCurrentValue() throws IOException {
assert present == true;
if (currentOrd != lastOrd) {
lastBytes = docValues.lookupOrd(currentOrd);
lastOrd = currentOrd;
lastString = null;
}
return lastBytes;
}
  /**
   * Rewrites currentOrd from a segment-local ordinal to a global ordinal,
   * reusing previousValue's already-mapped ordinal when it saw the same
   * leaf ordinal.
   */
  public void toGlobalValue(SortValue previousValue) {
    lastOrd = currentOrd;
    StringValue sv = (StringValue) previousValue;
    if (sv.lastOrd == currentOrd) {
      // Take the global ord from the previousValue unless we are a -1 which is the same in both
      // global and leaf ordinal
      if (this.currentOrd != -1) {
        this.currentOrd = sv.currentOrd;
      }
    } else {
      if (this.currentOrd > -1) {
        // Translate this segment-local ordinal through the leaf-to-global map.
        this.currentOrd = (int) toGlobal.get(this.currentOrd);
      }
    }
  }
  /** Returns the name of the docValues field this sort value reads. */
  public String getField() {
    return field;
  }
  /**
   * Switches to a new leaf: loads the leaf's sorted doc values and, when an
   * ordinal map exists, the leaf-to-global ordinal mapping for that segment.
   */
  public void setNextReader(LeafReaderContext context) throws IOException {
    leafOrd = context.ord;
    if (ordinalMap != null) {
      toGlobal = ordinalMap.getGlobalOrds(context.ord);
    }
    docValues = DocValues.getSorted(context.reader(), field);
    lastDocID = 0;
  }
  /** Returns to the initial "no value seen" state, as set by the constructor. */
  public void reset() {
    this.currentOrd = comp.resetValue();
    this.present = false;
    lastDocID = 0;
  }
public int compareTo(SortValue o) {
StringValue sv = (StringValue) o;
return comp.compare(currentOrd, sv.currentOrd);
}
public String toString() {
return Integer.toString(this.currentOrd);
}
}<|fim▁end|> | return this.lastString;
}
public void setLastString(String lastString) { |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>pub mod native;<|fim▁end|> | |
<|file_name|>factory.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/emicklei/go-restful/swagger"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apimachinery"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/metrics"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/serializer/json"
utilflag "k8s.io/kubernetes/pkg/util/flag"
"k8s.io/kubernetes/pkg/watch"
)
const (
	// FlagMatchBinaryVersion is the flag name requiring the server version
	// to match the client version.
	FlagMatchBinaryVersion = "match-server-version"
)
// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
// of resources and different API sets.
// TODO: make the functions interfaces
// TODO: pass the various interfaces on the factory directly into the command constructors (so the
// commands are decoupled from the factory).
type Factory struct {
	clients *ClientCache
	flags   *pflag.FlagSet

	// Object returns interfaces for dealing with arbitrary runtime.Objects. If thirdPartyDiscovery
	// is true, performs API calls to discover dynamic API objects registered by third parties.
	Object func(thirdPartyDiscovery bool) (meta.RESTMapper, runtime.ObjectTyper)
	// Decoder returns an interface for decoding objects - if toInternal is set, decoded objects will
	// be converted into their internal form (if possible). Eventually the internal form will be
	// removed as an option, and only versioned objects will be returned.
	Decoder func(toInternal bool) runtime.Decoder
	// JSONEncoder returns an encoder capable of encoding a provided object into JSON in the default desired version.
	JSONEncoder func() runtime.Encoder
	// Client returns a client for accessing Kubernetes resources or an error.
	Client func() (*client.Client, error)
	// ClientConfig returns a client.Config for accessing the Kubernetes server.
	ClientConfig func() (*restclient.Config, error)
	// ClientForMapping returns a RESTClient for working with the specified RESTMapping or an error.
	// This is intended for working with arbitrary resources and is not guaranteed to point to a
	// Kubernetes APIServer.
	ClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error)
	// Describer returns a Describer for displaying the specified RESTMapping type or an error.
	Describer func(mapping *meta.RESTMapping) (kubectl.Describer, error)
	// Printer returns a Printer for formatting objects of the given type or an error.
	Printer func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error)
	// Scaler returns a Scaler for changing the size of the specified RESTMapping type or an error.
	Scaler func(mapping *meta.RESTMapping) (kubectl.Scaler, error)
	// Reaper returns a Reaper for gracefully shutting down resources.
	Reaper func(mapping *meta.RESTMapping) (kubectl.Reaper, error)
	// HistoryViewer returns a HistoryViewer for viewing change history.
	HistoryViewer func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error)
	// Rollbacker returns a Rollbacker for changing the rollback version of the specified
	// RESTMapping type or an error.
	Rollbacker func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error)
	// MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a
	// new set-based selector is provided, an error is returned if the selector cannot be converted to a
	// map-based selector.
	MapBasedSelectorForObject func(object runtime.Object) (string, error)
	// PortsForObject returns the ports associated with the provided object.
	PortsForObject func(object runtime.Object) ([]string, error)
	// LabelsForObject returns the labels associated with the provided object.
	LabelsForObject func(object runtime.Object) (map[string]string, error)
	// LogsForObject returns a request for the logs associated with the provided object.
	LogsForObject func(object, options runtime.Object) (*restclient.Request, error)
	// PauseObject marks the provided object as paused ie. it will not be reconciled by its controller.
	PauseObject func(object runtime.Object) (bool, error)
	// ResumeObject resumes a paused object ie. it will be reconciled by its controller.
	ResumeObject func(object runtime.Object) (bool, error)
	// Validator returns a schema that can validate objects stored on disk.
	Validator func(validate bool, cacheDir string) (validation.Schema, error)
	// SwaggerSchema returns the schema declaration for the provided group version kind.
	SwaggerSchema func(unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error)
	// DefaultNamespace returns the default namespace to use in cases where no
	// other namespace is specified and whether the namespace was overridden.
	DefaultNamespace func() (string, bool, error)
	// Generators returns the generators for the provided command.
	Generators func(cmdName string) map[string]kubectl.Generator
	// CanBeExposed checks whether the kind of resources could be exposed.
	CanBeExposed func(kind unversioned.GroupKind) error
	// CanBeAutoscaled checks whether the kind of resources could be autoscaled.
	CanBeAutoscaled func(kind unversioned.GroupKind) error
	// AttachablePodForObject returns the pod to which to attach given an object.
	AttachablePodForObject func(object runtime.Object) (*api.Pod, error)
	// EditorEnvs returns a group of environment variables that the edit command
	// can range over in order to determine if the user has specified an editor
	// of their choice.
	EditorEnvs func() []string
	// PrintObjectSpecificMessage prints object-specific messages on the provided writer.
	PrintObjectSpecificMessage func(obj runtime.Object, out io.Writer)
}
// Names accepted by the --generator flag of the corresponding commands.
const (
	RunV1GeneratorName                          = "run/v1"
	RunPodV1GeneratorName                       = "run-pod/v1"
	ServiceV1GeneratorName                      = "service/v1"
	ServiceV2GeneratorName                      = "service/v2"
	ServiceAccountV1GeneratorName               = "serviceaccount/v1"
	HorizontalPodAutoscalerV1Beta1GeneratorName = "horizontalpodautoscaler/v1beta1"
	DeploymentV1Beta1GeneratorName              = "deployment/v1beta1"
	JobV1Beta1GeneratorName                     = "job/v1beta1"
	JobV1GeneratorName                          = "job/v1"
	NamespaceV1GeneratorName                    = "namespace/v1"
	SecretV1GeneratorName                       = "secret/v1"
	SecretForDockerRegistryV1GeneratorName      = "secret-for-docker-registry/v1"
	ConfigMapV1GeneratorName                    = "configmap/v1"
)
// DefaultGenerators returns the set of default generators for use in Factory
// instances. Unknown command names yield a nil map, matching a missed lookup.
func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
	switch cmdName {
	case "expose":
		return map[string]kubectl.Generator{
			ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{},
			ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{},
		}
	case "run":
		return map[string]kubectl.Generator{
			RunV1GeneratorName:             kubectl.BasicReplicationController{},
			RunPodV1GeneratorName:          kubectl.BasicPod{},
			DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{},
			JobV1Beta1GeneratorName:        kubectl.JobV1Beta1{},
			JobV1GeneratorName:             kubectl.JobV1{},
		}
	case "autoscale":
		return map[string]kubectl.Generator{
			HorizontalPodAutoscalerV1Beta1GeneratorName: kubectl.HorizontalPodAutoscalerV1Beta1{},
		}
	case "namespace":
		return map[string]kubectl.Generator{
			NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{},
		}
	case "secret":
		return map[string]kubectl.Generator{
			SecretV1GeneratorName: kubectl.SecretGeneratorV1{},
		}
	case "secret-for-docker-registry":
		return map[string]kubectl.Generator{
			SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{},
		}
	default:
		return nil
	}
}
// getGroupVersionKinds filters gvks down to the entries belonging to group.
// Always returns a non-nil slice, even when nothing matches.
func getGroupVersionKinds(gvks []unversioned.GroupVersionKind, group string) []unversioned.GroupVersionKind {
	matching := []unversioned.GroupVersionKind{}
	for _, gvk := range gvks {
		if gvk.Group == group {
			matching = append(matching, gvk)
		}
	}
	return matching
}
// makeInterfacesFor builds an InterfacesFor func for a dynamically registered
// (third party) group: it accepts any version present in versionList and
// wires in the third-party object converter; other versions are an error.
func makeInterfacesFor(versionList []unversioned.GroupVersion) func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
	accessor := meta.NewAccessor()
	return func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
		for ix := range versionList {
			if versionList[ix].String() == version.String() {
				return &meta.VersionInterfaces{
					ObjectConvertor:  thirdpartyresourcedata.NewThirdPartyObjectConverter(api.Scheme),
					MetadataAccessor: accessor,
				}, nil
			}
		}
		return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, versionList)
	}
}
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		// If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that
		// have been dynamically added to the apiserver
		Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			if discoverDynamicAPIs {
				client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"})
				CheckErr(err)
				versions, gvks, err := GetThirdPartyGroupVersions(client.Discovery())
				CheckErr(err)
				if len(versions) > 0 {
					priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper))
						return nil, nil
					}
					multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper))
						return nil, nil
					}
					// Group the discovered third-party versions by API group and
					// register each group with its own mapper, preferring the
					// first listed version.
					groupsMap := map[string][]unversioned.GroupVersion{}
					for _, version := range versions {
						groupsMap[version.Group] = append(groupsMap[version.Group], version)
					}
					for group, versionList := range groupsMap {
						preferredExternalVersion := versionList[0]
						thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group))
						CheckErr(err)
						accessor := meta.NewAccessor()
						groupMeta := apimachinery.GroupMeta{
							GroupVersion:  preferredExternalVersion,
							GroupVersions: versionList,
							RESTMapper:    thirdPartyMapper,
							SelfLinker:    runtime.SelfLinker(accessor),
							InterfacesFor: makeInterfacesFor(versionList),
						}
						CheckErr(registered.RegisterGroup(groupMeta))
						registered.AddThirdPartyAPIGroupVersions(versionList...)
						multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
					}
					priorityMapper.Delegate = multiMapper
					// Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we
					// don't re-assign, the above assignement won't actually update mapper.RESTMapper
					mapper.RESTMapper = priorityMapper
				}
			}
			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}
			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			gvk := mapping.GroupVersionKind
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			c, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			// Pick the per-group REST client; unknown groups must be
			// registered third-party groups, served via a dedicated client.
			switch gvk.Group {
			case api.GroupName:
				return c.RESTClient, nil
			case autoscaling.GroupName:
				return c.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return c.BatchClient.RESTClient, nil
			case apps.GroupName:
				return c.AppsClient.RESTClient, nil
			case extensions.GroupName:
				return c.ExtensionsClient.RESTClient, nil
			case api.SchemeGroupVersion.Group:
				return c.RESTClient, nil
			case extensions.SchemeGroupVersion.Group:
				return c.ExtensionsClient.RESTClient, nil
			default:
				if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
					return nil, fmt.Errorf("unknown api group/version: %s", gvk.String())
				}
				cfg, err := clientConfig.ClientConfig()
				if err != nil {
					return nil, err
				}
				gv := gvk.GroupVersion()
				cfg.GroupVersion = &gv
				cfg.APIPath = "/apis"
				cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk.Kind)
				return restclient.RESTClientFor(cfg)
			}
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil
			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				// For controllers, pick the "best" pod for logging and warn
				// when more than one matched.
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}
				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil
			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}
				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}
			switch t := object.(type) {
			case *extensions.Deployment:
				// First return value reports "was already paused".
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}
			switch t := object.(type) {
			case *extensions.Deployment:
				// First return value reports "was already resumed".
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			// NOTE(review): client is used here before the err check below —
			// confirm FromUnversionedClient tolerates a nil client.
			clientset := clientset.FromUnversionedClient(client)
			if err != nil {
				return nil, err
			}
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				// Cache swagger schemas per server version when a cache dir is given.
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster. If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.
See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}
			}
		},
	}
}
// GetFirstPod returns a pod matching the namespace and label selector
// and the number of all pods that match the label selector.
func GetFirstPod(client client.PodsNamespacer, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) {
	options := api.ListOptions{LabelSelector: selector}

	podList, err := client.Pods(namespace).List(options)
	if err != nil {
		return nil, 0, err
	}
	if total := len(podList.Items); total > 0 {
		// Pods already exist: sort them and hand back the first one.
		matches := []*api.Pod{}
		for i := range podList.Items {
			item := podList.Items[i]
			matches = append(matches, &item)
		}
		sort.Sort(sortBy(matches))
		return matches[0], total, nil
	}

	// Nothing matched yet: watch from the list's resource version until a
	// matching pod is added or modified (bounded by timeout).
	options.ResourceVersion = podList.ResourceVersion
	watcher, err := client.Pods(namespace).Watch(options)
	if err != nil {
		return nil, 0, err
	}
	defer watcher.Stop()

	addedOrModified := func(event watch.Event) (bool, error) {
		return event.Type == watch.Added || event.Type == watch.Modified, nil
	}
	event, err := watch.Until(timeout, watcher, addedOrModified)
	if err != nil {
		return nil, 0, err
	}
	observed, ok := event.Object.(*api.Pod)
	if !ok {
		return nil, 0, fmt.Errorf("%#v is not a pod event", event)
	}
	// Only the single observed pod is known at this point.
	return observed, 1, nil
}
// Command will stringify and return all environment arguments ie. a command run by a client
// using the factory.
// TODO: We need to filter out stuff like secrets.
func (f *Factory) Command() string {
if len(os.Args) == 0 {
return ""
}
base := filepath.Base(os.Args[0])
args := append([]string{base}, os.Args[1:]...)
return strings.Join(args, " ")
}<|fim▁hole|> flags.AddFlagSet(f.flags)
// Globally persistent flags across all subcommands.
// TODO Change flag names to consts to allow safer lookup from subcommands.
// TODO Add a verbose flag that turns on glog logging. Probably need a way
// to do that automatically for every subcommand.
flags.BoolVar(&f.clients.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version")
// Normalize all flags that are coming from other packages or pre-configurations
// a.k.a. change all "_" to "-". e.g. glog package
flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
}
// BindExternalFlags adds any flags defined by external projects (not part of pflags).
func (f *Factory) BindExternalFlags(flags *pflag.FlagSet) {
	// any flags defined by external projects (not part of pflags)
	flags.AddGoFlagSet(flag.CommandLine)
}
// makePortsString renders service ports as a comma-separated list of
// "protocol:port" entries, using each port's NodePort when useNodePort is set.
func makePortsString(ports []api.ServicePort, useNodePort bool) string {
	entries := make([]string, 0, len(ports))
	for _, sp := range ports {
		port := sp.Port
		if useNodePort {
			port = sp.NodePort
		}
		entries = append(entries, fmt.Sprintf("%s:%d", strings.ToLower(string(sp.Protocol)), port))
	}
	return strings.Join(entries, ",")
}
// getPorts collects every container port declared in the pod spec,
// rendered as decimal strings.
func getPorts(spec api.PodSpec) []string {
	ports := []string{}
	for _, container := range spec.Containers {
		for _, cp := range container.Ports {
			ports = append(ports, strconv.Itoa(int(cp.ContainerPort)))
		}
	}
	return ports
}
// Extracts the ports exposed by a service from the given service spec,
// rendered as decimal strings.
func getServicePorts(spec api.ServiceSpec) []string {
	ports := []string{}
	for _, sp := range spec.Ports {
		ports = append(ports, strconv.Itoa(int(sp.Port)))
	}
	return ports
}
// clientSwaggerSchema validates serialized API objects against swagger
// schemas downloaded from the apiserver (and cached on disk under cacheDir).
type clientSwaggerSchema struct {
	// c supplies the per-API-group REST clients used to fetch schemas.
	c *client.Client
	// cacheDir is the root of the on-disk schema cache; empty disables caching.
	cacheDir string
	// mapper is not referenced by the methods visible in this file —
	// NOTE(review): confirm it is still needed.
	mapper meta.RESTMapper
}
// schemaFileName is the file name used for each cached swagger schema,
// stored under <cacheDir>/<prefix>/<groupVersion>/.
const schemaFileName = "schema.json"

// schemaClient is the minimal client surface needed to download a swagger
// schema from the apiserver.
type schemaClient interface {
	Get() *restclient.Request
}
// recursiveSplit breaks a slash-separated path into its individual
// components, e.g. "a/b/c" -> ["a", "b", "c"].
func recursiveSplit(dir string) []string {
	// Peel components off the end one at a time, then reverse the order.
	reversed := []string{}
	rest := dir
	for {
		parent, file := path.Split(rest)
		reversed = append(reversed, file)
		if len(parent) == 0 {
			break
		}
		// Drop the trailing slash that path.Split leaves on the parent.
		rest = parent[:len(parent)-1]
	}
	parts := make([]string, 0, len(reversed))
	for i := len(reversed) - 1; i >= 0; i-- {
		parts = append(parts, reversed[i])
	}
	return parts
}
// substituteUserHome expands a leading "~" or "~user" in dir to the
// corresponding home directory; any other path is returned unchanged.
func substituteUserHome(dir string) (string, error) {
	if len(dir) == 0 || dir[0] != '~' {
		return dir, nil
	}
	components := recursiveSplit(dir)
	first := components[0]
	if len(first) == 1 {
		// Bare "~": current user's home from the environment.
		components[0] = os.Getenv("HOME")
	} else {
		// "~name": look up that user's home directory.
		u, err := user.Lookup(first[1:])
		if err != nil {
			return "", err
		}
		components[0] = u.HomeDir
	}
	return path.Join(components...), nil
}
// writeSchemaFile persists schemaData into the on-disk schema cache at
// cacheFile (under <cacheDir>/<prefix>/<groupVersion>). The data is first
// written to a temp file and then hard-linked into place so readers never
// observe a partially written cache file. Permission problems and an
// already-existing cache file are treated as benign (cache is best-effort).
//
// Fix over the previous version: the temp file is now closed (no fd leak)
// and removed (temp files no longer accumulate in cacheDir).
func writeSchemaFile(schemaData []byte, cacheDir, cacheFile, prefix, groupVersion string) error {
	if err := os.MkdirAll(path.Join(cacheDir, prefix, groupVersion), 0755); err != nil {
		return err
	}
	tmpFile, err := ioutil.TempFile(cacheDir, "schema")
	if err != nil {
		// If we can't write, keep going.
		if os.IsPermission(err) {
			return nil
		}
		return err
	}
	// The hard link below keeps cacheFile alive; the temp name itself is
	// always safe to remove once we are done with it.
	defer os.Remove(tmpFile.Name())
	if _, err := io.Copy(tmpFile, bytes.NewBuffer(schemaData)); err != nil {
		tmpFile.Close()
		return err
	}
	// Close before linking so the data is fully flushed.
	if err := tmpFile.Close(); err != nil {
		return err
	}
	if err := os.Link(tmpFile.Name(), cacheFile); err != nil {
		// If we can't write due to file existing, or permission problems, keep going.
		if os.IsExist(err) || os.IsPermission(err) {
			return nil
		}
		return err
	}
	return nil
}
// getSchemaAndValidate loads the swagger schema for prefix/groupVersion —
// from the on-disk cache when present, otherwise from the apiserver — and
// validates data against it. If validation fails with TypeNotFoundError on a
// cached schema, the schema is re-downloaded once and validation retried,
// since the cached copy may be stale.
func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string) (err error) {
	var schemaData []byte
	var firstSeen bool
	fullDir, err := substituteUserHome(cacheDir)
	if err != nil {
		return err
	}
	cacheFile := path.Join(fullDir, prefix, groupVersion, schemaFileName)
	if len(cacheDir) != 0 {
		// Try the cache first; a missing file just means we download below.
		if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	if schemaData == nil {
		// Cache miss (or caching disabled): fetch fresh from the apiserver.
		firstSeen = true
		schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion)
		if err != nil {
			return err
		}
	}
	schema, err := validation.NewSwaggerSchemaFromBytes(schemaData)
	if err != nil {
		return err
	}
	err = schema.ValidateBytes(data)
	if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen {
		// As a temporary hack, kubectl re-gets the schema if validation
		// fails for type-not-found reasons: the cached schema may predate
		// the type being validated.
		// TODO: runtime-config settings needs to make into the file's name
		schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion)
		if err != nil {
			return err
		}
		schema, err := validation.NewSwaggerSchemaFromBytes(schemaData)
		if err != nil {
			return err
		}
		return schema.ValidateBytes(data)
	}
	return err
}
// Download swagger schema from apiserver and store it to file. The schema is
// returned even when caching is disabled (empty cacheDir).
func downloadSchemaAndStore(c schemaClient, cacheDir, fullDir, cacheFile, prefix, groupVersion string) (schemaData []byte, err error) {
	schemaData, err = c.Get().
		AbsPath("/swaggerapi", prefix, groupVersion).
		Do().
		Raw()
	if err != nil {
		return schemaData, err
	}
	if len(cacheDir) == 0 {
		// Caching disabled: hand back the freshly downloaded schema as-is.
		return schemaData, nil
	}
	if err = writeSchemaFile(schemaData, fullDir, cacheFile, prefix, groupVersion); err != nil {
		return schemaData, err
	}
	return schemaData, nil
}
// ValidateBytes validates the given serialized API object against the swagger
// schema for its group/version, fetched via the matching API-group client.
// Third-party group versions are accepted without validation.
func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
	gvk, err := json.DefaultMetaFactory.Interpret(data)
	if err != nil {
		return err
	}
	if ok := registered.IsEnabledVersion(gvk.GroupVersion()); !ok {
		return fmt.Errorf("API version %q isn't supported, only supports API versions %q", gvk.GroupVersion().String(), registered.EnabledVersions())
	}
	if gvk.Group == autoscaling.GroupName {
		if c.c.AutoscalingClient == nil {
			return errors.New("unable to validate: no autoscaling client")
		}
		return getSchemaAndValidate(c.c.AutoscalingClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir)
	}
	if gvk.Group == apps.GroupName {
		if c.c.AppsClient == nil {
			// Bug fix: this branch previously reported "no autoscaling client"
			// (copy-paste from the block above).
			return errors.New("unable to validate: no apps client")
		}
		return getSchemaAndValidate(c.c.AppsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir)
	}
	if gvk.Group == batch.GroupName {
		if c.c.BatchClient == nil {
			return errors.New("unable to validate: no batch client")
		}
		return getSchemaAndValidate(c.c.BatchClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir)
	}
	if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
		// Don't attempt to validate third party objects
		return nil
	}
	if gvk.Group == extensions.GroupName {
		if c.c.ExtensionsClient == nil {
			return errors.New("unable to validate: no experimental client")
		}
		return getSchemaAndValidate(c.c.ExtensionsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir)
	}
	return getSchemaAndValidate(c.c.RESTClient, data, "api", gvk.GroupVersion().String(), c.cacheDir)
}
// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy:
//   1.  Use the kubeconfig builder.  The number of merges and overrides here gets a little crazy.  Stay with me.
//       1.  Merge together the kubeconfig itself.  This is done with the following hierarchy rules:
//           1.  CommandLineLocation - this parsed from the command line, so it must be late bound.  If you specify this,
//               then no other kubeconfig files are merged.  This file must exist.
//           2.  If $KUBECONFIG is set, then it is treated as a list of files that should be merged.
//           3.  HomeDirectoryLocation
//           Empty filenames are ignored.  Files with non-deserializable content produce errors.
//           The first file to set a particular value or map key wins and the value or map key is never changed.
//           This means that the first file to set CurrentContext will have its context preserved.  It also means
//           that if two files specify a "red-user", only values from the first file's red-user are used.  Even
//           non-conflicting entries from the second file's "red-user" are discarded.
//       2.  Determine the context to use based on the first hit in this chain
//           1.  command line argument - again, parsed from the command line, so it must be late bound
//           2.  CurrentContext from the merged kubeconfig file
//           3.  Empty is allowed at this stage
//       3.  Determine the cluster info and auth info to use.  At this point, we may or may not have a context.  They
//           are built based on the first hit in this chain.  (run it twice, once for auth, once for cluster)
//           1.  command line argument
//           2.  If context is present, then use the context value
//           3.  Empty is allowed
//       4.  Determine the actual cluster info to use.  At this point, we may or may not have a cluster info.  Build
//           each piece of the cluster info based on the chain:
//           1.  command line argument
//           2.  If cluster info is present and a value for the attribute is present, use it.
//           3.  If you don't have a server location, bail.
//       5.  Auth info is built using the same rules as cluster info, EXCEPT that you can only have one authentication
//           technique per auth info.  The following conditions result in an error:
//           1.  If there are two conflicting techniques specified from the command line, fail.
//           2.  If the command line does not specify one, and the auth info has conflicting techniques, fail.
//           3.  If the command line specifies one and the auth info specifies another, honor the command line technique.
//   2.  Use default values and potentially prompt for auth information
//
//   However, if it appears that we're running in a kubernetes cluster
//   container environment, then run with the auth info kubernetes mounted for
//   us. Specifically:
//     The env vars KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are
//     set, and the file /var/run/secrets/kubernetes.io/serviceaccount/token
//     exists and is not a directory.
func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
	overrides := &clientcmd.ConfigOverrides{}
	flagNames := clientcmd.RecommendedConfigOverrideFlags("")
	// short flagnames are disabled by default.  These are here for compatibility with existing scripts
	flagNames.ClusterOverrideFlags.APIServer.ShortName = "s"
	clientcmd.BindOverrideFlags(overrides, flags, flagNames)
	// Interactive: may prompt on stdin for missing auth information.
	clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)
	return clientConfig
}
// PrintObject prints an api object given command line flags to modify the output format
func (f *Factory) PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error {
	// Resolve the object's group/version/kind so a REST mapping (and thus a
	// printer) can be selected for it.
	gvk, err := api.Scheme.ObjectKind(obj)
	if err != nil {
		return err
	}
	groupKind := gvk.GroupKind()
	mapping, err := mapper.RESTMapping(groupKind)
	if err != nil {
		return err
	}
	const withNamespace = false
	printer, err := f.PrinterForMapping(cmd, mapping, withNamespace)
	if err != nil {
		return err
	}
	return printer.PrintObj(obj, out)
}
// PrinterForMapping returns a printer suitable for displaying the provided resource type.
// Requires that printer flags have been added to cmd (see AddPrinterFlags).
// If the command specifies a generic output format (json/yaml/template/...),
// the printer is wrapped to convert objects to an appropriate output version;
// otherwise a human-readable printer is built from the display flags.
func (f *Factory) PrinterForMapping(cmd *cobra.Command, mapping *meta.RESTMapping, withNamespace bool) (kubectl.ResourcePrinter, error) {
	printer, ok, err := PrinterForCommand(cmd)
	if err != nil {
		return nil, err
	}
	if ok {
		// Generic output format requested: pick an output version, falling
		// back to the mapping's own group/version when none is specified.
		clientConfig, err := f.ClientConfig()
		if err != nil {
			return nil, err
		}
		version, err := OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return nil, err
		}
		if version.IsEmpty() {
			version = mapping.GroupVersionKind.GroupVersion()
		}
		if version.IsEmpty() {
			return nil, fmt.Errorf("you must specify an output-version when using this output format")
		}
		printer = kubectl.NewVersionedPrinter(printer, mapping.ObjectConvertor, version, mapping.GroupVersionKind.GroupVersion())
	} else {
		// Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper
		// (the lookup error is deliberately swallowed and treated as "none").
		columnLabel, err := cmd.Flags().GetStringSlice("label-columns")
		if err != nil {
			columnLabel = []string{}
		}
		printer, err = f.Printer(mapping, GetFlagBool(cmd, "no-headers"), withNamespace, GetWideFlag(cmd), GetFlagBool(cmd, "show-all"), GetFlagBool(cmd, "show-labels"), isWatch(cmd), columnLabel)
		if err != nil {
			return nil, err
		}
		// Optionally wrap with a sorting printer when --sort-by is set.
		printer = maybeWrapSortingPrinter(cmd, printer)
	}
	return printer, nil
}
// One stop shopping for a Builder
func (f *Factory) NewBuilder(thirdPartyDiscovery bool) *resource.Builder {
mapper, typer := f.Object(thirdPartyDiscovery)
return resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true))
}<|fim▁end|> |
// BindFlags adds any flags that are common to all kubectl sub commands.
func (f *Factory) BindFlags(flags *pflag.FlagSet) {
// Merge factory's flags |
<|file_name|>js_interop_test.ts<|end_file_name|><|fim▁begin|>import {NUMBER, STRING} from "../ast/types";
import {Error, ErrorType} from "../base/error";
import {boot} from "../eval/boot";
import {Evaluator} from "../eval/evaluator";
import {InMemoryInputPort} from "../io/in_memory_input_port";
import {InMemoryOutputPort} from "../io/in_memory_output_port";
import {InMemoryPortBuffer} from "../io/in_memory_port_buffer";
import {LPAREN, RPAREN} from "../parse/terminals";
import {argumentTypeError} from "../runtime/errors";
import {SchemeSources} from "../scm/scheme_sources";
// Backing buffer shared by the in-memory stdin/stdout ports.
let buffer;
// In-memory port the evaluator reads from as stdin.
let stdin;
// In-memory port the evaluator writes to as stdout.
let stdout;
// Bundled Scheme syntax and procedure sources used to boot the evaluator.
let sources;
// Fresh evaluator instance, rebuilt before every spec.
let evaluator: Evaluator;
// Handle to stdout kept so the toOutput matcher can drain written output.
let sharedOutputPort: InMemoryOutputPort | undefined;
// End-to-end interop suite: boots a fresh Scheme evaluator wired to
// in-memory stdin/stdout before every spec, then checks evaluation results,
// printed output, and raised errors as observed from the JavaScript side.
describe("scheme<->js interop tests", () => {
  beforeEach(() => {
    jasmine.addMatchers({toEvalTo, toOutput, toThrow});
    // TODO: the output buffer isn't correctly flushed. Promoting these to top-level consts
    // causes some tests to fail with garbage from previous tests.
    buffer = new InMemoryPortBuffer();
    stdin = new InMemoryInputPort(buffer);
    stdout = new InMemoryOutputPort(buffer);
    sharedOutputPort = stdout;
    sources = new SchemeSources();
    evaluator = boot(sources.syntax, sources.procedures, stdin, stdout);
  });
  it("should return primitives to js correctly", () => {
    // NOTE(review): the next two expectations are identical — possibly one
    // was meant to cover a different numeric literal. Verify.
    expect('42').toEvalTo('42');
    expect('42').toEvalTo('42');
    expect('#t').toEvalTo('#t');
    expect('#f').toEvalTo('#f');
    expect('"hello, world"').toEvalTo('"hello, world"');
    expect("'hello").toEvalTo('hello');
    expect('(quote hello)').toEvalTo('hello');
    expect('#\\a').toEvalTo('#\\a');
    expect('#\\space').toEvalTo('#\\space');
    expect('#\\newline').toEvalTo('#\\newline');
  });
  it("should display primitives to js correctly", () => {
    expect('(display 42)').toOutput('42');
    expect('(display #t)').toOutput('#t');
    expect('(display #f)').toOutput('#f');
    expect('(display "hello, world")').toOutput('hello, world');
    expect("(display 'hello)").toOutput('hello');
    expect('(display (quote hello))').toOutput('hello');
    expect('(display #\\a)').toOutput('a');
    expect('(display #\\space)').toOutput(' ');
    expect('(display #\\newline)').toOutput('\n');
  });
  it("should write primitives correctly to js", () => {
    // Unlike display, write emits machine-readable external representations.
    expect('(write 42)').toOutput('42');
    expect('(write #t)').toOutput('#t');
    expect('(write #f)').toOutput('#f');
    expect('(write "hello, world")').toOutput('"hello, world"');
    expect("(write 'hello)").toOutput('hello');
    expect('(write (quote hello))').toOutput('hello');
    expect('(write #\\a)').toOutput('#\\a');
    expect('(write #\\space)').toOutput('#\\space');
    expect('(write #\\newline)').toOutput('#\\newline');
  });
  it("should execute sanity checks correctly", () => {
    expect('(+ 1 1)').toEvalTo('2');
    expect('(procedure? procedure?)').toEvalTo('#t');
    expect('(string-append "hello " "world")').toEvalTo('"hello world"');
    // Nested quotation peels exactly one quote level per evaluation.
    expect("'a").toEvalTo('a');
    expect("''a").toEvalTo("'a");
    expect("'''a").toEvalTo("''a");
    expect("''''a").toEvalTo("'''a");
    expect("'''''a").toEvalTo("''''a");
  });
  it("should return recursive types to js correctly", () => {
    expect('#()').toEvalTo('#()');
    expect("'()").toEvalTo('()');
    expect("(list '() '() '() '(42))").toEvalTo('(() () () (42))');
    expect('(list 1 2 3)').toEvalTo('(1 2 3)');
    expect("(cons 'a (cons 'b (cons 'c '())))").toEvalTo('(a b c)');
    expect("(cons 'a 'b)").toEvalTo('(a . b)');
  });
  it("should display recursive types to js correctly", () => {
    expect('(display #())').toOutput('#()');
    expect("(display '())").toOutput('()');
    expect("(display (list '() '() '() '(42)))").toOutput('(() () () (42))');
    expect('(display (list 1 2 3))').toOutput('(1 2 3)');
    expect("(display (cons 'a (cons 'b (cons 'c '()))))").toOutput('(a b c)');
    expect("(display (cons 'a 'b))").toOutput('(a . b)');
  });
  it("should write recursive types to js correctly", () => {
    expect('(write #())').toOutput('#()');
    expect("(write '())").toOutput('()');
    expect("(write (list '() '() '() '(42)))").toOutput('(() () () (42))');
    expect('(write (list 1 2 3))').toOutput('(1 2 3)');
    expect("(write (cons 'a (cons 'b (cons 'c '()))))").toOutput('(a b c)');
    expect("(write (cons 'a 'b))").toOutput('(a . b)');
  });
  // R5RS doesn't actually forbid these external representations to be the empty string, but empty
  // strings are not helpful to return in a REPL.
  it("should do something reasonable with nonstandard external representations", () => {
    expect('+').not.toEvalTo('');
    expect('(lambda (x) x)').not.toEvalTo('');
    expect('(current-input-port)').not.toEvalTo('');
    expect('(current-output-port)').not.toEvalTo('');
    expect('(scheme-report-environment 5)').not.toEvalTo('');
    expect('(null-environment 5)').not.toEvalTo('');
  });
  it("should return the empty string for unspecified values", () => {
    expect('').toEvalTo('');
    expect(' ').toEvalTo('');
    expect('\n').toEvalTo('');
    expect('\t').toEvalTo('');
    expect(' \t \n\n\n ').toEvalTo('');
    expect('(define x 1)').toEvalTo('');
    expect('(define x 1) (set! x 2)').toEvalTo('');
    expect('(define x (cons 1 2)) (set-car! x x)').toEvalTo('');
    expect('(define x (cons 1 2)) (set-cdr! x x)').toEvalTo('');
    expect('(if #f #t)').toEvalTo('');
    expect('(write "foo")').toEvalTo('');
    expect('(display 42)').toEvalTo('');
    expect('(write-char #\\a)').toEvalTo('');
    expect('(close-input-port (current-input-port))').toEvalTo('');
    expect('(close-input-port (open-input-file "foo"))').toEvalTo('');
    expect('(close-output-port (open-output-file "foo"))').toEvalTo('');
    expect('(close-output-port (current-output-port))').toEvalTo('');
  });
  // TODO all these errors stringify to [Object object], so the tests are not very valuable. Fix.
  it("should throw appropriate errors", () => {
    expect('(').toThrow(new Error(ErrorType.READ, `read error: ${LPAREN}`));
    expect(')').toThrow(new Error(ErrorType.READ, `read error: ${RPAREN}`));
    expect('(eval)').toThrow(Error.incorrectNumArgs('eval', 2, 0));
    expect('(eval 1 2 3 4 5)').toThrow(Error.incorrectNumArgs('eval', 2, 5));
    expect('(let ((foo (lambda (x) x))) (foo))').toThrow(Error.incorrectNumArgs(''/* TODO bl lambda */, 1, 0));
    expect('(let ((foo (lambda (x) x))) (foo 1 2))').toThrow(Error.incorrectNumArgs('' /* TODO bl lambda */, 1, 2));
    expect("(set-car! '(1 2 3) 4)").toThrow(Error.immutable(''));
    // Example from R5RS 6.3.5
    expect('(let ((g (lambda () "***"))) (string-set! (g) 0 #\\?))').toThrow(Error.immutable(''));
    // Example from R5RS 6.3.5
    expect("(string-set! (symbol->string 'immutable) 0 #\\?)").toThrow(Error.immutable(''));
    // Example from R5RS 6.3.6
    expect("(vector-set! '#(0 1 2) 1 \"doe\")").toThrow(Error.immutable(''));
    expect('(make-vector)').toThrow(Error.tooFewVarargs('make-vector', 1, 0));
    expect('(make-vector 1 2 3 4 5)').toThrow(Error.tooManyVarargs('make-vector', 2, 5));
    expect('(let ((foo (lambda (x . y) x))) (foo))').toThrow(Error.tooFewVarargs('', 1, 0));
    expect('(+ "a" "b")').toThrow(argumentTypeError('a', 0, '+', NUMBER, STRING));
    expect('(scheme-report-environment 6)').toThrow(Error.unimplementedOption(''));
    expect('(null-environment 6)').toThrow(Error.unimplementedOption(''));
  });
});
function toEvalTo(util: jasmine.MatchersUtil, customEqualityTesters: jasmine.CustomEqualityTester[]): jasmine.CustomMatcher {
return {
compare(actualInput: string, expectedResult: string): jasmine.CustomMatcherResult {
let actualResult;
try {
actualResult = evaluator.evaluate(actualInput);
} catch (e) {
actualResult = e;
}
return {
pass: actualResult === expectedResult,<|fim▁hole|> };
}
};
}
/**
 * Custom jasmine matcher: evaluates the input Scheme expression and asserts
 * on what was written to the shared output port (not on the returned value).
 * Evaluation errors are tolerated here; only the produced output is checked.
 */
function toOutput(util: jasmine.MatchersUtil, customEqualityTesters: jasmine.CustomEqualityTester[]): jasmine.CustomMatcher {
  return {
    compare(actualInput: string, expectedOutput: string): jasmine.CustomMatcherResult {
      let actualResult;
      try {
        actualResult = evaluator.evaluate(actualInput);
      } catch (e) {
        actualResult = e;
      }
      const actualOutput = sharedOutputPort!.dequeueOutput();
      return {
        pass: actualOutput === expectedOutput,
        // Bug fix: the failure message previously interpolated expectedOutput
        // twice, so "got" never showed the actual output.
        message: `want ${expectedOutput}, got ${actualOutput}`,
      };
    }
  };
}
function toThrow(util: jasmine.MatchersUtil, customEqualityTesters: jasmine.CustomEqualityTester[]): jasmine.CustomMatcher {
return {
compare(input: string, expectedError: any): jasmine.CustomMatcherResult {
let actualError = '';
try {
evaluator.evaluate(input);
} catch (e) {
actualError = e.toString();
}
return {
pass: actualError === expectedError.toString(),
message: `want ${expectedError}, got ${actualError}`,
};
}
};
}<|fim▁end|> | message: `want ${expectedResult}, got ${actualResult}`, |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import React from 'react';
import {connect} from 'cerebral-view-react';
import styles from './styles.css';
import {
isObject,
isArray,
isString,
isBoolean,
isNumber,
isNull
} from 'common/utils';
import JSONInput from './JSONInput';
import connector from 'connector';
// isInPath reports whether `target` is a (string-wise) prefix of `source`:
// every element of target must stringify equal to the element of source at
// the same index. Falsy source or target always yields false.
function isInPath(source, target) {
  if (!source || !target) {
    return false;
  }
  return target.every((key, index) => String(source[index]) === String(key));
}
function renderType(value, hasNext, path, propertyKey, highlightPath) {
if (value === undefined) {
return null;
}
if (isArray(value)) {
return (
<ArrayValue
value={value}
hasNext={hasNext}
path={path}
propertyKey={propertyKey}
highlightPath={highlightPath}/>
);
}
if (isObject(value)) {
return (
<ObjectValue
value={value}
hasNext={hasNext}
path={path}
propertyKey={propertyKey}
highlightPath={highlightPath}/>
);
}
return (
<Value
value={value}
hasNext={hasNext}
path={path}
propertyKey={propertyKey}
highlightPath={highlightPath}/>
);
}
/**
 * Renders a JS object as a collapsible tree node. Starts collapsed when it
 * has more than 3 keys (or none) unless it lies on the highlighted path, the
 * inspector was created with `expanded`, or it is the forced-open root.
 */
class ObjectValue extends React.Component {
  static contextTypes = {
    options: React.PropTypes.object.isRequired
  }
  constructor(props, context) {
    super(props);
    const numberOfKeys = Object.keys(props.value).length;
    // Nodes on the highlight path are forced open so the highlight is visible.
    const isHighlightPath = !!(this.props.highlightPath && isInPath(this.props.highlightPath, this.props.path));
    // The root node stays open when the inspector is created with `expanded`.
    const preventCollapse = this.props.path.length === 0 && context.options.expanded;
    this.state = {
      isCollapsed: !preventCollapse && !isHighlightPath && (numberOfKeys > 3 || numberOfKeys === 0 ? true : context.options.expanded ? false : true)
    };
    this.onCollapseClick = this.onCollapseClick.bind(this);
    this.onExpandClick = this.onExpandClick.bind(this);
  }
  shouldComponentUpdate(nextProps, nextState) {
    // Re-render on collapse toggles or path/highlight changes; always
    // re-render while editing is enabled, since edits bypass prop identity.
    return (
      nextState.isCollapsed !== this.state.isCollapsed ||
      this.context.options.canEdit ||
      nextProps.path !== this.props.path ||
      nextProps.highlightPath !== this.props.highlightPath
    );
  }
  componentWillReceiveProps(nextProps) {
    // A collapsed node may need to re-open when the highlight path moves
    // onto it; an already-open node is left alone.
    const context = this.context;
    const props = nextProps;
    const numberOfKeys = Object.keys(props.value).length;
    const isHighlightPath = !!(props.highlightPath && isInPath(props.highlightPath, props.path));
    const preventCollapse = props.path.length === 0 && context.options.expanded;
    if (this.state.isCollapsed) {
      this.setState({
        isCollapsed: !preventCollapse && !isHighlightPath && (numberOfKeys > 3 || numberOfKeys === 0 ? true : context.options.expanded ? false : true)
      });
    }
  }
  onExpandClick() {
    this.setState({isCollapsed: false})
  }
  onCollapseClick() {
    this.setState({isCollapsed: true});
  }
  // Renders one key/value pair, temporarily extending the shared path array
  // (push before render, pop after; path.slice() snapshots it for the child).
  renderProperty(key, value, index, hasNext, path) {
    this.props.path.push(key);
    const property = (
      <div className={styles.objectProperty} key={index}>
        <div className={styles.objectPropertyValue}>{renderType(value, hasNext, path.slice(), key, this.props.highlightPath)}</div>
      </div>
    );
    this.props.path.pop();
    return property;
  }
  // Collapsed preview: at most the first three key names.
  renderKeys(keys) {
    if (keys.length > 3) {
      return keys.slice(0, 3).join(', ') + '...'
    }
    return keys.join(', ');
  }
  render() {
    const {value, hasNext} = this.props;
    // Exact match (not just prefix) gets the highlight styling.
    const isExactHighlightPath = this.props.highlightPath && String(this.props.highlightPath) === String(this.props.path);
    if (this.state.isCollapsed) {
      return (
        <div className={isExactHighlightPath ? styles.highlightObject : styles.object} onClick={this.onExpandClick}>
          {this.props.propertyKey ? this.props.propertyKey + ': ' : null}
          <strong>{'{ '}</strong>{this.renderKeys(Object.keys(value))}<strong>{' }'}</strong>
          {hasNext ? ',' : null}
        </div>
      );
    } else if (this.props.propertyKey) {
      const keys = Object.keys(value);
      return (
        <div className={isExactHighlightPath ? styles.highlightObject : styles.object}>
          <div onClick={this.onCollapseClick}>{this.props.propertyKey}: <strong>{'{ '}</strong></div>
          {keys.map((key, index) => this.renderProperty(key, value[key], index, index < keys.length - 1, this.props.path))}
          <div><strong>{' }'}</strong>{hasNext ? ',' : null}</div>
        </div>
      );
    } else {
      const keys = Object.keys(value);
      return (
        <div className={isExactHighlightPath ? styles.highlightObject : styles.object}>
          <div onClick={this.onCollapseClick}><strong>{'{ '}</strong></div>
          {/* NOTE(review): the extra highlightPath argument below is ignored
              by renderProperty's 5-parameter signature — harmless but
              inconsistent with the branch above. */}
          {keys.map((key, index) => this.renderProperty(key, value[key], index, index < keys.length - 1, this.props.path, this.props.highlightPath))}
          <div><strong>{' }'}</strong>{hasNext ? ',' : null}</div>
        </div>
      );
    }
  }
}
/**
 * Renders a JS array as a collapsible tree node. Starts collapsed when it
 * has more than 3 items (or none) unless it lies on the highlighted path or
 * the inspector was created with `expanded`. Collapsed view shows the length.
 */
class ArrayValue extends React.Component {
  static contextTypes = {
    options: React.PropTypes.object.isRequired
  }
  constructor(props, context) {
    super(props);
    const numberOfItems = props.value.length;
    const isHighlightPath = this.props.highlightPath && isInPath(this.props.highlightPath, this.props.path);
    this.state = {
      isCollapsed: !isHighlightPath && (numberOfItems > 3 || numberOfItems === 0) ? true : context.options.expanded ? false : true
    };
    this.onCollapseClick = this.onCollapseClick.bind(this);
    this.onExpandClick = this.onExpandClick.bind(this);
  }
  shouldComponentUpdate(nextProps, nextState) {
    // Same policy as ObjectValue: collapse toggles, path/highlight changes,
    // or always while editing is enabled.
    return (
      nextState.isCollapsed !== this.state.isCollapsed ||
      this.context.options.canEdit ||
      nextProps.path !== this.props.path ||
      nextProps.highlightPath !== this.props.highlightPath
    );
  }
  componentWillReceiveProps(nextProps) {
    // Re-open a collapsed node when the highlight path moves onto it.
    const context = this.context;
    const props = nextProps;
    const numberOfItems = props.value.length;
    const isHighlightPath = props.highlightPath && isInPath(props.highlightPath, props.path);
    if (this.state.isCollapsed) {
      this.setState({
        isCollapsed: !isHighlightPath && (numberOfItems > 3 || numberOfItems === 0) ? true : context.options.expanded ? false : true
      });
    }
  }
  onExpandClick() {
    this.setState({isCollapsed: false})
  }
  onCollapseClick() {
    this.setState({isCollapsed: true});
  }
  // Renders one element, temporarily extending the shared path array.
  // NOTE(review): unlike ObjectValue.renderProperty, this does not forward
  // propertyKey/highlightPath to renderType, so highlighting may not
  // propagate into array children — confirm whether that is intended.
  renderItem(item, index, hasNext, path) {
    this.props.path.push(index);
    const arrayItem = (
      <div className={styles.arrayItem} key={index}>
        {renderType(item, hasNext, path.slice())}
      </div>
    );
    this.props.path.pop();
    return arrayItem;
  }
  render() {
    const {value, hasNext} = this.props;
    const isExactHighlightPath = this.props.highlightPath && String(this.props.highlightPath) === String(this.props.path);
    if (this.state.isCollapsed) {
      // Collapsed view: "[ <length> ]".
      return (
        <div className={isExactHighlightPath ? styles.highlightArray : styles.array} onClick={this.onExpandClick}>
          {this.props.propertyKey ? this.props.propertyKey + ': ' : null}
          <strong>{'[ '}</strong>{value.length}<strong>{' ]'}</strong>
          {hasNext ? ',' : null}
        </div>
      );
    } else if (this.props.propertyKey) {
      // NOTE(review): `keys` is computed but never used in this branch.
      const keys = Object.keys(value);
      return (
        <div className={isExactHighlightPath ? styles.highlightArray : styles.array}>
          <div onClick={this.onCollapseClick}>{this.props.propertyKey}: <strong>{'[ '}</strong></div>
          {value.map((item, index) => this.renderItem(item, index, index < value.length - 1, this.props.path))}
          <div><strong>{' ]'}</strong>{hasNext ? ',' : null}</div>
        </div>
      );
    } else {
      return (
        <div className={isExactHighlightPath ? styles.highlightArray : styles.array}>
          <div onClick={this.onCollapseClick}><strong>{'[ '}</strong></div>
          {value.map((item, index) => this.renderItem(item, index, index < value.length - 1, this.props.path))}
          <div><strong>{' ]'}</strong>{hasNext ? ',' : null}</div>
        </div>
      );
    }
  }
}
@connect()
/**
 * Renders a scalar value (string/number/boolean/null). When the inspector's
 * `canEdit` option is on, clicking the value swaps in a JSONInput; submitting
 * fires the debugger's modelChanged signal and forwards the change over the
 * connector.
 */
class Value extends React.Component {
  static contextTypes = {
    options: React.PropTypes.object.isRequired
  }
  constructor(props) {
    super(props);
    this.state = {
      isEditing: false,
      // Snapshot the path: the shared props.path array is mutated by parents.
      path: props.path.slice()
    };
    this.onSubmit = this.onSubmit.bind(this);
    this.onBlur = this.onBlur.bind(this);
    this.onClick = this.onClick.bind(this);
  }
  shouldComponentUpdate(nextProps, nextState) {
    return (
      nextProps.value !== this.props.value ||
      nextState.isEditing !== this.state.isEditing ||
      nextProps.path !== this.props.path
    );
  }
  onClick() {
    // Only enter edit mode when the inspector allows editing.
    this.setState({
      isEditing: this.context.options.canEdit ? true : false
    });
  }
  onSubmit(value) {
    // Update the local debugger model...
    this.props.signals.debugger.modelChanged({
      path: this.state.path,
      value
    })
    this.setState({isEditing: false});
    // ...and propagate the change to the inspected application.
    connector.sendEvent('changeModel', {
      path: this.state.path,
      value: value
    });
  }
  onBlur() {
    this.setState({isEditing: false});
  }
  renderValue(value, hasNext) {
    const isExactHighlightPath = this.props.highlightPath && String(this.props.highlightPath) === String(this.props.path);
    if (this.state.isEditing) {
      return (
        <div className={isExactHighlightPath ? styles.highlightValue : null}>
          {this.props.propertyKey ? this.props.propertyKey + ': ' : <span/>}
          <span>
            <JSONInput
              value={value}
              onBlur={this.onBlur}
              onSubmit={this.onSubmit}/>
          </span>
          {hasNext ? ',' : null}
        </div>
      );
    } else {
      return (
        <div className={isExactHighlightPath ? styles.highlightValue : null}>
          {this.props.propertyKey ? this.props.propertyKey + ': ' : <span/>}
          <span onClick={this.onClick}>{isString(value) ? '"' + value + '"' : String(value)}</span>
          {hasNext ? ',' : null}
        </div>
      );
    }
  }
  render() {
    // Pick the style class matching the value's runtime type (string default).
    let className = styles.string;
    if (isNumber(this.props.value)) className = styles.number;
    if (isBoolean(this.props.value)) className = styles.boolean;
    if (isNull(this.props.value)) className = styles.null;
    return (
      <div className={className}>
        {this.renderValue(this.props.value, this.props.hasNext)}
      </div>
    );
  }
}
/**
 * Top-level inspector component: publishes the `expanded`/`canEdit` options
 * via child context and kicks off the recursive rendering of `value`.
 */
class Inspector extends React.Component {
  static childContextTypes = {
    options: React.PropTypes.object.isRequired
  }
  getChildContext() {
    const {expanded, canEdit} = this.props;
    return {
      options: {
        expanded: expanded || false,
        canEdit: canEdit || false
      }
    };
  }
  render() {
    const {value, path} = this.props;
    // Root node: no pending sibling, empty path, no property key.
    return renderType(value, false, [], null, path);
  }
}
export default Inspector;
<|file_name|>importeur.py<|end_file_name|><|fim▁begin|># -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit un objet 'importeur', chargé de contrôler le mécanisme
d'importation, initialisation, configuration, déroulement et arrêt
des modules primaires et secondaires.
On parcourt les sous-dossiers définis dans les variables :
- REP_PRIMAIRES : répertoire des modules primaires
- REP_SECONDAIRES : répertoire des modules secondaires
Il est possible de changer ces variables mais dans ce cas, une réorganisation
du projet s'impose.
Dans chaque module, on s'occupera de charger l'objet le représentant.
Par exemple, le module anaconf se définit comme suit :
* un package anaconf contenu dans REP_PRIMAIRES
* un fichier __init__.py
* une classe Anaconf
On créée un objet chargé de représenter le module. C'est cet objet qui
possède les méthodes génériques chargées d'initialiser, configurer, lancer
et arrêter un module. Les autres fichiers du module sont une boîte noir
inconnu pour l'importeur.
"""
import os
import sys
from abstraits.module import *
REP_PRIMAIRES = "primaires"
REP_SECONDAIRES = "secondaires"
class Importeur:
"""Classe chargée de créer un objet Importeur. Il contient sous la forme
d'attributs les modules primaires et secondaires chargés. Les modules
primaires et secondaires ne sont pas distingués.
On ne doit créer qu'un seul objet Importeur.
"""
nb_importeurs = 0
    def __init__(self):
        """Build the importer and enforce that it is a singleton.

        A class-level counter is incremented on every construction; a
        second instantiation raises RuntimeError.
        """
        Importeur.nb_importeurs += 1
        if Importeur.nb_importeurs > 1:
            raise RuntimeError("{0} importeurs ont été créés".format( \
                Importeur.nb_importeurs))
def __str__(self):
"""Retourne sous ue forme un peu plus lisible les modules importés."""
ret = []
for nom_module in self.__dict__.keys():
ret.append("{0}: {1}".format(nom_module, getattr(self, \
nom_module)))
ret.sort()
return "\n".join(ret)
    def tout_charger(self):
        """Load (import) every primary and secondary module.

        Modules are only imported here, never instantiated: for each
        package under REP_PRIMAIRES / REP_SECONDAIRES, the class named
        after the package (capitalized, e.g. ``anaconf`` -> ``Anaconf``)
        is stored as an attribute of the importer keyed by package name.
        """
        # Walk the primary-module packages first.
        for nom_package in os.listdir(os.getcwd() + "/" + REP_PRIMAIRES):
            if not nom_package.startswith("__"):
                package = __import__(REP_PRIMAIRES + "." + nom_package)
                # __import__ returns the top-level package; drill down to
                # the sub-package, then fetch its main class.
                module = getattr(getattr(package, nom_package), \
                        nom_package.capitalize())
                setattr(self, nom_package, module)
        # Same logic for the secondary modules.
        for nom_package in os.listdir(os.getcwd() + "/" + REP_SECONDAIRES):
            if not nom_package.startswith("__"):
                package = __import__(REP_SECONDAIRES + "." + nom_package)
                module = getattr(getattr(package, nom_package), \
                        nom_package.capitalize())
                setattr(self, nom_package, module)
    def tout_instancier(self, parser_cmd):
        """Instantiate every module that is still at the class stage.

        Each module attribute is either a class (not yet instantiated)
        or an instance; only classes are constructed, so this method can
        safely be called when some modules are already live.

        The importer itself (``self``) and the command parser are passed
        to every constructor so modules can interact with one another.
        """
        for nom_module, module in self.__dict__.items():
            if type(module) is type:  # still a class: build the instance
                setattr(self, nom_module, module(self, parser_cmd))
    def tout_configurer(self):
        """Configure every module that has been instantiated.

        Only modules whose ``statut`` is INSTANCIE are configured;
        modules still at the class stage have no ``statut`` attribute,
        so ``tout_instancier`` must have been called on all of them
        beforehand.
        """
        for module in self.__dict__.values():
            if module.statut == INSTANCIE:
                module.config()
    def tout_initialiser(self):
        """Initialise every module whose ``statut`` is CONFIGURE."""
        for module in self.__dict__.values():
            if module.statut == CONFIGURE:
                module.init()
    def tout_detruire(self):
        """Tear down every module whose ``statut`` is INITIALISE."""
        for module in self.__dict__.values():
            if module.statut == INITIALISE:
                module.detruire()
    def boucle(self):
        """Run one synchronous loop tick on every module.

        Primary and secondary modules are treated alike: each one's
        ``boucle()`` is invoked once per tick.
        """
        for module in self.__dict__.values():
            module.boucle()
def module_est_charge(self, nom):
"""Retourne True si le module est déjà chargé, False sinon.
On n'a pas besoin du type du module, les modules primaires
et secondaires étant stockés de la même façon.
Attention: un module peut être chargé sans être instancié,
configuré ou initialisé.
"""
return nom in self.__dict__.keys()
def charger_module(self, parser_cmd, m_type, nom):
"""Méthode permettant de charger un module en fonction de son type et
de son nom.
Si le module est déjà chargé, on ne fait rien.
Note: à la différence de tout_charger, cette méthode créée directement
l'objet gérant le module.
"""
if m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \<|fim▁hole|> print("Le module {0} est déjà chargé.".format(nom))
else:
package = __import__(rep + "." + nom)
module = getattr(getattr(package, nom), \
nom.capitalize())
setattr(self, nom, module(self, parser_cmd))
def decharger_module(self, m_type, nom):
"""Méthode permettant de décharger un module.
Elle se charge :
- d'appeler la méthode detruire du module
- de supprimer le module des modules dans sys.modules
- de supprimer l'instance du module dans self
"""
if m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \
.format(type))
nom_complet = rep + "." + nom
for cle in list(sys.modules.keys()):
if cle.startswith(nom_complet + "."):
del sys.modules[cle]
if self.module_est_charge(nom):
getattr(self, nom).detuire()
delattr(self, nom)
else:
print("{0} n'est pas dans les attributs de l'importeur".format(nom))
def recharger_module(self, parser_cmd, m_type, nom):
"""Cette méthode permet de recharger un module. Elle passe par :
- decharger_module
- charger_module
"""
self.decharger_module(parser_cmd, m_type, nom)
self.charger_module(m_type, nom)
def config_module(self, nom):
"""Méthode chargée de configurer ou reconfigurer un module."""
if self.module_est_charge(nom):
getattr(self, nom).config()
else:
print("{0} n'existe pas ou n'est pas chargé.".format(nom))
def init_module(self, nom):
"""Méthode chargée d'initialiser un module."""
if self.module_est_charge(nom) and getattr(self, nom).statut == \
CONFIGURE:
getattr(self, nom).init()
else:
print("{0} n'existe pas ou n'est pas configuré.".format(nom))<|fim▁end|> | .format(type))
if self.module_est_charge(nom): |
<|file_name|>graphcut_app.py<|end_file_name|><|fim▁begin|>import logging
import os
import sys
import tkinter
from tkinter import ttk
sys.path.append('../..')
import cv2
from src.image.imnp import ImageNP
from src.support.tkconvert import TkConverter
from src.view.template import TkViewer
from src.view.tkfonts import TkFonts
from src.view.tkframe import TkFrame, TkLabelFrame
from src.view.ttkstyle import TTKStyle, init_css
LOGGER = logging.getLogger(__name__)
THRESHOLD_OPTION = [(u'手動', 'manual'), ('Mean Adaptive', 'mean'), ('Gaussian Adaptive', 'gaussian')]
class GraphCutViewer(TkViewer):
    def __init__(self):
        """Build the viewer: fixed-size window, ttk styles, frame layout
        and menu bar."""
        super().__init__()
        # Display size (w, h) used for every generated placeholder image.
        self._im_w, self._im_h = 800, 533
        self._init_window(zoom=False)
        self._init_style()
        self._init_frame()
        self._init_menu()
    def _init_style(self):
        """Register the custom ttk styles used by the widgets.

        POSIX platforms need the 'alt' theme for ttk background colours
        to take effect; Windows keeps 'default'.
        """
        init_css()
        theme = 'default'
        if os.name == 'posix':
            theme = 'alt'
        TTKStyle('H4Padding.TLabelframe', theme=theme, background='gray82')
        TTKStyle('H4Padding.TLabelframe.Label', theme=theme, font=('', 16), background='gray82')
        TTKStyle('H2BlackBold.TLabel', theme=theme, font=('', 24, 'bold'), background='white', foreground='black')
        TTKStyle('H2RedBold.TLabel', theme=theme, font=('', 24, 'bold'), background='white', foreground='red')
        self.font = TkFonts()
# init frame
def _init_frame(self):
# root
self.frame_root = TkFrame(self.root, bg='white')
self.frame_root.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_root, 0, 1, 2)
self.set_all_grid_columnconfigure(self.frame_root, 0)
# head
self.frame_head = TkFrame(self.frame_root, bg='white')
self.frame_head.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_head, 0)
self.set_all_grid_columnconfigure(self.frame_head, 0)
# body
self.frame_body = TkFrame(self.frame_root, bg='black')
self.frame_body.grid(row=1, column=0, sticky='news')
self.set_all_grid_columnconfigure(self.frame_body, 0, 1)
self.set_all_grid_rowconfigure(self.frame_body, 0)
# body > panel
self.frame_panel = TkFrame(self.frame_body, bg='light pink')
self.frame_panel.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_panel, 0)
self.set_all_grid_columnconfigure(self.frame_panel, 0)
# body > display
self.frame_display = TkFrame(self.frame_body, bg='royal blue')
self.frame_display.grid(row=0, column=1, sticky='news')
self.set_all_grid_rowconfigure(self.frame_display, 0)
self.set_all_grid_columnconfigure(self.frame_display, 0)
# footer
self.frame_footer = TkFrame(self.frame_root, bg='gray82')
self.frame_footer.grid(row=2, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_footer, 0, 1)
self.set_all_grid_columnconfigure(self.frame_footer, 0)
# footer > panel setting
self.frame_panel_setting = ttk.LabelFrame(self.frame_footer, text=u'輸入圖片選項: ', style='H4Padding.TLabelframe')
self.frame_panel_setting.grid(row=0, column=0, sticky='news', pady=10)
self.set_all_grid_rowconfigure(self.frame_panel_setting, 0, 1)
self.set_all_grid_columnconfigure(self.frame_panel_setting, 0)
# footer > panel setting > template option
self.frame_template_options = TkFrame(self.frame_panel_setting, bg='gray82', pady=5)
self.frame_template_options.grid(row=0, column=0, sticky='news')
# footer > panel setting > gamma
self.frame_gamma = TkFrame(self.frame_panel_setting, bg='gray82', pady=5)
self.frame_gamma.grid(row=1, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_gamma, 0)
self.set_all_grid_columnconfigure(self.frame_gamma, 0)
# footer > display setting
self.frame_display_setting = ttk.LabelFrame(self.frame_footer, text=u'輸出圖片選項: ', style='H4Padding.TLabelframe')
self.frame_display_setting.grid(row=1, column=0, sticky='news', pady=10)
self.set_all_grid_rowconfigure(self.frame_display_setting, 0)<|fim▁hole|> # footer > display setting > threshold options
self.frame_threshold_options = TkFrame(self.frame_display_setting, bg='gray82', pady=5)
self.frame_threshold_options.grid(row=0, column=0, sticky='news')
# footer > display setting > manual threshold
self.frame_manual_threshold = TkFrame(self.frame_display_setting, bg='gray82', pady=5)
self.frame_manual_threshold.grid(row=1, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_manual_threshold, 0)
self.set_all_grid_columnconfigure(self.frame_manual_threshold, 0)
self._init_widget_head()
self._init_widget_body()
self._init_widget_footer()
# init head widget
    def _init_widget_head(self):
        """Populate the header with the mode and resize-info labels."""
        self.set_all_grid_rowconfigure(self.frame_head, 0, 1)
        self.label_state = ttk.Label(self.frame_head, text=u'現在模式: N/A', style='H2.TLabel')
        self.label_state.grid(row=0, column=0, sticky='w')
        self.label_resize = ttk.Label(self.frame_head, text=u'原有尺寸 N/A-> 顯示尺寸 N/A', style='H2.TLabel')
        self.label_resize.grid(row=1, column=0, sticky='w')
# init body widget
    def _init_widget_body(self):
        """Fill the body: input panel on the left, output images on the
        right, all seeded with checkerboard placeholders."""
        # panel: title label above a full-size checkerboard placeholder
        self.set_all_grid_rowconfigure(self.frame_panel, 0, 1)
        self.label_panel = ttk.Label(self.frame_panel, text='Input Panel', style='H2.TLabel')
        self.label_panel.grid(row=0, column=0, sticky='ns')
        self.photo_panel = ImageNP.generate_checkboard((self._im_h, self._im_w), block_size=10)
        self.photo_panel = TkConverter.ndarray_to_photo(self.photo_panel)
        self.label_panel_image = ttk.Label(self.frame_panel, image=self.photo_panel)
        self.label_panel_image.grid(row=1, column=0, sticky='ns')
        # display: 2x2 grid of half-height images (fl/fr/bl/br) plus one
        # full-height body image spanning both rows in the third column
        self.label_display = ttk.Label(self.frame_display, text='Display', style='H2.TLabel')
        self.label_display.grid(row=0, column=0, columnspan=3)
        self.set_all_grid_rowconfigure(self.frame_display, 0, 1, 2)
        self.set_all_grid_columnconfigure(self.frame_display, 0, 1, 2)
        self.photo_small = ImageNP.generate_checkboard((self._im_h//2, self._im_w//3), 10)
        self.photo_small = TkConverter.ndarray_to_photo(self.photo_small)
        self.photo_large = ImageNP.generate_checkboard((self._im_h, self._im_w//3), 10)
        self.photo_large = TkConverter.ndarray_to_photo(self.photo_large)
        self.label_fl_image = ttk.Label(self.frame_display, image=self.photo_small)
        self.label_fl_image.grid(row=1, column=0)
        self.label_fr_image = ttk.Label(self.frame_display, image=self.photo_small)
        self.label_fr_image.grid(row=1, column=1)
        self.label_bl_image = ttk.Label(self.frame_display, image=self.photo_small)
        self.label_bl_image.grid(row=2, column=0)
        self.label_br_image = ttk.Label(self.frame_display, image=self.photo_small)
        self.label_br_image.grid(row=2, column=1)
        self.label_body_image = ttk.Label(self.frame_display, image=self.photo_large)
        self.label_body_image.grid(row=1, column=2, rowspan=2)
# init footer widget
    def _init_widget_footer(self):
        """Build the footer controls: input-side filter/gamma settings
        and output-side threshold settings."""
        # input panel: template filtering option (floodfill on/off)
        self.label_template = ttk.Label(self.frame_template_options, text=u'過濾方式: ', style='H5.TLabel')
        self.label_template.grid(row=0, column=0, sticky='w')
        self.val_checkbtn_floodfill = tkinter.StringVar()
        self.checkbtn_floodfill = ttk.Checkbutton(
            self.frame_template_options,
            text=u'floodfill',
            variable=self.val_checkbtn_floodfill,
            onvalue='on', offvalue='off',
            style='H5.TCheckbutton'
        )
        self.checkbtn_floodfill.grid(row=0, column=1, sticky='w')
        # input panel: gamma (contrast) slider, range 0..2.5, default 1.0
        self.label_gamma = ttk.Label(self.frame_gamma, text=u'調整對比 ({:.2f}): '.format(1.), style='H5.TLabel')
        self.label_gamma.grid(row=0, column=0, sticky='w')
        self.val_scale_gamma = tkinter.DoubleVar()
        self.val_scale_gamma.set(1.0)
        self.scale_gamma = ttk.Scale(self.frame_gamma,
                                     orient=tkinter.HORIZONTAL,
                                     length=self._im_w*2,
                                     from_=0, to=2.5,
                                     variable=self.val_scale_gamma,
                                     style='Gray.Horizontal.TScale')
        self.scale_gamma.state(('active', '!disabled'))
        self.scale_gamma.grid(row=0, column=1, sticky='w')
        # output: threshold mode radio buttons (manual / mean / gaussian)
        # NOTE(review): the label/radiobutton .grid() calls are commented
        # out, so these widgets are created but never shown — confirm
        # whether hiding them is intentional.
        self.label_threshold_options = ttk.Label(self.frame_threshold_options, text=u'門檻值選項: ', style='H5.TLabel')
        # self.label_threshold_options.grid(row=0, column=0, sticky='w')
        self.val_threshold_option = tkinter.StringVar()
        self.val_threshold_option.set(THRESHOLD_OPTION[0][-1])
        self.radiobtn_threshold_options = []
        for i, op in enumerate(THRESHOLD_OPTION):
            text, val = op
            radiobtn = ttk.Radiobutton(self.frame_threshold_options,
                                       text=text,
                                       variable=self.val_threshold_option,
                                       value=val,
                                       style='H5.TRadiobutton')
            # radiobtn.grid(row=0, column=i+1, sticky='w', padx=10)
            self.radiobtn_threshold_options.append(radiobtn)
        # output: manual threshold slider, range 1..254, default 250
        self.label_manual_threshold = ttk.Label(self.frame_manual_threshold, text=u'門檻值 ({:.2f}): '.format(250), style='H5.TLabel')
        self.label_manual_threshold.grid(row=0, column=0, sticky='w')
        self.val_manual_threshold = tkinter.DoubleVar()
        self.val_manual_threshold.set(250)
        self.scale_manual_threshold = ttk.Scale(self.frame_manual_threshold,
                                                orient=tkinter.HORIZONTAL,
                                                length=self._im_w*2,
                                                from_=1, to=254,
                                                variable=self.val_manual_threshold,
                                                style='Gray.Horizontal.TScale')
        self.scale_manual_threshold.state(('active', '!disabled'))
        self.scale_manual_threshold.grid(row=0, column=1, sticky='news', columnspan=len(THRESHOLD_OPTION))
# init menu bar
    def _init_menu(self):
        """Create the menu bar with a (currently empty) File cascade."""
        # root menu bar
        self.menu_root = tkinter.Menu(self.root)
        self.root.config(menu=self.menu_root)
        # submenu intended for image loading; entries are added elsewhere
        self.menu_load_img = tkinter.Menu(self.menu_root)
        # attach the cascade to the bar
        self.menu_root.add_cascade(label=u'File', menu=self.menu_load_img)
if __name__ == '__main__':
    # Manual smoke test: verbose logging to stdout, then run the GUI loop.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(filename)12s:L%(lineno)3s [%(levelname)8s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        stream=sys.stdout
    )
    graphcut_viewer = GraphCutViewer()
    graphcut_viewer.mainloop()
|
<|file_name|>barb_demo.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import numpy as np
import matplotlib.pyplot as plt
# read in data.
file = open('fcover.dat','r')
ul=[];vl=[];pl=[]
nlons=73; nlats=73
dellat = 2.5; dellon = 5.
for line in file.readlines():
l = line.replace('\n','').split()
ul.append(float(l[0]))
vl.append(float(l[1]))
pl.append(float(l[2]))
u = np.reshape(np.array(ul,np.float32),(nlats,nlons))
v = np.reshape(np.array(vl,np.float32),(nlats,nlons))
p = np.reshape(np.array(pl,np.float32),(nlats,nlons))
lats1 = -90.+dellat*np.arange(nlats)
lons1 = -180.+dellon*np.arange(nlons)
lons, lats = np.meshgrid(lons1, lats1)
# convert from mps to knots.
u = 1.944*u; v = 1.944*v
# plot barbs in map projection coordinates.
# stereogrpaphic projection.
m = Basemap(width=10000000,height=10000000,lon_0=-90,lat_0=45.,lat_ts=45,
resolution='l',projection='stere')
x,y = m(lons,lats)
# transform from spherical to map projection coordinates (rotation
# and interpolation).
nxv = 25; nyv = 25
udat, vdat, xv, yv = m.transform_vector(u,v,lons1,lats1,nxv,nyv,returnxy=True)
# create a figure, add an axes.
fig=plt.figure(figsize=(8,6))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# plot color-filled contours over map
levs = np.arange(960,1051,4)
cs1 = m.contour(x,y,p,levs,colors='k',linewidths=0.5)
cs2 = m.contourf(x,y,p,levs)
# plot barbs.
m.barbs(xv,yv,udat,vdat,length=6,barbcolor='k',flagcolor='r',linewidth=0.5)
# plot colorbar for pressure
m.colorbar(pad='12%') # draw colorbar
# draw coastlines
m.drawcoastlines()
# draw parallels
m.drawparallels(np.arange(0,81,20),labels=[1,1,0,0])
# draw meridians
m.drawmeridians(np.arange(-180,0,20),labels=[0,0,0,1])
plt.title('Surface Wind Barbs and Pressure (NH)')
# stereogrpaphic projection (SH).
# 'flip_barb' flag is automatically set for SH data, so that
# barbs point toward lower pressure (in both Hemisphere).
m = Basemap(width=10000000,height=10000000,lon_0=-90,lat_0=-45.,lat_ts=-45,
resolution='l',projection='stere')
x,y = m(lons,lats)
# transform from spherical to map projection coordinates (rotation
# and interpolation).
nxv = 25; nyv = 25
udat, vdat, xv, yv = m.transform_vector(u,v,lons1,lats1,nxv,nyv,returnxy=True)
# create a figure, add an axes.
fig=plt.figure(figsize=(8,6))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# plot color-filled contours over map
levs = np.arange(960,1051,4)
cs1 = m.contour(x,y,p,levs,colors='k',linewidths=0.5)
cs2 = m.contourf(x,y,p,levs)
# plot barbs.
m.barbs(xv,yv,udat,vdat,length=6,barbcolor='k',flagcolor='r',linewidth=0.5)
# plot colorbar for pressure
m.colorbar(pad='12%') # draw colorbar
# draw coastlines
m.drawcoastlines()
# draw parallels
m.drawparallels(np.arange(-80,-19,20),labels=[1,1,0,0])
# draw meridians
m.drawmeridians(np.arange(-180,0,20),labels=[0,0,1,0])
plt.title('Surface Wind Barbs and Pressure (SH)',y=1.04)
plt.show()<|fim▁end|> | from mpl_toolkits.basemap import Basemap |
<|file_name|>setup_jruby.py<|end_file_name|><|fim▁begin|>import subprocess
import sys
import os<|fim▁hole|>
def start(args, logfile, errfile):
setup_util.replace_text("rails-stripped/config/database.yml", "host: .*", "host: " + args.database_host)
try:
subprocess.check_call("cp Gemfile-jruby Gemfile", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
subprocess.check_call("cp Gemfile-jruby.lock Gemfile.lock", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
subprocess.Popen("rvm jruby-1.7.8 do bundle exec torqbox -b 0.0.0.0 -E production", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'torqbox' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
subprocess.check_call("rm Gemfile", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
subprocess.check_call("rm Gemfile.lock", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1<|fim▁end|> | import setup_util |
<|file_name|>DeferredLogTests.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2012-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.logging;
import org.apache.commons.logging.Log;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
/**
* Tests for {@link DeferredLog}.
*
* @author Phillip Webb
*/
public class DeferredLogTests {
private DeferredLog deferredLog = new DeferredLog();
private Object message = "Message";
private Throwable throwable = new IllegalStateException();
private Log log = mock(Log.class);
	// Before replay, every level must report "enabled": DeferredLog's job
	// is to capture all messages so none are lost before the real logging
	// system is initialised.
	@Test
	public void isTraceEnabled() throws Exception {
		assertThat(this.deferredLog.isTraceEnabled()).isTrue();
	}
	@Test
	public void isDebugEnabled() throws Exception {
		assertThat(this.deferredLog.isDebugEnabled()).isTrue();
	}
	@Test
	public void isInfoEnabled() throws Exception {
		assertThat(this.deferredLog.isInfoEnabled()).isTrue();
	}
	@Test
	public void isWarnEnabled() throws Exception {
		assertThat(this.deferredLog.isWarnEnabled()).isTrue();
	}
	@Test
	public void isErrorEnabled() throws Exception {
		assertThat(this.deferredLog.isErrorEnabled()).isTrue();
	}
@Test<|fim▁hole|> @Test
	// Each pair of tests below checks the same capture/replay contract at
	// one log level: a message recorded on the DeferredLog is forwarded
	// verbatim to the destination Log by replayTo(), with a null
	// throwable when none was supplied.
	public void trace() throws Exception {
		this.deferredLog.trace(this.message);
		this.deferredLog.replayTo(this.log);
		verify(this.log).trace(this.message, null);
	}
	@Test
	public void traceWithThrowable() throws Exception {
		this.deferredLog.trace(this.message, this.throwable);
		this.deferredLog.replayTo(this.log);
		verify(this.log).trace(this.message, this.throwable);
	}
	@Test
	public void debug() throws Exception {
		this.deferredLog.debug(this.message);
		this.deferredLog.replayTo(this.log);
		verify(this.log).debug(this.message, null);
	}
	@Test
	public void debugWithThrowable() throws Exception {
		this.deferredLog.debug(this.message, this.throwable);
		this.deferredLog.replayTo(this.log);
		verify(this.log).debug(this.message, this.throwable);
	}
	@Test
	public void info() throws Exception {
		this.deferredLog.info(this.message);
		this.deferredLog.replayTo(this.log);
		verify(this.log).info(this.message, null);
	}
	@Test
	public void infoWithThrowable() throws Exception {
		this.deferredLog.info(this.message, this.throwable);
		this.deferredLog.replayTo(this.log);
		verify(this.log).info(this.message, this.throwable);
	}
	@Test
	public void warn() throws Exception {
		this.deferredLog.warn(this.message);
		this.deferredLog.replayTo(this.log);
		verify(this.log).warn(this.message, null);
	}
	@Test
	public void warnWithThrowable() throws Exception {
		this.deferredLog.warn(this.message, this.throwable);
		this.deferredLog.replayTo(this.log);
		verify(this.log).warn(this.message, this.throwable);
	}
	@Test
	public void error() throws Exception {
		this.deferredLog.error(this.message);
		this.deferredLog.replayTo(this.log);
		verify(this.log).error(this.message, null);
	}
	@Test
	public void errorWithThrowable() throws Exception {
		this.deferredLog.error(this.message, this.throwable);
		this.deferredLog.replayTo(this.log);
		verify(this.log).error(this.message, this.throwable);
	}
	@Test
	public void fatal() throws Exception {
		this.deferredLog.fatal(this.message);
		this.deferredLog.replayTo(this.log);
		verify(this.log).fatal(this.message, null);
	}
	@Test
	public void fatalWithThrowable() throws Exception {
		this.deferredLog.fatal(this.message, this.throwable);
		this.deferredLog.replayTo(this.log);
		verify(this.log).fatal(this.message, this.throwable);
	}
	@Test
	public void clearsOnReplayTo() throws Exception {
		// replayTo must drain the buffered messages: a second replay to a
		// fresh mock must deliver nothing.
		this.deferredLog.info("1");
		this.deferredLog.fatal("2");
		Log log2 = mock(Log.class);
		this.deferredLog.replayTo(this.log);
		this.deferredLog.replayTo(log2);
		verify(this.log).info("1", null);
		verify(this.log).fatal("2", null);
		verifyNoMoreInteractions(this.log);
		verifyZeroInteractions(log2);
	}
}<|fim▁end|> | public void isFatalEnabled() throws Exception {
assertThat(this.deferredLog.isFatalEnabled()).isTrue();
}
|
<|file_name|>job.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from ..base import ComponentAPI
class CollectionsJOB(object):
"""Collections of JOB APIS"""
def __init__(self, client):
self.client = client
self.execute_task = ComponentAPI(
client=self.client, method='POST', path='/api/c/compapi/job/execute_task/',
description=u'根据作业模板ID启动作业',
)
self.fast_execute_script = ComponentAPI(
client=self.client, method='POST', path='/api/c/compapi/job/fast_execute_script/',
description=u'快速执行脚本',
)
self.fast_push_file = ComponentAPI(
client=self.client, method='POST', path='/api/c/compapi/job/fast_push_file/',
description=u'快速分发文件',
)<|fim▁hole|> )
self.get_task_detail = ComponentAPI(
client=self.client, method='GET', path='/api/c/compapi/job/get_task_detail/',
description=u'查询作业模板详情',
)
self.get_task_ip_log = ComponentAPI(
client=self.client, method='GET', path='/api/c/compapi/job/get_task_ip_log/',
description=u'根据作业实例ID查询作业执行日志',
)
self.get_task_result = ComponentAPI(
client=self.client, method='GET', path='/api/c/compapi/job/get_task_result/',
description=u'根据作业实例 ID 查询作业执行状态',
)<|fim▁end|> | self.get_agent_status = ComponentAPI(
client=self.client, method='POST', path='/api/c/compapi/job/get_agent_status/',
description=u'查询Agent状态', |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Field, Layout, Submit
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import User, UserProfile
class UserForm(forms.ModelForm):
    """
    Edit username.  Rejects unchanged usernames and duplicates.
    """
    def clean_username(self):
        # The form is only for changing the username, so submitting the
        # current one is treated as an error.
        if self.initial['username'] == self.cleaned_data['username']:
            raise forms.ValidationError(
                _("Please choose a different username."))
        # Uniqueness check: a successful get() means the name is taken.
        # NOTE(review): mixes forms.ValidationError (above) with the bare
        # django.core.exceptions.ValidationError (below); both work, but
        # one style should be picked.
        try:
            User.objects.get(username=self.cleaned_data['username'])
            raise ValidationError(
                _('A user with that username already exists.'))
        except User.DoesNotExist:
            pass
        return self.cleaned_data['username']
    class Meta:
        model = User
        fields = ['username']
class UserProfileForm(forms.ModelForm):
    """
    Edit user profile: name, location, website, occupation flags and bio.
    """
    # avatar = forms.ImageField(required=False) # TODO
    name = forms.CharField(label=_('Name'), required=False, max_length=255)
    location = forms.CharField(label=_('Location'),
                               required=False, max_length=255)
    website = forms.CharField(label=_('Website'),
                              required=False, max_length=255)
    bio = forms.CharField(label=_('About me'),
                          required=False,
                          widget=forms.Textarea())
    # NOTE(review): helper is a class attribute, shared by every instance
    # of this form — confirm this is intended (crispy-forms helpers are
    # often created per-instance in __init__).
    helper = FormHelper()
    helper.form_class = 'users-update'
    helper.form_action = 'users:update'
    helper.layout = Layout(
        # 'avatar', # TODO
        'name',
        'location',
        'website',
        # Occupation checkboxes come from the model fields; they are
        # grouped under a hand-built "Occupation" label.
        HTML('<label for="id_status" class="control-label ">'),
        HTML(_('Occupation')),
        HTML('</label>'),
        Div('student', 'assistant', 'professional', 'professor',
            css_class='users-update-status'),
        Field('bio', rows="3", css_class='input-xlarge'),
        FormActions(
            Submit('submit', _('Update Profile'), css_class="btn-primary"),
        ),
    )
    class Meta:
        model = UserProfile
        exclude = ['user', 'avatar']
DELETE_CONFIRMATION_PHRASE = _('delete my account')
class UserDeleteForm(forms.ModelForm):
confirmation_phrase_en = _('To verify, type "<span class='
'"confirmation-phrase do-not-copy-me">'
'delete my account</span>" below:')
form_labels = {
'sudo_login': _('Your username or email:'),
'confirmation_phrase': confirmation_phrase_en,
'sudo_password': _('Confirm your password:'),
}
sudo_login = forms.CharField(
label=form_labels['sudo_login'],
required=True,
max_length=255
)
confirmation_phrase = forms.CharField(
label=form_labels['confirmation_phrase'],
required=True,
max_length=255
)
sudo_password = forms.CharField(
label=form_labels['sudo_password'],
required=True,
max_length=128,<|fim▁hole|> helper.form_class = 'users-delete'
helper.form_action = 'users:account'
helper.layout = Layout(
Field('sudo_login', css_class='form-control'),
Field('confirmation_phrase', css_class='form-control'),
Field('sudo_password', css_class='form-control'),
FormActions(
Submit('submit_delete', _('Delete your account'),
css_class="btn btn-danger"),
),
)
def clean_sudo_login(self):
login = self.cleaned_data.get("sudo_login")
if login != self.user.username and login != self.user.email:
raise forms.ValidationError(_("The login and/or password you "
"specified are not correct."))
return self.cleaned_data["sudo_login"]
def clean_confirmation_phrase(self):
confirmation_phrase = self.cleaned_data.get("confirmation_phrase")
if str(DELETE_CONFIRMATION_PHRASE) != confirmation_phrase:
raise forms.ValidationError(
_("Confirmation phrase is not correct."))
return self.cleaned_data["confirmation_phrase"]
def clean_sudo_password(self):
password = self.cleaned_data.get("sudo_password")
if not self.user.check_password(password):
raise forms.ValidationError(_("The login and/or password you "
"specified are not correct."))
return self.cleaned_data["sudo_password"]
def is_valid(self):
self.user = self.instance
return super(UserDeleteForm, self).is_valid()
class Meta:
model = User
fields = []<|fim▁end|> | widget=forms.PasswordInput
)
helper = FormHelper() |
<|file_name|>DisplayDefaultSignatureFileVersionCommand.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2016, The National Archives <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the The National Archives nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package uk.gov.nationalarchives.droid.command.action;
import java.io.PrintWriter;
import java.util.Map;
import uk.gov.nationalarchives.droid.command.i18n.I18N;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureFileException;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureFileInfo;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureManager;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureType;
/**
* @author rflitcroft
*
*/
public class DisplayDefaultSignatureFileVersionCommand implements DroidCommand {
private PrintWriter printWriter;
private SignatureManager signatureManager;
/**
 * Prints the default signature file version for every registered
 * signature type, one line per signature file.
 *
 * @throws CommandExecutionException if the default signatures cannot be read
 */
@Override
public void execute() throws CommandExecutionException {
    final Map<SignatureType, SignatureFileInfo> defaults;
    try {
        defaults = signatureManager.getDefaultSignatures();
    } catch (SignatureFileException sfe) {
        throw new CommandExecutionException(sfe);
    }
    for (SignatureFileInfo sigInfo : defaults.values()) {
        printWriter.println(I18N.getResource(I18N.DEFAULT_SIGNATURE_VERSION,
                sigInfo.getType(), sigInfo.getVersion(), sigInfo.getFile().getName()));
    }
}
/**
* @param printWriter the printWriter to set
<|fim▁hole|>
/**
 * Sets the signature manager used to look up the default signature files.
 *
 * @param signatureManager the signatureManager to set
 */
public void setSignatureManager(SignatureManager signatureManager) {
    this.signatureManager = signatureManager;
}
}<|fim▁end|> | */
public void setPrintWriter(PrintWriter printWriter) {
this.printWriter = printWriter;
}
|
<|file_name|>move-2-unique.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
// Simple three-field struct used to exercise move semantics of boxed values.
struct X { x: isize, y: isize, z: isize }
pub fn main() {<|fim▁hole|> let y = x;
assert_eq!(y.y, 2);
}<|fim▁end|> | let x: Box<_> = box X{x: 1, y: 2, z: 3}; |
<|file_name|>MessagesAdapter.java<|end_file_name|><|fim▁begin|>/*
* This file is part of eduVPN.
*
* eduVPN is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* eduVPN is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with eduVPN. If not, see <http://www.gnu.org/licenses/>.
*/
package nl.eduvpn.app.adapter;
import android.content.Context;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import nl.eduvpn.app.R;
import nl.eduvpn.app.adapter.viewholder.MessageViewHolder;
import nl.eduvpn.app.entity.message.Maintenance;
import nl.eduvpn.app.entity.message.Message;
import nl.eduvpn.app.entity.message.Notification;
import nl.eduvpn.app.utils.FormattingUtils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Adapter for serving the message views inside a list.
* Created by Daniel Zolnai on 2016-10-19.
*/
public class MessagesAdapter extends RecyclerView.Adapter<MessageViewHolder> {
private List<Message> _userMessages;
private List<Message> _systemMessages;
private List<Message> _mergedList = new ArrayList<>();
private LayoutInflater _layoutInflater;
/**
 * Sets the user-specific messages and rebuilds the merged, sorted list.
 *
 * @param userMessages the user messages to display (a null value clears them)
 */
public void setUserMessages(List<Message> userMessages) {
    _userMessages = userMessages;
    _regenerateList();
}
/**
 * Sets the system messages and rebuilds the merged, sorted list.
 *
 * @param systemMessages the system messages to display (a null value clears them)
 */
public void setSystemMessages(List<Message> systemMessages) {
    _systemMessages = systemMessages;
    _regenerateList();
}
/**
 * Rebuilds the merged message list from the user and system message
 * sources, sorts it, and notifies the adapter that the data changed.
 */
private void _regenerateList() {
    final List<Message> combined = new ArrayList<>();
    if (_userMessages != null) {
        combined.addAll(_userMessages);
    }
    if (_systemMessages != null) {
        combined.addAll(_systemMessages);
    }
    Collections.sort(combined);
    _mergedList.clear();
    _mergedList.addAll(combined);
    notifyDataSetChanged();
}
@Override
public MessageViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
    // Lazily cache the inflater on first use; the unsynchronized null check
    // assumes adapter callbacks run on a single (UI) thread — standard
    // RecyclerView behavior.
    if (_layoutInflater == null) {
        _layoutInflater = LayoutInflater.from(parent.getContext());
    }
    return new MessageViewHolder(_layoutInflater.inflate(R.layout.list_item_message, parent, false));
}
@Override
public void onBindViewHolder(MessageViewHolder holder, int position) {
    Message message = _mergedList.get(position);
    if (message instanceof Maintenance) {
        // Maintenance entries show an icon plus a formatted description.
        holder.messageIcon.setVisibility(View.VISIBLE);
        Context context = holder.messageText.getContext();
        String maintenanceText = FormattingUtils.getMaintenanceText(context, (Maintenance)message);
        holder.messageText.setText(maintenanceText);
    } else if (message instanceof Notification) {
        // Notifications are plain text with the icon hidden.
        holder.messageIcon.setVisibility(View.GONE);
        holder.messageText.setText(((Notification)message).getContent());
    } else {
        // Fail fast: an unknown Message subtype indicates a programming error.
        throw new RuntimeException("Unexpected message type!");
    }
}
<|fim▁hole|> public int getItemCount() {
return _mergedList.size();
}
}<|fim▁end|> | @Override |
<|file_name|>types.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-gogo.
// source: types.proto
// DO NOT EDIT!
/*
Package api is a generated protocol buffer package.
It is generated from these files:
types.proto
specs.proto
objects.proto
control.proto
dispatcher.proto
ca.proto
snapshot.proto
raft.proto
health.proto
resource.proto
It has these top-level messages:
Version
Annotations
Resources
ResourceRequirements
Platform
PluginDescription
EngineDescription
NodeDescription
RaftMemberStatus
NodeStatus
Image
Mount
RestartPolicy
UpdateConfig
UpdateStatus
ContainerStatus
TaskStatus
NetworkAttachmentConfig
IPAMConfig
PortConfig
Driver
IPAMOptions
Peer
WeightedPeer
IssuanceStatus
AcceptancePolicy
ExternalCA
CAConfig
OrchestrationConfig
TaskDefaults
DispatcherConfig
RaftConfig
Placement
JoinTokens
RootCA
Certificate
EncryptionKey
ManagerStatus
SecretReference
NodeSpec
ServiceSpec
ReplicatedService
GlobalService
TaskSpec
NetworkAttachmentSpec
ContainerSpec
EndpointSpec
NetworkSpec
ClusterSpec
SecretSpec
Meta
Node
Service
Endpoint
Task
NetworkAttachment
Network
Cluster
Secret
GetNodeRequest
GetNodeResponse
ListNodesRequest
ListNodesResponse
UpdateNodeRequest
UpdateNodeResponse
RemoveNodeRequest
RemoveNodeResponse
GetTaskRequest
GetTaskResponse
RemoveTaskRequest
RemoveTaskResponse
ListTasksRequest
ListTasksResponse
CreateServiceRequest
CreateServiceResponse
GetServiceRequest
GetServiceResponse
UpdateServiceRequest
UpdateServiceResponse
RemoveServiceRequest
RemoveServiceResponse
ListServicesRequest
ListServicesResponse
CreateNetworkRequest
CreateNetworkResponse
GetNetworkRequest
GetNetworkResponse
RemoveNetworkRequest
RemoveNetworkResponse
ListNetworksRequest
ListNetworksResponse
GetClusterRequest
GetClusterResponse
ListClustersRequest
ListClustersResponse
JoinTokenRotation
UpdateClusterRequest
UpdateClusterResponse
GetSecretRequest
GetSecretResponse
ListSecretsRequest
ListSecretsResponse
CreateSecretRequest
CreateSecretResponse
RemoveSecretRequest
RemoveSecretResponse
SessionRequest
SessionMessage
HeartbeatRequest
HeartbeatResponse
UpdateTaskStatusRequest
UpdateTaskStatusResponse
TasksRequest
TasksMessage
AssignmentsRequest
AssignmentsMessage
NodeCertificateStatusRequest
NodeCertificateStatusResponse
IssueNodeCertificateRequest
IssueNodeCertificateResponse
GetRootCACertificateRequest
GetRootCACertificateResponse
StoreSnapshot
ClusterSnapshot
Snapshot
RaftMember
JoinRequest
JoinResponse
LeaveRequest
LeaveResponse
ProcessRaftMessageRequest
ProcessRaftMessageResponse
ResolveAddressRequest
ResolveAddressResponse
InternalRaftRequest
StoreAction
HealthCheckRequest
HealthCheckResponse
AttachNetworkRequest
AttachNetworkResponse
DetachNetworkRequest
DetachNetworkResponse
*/
package api
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import docker_swarmkit_v1 "github.com/docker/swarmkit/api/timestamp"
import docker_swarmkit_v11 "github.com/docker/swarmkit/api/duration"
import _ "github.com/gogo/protobuf/gogoproto"
import os "os"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// TaskState enumerates the states that a task progresses through within an
// agent. States are designed to be monotonically increasing, such that if two
// states are seen by a task, the greater of the new represents the true state.
type TaskState int32
const (
TaskStateNew TaskState = 0
TaskStateAllocated TaskState = 64
TaskStatePending TaskState = 128
TaskStateAssigned TaskState = 192
TaskStateAccepted TaskState = 256
TaskStatePreparing TaskState = 320
TaskStateReady TaskState = 384
TaskStateStarting TaskState = 448
TaskStateRunning TaskState = 512
TaskStateCompleted TaskState = 576
TaskStateShutdown TaskState = 640
TaskStateFailed TaskState = 704
TaskStateRejected TaskState = 768
)
var TaskState_name = map[int32]string{
0: "NEW",
64: "ALLOCATED",
128: "PENDING",
192: "ASSIGNED",
256: "ACCEPTED",
320: "PREPARING",
384: "READY",
448: "STARTING",
512: "RUNNING",
576: "COMPLETE",
640: "SHUTDOWN",
704: "FAILED",
768: "REJECTED",
}
var TaskState_value = map[string]int32{
"NEW": 0,
"ALLOCATED": 64,
"PENDING": 128,
"ASSIGNED": 192,
"ACCEPTED": 256,
"PREPARING": 320,
"READY": 384,
"STARTING": 448,
"RUNNING": 512,
"COMPLETE": 576,
"SHUTDOWN": 640,
"FAILED": 704,
"REJECTED": 768,
}
// String returns the proto-registered name of the TaskState value
// (e.g. "RUNNING"). NOTE(review): generated code — regenerate from
// types.proto rather than hand-editing.
func (x TaskState) String() string {
	return proto.EnumName(TaskState_name, int32(x))
}
func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
type NodeRole int32
const (
NodeRoleWorker NodeRole = 0
NodeRoleManager NodeRole = 1
)
var NodeRole_name = map[int32]string{
0: "WORKER",
1: "MANAGER",
}
var NodeRole_value = map[string]int32{
"WORKER": 0,
"MANAGER": 1,
}
func (x NodeRole) String() string {
return proto.EnumName(NodeRole_name, int32(x))
}
func (NodeRole) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
type RaftMemberStatus_Reachability int32
const (
// Unknown indicates that the manager state cannot be resolved
RaftMemberStatus_UNKNOWN RaftMemberStatus_Reachability = 0
// Unreachable indicates that the node cannot be contacted by other
// raft cluster members.
RaftMemberStatus_UNREACHABLE RaftMemberStatus_Reachability = 1
// Reachable indicates that the node is healthy and reachable
// by other members.
RaftMemberStatus_REACHABLE RaftMemberStatus_Reachability = 2
)
var RaftMemberStatus_Reachability_name = map[int32]string{
0: "UNKNOWN",
1: "UNREACHABLE",
2: "REACHABLE",
}
var RaftMemberStatus_Reachability_value = map[string]int32{
"UNKNOWN": 0,
"UNREACHABLE": 1,
"REACHABLE": 2,
}
func (x RaftMemberStatus_Reachability) String() string {
return proto.EnumName(RaftMemberStatus_Reachability_name, int32(x))
}
func (RaftMemberStatus_Reachability) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{8, 0}
}
// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`.
type NodeStatus_State int32
const (
// Unknown indicates the node state cannot be resolved.
NodeStatus_UNKNOWN NodeStatus_State = 0
// Down indicates the node is down.
NodeStatus_DOWN NodeStatus_State = 1
// Ready indicates the node is ready to accept tasks.
NodeStatus_READY NodeStatus_State = 2
// Disconnected indicates the node is currently trying to find new manager.
NodeStatus_DISCONNECTED NodeStatus_State = 3
)
var NodeStatus_State_name = map[int32]string{
0: "UNKNOWN",
1: "DOWN",
2: "READY",
3: "DISCONNECTED",
}
var NodeStatus_State_value = map[string]int32{
"UNKNOWN": 0,
"DOWN": 1,
"READY": 2,
"DISCONNECTED": 3,
}
func (x NodeStatus_State) String() string {
return proto.EnumName(NodeStatus_State_name, int32(x))
}
func (NodeStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9, 0} }
type Mount_MountType int32
const (
MountTypeBind Mount_MountType = 0
MountTypeVolume Mount_MountType = 1
MountTypeTmpfs Mount_MountType = 2
)
var Mount_MountType_name = map[int32]string{
0: "BIND",
1: "VOLUME",
2: "TMPFS",
}
var Mount_MountType_value = map[string]int32{
"BIND": 0,
"VOLUME": 1,
"TMPFS": 2,
}
func (x Mount_MountType) String() string {
return proto.EnumName(Mount_MountType_name, int32(x))
}
func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 0} }
type Mount_BindOptions_MountPropagation int32
const (
MountPropagationRPrivate Mount_BindOptions_MountPropagation = 0
MountPropagationPrivate Mount_BindOptions_MountPropagation = 1
MountPropagationRShared Mount_BindOptions_MountPropagation = 2
MountPropagationShared Mount_BindOptions_MountPropagation = 3
MountPropagationRSlave Mount_BindOptions_MountPropagation = 4
MountPropagationSlave Mount_BindOptions_MountPropagation = 5
)
var Mount_BindOptions_MountPropagation_name = map[int32]string{
0: "RPRIVATE",
1: "PRIVATE",
2: "RSHARED",
3: "SHARED",
4: "RSLAVE",
5: "SLAVE",
}
var Mount_BindOptions_MountPropagation_value = map[string]int32{
"RPRIVATE": 0,
"PRIVATE": 1,
"RSHARED": 2,
"SHARED": 3,
"RSLAVE": 4,
"SLAVE": 5,
}
func (x Mount_BindOptions_MountPropagation) String() string {
return proto.EnumName(Mount_BindOptions_MountPropagation_name, int32(x))
}
func (Mount_BindOptions_MountPropagation) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{11, 0, 0}
}
type RestartPolicy_RestartCondition int32
const (
RestartOnNone RestartPolicy_RestartCondition = 0
RestartOnFailure RestartPolicy_RestartCondition = 1
RestartOnAny RestartPolicy_RestartCondition = 2
)
var RestartPolicy_RestartCondition_name = map[int32]string{
0: "NONE",
1: "ON_FAILURE",
2: "ANY",
}
var RestartPolicy_RestartCondition_value = map[string]int32{
"NONE": 0,
"ON_FAILURE": 1,
"ANY": 2,
}
func (x RestartPolicy_RestartCondition) String() string {
return proto.EnumName(RestartPolicy_RestartCondition_name, int32(x))
}
func (RestartPolicy_RestartCondition) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{12, 0}
}
type UpdateConfig_FailureAction int32
const (
UpdateConfig_PAUSE UpdateConfig_FailureAction = 0
UpdateConfig_CONTINUE UpdateConfig_FailureAction = 1
// NOTE: Automated rollback triggered as a failure action is an
// experimental feature that is not yet exposed to the end
// user. Currently, rollbacks must be initiated manually
// through the API by setting Spec to PreviousSpec. We may
// decide to expose automatic rollback in the future based on
// user feedback, or remove this feature otherwise.
UpdateConfig_ROLLBACK UpdateConfig_FailureAction = 2
)
var UpdateConfig_FailureAction_name = map[int32]string{
0: "PAUSE",
1: "CONTINUE",
2: "ROLLBACK",
}
var UpdateConfig_FailureAction_value = map[string]int32{
"PAUSE": 0,
"CONTINUE": 1,
"ROLLBACK": 2,
}
func (x UpdateConfig_FailureAction) String() string {
return proto.EnumName(UpdateConfig_FailureAction_name, int32(x))
}
func (UpdateConfig_FailureAction) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{13, 0}
}
type UpdateStatus_UpdateState int32
const (
UpdateStatus_UNKNOWN UpdateStatus_UpdateState = 0
UpdateStatus_UPDATING UpdateStatus_UpdateState = 1
UpdateStatus_PAUSED UpdateStatus_UpdateState = 2
UpdateStatus_COMPLETED UpdateStatus_UpdateState = 3
UpdateStatus_ROLLBACK_STARTED UpdateStatus_UpdateState = 4
UpdateStatus_ROLLBACK_PAUSED UpdateStatus_UpdateState = 5
UpdateStatus_ROLLBACK_COMPLETED UpdateStatus_UpdateState = 6
)
var UpdateStatus_UpdateState_name = map[int32]string{
0: "UNKNOWN",
1: "UPDATING",
2: "PAUSED",
3: "COMPLETED",
4: "ROLLBACK_STARTED",
5: "ROLLBACK_PAUSED",
6: "ROLLBACK_COMPLETED",
}
var UpdateStatus_UpdateState_value = map[string]int32{
"UNKNOWN": 0,
"UPDATING": 1,
"PAUSED": 2,
"COMPLETED": 3,
"ROLLBACK_STARTED": 4,
"ROLLBACK_PAUSED": 5,
"ROLLBACK_COMPLETED": 6,
}
func (x UpdateStatus_UpdateState) String() string {
return proto.EnumName(UpdateStatus_UpdateState_name, int32(x))
}
func (UpdateStatus_UpdateState) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{14, 0}
}
// AddressFamily specifies the network address family that
// this IPAMConfig belongs to.
type IPAMConfig_AddressFamily int32
const (
IPAMConfig_UNKNOWN IPAMConfig_AddressFamily = 0
IPAMConfig_IPV4 IPAMConfig_AddressFamily = 4
IPAMConfig_IPV6 IPAMConfig_AddressFamily = 6
)
var IPAMConfig_AddressFamily_name = map[int32]string{
0: "UNKNOWN",
4: "IPV4",
6: "IPV6",
}
var IPAMConfig_AddressFamily_value = map[string]int32{
"UNKNOWN": 0,
"IPV4": 4,
"IPV6": 6,
}
func (x IPAMConfig_AddressFamily) String() string {
return proto.EnumName(IPAMConfig_AddressFamily_name, int32(x))
}
func (IPAMConfig_AddressFamily) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{18, 0}
}
type PortConfig_Protocol int32
const (
ProtocolTCP PortConfig_Protocol = 0
ProtocolUDP PortConfig_Protocol = 1
)
var PortConfig_Protocol_name = map[int32]string{
0: "TCP",
1: "UDP",
}
var PortConfig_Protocol_value = map[string]int32{
"TCP": 0,
"UDP": 1,
}
func (x PortConfig_Protocol) String() string {
return proto.EnumName(PortConfig_Protocol_name, int32(x))
}
func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19, 0} }
type IssuanceStatus_State int32
const (
IssuanceStateUnknown IssuanceStatus_State = 0
IssuanceStateRenew IssuanceStatus_State = 1
IssuanceStatePending IssuanceStatus_State = 2
IssuanceStateIssued IssuanceStatus_State = 3
IssuanceStateFailed IssuanceStatus_State = 4
)
var IssuanceStatus_State_name = map[int32]string{
0: "UNKNOWN",
1: "RENEW",
2: "PENDING",
3: "ISSUED",
4: "FAILED",
}
var IssuanceStatus_State_value = map[string]int32{
"UNKNOWN": 0,
"RENEW": 1,
"PENDING": 2,
"ISSUED": 3,
"FAILED": 4,
}
func (x IssuanceStatus_State) String() string {
return proto.EnumName(IssuanceStatus_State_name, int32(x))
}
func (IssuanceStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24, 0} }
type ExternalCA_CAProtocol int32
const (
ExternalCA_CAProtocolCFSSL ExternalCA_CAProtocol = 0
)
var ExternalCA_CAProtocol_name = map[int32]string{
0: "CFSSL",
}
var ExternalCA_CAProtocol_value = map[string]int32{
"CFSSL": 0,
}
func (x ExternalCA_CAProtocol) String() string {
return proto.EnumName(ExternalCA_CAProtocol_name, int32(x))
}
func (ExternalCA_CAProtocol) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{26, 0}
}
// Encryption algorithm that can implemented using this key
type EncryptionKey_Algorithm int32
const (
AES_128_GCM EncryptionKey_Algorithm = 0
)
var EncryptionKey_Algorithm_name = map[int32]string{
0: "AES_128_GCM",
}
var EncryptionKey_Algorithm_value = map[string]int32{
"AES_128_GCM": 0,
}
func (x EncryptionKey_Algorithm) String() string {
return proto.EnumName(EncryptionKey_Algorithm_name, int32(x))
}
func (EncryptionKey_Algorithm) EnumDescriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{36, 0}
}
// Mode specifies how this secret should be exposed to the task.
type SecretReference_Mode int32
const (
// SYSTEM means that it is not exposed inside to a task at all, but
// only available via direct access, usually at the agent-level
SecretReference_SYSTEM SecretReference_Mode = 0
// FILE means that it will be exposed to the task as a file
SecretReference_FILE SecretReference_Mode = 1
// ENV means that it will be exposed to the task as an environment variable
SecretReference_ENV SecretReference_Mode = 2
)
var SecretReference_Mode_name = map[int32]string{
0: "SYSTEM",
1: "FILE",
2: "ENV",
}
var SecretReference_Mode_value = map[string]int32{
"SYSTEM": 0,
"FILE": 1,
"ENV": 2,
}
func (x SecretReference_Mode) String() string {
return proto.EnumName(SecretReference_Mode_name, int32(x))
}
func (SecretReference_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38, 0} }
// Version tracks the last time an object in the store was updated.
type Version struct {
Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
}
func (m *Version) Reset() { *m = Version{} }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
// Annotations provide useful information to identify API objects. They are
// common to all API specs.
type Annotations struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *Annotations) Reset() { *m = Annotations{} }
func (*Annotations) ProtoMessage() {}
func (*Annotations) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
type Resources struct {
// Amount of CPUs (e.g. 2000000000 = 2 CPU cores)
NanoCPUs int64 `protobuf:"varint,1,opt,name=nano_cpus,json=nanoCpus,proto3" json:"nano_cpus,omitempty"`
// Amount of memory in bytes.
MemoryBytes int64 `protobuf:"varint,2,opt,name=memory_bytes,json=memoryBytes,proto3" json:"memory_bytes,omitempty"`
}
func (m *Resources) Reset() { *m = Resources{} }
func (*Resources) ProtoMessage() {}
func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} }
type ResourceRequirements struct {
Limits *Resources `protobuf:"bytes,1,opt,name=limits" json:"limits,omitempty"`
Reservations *Resources `protobuf:"bytes,2,opt,name=reservations" json:"reservations,omitempty"`
}
func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
func (*ResourceRequirements) ProtoMessage() {}
func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} }
type Platform struct {
// Architecture (e.g. x86_64)
Architecture string `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty"`
// Operating System (e.g. linux)
OS string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"`
}
func (m *Platform) Reset() { *m = Platform{} }
func (*Platform) ProtoMessage() {}
func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} }
// PluginDescription describes an engine plugin.
type PluginDescription struct {
// Type of plugin. Canonical values for existing types are
// Volume, Network, and Authorization. More types could be
// supported in the future.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Name of the plugin
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
}
func (m *PluginDescription) Reset() { *m = PluginDescription{} }
func (*PluginDescription) ProtoMessage() {}
func (*PluginDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} }
type EngineDescription struct {
// Docker daemon version running on the node.
EngineVersion string `protobuf:"bytes,1,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"`
// Labels attached to the engine.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Volume, Network, and Auth plugins
Plugins []PluginDescription `protobuf:"bytes,3,rep,name=plugins" json:"plugins"`
}
func (m *EngineDescription) Reset() { *m = EngineDescription{} }
func (*EngineDescription) ProtoMessage() {}
func (*EngineDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} }
type NodeDescription struct {
// Hostname of the node as reported by the agent.
// This is different from spec.meta.name which is user-defined.
Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
// Platform of the node.
Platform *Platform `protobuf:"bytes,2,opt,name=platform" json:"platform,omitempty"`
// Total resources on the node.
Resources *Resources `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"`
// Information about the Docker Engine on the node.
Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"`
}
func (m *NodeDescription) Reset() { *m = NodeDescription{} }
func (*NodeDescription) ProtoMessage() {}
func (*NodeDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} }
type RaftMemberStatus struct {
Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"`
Reachability RaftMemberStatus_Reachability `protobuf:"varint,2,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
}
func (m *RaftMemberStatus) Reset() { *m = RaftMemberStatus{} }
func (*RaftMemberStatus) ProtoMessage() {}
func (*RaftMemberStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} }
type NodeStatus struct {
State NodeStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.NodeStatus_State" json:"state,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
}
func (m *NodeStatus) Reset() { *m = NodeStatus{} }
func (*NodeStatus) ProtoMessage() {}
func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} }
// Image identifies a container image by its reference string.
type Image struct {
	// reference is a docker image reference. This can include a repository, tag
	// or be fully qualified with a digest. The format is specified in the
	// distribution/reference package.
	Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
}
func (m *Image) Reset() { *m = Image{} }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} }
// Mount describes volume mounts for a container.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target. Top-level flags, such as writable, are common to all kinds
// of mounts, where we also provide options that are specific to a type of
// mount. This corresponds to flags and data, respectively, in the syscall.
type Mount struct {
// Type defines the nature of the mount.
Type Mount_MountType `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.Mount_MountType" json:"type,omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// Target path in container
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// ReadOnly should be set to true if the mount should not be writable.
ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"`
// BindOptions configures properties of a bind mount type.
//
// For mounts of type bind, the source must be an absolute host path.
BindOptions *Mount_BindOptions `protobuf:"bytes,5,opt,name=bind_options,json=bindOptions" json:"bind_options,omitempty"`
// VolumeOptions configures the properties specific to a volume mount type.
//
// For mounts of type volume, the source will be used as the volume name.
VolumeOptions *Mount_VolumeOptions `protobuf:"bytes,6,opt,name=volume_options,json=volumeOptions" json:"volume_options,omitempty"`
// TmpfsOptions allows one to set options for mounting a temporary
// filesystem.
//
// The source field will be ignored when using mounts of type tmpfs.
TmpfsOptions *Mount_TmpfsOptions `protobuf:"bytes,7,opt,name=tmpfs_options,json=tmpfsOptions" json:"tmpfs_options,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} }
// BindOptions specifies options that are specific to a bind mount.
type Mount_BindOptions struct {
// Propagation mode of mount.
Propagation Mount_BindOptions_MountPropagation `protobuf:"varint,1,opt,name=propagation,proto3,enum=docker.swarmkit.v1.Mount_BindOptions_MountPropagation" json:"propagation,omitempty"`
}
func (m *Mount_BindOptions) Reset() { *m = Mount_BindOptions{} }
func (*Mount_BindOptions) ProtoMessage() {}
func (*Mount_BindOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 0} }
// VolumeOptions contains parameters for mounting the volume.
type Mount_VolumeOptions struct {
// nocopy prevents automatic copying of data to the volume with data from target
NoCopy bool `protobuf:"varint,1,opt,name=nocopy,proto3" json:"nocopy,omitempty"`
// labels to apply to the volume if creating
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// DriverConfig specifies the options that may be passed to the driver
// if the volume is created.
//
// If this is empty, no volume will be created if the volume is missing.
DriverConfig *Driver `protobuf:"bytes,3,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"`
}
func (m *Mount_VolumeOptions) Reset() { *m = Mount_VolumeOptions{} }
func (*Mount_VolumeOptions) ProtoMessage() {}
func (*Mount_VolumeOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 1} }
// Mount_TmpfsOptions holds parameters for mounts of type tmpfs.
type Mount_TmpfsOptions struct {
	// Size sets the size of the tmpfs, in bytes.
	//
	// This will be converted to an operating system specific value
	// depending on the host. For example, on linux, it will be converted to
	// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
	// docker, uses a straight byte value.
	//
	// Percentages are not supported.
	SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
	// Mode of the tmpfs upon creation
	Mode os.FileMode `protobuf:"varint,2,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"`
}
func (m *Mount_TmpfsOptions) Reset() { *m = Mount_TmpfsOptions{} }
func (*Mount_TmpfsOptions) ProtoMessage() {}
func (*Mount_TmpfsOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11, 2} }
type RestartPolicy struct {
Condition RestartPolicy_RestartCondition `protobuf:"varint,1,opt,name=condition,proto3,enum=docker.swarmkit.v1.RestartPolicy_RestartCondition" json:"condition,omitempty"`
// Delay between restart attempts
Delay *docker_swarmkit_v11.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay,omitempty"`
// MaxAttempts is the maximum number of restarts to attempt on an
// instance before giving up. Ignored if 0.
MaxAttempts uint64 `protobuf:"varint,3,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"`
// Window is the time window used to evaluate the restart policy.
// The time window is unbounded if this is 0.
Window *docker_swarmkit_v11.Duration `protobuf:"bytes,4,opt,name=window" json:"window,omitempty"`
}
func (m *RestartPolicy) Reset() { *m = RestartPolicy{} }
func (*RestartPolicy) ProtoMessage() {}
func (*RestartPolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} }
// UpdateConfig specifies the rate and policy of updates.
// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy.
type UpdateConfig struct {
// Maximum number of tasks to be updated in one iteration.
// 0 means unlimited parallelism.
Parallelism uint64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"`
// Amount of time between updates.
Delay docker_swarmkit_v11.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay"`
// FailureAction is the action to take when an update failures.
FailureAction UpdateConfig_FailureAction `protobuf:"varint,3,opt,name=failure_action,json=failureAction,proto3,enum=docker.swarmkit.v1.UpdateConfig_FailureAction" json:"failure_action,omitempty"`
// Monitor indicates how long to monitor a task for failure after it is
// created. If the task fails by ending up in one of the states
// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
// this counts as a failure. If it fails after Monitor, it does not
// count as a failure. If Monitor is unspecified, a default value will
// be used.
Monitor *docker_swarmkit_v11.Duration `protobuf:"bytes,4,opt,name=monitor" json:"monitor,omitempty"`
// MaxFailureRatio is the fraction of tasks that may fail during
// an update before the failure action is invoked. Any task created by
// the current update which ends up in one of the states REJECTED,
// COMPLETED or FAILED within Monitor from its creation counts as a
// failure. The number of failures is divided by the number of tasks
// being updated, and if this fraction is greater than
// MaxFailureRatio, the failure action is invoked.
//
// If the failure action is CONTINUE, there is no effect.
// If the failure action is PAUSE, no more tasks will be updated until
// another update is started.
// If the failure action is ROLLBACK, the orchestrator will attempt to
// roll back to the previous service spec. If the MaxFailureRatio
// threshold is hit during the rollback, the rollback will pause.
//
// TODO(aaronl): Should there be a separate failure threshold for
// rollbacks? Should there be a failure action for rollbacks (to allow
// them to do something other than pause when the rollback encounters
// errors)?
MaxFailureRatio float32 `protobuf:"fixed32,5,opt,name=max_failure_ratio,json=maxFailureRatio,proto3" json:"max_failure_ratio,omitempty"`
}
func (m *UpdateConfig) Reset() { *m = UpdateConfig{} }
func (*UpdateConfig) ProtoMessage() {}
func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} }
// UpdateStatus is the status of an update in progress.
type UpdateStatus struct {
// State is the state of this update. It indicates whether the
// update is in progress, completed, paused, rolling back, or
// finished rolling back.
State UpdateStatus_UpdateState `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.UpdateStatus_UpdateState" json:"state,omitempty"`
// StartedAt is the time at which the update was started.
StartedAt *docker_swarmkit_v1.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt" json:"started_at,omitempty"`
// CompletedAt is the time at which the update completed successfully,
// paused, or finished rolling back.
CompletedAt *docker_swarmkit_v1.Timestamp `protobuf:"bytes,3,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"`
// Message explains how the update got into its current state. For
// example, if the update is paused, it will explain what is preventing
// the update from proceeding (typically the failure of a task to start up
// when OnFailure is PAUSE).
Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"`
}
func (m *UpdateStatus) Reset() { *m = UpdateStatus{} }
func (*UpdateStatus) ProtoMessage() {}
func (*UpdateStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} }
// ContainerStatus carries container-specific runtime status for a task;
// it is the payload of the TaskStatus runtime_status oneof.
type ContainerStatus struct {
	// ContainerID is the identifier the container runtime assigned to the container.
	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
	// PID is the process ID reported for the container (presumably its primary
	// process — confirm against the agent that populates this).
	PID int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
	// ExitCode is the exit status recorded once the container has terminated.
	ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"`
}
// Reset restores m to the zero value.
func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
// ProtoMessage marks ContainerStatus as a protobuf message type.
func (*ContainerStatus) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this message's
// index path within it.
func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} }
type TaskStatus struct {
Timestamp *docker_swarmkit_v1.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
// State expresses the current state of the task.
State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=docker.swarmkit.v1.TaskState" json:"state,omitempty"`
// Message reports a message for the task status. This should provide a
// human readable message that can point to how the task actually arrived
// at a current state.
//
// As a convention, we place the a small message here that led to the
// current state. For example, if the task is in ready, because it was
// prepared, we'd place "prepared" in this field. If we skipped preparation
// because the task is prepared, we would put "already prepared" in this
// field.
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
// Err is set if the task is in an error state.
//
// The following states should report a companion error:
//
// FAILED, REJECTED
//
// TODO(stevvooe) Integrate this field with the error interface.
Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"`
// Container status contains container specific status information.
//
// Types that are valid to be assigned to RuntimeStatus:
// *TaskStatus_Container
RuntimeStatus isTaskStatus_RuntimeStatus `protobuf_oneof:"runtime_status"`
}
func (m *TaskStatus) Reset() { *m = TaskStatus{} }
func (*TaskStatus) ProtoMessage() {}
func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} }
type isTaskStatus_RuntimeStatus interface {
isTaskStatus_RuntimeStatus()
MarshalTo([]byte) (int, error)
Size() int
}
type TaskStatus_Container struct {
Container *ContainerStatus `protobuf:"bytes,5,opt,name=container,oneof"`
}
func (*TaskStatus_Container) isTaskStatus_RuntimeStatus() {}
// GetRuntimeStatus returns the value stored in the runtime_status oneof,
// or nil when m is nil or no oneof field has been set.
func (m *TaskStatus) GetRuntimeStatus() isTaskStatus_RuntimeStatus {
	if m == nil {
		return nil
	}
	return m.RuntimeStatus
}
// GetContainer returns the container status when the runtime_status oneof
// holds a *TaskStatus_Container, and nil otherwise (including when m is nil).
func (m *TaskStatus) GetContainer() *ContainerStatus {
	wrapper, ok := m.GetRuntimeStatus().(*TaskStatus_Container)
	if !ok {
		return nil
	}
	return wrapper.Container
}
// XXX_OneofFuncs is for the internal use of the proto package. It returns
// the marshal, unmarshal, and size hooks for the runtime_status oneof,
// together with the list of its wrapper types.
func (*TaskStatus) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _TaskStatus_OneofMarshaler, _TaskStatus_OneofUnmarshaler, _TaskStatus_OneofSizer, []interface{}{
		(*TaskStatus_Container)(nil),
	}
}
// _TaskStatus_OneofMarshaler encodes whichever runtime_status oneof field is
// populated on a TaskStatus into b. Installed via XXX_OneofFuncs.
func _TaskStatus_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*TaskStatus)
	// runtime_status
	switch x := m.RuntimeStatus.(type) {
	case *TaskStatus_Container:
		// Field 5, length-delimited: write the tag varint, then the embedded message.
		_ = b.EncodeVarint(5<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.Container); err != nil {
			return err
		}
	case nil:
		// No oneof field set; nothing to encode.
	default:
		return fmt.Errorf("TaskStatus.RuntimeStatus has unexpected type %T", x)
	}
	return nil
}
// _TaskStatus_OneofUnmarshaler decodes a runtime_status oneof field from b
// into a TaskStatus. The bool result reports whether the tag was handled
// here; the proto package falls back to its generic path when it is false.
func _TaskStatus_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*TaskStatus)
	switch tag {
	case 5: // runtime_status.container
		if wire != proto.WireBytes {
			// Field 5 must be length-delimited on the wire.
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ContainerStatus)
		err := b.DecodeMessage(msg)
		m.RuntimeStatus = &TaskStatus_Container{msg}
		return true, err
	default:
		return false, nil
	}
}
// _TaskStatus_OneofSizer returns the encoded size in bytes of the populated
// runtime_status oneof field: tag varint + length prefix + message body.
func _TaskStatus_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*TaskStatus)
	// runtime_status
	switch x := m.RuntimeStatus.(type) {
	case *TaskStatus_Container:
		s := proto.Size(x.Container)
		n += proto.SizeVarint(5<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
		// No oneof field set; contributes zero bytes.
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
// NetworkAttachmentConfig specifies how a service should be attached to a particular network.
//
// For now, this is a simple struct, but this can include future information
// instructing Swarm on how this service should work on the particular
// network.
type NetworkAttachmentConfig struct {
// Target specifies the target network for attachment. This value may be a
// network name or identifier. Only identifiers are supported at this time.
Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
// Aliases specifies a list of discoverable alternate names for the service on this Target.
Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"`
// Addresses specifies a list of ipv4 and ipv6 addresses
// preferred. If these addresses are not available then the
// attachment might fail.
Addresses []string `protobuf:"bytes,3,rep,name=addresses" json:"addresses,omitempty"`
}
func (m *NetworkAttachmentConfig) Reset() { *m = NetworkAttachmentConfig{} }
func (*NetworkAttachmentConfig) ProtoMessage() {}
func (*NetworkAttachmentConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} }
// IPAMConfig specifies parameters for IP Address Management.
type IPAMConfig struct {
Family IPAMConfig_AddressFamily `protobuf:"varint,1,opt,name=family,proto3,enum=docker.swarmkit.v1.IPAMConfig_AddressFamily" json:"family,omitempty"`
// Subnet defines a network as a CIDR address (ie network and mask
// 192.168.0.1/24).
Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"`
// Range defines the portion of the subnet to allocate to tasks. This is
// defined as a subnet within the primary subnet.
Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
// Gateway address within the subnet.
Gateway string `protobuf:"bytes,4,opt,name=gateway,proto3" json:"gateway,omitempty"`
// Reserved is a list of address from the master pool that should *not* be
// allocated. These addresses may have already been allocated or may be
// reserved for another allocation manager.
Reserved map[string]string `protobuf:"bytes,5,rep,name=reserved" json:"reserved,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *IPAMConfig) Reset() { *m = IPAMConfig{} }
func (*IPAMConfig) ProtoMessage() {}
func (*IPAMConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} }
// PortConfig specifies an exposed port which can be
// addressed using the given name. This can be later queried
// using a service discovery api or a DNS SRV query. The node
// port specifies a port that can be used to address this
// service external to the cluster by sending a connection
// request to this port to any node on the cluster.
type PortConfig struct {
// Name for the port. If provided the port information can
// be queried using the name as in a DNS SRV query.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Protocol for the port which is exposed.
Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"`
// The port which the application is exposing and is bound to.
TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
// PublishedPort specifies the port on which the service is
// exposed. If specified, the port must be
// within the available range. If not specified, an available
// port is automatically assigned.
PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
}
func (m *PortConfig) Reset() { *m = PortConfig{} }
func (*PortConfig) ProtoMessage() {}
func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} }
// Driver is a generic driver type to be used throughout the API. For now, a
// driver is simply a name and set of options. The field contents depend on the
// target use case and driver application. For example, a network driver may
// have different rules than a volume driver.
type Driver struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Options map[string]string `protobuf:"bytes,2,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *Driver) Reset() { *m = Driver{} }
func (*Driver) ProtoMessage() {}
func (*Driver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} }
type IPAMOptions struct {
Driver *Driver `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"`
Configs []*IPAMConfig `protobuf:"bytes,3,rep,name=configs" json:"configs,omitempty"`
}
func (m *IPAMOptions) Reset() { *m = IPAMOptions{} }
func (*IPAMOptions) ProtoMessage() {}
func (*IPAMOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} }
// Peer should be used anywhere where we are describing a remote peer.
type Peer struct {
	// NodeID is the cluster identifier of the peer node.
	NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
	// Addr is the network address at which the peer can be reached.
	Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
}
// Reset restores m to the zero value.
func (m *Peer) Reset() { *m = Peer{} }
// ProtoMessage marks Peer as a protobuf message type.
func (*Peer) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this message's
// index path within it.
func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} }
// WeightedPeer should be used anywhere where we are describing a remote peer
// with a weight.
type WeightedPeer struct {
	// Peer identifies the remote peer.
	Peer *Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
	// Weight is the weight associated with this peer.
	Weight int64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"`
}
// Reset restores m to the zero value.
func (m *WeightedPeer) Reset() { *m = WeightedPeer{} }
// ProtoMessage marks WeightedPeer as a protobuf message type.
func (*WeightedPeer) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this message's
// index path within it.
func (*WeightedPeer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} }
type IssuanceStatus struct {
State IssuanceStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.IssuanceStatus_State" json:"state,omitempty"`
// Err is set if the Certificate Issuance is in an error state.
// The following states should report a companion error:
// FAILED
Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
}
func (m *IssuanceStatus) Reset() { *m = IssuanceStatus{} }
func (*IssuanceStatus) ProtoMessage() {}
func (*IssuanceStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
type AcceptancePolicy struct {
Policies []*AcceptancePolicy_RoleAdmissionPolicy `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"`
}
func (m *AcceptancePolicy) Reset() { *m = AcceptancePolicy{} }
func (*AcceptancePolicy) ProtoMessage() {}
func (*AcceptancePolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} }
type AcceptancePolicy_RoleAdmissionPolicy struct {
Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
// Autoaccept controls which roles' certificates are automatically
// issued without administrator intervention.
Autoaccept bool `protobuf:"varint,2,opt,name=autoaccept,proto3" json:"autoaccept,omitempty"`
// Secret represents a user-provided string that is necessary for new
// nodes to join the cluster
Secret *AcceptancePolicy_RoleAdmissionPolicy_Secret `protobuf:"bytes,3,opt,name=secret" json:"secret,omitempty"`
}
func (m *AcceptancePolicy_RoleAdmissionPolicy) Reset() { *m = AcceptancePolicy_RoleAdmissionPolicy{} }
func (*AcceptancePolicy_RoleAdmissionPolicy) ProtoMessage() {}
func (*AcceptancePolicy_RoleAdmissionPolicy) Descriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{25, 0}
}
type AcceptancePolicy_RoleAdmissionPolicy_Secret struct {
// The actual content (possibly hashed)
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
// The type of hash we are using, or "plaintext"
Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"`
}
func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Reset() {
*m = AcceptancePolicy_RoleAdmissionPolicy_Secret{}
}
func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) ProtoMessage() {}
func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) Descriptor() ([]byte, []int) {
return fileDescriptorTypes, []int{25, 0, 0}
}
type ExternalCA struct {
// Protocol is the protocol used by this external CA.
Protocol ExternalCA_CAProtocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=docker.swarmkit.v1.ExternalCA_CAProtocol" json:"protocol,omitempty"`
// URL is the URL where the external CA can be reached.
URL string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
Options map[string]string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *ExternalCA) Reset() { *m = ExternalCA{} }
func (*ExternalCA) ProtoMessage() {}
func (*ExternalCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} }
type CAConfig struct {
// NodeCertExpiry is the duration certificates should be issued for
NodeCertExpiry *docker_swarmkit_v11.Duration `protobuf:"bytes,1,opt,name=node_cert_expiry,json=nodeCertExpiry" json:"node_cert_expiry,omitempty"`
// ExternalCAs is a list of CAs to which a manager node will make
// certificate signing requests for node certificates.
ExternalCAs []*ExternalCA `protobuf:"bytes,2,rep,name=external_cas,json=externalCas" json:"external_cas,omitempty"`
}
func (m *CAConfig) Reset() { *m = CAConfig{} }
func (*CAConfig) ProtoMessage() {}
func (*CAConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} }
// OrchestrationConfig defines cluster-level orchestration settings.
type OrchestrationConfig struct {
// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
// node. If negative, never remove completed or failed tasks.
TaskHistoryRetentionLimit int64 `protobuf:"varint,1,opt,name=task_history_retention_limit,json=taskHistoryRetentionLimit,proto3" json:"task_history_retention_limit,omitempty"`
}
func (m *OrchestrationConfig) Reset() { *m = OrchestrationConfig{} }
func (*OrchestrationConfig) ProtoMessage() {}
func (*OrchestrationConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} }
// TaskDefaults specifies default values for task creation.
type TaskDefaults struct {
// LogDriver specifies the log driver to use for the cluster if not
// specified for each task.
//
// If this is changed, only new tasks will pick up the new log driver.
// Existing tasks will continue to use the previous default until rescheduled.
LogDriver *Driver `protobuf:"bytes,1,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"`
}
func (m *TaskDefaults) Reset() { *m = TaskDefaults{} }
func (*TaskDefaults) ProtoMessage() {}
func (*TaskDefaults) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} }
// DispatcherConfig defines cluster-level dispatcher settings.
type DispatcherConfig struct {
// HeartbeatPeriod defines how often agent should send heartbeats to
// dispatcher.
HeartbeatPeriod *docker_swarmkit_v11.Duration `protobuf:"bytes,1,opt,name=heartbeat_period,json=heartbeatPeriod" json:"heartbeat_period,omitempty"`
}
func (m *DispatcherConfig) Reset() { *m = DispatcherConfig{} }
func (*DispatcherConfig) ProtoMessage() {}
func (*DispatcherConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} }
// RaftConfig defines raft settings for the cluster.
type RaftConfig struct {
// SnapshotInterval is the number of log entries between snapshots.
SnapshotInterval uint64 `protobuf:"varint,1,opt,name=snapshot_interval,json=snapshotInterval,proto3" json:"snapshot_interval,omitempty"`
// KeepOldSnapshots is the number of snapshots to keep beyond the
// current snapshot.
KeepOldSnapshots uint64 `protobuf:"varint,2,opt,name=keep_old_snapshots,json=keepOldSnapshots,proto3" json:"keep_old_snapshots,omitempty"`
// LogEntriesForSlowFollowers is the number of log entries to keep
// around to sync up slow followers after a snapshot is created.
LogEntriesForSlowFollowers uint64 `protobuf:"varint,3,opt,name=log_entries_for_slow_followers,json=logEntriesForSlowFollowers,proto3" json:"log_entries_for_slow_followers,omitempty"`
// HeartbeatTick defines the amount of ticks (in seconds) between
// each heartbeat message sent to other members for health-check.
HeartbeatTick uint32 `protobuf:"varint,4,opt,name=heartbeat_tick,json=heartbeatTick,proto3" json:"heartbeat_tick,omitempty"`
// ElectionTick defines the amount of ticks (in seconds) needed
// without a leader to trigger a new election.
ElectionTick uint32 `protobuf:"varint,5,opt,name=election_tick,json=electionTick,proto3" json:"election_tick,omitempty"`
}
func (m *RaftConfig) Reset() { *m = RaftConfig{} }
func (*RaftConfig) ProtoMessage() {}
func (*RaftConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} }
// Placement specifies task distribution constraints.
type Placement struct {
// constraints specifies a set of requirements a node should meet for a task.
Constraints []string `protobuf:"bytes,1,rep,name=constraints" json:"constraints,omitempty"`
}
func (m *Placement) Reset() { *m = Placement{} }
func (*Placement) ProtoMessage() {}
func (*Placement) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} }
// JoinTokens contains the join tokens for workers and managers.
type JoinTokens struct {
	// Worker is the join token workers may use to join the swarm.
	Worker string `protobuf:"bytes,1,opt,name=worker,proto3" json:"worker,omitempty"`
	// Manager is the join token managers may use to join the swarm.
	Manager string `protobuf:"bytes,2,opt,name=manager,proto3" json:"manager,omitempty"`
}
// Reset restores m to the zero value.
func (m *JoinTokens) Reset() { *m = JoinTokens{} }
// ProtoMessage marks JoinTokens as a protobuf message type.
func (*JoinTokens) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this message's
// index path within it.
func (*JoinTokens) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} }
type RootCA struct {
// CAKey is the root CA private key.
CAKey []byte `protobuf:"bytes,1,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
// CACert is the root CA certificate.
CACert []byte `protobuf:"bytes,2,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
// CACertHash is the digest of the CA Certificate.
CACertHash string `protobuf:"bytes,3,opt,name=ca_cert_hash,json=caCertHash,proto3" json:"ca_cert_hash,omitempty"`
// JoinTokens contains the join tokens for workers and managers.
JoinTokens JoinTokens `protobuf:"bytes,4,opt,name=join_tokens,json=joinTokens" json:"join_tokens"`
}
func (m *RootCA) Reset() { *m = RootCA{} }
func (*RootCA) ProtoMessage() {}
func (*RootCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34} }
type Certificate struct {
Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
Status IssuanceStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
Certificate []byte `protobuf:"bytes,4,opt,name=certificate,proto3" json:"certificate,omitempty"`
// CN represents the node ID.
CN string `protobuf:"bytes,5,opt,name=cn,proto3" json:"cn,omitempty"`
}
func (m *Certificate) Reset() { *m = Certificate{} }
func (*Certificate) ProtoMessage() {}
func (*Certificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{35} }
// Symmetric keys to encrypt inter-agent communication.
type EncryptionKey struct {
// Agent subsystem the key is intended for. Example:
// networking:gossip
Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"`
Algorithm EncryptionKey_Algorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.EncryptionKey_Algorithm" json:"algorithm,omitempty"`
Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
// Time stamp from the lamport clock of the key allocator to
// identify the relative age of the key.
LamportTime uint64 `protobuf:"varint,4,opt,name=lamport_time,json=lamportTime,proto3" json:"lamport_time,omitempty"`
}
func (m *EncryptionKey) Reset() { *m = EncryptionKey{} }
func (*EncryptionKey) ProtoMessage() {}
func (*EncryptionKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{36} }
// ManagerStatus provides informations about the state of a manager in the cluster.
type ManagerStatus struct {
// RaftID specifies the internal ID used by the manager in a raft context, it can never be modified
// and is used only for information purposes
RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
// Addr is the address advertised to raft.
Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
// Leader is set to true if this node is the raft leader.
Leader bool `protobuf:"varint,3,opt,name=leader,proto3" json:"leader,omitempty"`
// Reachability specifies whether this node is reachable.
Reachability RaftMemberStatus_Reachability `protobuf:"varint,4,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
}
func (m *ManagerStatus) Reset() { *m = ManagerStatus{} }
func (*ManagerStatus) ProtoMessage() {}
func (*ManagerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{37} }
// SecretReference is the linkage between a service and a secret that it uses.
type SecretReference struct {
// SecretID represents the ID of the specific Secret that we're
// referencing. This identifier exists so that SecretReferences don't leak
// any information about the secret contents.
SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"`
// Mode is the way the secret should be presented.
Mode SecretReference_Mode `protobuf:"varint,2,opt,name=mode,proto3,enum=docker.swarmkit.v1.SecretReference_Mode" json:"mode,omitempty"`
// Target is the name by which the image accesses the secret.
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// SecretName is the name of the secret that this references, but this is just provided for
// lookup/display purposes. The secret in the reference will be identified by its ID.
SecretName string `protobuf:"bytes,4,opt,name=secret_name,json=secretName,proto3" json:"secret_name,omitempty"`
}
func (m *SecretReference) Reset() { *m = SecretReference{} }
func (*SecretReference) ProtoMessage() {}
func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38} }
// init registers every message type and enum defined in this file with the
// proto package under the docker.swarmkit.v1 namespace, so they can be
// resolved by name at runtime.
func init() {
	// Message types.
	proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version")
	proto.RegisterType((*Annotations)(nil), "docker.swarmkit.v1.Annotations")
	proto.RegisterType((*Resources)(nil), "docker.swarmkit.v1.Resources")
	proto.RegisterType((*ResourceRequirements)(nil), "docker.swarmkit.v1.ResourceRequirements")
	proto.RegisterType((*Platform)(nil), "docker.swarmkit.v1.Platform")
	proto.RegisterType((*PluginDescription)(nil), "docker.swarmkit.v1.PluginDescription")
	proto.RegisterType((*EngineDescription)(nil), "docker.swarmkit.v1.EngineDescription")
	proto.RegisterType((*NodeDescription)(nil), "docker.swarmkit.v1.NodeDescription")
	proto.RegisterType((*RaftMemberStatus)(nil), "docker.swarmkit.v1.RaftMemberStatus")
	proto.RegisterType((*NodeStatus)(nil), "docker.swarmkit.v1.NodeStatus")
	proto.RegisterType((*Image)(nil), "docker.swarmkit.v1.Image")
	proto.RegisterType((*Mount)(nil), "docker.swarmkit.v1.Mount")
	proto.RegisterType((*Mount_BindOptions)(nil), "docker.swarmkit.v1.Mount.BindOptions")
	proto.RegisterType((*Mount_VolumeOptions)(nil), "docker.swarmkit.v1.Mount.VolumeOptions")
	proto.RegisterType((*Mount_TmpfsOptions)(nil), "docker.swarmkit.v1.Mount.TmpfsOptions")
	proto.RegisterType((*RestartPolicy)(nil), "docker.swarmkit.v1.RestartPolicy")
	proto.RegisterType((*UpdateConfig)(nil), "docker.swarmkit.v1.UpdateConfig")
	proto.RegisterType((*UpdateStatus)(nil), "docker.swarmkit.v1.UpdateStatus")
	proto.RegisterType((*ContainerStatus)(nil), "docker.swarmkit.v1.ContainerStatus")
	proto.RegisterType((*TaskStatus)(nil), "docker.swarmkit.v1.TaskStatus")
	proto.RegisterType((*NetworkAttachmentConfig)(nil), "docker.swarmkit.v1.NetworkAttachmentConfig")
	proto.RegisterType((*IPAMConfig)(nil), "docker.swarmkit.v1.IPAMConfig")
	proto.RegisterType((*PortConfig)(nil), "docker.swarmkit.v1.PortConfig")
	proto.RegisterType((*Driver)(nil), "docker.swarmkit.v1.Driver")
	proto.RegisterType((*IPAMOptions)(nil), "docker.swarmkit.v1.IPAMOptions")
	proto.RegisterType((*Peer)(nil), "docker.swarmkit.v1.Peer")
	proto.RegisterType((*WeightedPeer)(nil), "docker.swarmkit.v1.WeightedPeer")
	proto.RegisterType((*IssuanceStatus)(nil), "docker.swarmkit.v1.IssuanceStatus")
	proto.RegisterType((*AcceptancePolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy")
	proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy")
	proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy_Secret)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret")
	proto.RegisterType((*ExternalCA)(nil), "docker.swarmkit.v1.ExternalCA")
	proto.RegisterType((*CAConfig)(nil), "docker.swarmkit.v1.CAConfig")
	proto.RegisterType((*OrchestrationConfig)(nil), "docker.swarmkit.v1.OrchestrationConfig")
	proto.RegisterType((*TaskDefaults)(nil), "docker.swarmkit.v1.TaskDefaults")
	proto.RegisterType((*DispatcherConfig)(nil), "docker.swarmkit.v1.DispatcherConfig")
	proto.RegisterType((*RaftConfig)(nil), "docker.swarmkit.v1.RaftConfig")
	proto.RegisterType((*Placement)(nil), "docker.swarmkit.v1.Placement")
	proto.RegisterType((*JoinTokens)(nil), "docker.swarmkit.v1.JoinTokens")
	proto.RegisterType((*RootCA)(nil), "docker.swarmkit.v1.RootCA")
	proto.RegisterType((*Certificate)(nil), "docker.swarmkit.v1.Certificate")
	proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey")
	proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus")
	proto.RegisterType((*SecretReference)(nil), "docker.swarmkit.v1.SecretReference")
	// Enum types (top-level and message-nested).
	proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value)
	proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value)
	proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value)
	proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value)
	proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value)
	proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value)
	proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value)
	proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, UpdateConfig_FailureAction_value)
	proto.RegisterEnum("docker.swarmkit.v1.UpdateStatus_UpdateState", UpdateStatus_UpdateState_name, UpdateStatus_UpdateState_value)
	proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value)
	proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value)
	proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value)
	proto.RegisterEnum("docker.swarmkit.v1.ExternalCA_CAProtocol", ExternalCA_CAProtocol_name, ExternalCA_CAProtocol_value)
	proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value)
	proto.RegisterEnum("docker.swarmkit.v1.SecretReference_Mode", SecretReference_Mode_name, SecretReference_Mode_value)
}
// Copy returns a deep copy of the Version, or nil when the receiver is nil.
func (m *Version) Copy() *Version {
	if m == nil {
		return nil
	}
	return &Version{Index: m.Index}
}
// Copy returns a deep copy of the Annotations, or nil when the receiver is
// nil. The Labels map is duplicated so the copy shares no mutable state
// with m; a nil Labels map stays nil in the copy.
func (m *Annotations) Copy() *Annotations {
	if m == nil {
		return nil
	}
	c := &Annotations{Name: m.Name}
	if m.Labels != nil {
		c.Labels = make(map[string]string, len(m.Labels))
		for key, value := range m.Labels {
			c.Labels[key] = value
		}
	}
	return c
}
func (m *Resources) Copy() *Resources {
if m == nil {
return nil
}
o := &Resources{
NanoCPUs: m.NanoCPUs,
MemoryBytes: m.MemoryBytes,
}
return o
}
func (m *ResourceRequirements) Copy() *ResourceRequirements {
if m == nil {
return nil
}
o := &ResourceRequirements{
Limits: m.Limits.Copy(),
Reservations: m.Reservations.Copy(),
}
return o
}
func (m *Platform) Copy() *Platform {
if m == nil {
return nil
}
o := &Platform{
Architecture: m.Architecture,
OS: m.OS,
}
return o
}
func (m *PluginDescription) Copy() *PluginDescription {
if m == nil {
return nil
}
o := &PluginDescription{
Type: m.Type,
Name: m.Name,
}
return o
}
func (m *EngineDescription) Copy() *EngineDescription {
if m == nil {
return nil
}
o := &EngineDescription{
EngineVersion: m.EngineVersion,
}
if m.Labels != nil {
o.Labels = make(map[string]string)
for k, v := range m.Labels {
o.Labels[k] = v
}
}
if m.Plugins != nil {
o.Plugins = make([]PluginDescription, 0, len(m.Plugins))
for _, v := range m.Plugins {
o.Plugins = append(o.Plugins, *v.Copy())
}
}
return o
}
func (m *NodeDescription) Copy() *NodeDescription {
if m == nil {
return nil
}
o := &NodeDescription{
Hostname: m.Hostname,
Platform: m.Platform.Copy(),
Resources: m.Resources.Copy(),
Engine: m.Engine.Copy(),
}
return o
}
func (m *RaftMemberStatus) Copy() *RaftMemberStatus {
if m == nil {
return nil
}
o := &RaftMemberStatus{
Leader: m.Leader,
Reachability: m.Reachability,
Message: m.Message,
}
return o
}
func (m *NodeStatus) Copy() *NodeStatus {
if m == nil {
return nil
}
o := &NodeStatus{
State: m.State,
Message: m.Message,
}
return o
}
func (m *Image) Copy() *Image {
if m == nil {
return nil
}
o := &Image{
Reference: m.Reference,
}
return o
}
func (m *Mount) Copy() *Mount {
if m == nil {
return nil
}
o := &Mount{
Type: m.Type,
Source: m.Source,
Target: m.Target,
ReadOnly: m.ReadOnly,
BindOptions: m.BindOptions.Copy(),
VolumeOptions: m.VolumeOptions.Copy(),
TmpfsOptions: m.TmpfsOptions.Copy(),
}
return o
}
func (m *Mount_BindOptions) Copy() *Mount_BindOptions {
if m == nil {
return nil
}
o := &Mount_BindOptions{
Propagation: m.Propagation,
}
return o
}
func (m *Mount_VolumeOptions) Copy() *Mount_VolumeOptions {
if m == nil {
return nil
}
o := &Mount_VolumeOptions{
NoCopy: m.NoCopy,
DriverConfig: m.DriverConfig.Copy(),
}
if m.Labels != nil {
o.Labels = make(map[string]string)
for k, v := range m.Labels {
o.Labels[k] = v
}
}
return o
}
func (m *Mount_TmpfsOptions) Copy() *Mount_TmpfsOptions {
if m == nil {
return nil
}
o := &Mount_TmpfsOptions{
SizeBytes: m.SizeBytes,
Mode: m.Mode,
}
return o
}
func (m *RestartPolicy) Copy() *RestartPolicy {
if m == nil {
return nil
}
o := &RestartPolicy{
Condition: m.Condition,
Delay: m.Delay.Copy(),
MaxAttempts: m.MaxAttempts,
Window: m.Window.Copy(),
}
return o
}
func (m *UpdateConfig) Copy() *UpdateConfig {
if m == nil {
return nil
}
o := &UpdateConfig{
Parallelism: m.Parallelism,
Delay: *m.Delay.Copy(),
FailureAction: m.FailureAction,
Monitor: m.Monitor.Copy(),
MaxFailureRatio: m.MaxFailureRatio,
}
return o
}
func (m *UpdateStatus) Copy() *UpdateStatus {
if m == nil {
return nil
}
o := &UpdateStatus{
State: m.State,
StartedAt: m.StartedAt.Copy(),
CompletedAt: m.CompletedAt.Copy(),
Message: m.Message,
}
return o
}
func (m *ContainerStatus) Copy() *ContainerStatus {
if m == nil {
return nil
}
o := &ContainerStatus{
ContainerID: m.ContainerID,
PID: m.PID,
ExitCode: m.ExitCode,
}
return o
}
// Copy returns a deep copy of the TaskStatus, or nil when the receiver is
// nil. Nested messages are copied via their own Copy methods.
func (m *TaskStatus) Copy() *TaskStatus {
	if m == nil {
		return nil
	}
	o := &TaskStatus{
		Timestamp: m.Timestamp.Copy(),
		State:     m.State,
		Message:   m.Message,
		Err:       m.Err,
	}
	// Deep-copy the RuntimeStatus oneof payload by re-wrapping it in a fresh
	// wrapper struct. Only the container variant is handled here; any other
	// (or nil) variant leaves o.RuntimeStatus nil.
	switch m.RuntimeStatus.(type) {
	case *TaskStatus_Container:
		i := &TaskStatus_Container{
			Container: m.GetContainer().Copy(),
		}
		o.RuntimeStatus = i
	}
	return o
}
// Copy returns a deep copy of the NetworkAttachmentConfig, or nil when the
// receiver is nil. The Aliases and Addresses slices are duplicated so the
// copy shares no backing arrays with m; nil slices stay nil in the copy.
func (m *NetworkAttachmentConfig) Copy() *NetworkAttachmentConfig {
	if m == nil {
		return nil
	}
	c := &NetworkAttachmentConfig{
		Target: m.Target,
	}
	if m.Aliases != nil {
		c.Aliases = make([]string, len(m.Aliases))
		copy(c.Aliases, m.Aliases)
	}
	if m.Addresses != nil {
		c.Addresses = make([]string, len(m.Addresses))
		copy(c.Addresses, m.Addresses)
	}
	return c
}
func (m *IPAMConfig) Copy() *IPAMConfig {
if m == nil {
return nil
}
o := &IPAMConfig{
Family: m.Family,
Subnet: m.Subnet,
Range: m.Range,
Gateway: m.Gateway,
}
if m.Reserved != nil {
o.Reserved = make(map[string]string)
for k, v := range m.Reserved {
o.Reserved[k] = v
}
}
return o
}
func (m *PortConfig) Copy() *PortConfig {
if m == nil {
return nil
}
o := &PortConfig{
Name: m.Name,
Protocol: m.Protocol,
TargetPort: m.TargetPort,
PublishedPort: m.PublishedPort,
}
return o
}
func (m *Driver) Copy() *Driver {
if m == nil {
return nil
}
o := &Driver{
Name: m.Name,
}
if m.Options != nil {
o.Options = make(map[string]string)
for k, v := range m.Options {
o.Options[k] = v
}
}
return o
}
func (m *IPAMOptions) Copy() *IPAMOptions {
if m == nil {
return nil
}
o := &IPAMOptions{
Driver: m.Driver.Copy(),
}
if m.Configs != nil {
o.Configs = make([]*IPAMConfig, 0, len(m.Configs))
for _, v := range m.Configs {
o.Configs = append(o.Configs, v.Copy())
}
}
return o
}
func (m *Peer) Copy() *Peer {
if m == nil {
return nil
}
o := &Peer{
NodeID: m.NodeID,
Addr: m.Addr,
}
return o
}
func (m *WeightedPeer) Copy() *WeightedPeer {
if m == nil {
return nil
}
o := &WeightedPeer{
Peer: m.Peer.Copy(),
Weight: m.Weight,
}
return o
}
func (m *IssuanceStatus) Copy() *IssuanceStatus {
if m == nil {
return nil
}
o := &IssuanceStatus{
State: m.State,
Err: m.Err,
}
return o
}
func (m *AcceptancePolicy) Copy() *AcceptancePolicy {
if m == nil {
return nil
}
o := &AcceptancePolicy{}
if m.Policies != nil {
o.Policies = make([]*AcceptancePolicy_RoleAdmissionPolicy, 0, len(m.Policies))
for _, v := range m.Policies {
o.Policies = append(o.Policies, v.Copy())
}
}
return o
}
func (m *AcceptancePolicy_RoleAdmissionPolicy) Copy() *AcceptancePolicy_RoleAdmissionPolicy {
if m == nil {
return nil
}
o := &AcceptancePolicy_RoleAdmissionPolicy{
Role: m.Role,
Autoaccept: m.Autoaccept,
Secret: m.Secret.Copy(),
}
return o
}
func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Copy() *AcceptancePolicy_RoleAdmissionPolicy_Secret {
if m == nil {
return nil
}
o := &AcceptancePolicy_RoleAdmissionPolicy_Secret{
Data: m.Data,
Alg: m.Alg,
}
return o
}
func (m *ExternalCA) Copy() *ExternalCA {
if m == nil {
return nil
}
o := &ExternalCA{
Protocol: m.Protocol,
URL: m.URL,
}
if m.Options != nil {
o.Options = make(map[string]string)
for k, v := range m.Options {
o.Options[k] = v
}
}
return o
}
func (m *CAConfig) Copy() *CAConfig {
if m == nil {
return nil
}
o := &CAConfig{
NodeCertExpiry: m.NodeCertExpiry.Copy(),
}
if m.ExternalCAs != nil {
o.ExternalCAs = make([]*ExternalCA, 0, len(m.ExternalCAs))
for _, v := range m.ExternalCAs {
o.ExternalCAs = append(o.ExternalCAs, v.Copy())
}
}
return o
}
func (m *OrchestrationConfig) Copy() *OrchestrationConfig {
if m == nil {
return nil
}
o := &OrchestrationConfig{
TaskHistoryRetentionLimit: m.TaskHistoryRetentionLimit,
}
return o
}
func (m *TaskDefaults) Copy() *TaskDefaults {
if m == nil {
return nil
}
o := &TaskDefaults{
LogDriver: m.LogDriver.Copy(),
}
return o
}
func (m *DispatcherConfig) Copy() *DispatcherConfig {
if m == nil {
return nil
}
o := &DispatcherConfig{
HeartbeatPeriod: m.HeartbeatPeriod.Copy(),
}
return o
}
func (m *RaftConfig) Copy() *RaftConfig {
if m == nil {
return nil
}
o := &RaftConfig{
SnapshotInterval: m.SnapshotInterval,
KeepOldSnapshots: m.KeepOldSnapshots,
LogEntriesForSlowFollowers: m.LogEntriesForSlowFollowers,
HeartbeatTick: m.HeartbeatTick,
ElectionTick: m.ElectionTick,
}
return o
}
func (m *Placement) Copy() *Placement {
if m == nil {
return nil
}
o := &Placement{}
if m.Constraints != nil {
o.Constraints = make([]string, 0, len(m.Constraints))
for _, v := range m.Constraints {
o.Constraints = append(o.Constraints, v)
}
}
return o
}
func (m *JoinTokens) Copy() *JoinTokens {
if m == nil {
return nil
}
o := &JoinTokens{
Worker: m.Worker,
Manager: m.Manager,
}
return o
}
func (m *RootCA) Copy() *RootCA {
if m == nil {
return nil
}
o := &RootCA{
CAKey: m.CAKey,
CACert: m.CACert,
CACertHash: m.CACertHash,
JoinTokens: *m.JoinTokens.Copy(),
}
return o
}
func (m *Certificate) Copy() *Certificate {
if m == nil {
return nil
}
o := &Certificate{
Role: m.Role,
CSR: m.CSR,
Status: *m.Status.Copy(),
Certificate: m.Certificate,
CN: m.CN,
}
return o
}
func (m *EncryptionKey) Copy() *EncryptionKey {
if m == nil {
return nil
}
o := &EncryptionKey{
Subsystem: m.Subsystem,
Algorithm: m.Algorithm,
Key: m.Key,
LamportTime: m.LamportTime,
}
return o
}
func (m *ManagerStatus) Copy() *ManagerStatus {
if m == nil {
return nil
}
o := &ManagerStatus{
RaftID: m.RaftID,
Addr: m.Addr,
Leader: m.Leader,
Reachability: m.Reachability,
}
return o
}
func (m *SecretReference) Copy() *SecretReference {
if m == nil {
return nil
}
o := &SecretReference{
SecretID: m.SecretID,
Mode: m.Mode,
Target: m.Target,
SecretName: m.SecretName,
}
return o
}
// GoString renders the Version as Go source following the gogoproto
// gostring convention; a nil receiver renders as "nil".
func (this *Version) GoString() string {
	if this == nil {
		return "nil"
	}
	return "&api.Version{" +
		"Index: " + fmt.Sprintf("%#v", this.Index) + ",\n" +
		"}"
}
// GoString renders the Annotations as Go source following the gogoproto
// gostring convention; a nil receiver renders as "nil".
func (this *Annotations) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&api.Annotations{")
	s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
	// Sort the map keys so the rendered literal is deterministic
	// (Go map iteration order is randomized).
	keysForLabels := make([]string, 0, len(this.Labels))
	for k, _ := range this.Labels {
		keysForLabels = append(keysForLabels, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
	mapStringForLabels := "map[string]string{"
	for _, k := range keysForLabels {
		mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
	}
	mapStringForLabels += "}"
	// The Labels field is only emitted when the map is non-nil.
	if this.Labels != nil {
		s = append(s, "Labels: "+mapStringForLabels+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *Resources) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.Resources{")
s = append(s, "NanoCPUs: "+fmt.Sprintf("%#v", this.NanoCPUs)+",\n")
s = append(s, "MemoryBytes: "+fmt.Sprintf("%#v", this.MemoryBytes)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ResourceRequirements) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.ResourceRequirements{")
if this.Limits != nil {
s = append(s, "Limits: "+fmt.Sprintf("%#v", this.Limits)+",\n")
}
if this.Reservations != nil {
s = append(s, "Reservations: "+fmt.Sprintf("%#v", this.Reservations)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Platform) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.Platform{")
s = append(s, "Architecture: "+fmt.Sprintf("%#v", this.Architecture)+",\n")
s = append(s, "OS: "+fmt.Sprintf("%#v", this.OS)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *PluginDescription) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.PluginDescription{")
s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *EngineDescription) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.EngineDescription{")
s = append(s, "EngineVersion: "+fmt.Sprintf("%#v", this.EngineVersion)+",\n")
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
}
mapStringForLabels += "}"
if this.Labels != nil {
s = append(s, "Labels: "+mapStringForLabels+",\n")
}
if this.Plugins != nil {
s = append(s, "Plugins: "+fmt.Sprintf("%#v", this.Plugins)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *NodeDescription) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.NodeDescription{")
s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n")
if this.Platform != nil {
s = append(s, "Platform: "+fmt.Sprintf("%#v", this.Platform)+",\n")
}
if this.Resources != nil {
s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n")
}
if this.Engine != nil {
s = append(s, "Engine: "+fmt.Sprintf("%#v", this.Engine)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *RaftMemberStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.RaftMemberStatus{")
s = append(s, "Leader: "+fmt.Sprintf("%#v", this.Leader)+",\n")
s = append(s, "Reachability: "+fmt.Sprintf("%#v", this.Reachability)+",\n")
s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *NodeStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.NodeStatus{")
s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Image) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.Image{")
s = append(s, "Reference: "+fmt.Sprintf("%#v", this.Reference)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Mount) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 11)
s = append(s, "&api.Mount{")
s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
s = append(s, "Target: "+fmt.Sprintf("%#v", this.Target)+",\n")
s = append(s, "ReadOnly: "+fmt.Sprintf("%#v", this.ReadOnly)+",\n")
if this.BindOptions != nil {
s = append(s, "BindOptions: "+fmt.Sprintf("%#v", this.BindOptions)+",\n")
}
if this.VolumeOptions != nil {
s = append(s, "VolumeOptions: "+fmt.Sprintf("%#v", this.VolumeOptions)+",\n")
}
if this.TmpfsOptions != nil {
s = append(s, "TmpfsOptions: "+fmt.Sprintf("%#v", this.TmpfsOptions)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Mount_BindOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.Mount_BindOptions{")
s = append(s, "Propagation: "+fmt.Sprintf("%#v", this.Propagation)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Mount_VolumeOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.Mount_VolumeOptions{")
s = append(s, "NoCopy: "+fmt.Sprintf("%#v", this.NoCopy)+",\n")
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
}
mapStringForLabels += "}"
if this.Labels != nil {
s = append(s, "Labels: "+mapStringForLabels+",\n")
}
if this.DriverConfig != nil {
s = append(s, "DriverConfig: "+fmt.Sprintf("%#v", this.DriverConfig)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Mount_TmpfsOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.Mount_TmpfsOptions{")
s = append(s, "SizeBytes: "+fmt.Sprintf("%#v", this.SizeBytes)+",\n")
s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *RestartPolicy) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.RestartPolicy{")
s = append(s, "Condition: "+fmt.Sprintf("%#v", this.Condition)+",\n")
if this.Delay != nil {
s = append(s, "Delay: "+fmt.Sprintf("%#v", this.Delay)+",\n")
}
s = append(s, "MaxAttempts: "+fmt.Sprintf("%#v", this.MaxAttempts)+",\n")
if this.Window != nil {
s = append(s, "Window: "+fmt.Sprintf("%#v", this.Window)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *UpdateConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&api.UpdateConfig{")
s = append(s, "Parallelism: "+fmt.Sprintf("%#v", this.Parallelism)+",\n")
s = append(s, "Delay: "+strings.Replace(this.Delay.GoString(), `&`, ``, 1)+",\n")
s = append(s, "FailureAction: "+fmt.Sprintf("%#v", this.FailureAction)+",\n")
if this.Monitor != nil {
s = append(s, "Monitor: "+fmt.Sprintf("%#v", this.Monitor)+",\n")
}
s = append(s, "MaxFailureRatio: "+fmt.Sprintf("%#v", this.MaxFailureRatio)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *UpdateStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.UpdateStatus{")
s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
if this.StartedAt != nil {
s = append(s, "StartedAt: "+fmt.Sprintf("%#v", this.StartedAt)+",\n")
}
if this.CompletedAt != nil {
s = append(s, "CompletedAt: "+fmt.Sprintf("%#v", this.CompletedAt)+",\n")
}
s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ContainerStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.ContainerStatus{")
s = append(s, "ContainerID: "+fmt.Sprintf("%#v", this.ContainerID)+",\n")
s = append(s, "PID: "+fmt.Sprintf("%#v", this.PID)+",\n")
s = append(s, "ExitCode: "+fmt.Sprintf("%#v", this.ExitCode)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *TaskStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&api.TaskStatus{")
if this.Timestamp != nil {
s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
}
s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
s = append(s, "Err: "+fmt.Sprintf("%#v", this.Err)+",\n")
if this.RuntimeStatus != nil {
s = append(s, "RuntimeStatus: "+fmt.Sprintf("%#v", this.RuntimeStatus)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *TaskStatus_Container) GoString() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&api.TaskStatus_Container{` +
`Container:` + fmt.Sprintf("%#v", this.Container) + `}`}, ", ")
return s
}
func (this *NetworkAttachmentConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.NetworkAttachmentConfig{")
s = append(s, "Target: "+fmt.Sprintf("%#v", this.Target)+",\n")
s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n")
s = append(s, "Addresses: "+fmt.Sprintf("%#v", this.Addresses)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *IPAMConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&api.IPAMConfig{")
s = append(s, "Family: "+fmt.Sprintf("%#v", this.Family)+",\n")
s = append(s, "Subnet: "+fmt.Sprintf("%#v", this.Subnet)+",\n")
s = append(s, "Range: "+fmt.Sprintf("%#v", this.Range)+",\n")
s = append(s, "Gateway: "+fmt.Sprintf("%#v", this.Gateway)+",\n")
keysForReserved := make([]string, 0, len(this.Reserved))
for k, _ := range this.Reserved {
keysForReserved = append(keysForReserved, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForReserved)
mapStringForReserved := "map[string]string{"
for _, k := range keysForReserved {
mapStringForReserved += fmt.Sprintf("%#v: %#v,", k, this.Reserved[k])
}
mapStringForReserved += "}"
if this.Reserved != nil {
s = append(s, "Reserved: "+mapStringForReserved+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *PortConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.PortConfig{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n")
s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Driver) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.Driver{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
keysForOptions := make([]string, 0, len(this.Options))
for k, _ := range this.Options {
keysForOptions = append(keysForOptions, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
mapStringForOptions := "map[string]string{"
for _, k := range keysForOptions {
mapStringForOptions += fmt.Sprintf("%#v: %#v,", k, this.Options[k])
}
mapStringForOptions += "}"
if this.Options != nil {
s = append(s, "Options: "+mapStringForOptions+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *IPAMOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.IPAMOptions{")
if this.Driver != nil {
s = append(s, "Driver: "+fmt.Sprintf("%#v", this.Driver)+",\n")
}
if this.Configs != nil {
s = append(s, "Configs: "+fmt.Sprintf("%#v", this.Configs)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Peer) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.Peer{")
s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *WeightedPeer) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.WeightedPeer{")
if this.Peer != nil {
s = append(s, "Peer: "+fmt.Sprintf("%#v", this.Peer)+",\n")
}
s = append(s, "Weight: "+fmt.Sprintf("%#v", this.Weight)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *IssuanceStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.IssuanceStatus{")
s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
s = append(s, "Err: "+fmt.Sprintf("%#v", this.Err)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *AcceptancePolicy) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.AcceptancePolicy{")
if this.Policies != nil {
s = append(s, "Policies: "+fmt.Sprintf("%#v", this.Policies)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *AcceptancePolicy_RoleAdmissionPolicy) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.AcceptancePolicy_RoleAdmissionPolicy{")
s = append(s, "Role: "+fmt.Sprintf("%#v", this.Role)+",\n")
s = append(s, "Autoaccept: "+fmt.Sprintf("%#v", this.Autoaccept)+",\n")
if this.Secret != nil {
s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *AcceptancePolicy_RoleAdmissionPolicy_Secret) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.AcceptancePolicy_RoleAdmissionPolicy_Secret{")
s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
s = append(s, "Alg: "+fmt.Sprintf("%#v", this.Alg)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ExternalCA) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&api.ExternalCA{")
s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
s = append(s, "URL: "+fmt.Sprintf("%#v", this.URL)+",\n")
keysForOptions := make([]string, 0, len(this.Options))
for k, _ := range this.Options {
keysForOptions = append(keysForOptions, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
mapStringForOptions := "map[string]string{"
for _, k := range keysForOptions {
mapStringForOptions += fmt.Sprintf("%#v: %#v,", k, this.Options[k])
}
mapStringForOptions += "}"
if this.Options != nil {
s = append(s, "Options: "+mapStringForOptions+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *CAConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.CAConfig{")
if this.NodeCertExpiry != nil {
s = append(s, "NodeCertExpiry: "+fmt.Sprintf("%#v", this.NodeCertExpiry)+",\n")
}
if this.ExternalCAs != nil {
s = append(s, "ExternalCAs: "+fmt.Sprintf("%#v", this.ExternalCAs)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *OrchestrationConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.OrchestrationConfig{")
s = append(s, "TaskHistoryRetentionLimit: "+fmt.Sprintf("%#v", this.TaskHistoryRetentionLimit)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *TaskDefaults) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.TaskDefaults{")
if this.LogDriver != nil {
s = append(s, "LogDriver: "+fmt.Sprintf("%#v", this.LogDriver)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *DispatcherConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.DispatcherConfig{")
if this.HeartbeatPeriod != nil {
s = append(s, "HeartbeatPeriod: "+fmt.Sprintf("%#v", this.HeartbeatPeriod)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *RaftConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&api.RaftConfig{")
s = append(s, "SnapshotInterval: "+fmt.Sprintf("%#v", this.SnapshotInterval)+",\n")
s = append(s, "KeepOldSnapshots: "+fmt.Sprintf("%#v", this.KeepOldSnapshots)+",\n")
s = append(s, "LogEntriesForSlowFollowers: "+fmt.Sprintf("%#v", this.LogEntriesForSlowFollowers)+",\n")
s = append(s, "HeartbeatTick: "+fmt.Sprintf("%#v", this.HeartbeatTick)+",\n")
s = append(s, "ElectionTick: "+fmt.Sprintf("%#v", this.ElectionTick)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Placement) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.Placement{")
s = append(s, "Constraints: "+fmt.Sprintf("%#v", this.Constraints)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *JoinTokens) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&api.JoinTokens{")
s = append(s, "Worker: "+fmt.Sprintf("%#v", this.Worker)+",\n")
s = append(s, "Manager: "+fmt.Sprintf("%#v", this.Manager)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *RootCA) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.RootCA{")
s = append(s, "CAKey: "+fmt.Sprintf("%#v", this.CAKey)+",\n")
s = append(s, "CACert: "+fmt.Sprintf("%#v", this.CACert)+",\n")
s = append(s, "CACertHash: "+fmt.Sprintf("%#v", this.CACertHash)+",\n")
s = append(s, "JoinTokens: "+strings.Replace(this.JoinTokens.GoString(), `&`, ``, 1)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *Certificate) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&api.Certificate{")
s = append(s, "Role: "+fmt.Sprintf("%#v", this.Role)+",\n")
s = append(s, "CSR: "+fmt.Sprintf("%#v", this.CSR)+",\n")
s = append(s, "Status: "+strings.Replace(this.Status.GoString(), `&`, ``, 1)+",\n")
s = append(s, "Certificate: "+fmt.Sprintf("%#v", this.Certificate)+",\n")
s = append(s, "CN: "+fmt.Sprintf("%#v", this.CN)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *EncryptionKey) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.EncryptionKey{")
s = append(s, "Subsystem: "+fmt.Sprintf("%#v", this.Subsystem)+",\n")
s = append(s, "Algorithm: "+fmt.Sprintf("%#v", this.Algorithm)+",\n")
s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
s = append(s, "LamportTime: "+fmt.Sprintf("%#v", this.LamportTime)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ManagerStatus) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.ManagerStatus{")
s = append(s, "RaftID: "+fmt.Sprintf("%#v", this.RaftID)+",\n")
s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
s = append(s, "Leader: "+fmt.Sprintf("%#v", this.Leader)+",\n")
s = append(s, "Reachability: "+fmt.Sprintf("%#v", this.Reachability)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *SecretReference) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&api.SecretReference{")
s = append(s, "SecretID: "+fmt.Sprintf("%#v", this.SecretID)+",\n")
s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n")
s = append(s, "Target: "+fmt.Sprintf("%#v", this.Target)+",\n")
s = append(s, "SecretName: "+fmt.Sprintf("%#v", this.SecretName)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
// valueToGoStringTypes renders a (possibly nil) value as Go source: nil
// values render as "nil", and non-nil values render as an immediately
// invoked helper function returning a pointer to the value, so the emitted
// source can initialize pointer-typed fields. typ is the Go type name used
// in the emitted helper signature.
func valueToGoStringTypes(v interface{}, typ string) string {
	if v == nil {
		// reflect.ValueOf(nil) yields the zero Value; bail out before
		// calling methods that would panic on it.
		return "nil"
	}
	rv := reflect.ValueOf(v)
	// Value.IsNil panics for kinds that cannot be nil, so only consult it
	// for nillable kinds.
	switch rv.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if rv.IsNil() {
			return "nil"
		}
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
// extensionToGoStringTypes renders a message's proto2 extensions as Go
// source: a proto.NewUnsafeXXX_InternalExtensions literal keyed by
// extension field number. Returns "nil" when the message carries no
// extension map.
func extensionToGoStringTypes(m github_com_gogo_protobuf_proto.Message) string {
	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
	if e == nil {
		return "nil"
	}
	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
	// Sort the field numbers so the rendered literal is deterministic
	// (map iteration order is randomized).
	keys := make([]int, 0, len(e))
	for k := range e {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)
	ss := []string{}
	for _, k := range keys {
		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
	}
	s += strings.Join(ss, ",") + "})"
	return s
}
// Marshal encodes the Version into a freshly allocated protobuf
// wire-format buffer sized by Size().
func (m *Version) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}
// MarshalTo encodes the Version into data, which must be at least Size()
// bytes, and returns the number of bytes written.
func (m *Version) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Index): varint, tag byte 0x8. Zero values are omitted,
	// matching proto3 encoding rules.
	if m.Index != 0 {
		data[i] = 0x8
		i++
		i = encodeVarintTypes(data, i, uint64(m.Index))
	}
	return i, nil
}
// Marshal encodes the Annotations into a freshly allocated protobuf
// wire-format buffer sized by Size().
func (m *Annotations) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}
// MarshalTo encodes the Annotations into data, which must be at least
// Size() bytes, and returns the number of bytes written.
func (m *Annotations) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Name): length-delimited, tag byte 0xa; empty strings are
	// omitted.
	if len(m.Name) > 0 {
		data[i] = 0xa
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.Name)))
		i += copy(data[i:], m.Name)
	}
	// Field 2 (Labels): each map entry is encoded as a nested message
	// (tag byte 0x12) with the key as field 1 and the value as field 2,
	// both length-delimited.
	if len(m.Labels) > 0 {
		for k, _ := range m.Labels {
			data[i] = 0x12
			i++
			v := m.Labels[k]
			// Entry size: one tag byte plus varint-encoded length plus
			// payload, for each of key and value.
			mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
			i = encodeVarintTypes(data, i, uint64(mapSize))
			data[i] = 0xa
			i++
			i = encodeVarintTypes(data, i, uint64(len(k)))
			i += copy(data[i:], k)
			data[i] = 0x12
			i++
			i = encodeVarintTypes(data, i, uint64(len(v)))
			i += copy(data[i:], v)
		}
	}
	return i, nil
}
func (m *Resources) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Resources) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.NanoCPUs != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.NanoCPUs))
}
if m.MemoryBytes != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.MemoryBytes))
}
return i, nil
}
func (m *ResourceRequirements) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Limits != nil {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(m.Limits.Size()))
n1, err := m.Limits.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.Reservations != nil {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(m.Reservations.Size()))
n2, err := m.Reservations.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func (m *Platform) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Platform) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Architecture) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Architecture)))
i += copy(data[i:], m.Architecture)
}
if len(m.OS) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.OS)))
i += copy(data[i:], m.OS)
}
return i, nil
}
func (m *PluginDescription) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *PluginDescription) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Type) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Type)))
i += copy(data[i:], m.Type)
}
if len(m.Name) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
return i, nil
}
func (m *EngineDescription) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *EngineDescription) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.EngineVersion) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.EngineVersion)))
i += copy(data[i:], m.EngineVersion)
}
if len(m.Labels) > 0 {
for k, _ := range m.Labels {
data[i] = 0x12
i++
v := m.Labels[k]
mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
i = encodeVarintTypes(data, i, uint64(mapSize))
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(k)))
i += copy(data[i:], k)
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(v)))
i += copy(data[i:], v)
}
}
if len(m.Plugins) > 0 {
for _, msg := range m.Plugins {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *NodeDescription) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *NodeDescription) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Hostname) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Hostname)))
i += copy(data[i:], m.Hostname)
}
if m.Platform != nil {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(m.Platform.Size()))
n3, err := m.Platform.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n3
}
if m.Resources != nil {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(m.Resources.Size()))
n4, err := m.Resources.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n4
}
if m.Engine != nil {
data[i] = 0x22
i++
i = encodeVarintTypes(data, i, uint64(m.Engine.Size()))
n5, err := m.Engine.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n5
}
return i, nil
}
// Marshal encodes m into the protobuf wire format and returns the resulting
// buffer. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RaftMemberStatus) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo writes m into data (which must be at least m.Size() bytes long)
// and returns the number of bytes written.
func (m *RaftMemberStatus) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Leader): bools are encoded as a one-byte varint (1 = true);
	// a false value is omitted entirely, so the inner else branch is
	// unreachable in practice but kept as generated.
	if m.Leader {
		data[i] = 0x8
		i++
		if m.Leader {
			data[i] = 1
		} else {
			data[i] = 0
		}
		i++
	}
	// Field 2 (Reachability): enum encoded as a varint; zero omitted.
	if m.Reachability != 0 {
		data[i] = 0x10
		i++
		i = encodeVarintTypes(data, i, uint64(m.Reachability))
	}
	// Field 3 (Message): length-delimited string; empty omitted.
	if len(m.Message) > 0 {
		data[i] = 0x1a
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.Message)))
		i += copy(data[i:], m.Message)
	}
	return i, nil
}
func (m *NodeStatus) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *NodeStatus) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.State != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.State))
}
if len(m.Message) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Message)))
i += copy(data[i:], m.Message)
}
return i, nil
}
func (m *Image) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Image) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Reference) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Reference)))
i += copy(data[i:], m.Reference)
}
return i, nil
}
func (m *Mount) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Mount) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Type != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.Type))
}
if len(m.Source) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Source)))
i += copy(data[i:], m.Source)
}
if len(m.Target) > 0 {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(len(m.Target)))
i += copy(data[i:], m.Target)
}
if m.ReadOnly {
data[i] = 0x20
i++
if m.ReadOnly {
data[i] = 1
} else {
data[i] = 0
}
i++
}
if m.BindOptions != nil {
data[i] = 0x2a
i++
i = encodeVarintTypes(data, i, uint64(m.BindOptions.Size()))
n6, err := m.BindOptions.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n6
}
if m.VolumeOptions != nil {
data[i] = 0x32
i++
i = encodeVarintTypes(data, i, uint64(m.VolumeOptions.Size()))
n7, err := m.VolumeOptions.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n7
}
if m.TmpfsOptions != nil {
data[i] = 0x3a
i++
i = encodeVarintTypes(data, i, uint64(m.TmpfsOptions.Size()))
n8, err := m.TmpfsOptions.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n8
}
return i, nil
}
func (m *Mount_BindOptions) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Mount_BindOptions) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Propagation != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.Propagation))
}
return i, nil
}
func (m *Mount_VolumeOptions) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Mount_VolumeOptions) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.NoCopy {
data[i] = 0x8
i++
if m.NoCopy {
data[i] = 1
} else {
data[i] = 0
}
i++
}
if len(m.Labels) > 0 {
for k, _ := range m.Labels {
data[i] = 0x12
i++
v := m.Labels[k]
mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
i = encodeVarintTypes(data, i, uint64(mapSize))
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(k)))
i += copy(data[i:], k)
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(v)))
i += copy(data[i:], v)
}
}
if m.DriverConfig != nil {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(m.DriverConfig.Size()))
n9, err := m.DriverConfig.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n9
}
return i, nil
}
func (m *Mount_TmpfsOptions) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Mount_TmpfsOptions) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.SizeBytes != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.SizeBytes))
}
if m.Mode != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.Mode))
}
return i, nil
}
func (m *RestartPolicy) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *RestartPolicy) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Condition != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.Condition))
}
if m.Delay != nil {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(m.Delay.Size()))
n10, err := m.Delay.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n10
}
if m.MaxAttempts != 0 {
data[i] = 0x18
i++
i = encodeVarintTypes(data, i, uint64(m.MaxAttempts))
}
if m.Window != nil {
data[i] = 0x22
i++
i = encodeVarintTypes(data, i, uint64(m.Window.Size()))
n11, err := m.Window.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n11
}
return i, nil
}
// Marshal encodes m into the protobuf wire format and returns the resulting
// buffer. Generated by protoc-gen-gogo; do not edit by hand.
func (m *UpdateConfig) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo writes m into data (which must be at least m.Size() bytes long)
// and returns the number of bytes written.
func (m *UpdateConfig) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Parallelism): varint; zero omitted.
	if m.Parallelism != 0 {
		data[i] = 0x8
		i++
		i = encodeVarintTypes(data, i, uint64(m.Parallelism))
	}
	// NOTE(review): unlike the Monitor field below, Delay (field 2) is
	// written without a nil/zero guard — presumably a non-nullable field in
	// the .proto, since a nil Delay would panic on Size(). Confirm against
	// the schema before changing.
	data[i] = 0x12
	i++
	i = encodeVarintTypes(data, i, uint64(m.Delay.Size()))
	n12, err := m.Delay.MarshalTo(data[i:])
	if err != nil {
		return 0, err
	}
	i += n12
	// Field 3 (FailureAction): enum as varint; zero omitted.
	if m.FailureAction != 0 {
		data[i] = 0x18
		i++
		i = encodeVarintTypes(data, i, uint64(m.FailureAction))
	}
	// Field 4 (Monitor): optional nested message, length-prefixed.
	if m.Monitor != nil {
		data[i] = 0x22
		i++
		i = encodeVarintTypes(data, i, uint64(m.Monitor.Size()))
		n13, err := m.Monitor.MarshalTo(data[i:])
		if err != nil {
			return 0, err
		}
		i += n13
	}
	// Field 5 (MaxFailureRatio): tag 0x2d = field 5, wire type 5 (fixed32);
	// the float's IEEE-754 bits are written little-endian.
	if m.MaxFailureRatio != 0 {
		data[i] = 0x2d
		i++
		i = encodeFixed32Types(data, i, uint32(math.Float32bits(float32(m.MaxFailureRatio))))
	}
	return i, nil
}
func (m *UpdateStatus) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *UpdateStatus) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.State != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.State))
}
if m.StartedAt != nil {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(m.StartedAt.Size()))
n14, err := m.StartedAt.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n14
}
if m.CompletedAt != nil {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(m.CompletedAt.Size()))
n15, err := m.CompletedAt.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n15
}
if len(m.Message) > 0 {
data[i] = 0x22
i++
i = encodeVarintTypes(data, i, uint64(len(m.Message)))
i += copy(data[i:], m.Message)
}
return i, nil
}
func (m *ContainerStatus) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *ContainerStatus) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ContainerID) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.ContainerID)))
i += copy(data[i:], m.ContainerID)
}
if m.PID != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.PID))
}
if m.ExitCode != 0 {
data[i] = 0x18
i++
i = encodeVarintTypes(data, i, uint64(m.ExitCode))
}
return i, nil
}
// Marshal encodes m into the protobuf wire format and returns the resulting
// buffer. Generated by protoc-gen-gogo; do not edit by hand.
func (m *TaskStatus) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo writes m into data (which must be at least m.Size() bytes long)
// and returns the number of bytes written.
func (m *TaskStatus) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Timestamp): optional nested message, length-prefixed.
	if m.Timestamp != nil {
		data[i] = 0xa
		i++
		i = encodeVarintTypes(data, i, uint64(m.Timestamp.Size()))
		n16, err := m.Timestamp.MarshalTo(data[i:])
		if err != nil {
			return 0, err
		}
		i += n16
	}
	// Field 2 (State): enum as varint; zero omitted.
	if m.State != 0 {
		data[i] = 0x10
		i++
		i = encodeVarintTypes(data, i, uint64(m.State))
	}
	// Field 3 (Message): length-delimited string; empty omitted.
	if len(m.Message) > 0 {
		data[i] = 0x1a
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.Message)))
		i += copy(data[i:], m.Message)
	}
	// Field 4 (Err): length-delimited string; empty omitted.
	if len(m.Err) > 0 {
		data[i] = 0x22
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.Err)))
		i += copy(data[i:], m.Err)
	}
	// RuntimeStatus is a oneof: the concrete wrapper (e.g.
	// *TaskStatus_Container below) writes its own tag and payload.
	if m.RuntimeStatus != nil {
		nn17, err := m.RuntimeStatus.MarshalTo(data[i:])
		if err != nil {
			return 0, err
		}
		i += nn17
	}
	return i, nil
}

// MarshalTo encodes the container branch of the TaskStatus.RuntimeStatus
// oneof (field 5, tag 0x2a) into data and returns the bytes written.
func (m *TaskStatus_Container) MarshalTo(data []byte) (int, error) {
	i := 0
	if m.Container != nil {
		data[i] = 0x2a
		i++
		i = encodeVarintTypes(data, i, uint64(m.Container.Size()))
		n18, err := m.Container.MarshalTo(data[i:])
		if err != nil {
			return 0, err
		}
		i += n18
	}
	return i, nil
}
// Marshal encodes m into the protobuf wire format and returns the resulting
// buffer. Generated by protoc-gen-gogo; do not edit by hand.
func (m *NetworkAttachmentConfig) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo writes m into data (which must be at least m.Size() bytes long)
// and returns the number of bytes written.
func (m *NetworkAttachmentConfig) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Target): length-delimited string; empty omitted.
	if len(m.Target) > 0 {
		data[i] = 0xa
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.Target)))
		i += copy(data[i:], m.Target)
	}
	// Field 2 (Aliases): repeated string — one tag+length+bytes record per
	// element. The generator inlines the varint length encoding here
	// (equivalent to encodeVarintTypes) rather than calling the helper.
	if len(m.Aliases) > 0 {
		for _, s := range m.Aliases {
			data[i] = 0x12
			i++
			l = len(s)
			for l >= 1<<7 {
				data[i] = uint8(uint64(l)&0x7f | 0x80)
				l >>= 7
				i++
			}
			data[i] = uint8(l)
			i++
			i += copy(data[i:], s)
		}
	}
	// Field 3 (Addresses): repeated string, same inline varint encoding.
	if len(m.Addresses) > 0 {
		for _, s := range m.Addresses {
			data[i] = 0x1a
			i++
			l = len(s)
			for l >= 1<<7 {
				data[i] = uint8(uint64(l)&0x7f | 0x80)
				l >>= 7
				i++
			}
			data[i] = uint8(l)
			i++
			i += copy(data[i:], s)
		}
	}
	return i, nil
}
func (m *IPAMConfig) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *IPAMConfig) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Family != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.Family))
}
if len(m.Subnet) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Subnet)))
i += copy(data[i:], m.Subnet)
}
if len(m.Range) > 0 {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(len(m.Range)))
i += copy(data[i:], m.Range)
}
if len(m.Gateway) > 0 {
data[i] = 0x22
i++
i = encodeVarintTypes(data, i, uint64(len(m.Gateway)))
i += copy(data[i:], m.Gateway)
}
if len(m.Reserved) > 0 {
for k, _ := range m.Reserved {
data[i] = 0x2a
i++
v := m.Reserved[k]
mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
i = encodeVarintTypes(data, i, uint64(mapSize))
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(k)))
i += copy(data[i:], k)
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(v)))
i += copy(data[i:], v)
}
}
return i, nil
}
func (m *PortConfig) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *PortConfig) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if m.Protocol != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.Protocol))
}
if m.TargetPort != 0 {
data[i] = 0x18
i++
i = encodeVarintTypes(data, i, uint64(m.TargetPort))
}
if m.PublishedPort != 0 {
data[i] = 0x20
i++
i = encodeVarintTypes(data, i, uint64(m.PublishedPort))
}
return i, nil
}
func (m *Driver) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Driver) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.Options) > 0 {
for k, _ := range m.Options {
data[i] = 0x12
i++
v := m.Options[k]
mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
i = encodeVarintTypes(data, i, uint64(mapSize))
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(k)))
i += copy(data[i:], k)
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(v)))
i += copy(data[i:], v)
}
}
return i, nil
}
func (m *IPAMOptions) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *IPAMOptions) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Driver != nil {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(m.Driver.Size()))
n19, err := m.Driver.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n19
}
if len(m.Configs) > 0 {
for _, msg := range m.Configs {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *Peer) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Peer) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.NodeID) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.NodeID)))
i += copy(data[i:], m.NodeID)
}
if len(m.Addr) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Addr)))
i += copy(data[i:], m.Addr)
}
return i, nil
}
func (m *WeightedPeer) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *WeightedPeer) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Peer != nil {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(m.Peer.Size()))
n20, err := m.Peer.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n20
}
if m.Weight != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.Weight))
}
return i, nil
}
func (m *IssuanceStatus) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *IssuanceStatus) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.State != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.State))
}
if len(m.Err) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Err)))
i += copy(data[i:], m.Err)
}
return i, nil
}
func (m *AcceptancePolicy) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *AcceptancePolicy) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Policies) > 0 {
for _, msg := range m.Policies {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *AcceptancePolicy_RoleAdmissionPolicy) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *AcceptancePolicy_RoleAdmissionPolicy) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Role != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.Role))
}
if m.Autoaccept {
data[i] = 0x10
i++
if m.Autoaccept {
data[i] = 1
} else {
data[i] = 0
}
i++
}
if m.Secret != nil {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(m.Secret.Size()))
n21, err := m.Secret.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n21
}
return i, nil
}
func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Data) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Data)))
i += copy(data[i:], m.Data)
}
if len(m.Alg) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Alg)))
i += copy(data[i:], m.Alg)
}
return i, nil
}
func (m *ExternalCA) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *ExternalCA) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Protocol != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.Protocol))
}
if len(m.URL) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.URL)))
i += copy(data[i:], m.URL)
}
if len(m.Options) > 0 {
for k, _ := range m.Options {
data[i] = 0x1a
i++
v := m.Options[k]
mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
i = encodeVarintTypes(data, i, uint64(mapSize))
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(k)))
i += copy(data[i:], k)
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(v)))
i += copy(data[i:], v)
}
}
return i, nil
}
func (m *CAConfig) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *CAConfig) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.NodeCertExpiry != nil {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(m.NodeCertExpiry.Size()))
n22, err := m.NodeCertExpiry.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n22
}
if len(m.ExternalCAs) > 0 {
for _, msg := range m.ExternalCAs {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *OrchestrationConfig) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *OrchestrationConfig) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.TaskHistoryRetentionLimit != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.TaskHistoryRetentionLimit))
}
return i, nil
}
func (m *TaskDefaults) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *TaskDefaults) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.LogDriver != nil {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(m.LogDriver.Size()))
n23, err := m.LogDriver.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n23
}
return i, nil
}
func (m *DispatcherConfig) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *DispatcherConfig) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.HeartbeatPeriod != nil {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(m.HeartbeatPeriod.Size()))
n24, err := m.HeartbeatPeriod.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n24
}
return i, nil
}
func (m *RaftConfig) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *RaftConfig) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.SnapshotInterval != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.SnapshotInterval))
}
if m.KeepOldSnapshots != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.KeepOldSnapshots))
}
if m.LogEntriesForSlowFollowers != 0 {
data[i] = 0x18
i++
i = encodeVarintTypes(data, i, uint64(m.LogEntriesForSlowFollowers))
}
if m.HeartbeatTick != 0 {
data[i] = 0x20
i++
i = encodeVarintTypes(data, i, uint64(m.HeartbeatTick))
}
if m.ElectionTick != 0 {
data[i] = 0x28
i++
i = encodeVarintTypes(data, i, uint64(m.ElectionTick))
}
return i, nil
}
func (m *Placement) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Placement) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Constraints) > 0 {
for _, s := range m.Constraints {
data[i] = 0xa
i++
l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
return i, nil
}
func (m *JoinTokens) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *JoinTokens) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Worker) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Worker)))
i += copy(data[i:], m.Worker)
}
if len(m.Manager) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Manager)))
i += copy(data[i:], m.Manager)
}
return i, nil
}
// Marshal encodes m into the protobuf wire format and returns the resulting
// buffer. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RootCA) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo writes m into data (which must be at least m.Size() bytes long)
// and returns the number of bytes written.
func (m *RootCA) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (CAKey): length-delimited bytes; empty omitted.
	if len(m.CAKey) > 0 {
		data[i] = 0xa
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.CAKey)))
		i += copy(data[i:], m.CAKey)
	}
	// Field 2 (CACert): length-delimited bytes; empty omitted.
	if len(m.CACert) > 0 {
		data[i] = 0x12
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.CACert)))
		i += copy(data[i:], m.CACert)
	}
	// Field 3 (CACertHash): length-delimited string; empty omitted.
	if len(m.CACertHash) > 0 {
		data[i] = 0x1a
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.CACertHash)))
		i += copy(data[i:], m.CACertHash)
	}
	// Field 4 (JoinTokens) is written unconditionally — presumably a
	// non-nullable embedded struct in the .proto (value, not pointer),
	// so it is always present; confirm against the schema.
	data[i] = 0x22
	i++
	i = encodeVarintTypes(data, i, uint64(m.JoinTokens.Size()))
	n25, err := m.JoinTokens.MarshalTo(data[i:])
	if err != nil {
		return 0, err
	}
	i += n25
	return i, nil
}
// Marshal encodes m into the protobuf wire format and returns the resulting
// buffer. Generated by protoc-gen-gogo; do not edit by hand.
func (m *Certificate) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo writes m into data (which must be at least m.Size() bytes long)
// and returns the number of bytes written.
func (m *Certificate) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Role): enum as varint; zero omitted.
	if m.Role != 0 {
		data[i] = 0x8
		i++
		i = encodeVarintTypes(data, i, uint64(m.Role))
	}
	// Field 2 (CSR): length-delimited bytes; empty omitted.
	if len(m.CSR) > 0 {
		data[i] = 0x12
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.CSR)))
		i += copy(data[i:], m.CSR)
	}
	// Field 3 (Status) is written unconditionally — presumably a
	// non-nullable embedded struct in the .proto, so it is always present;
	// confirm against the schema.
	data[i] = 0x1a
	i++
	i = encodeVarintTypes(data, i, uint64(m.Status.Size()))
	n26, err := m.Status.MarshalTo(data[i:])
	if err != nil {
		return 0, err
	}
	i += n26
	// Field 4 (Certificate): length-delimited bytes; empty omitted.
	if len(m.Certificate) > 0 {
		data[i] = 0x22
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.Certificate)))
		i += copy(data[i:], m.Certificate)
	}
	// Field 5 (CN): length-delimited string; empty omitted.
	if len(m.CN) > 0 {
		data[i] = 0x2a
		i++
		i = encodeVarintTypes(data, i, uint64(len(m.CN)))
		i += copy(data[i:], m.CN)
	}
	return i, nil
}
func (m *EncryptionKey) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *EncryptionKey) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Subsystem) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.Subsystem)))
i += copy(data[i:], m.Subsystem)
}
if m.Algorithm != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.Algorithm))
}
if len(m.Key) > 0 {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if m.LamportTime != 0 {
data[i] = 0x20
i++
i = encodeVarintTypes(data, i, uint64(m.LamportTime))
}
return i, nil
}
func (m *ManagerStatus) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *ManagerStatus) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.RaftID != 0 {
data[i] = 0x8
i++
i = encodeVarintTypes(data, i, uint64(m.RaftID))
}
if len(m.Addr) > 0 {
data[i] = 0x12
i++
i = encodeVarintTypes(data, i, uint64(len(m.Addr)))
i += copy(data[i:], m.Addr)
}
if m.Leader {
data[i] = 0x18
i++
if m.Leader {
data[i] = 1
} else {
data[i] = 0
}
i++
}
if m.Reachability != 0 {
data[i] = 0x20
i++
i = encodeVarintTypes(data, i, uint64(m.Reachability))
}
return i, nil
}
func (m *SecretReference) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *SecretReference) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.SecretID) > 0 {
data[i] = 0xa
i++
i = encodeVarintTypes(data, i, uint64(len(m.SecretID)))
i += copy(data[i:], m.SecretID)
}
if m.Mode != 0 {
data[i] = 0x10
i++
i = encodeVarintTypes(data, i, uint64(m.Mode))
}
if len(m.Target) > 0 {
data[i] = 0x1a
i++
i = encodeVarintTypes(data, i, uint64(len(m.Target)))
i += copy(data[i:], m.Target)
}
if len(m.SecretName) > 0 {
data[i] = 0x22
i++
i = encodeVarintTypes(data, i, uint64(len(m.SecretName)))
i += copy(data[i:], m.SecretName)
}
return i, nil
}
// encodeFixed64Types writes v into data[offset:] in little-endian byte
// order (protobuf wire type 1, fixed64) and returns the offset just past
// the eight bytes written. The caller must ensure capacity.
func encodeFixed64Types(data []byte, offset int, v uint64) int {
	for b := 0; b < 8; b++ {
		data[offset+b] = uint8(v >> (uint(b) * 8))
	}
	return offset + 8
}
// encodeFixed32Types writes v into data[offset:] in little-endian byte
// order (protobuf wire type 5, fixed32) and returns the offset just past
// the four bytes written. The caller must ensure capacity.
func encodeFixed32Types(data []byte, offset int, v uint32) int {
	for b := 0; b < 4; b++ {
		data[offset+b] = uint8(v >> (uint(b) * 8))
	}
	return offset + 4
}
func encodeVarintTypes(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
// Size returns the number of bytes the protobuf encoding of m occupies;
// MarshalTo relies on it to size the output buffer. Generated by
// protoc-gen-gogo; do not edit by hand.
func (m *Version) Size() (n int) {
	var l int
	_ = l
	// 1 tag byte plus the varint width of Index; zero values cost nothing.
	if m.Index != 0 {
		n += 1 + sovTypes(uint64(m.Index))
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies;
// MarshalTo relies on it to size the output buffer. Generated by
// protoc-gen-gogo; do not edit by hand.
func (m *Annotations) Size() (n int) {
	var l int
	_ = l
	// Name: tag byte + length varint + payload; empty costs nothing.
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovTypes(uint64(l))
	}
	if len(m.Labels) > 0 {
		// Each map entry is a nested message (key field + value field)
		// preceded by its own tag byte and length varint; this mirrors the
		// per-entry layout written in MarshalTo.
		for k, v := range m.Labels {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
			n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
		}
	}
	return n
}
func (m *Resources) Size() (n int) {
var l int
_ = l
if m.NanoCPUs != 0 {
n += 1 + sovTypes(uint64(m.NanoCPUs))
}
if m.MemoryBytes != 0 {
n += 1 + sovTypes(uint64(m.MemoryBytes))
}
return n
}
func (m *ResourceRequirements) Size() (n int) {
var l int
_ = l
if m.Limits != nil {
l = m.Limits.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.Reservations != nil {
l = m.Reservations.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Platform) Size() (n int) {
var l int
_ = l
l = len(m.Architecture)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.OS)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *PluginDescription) Size() (n int) {
var l int
_ = l
l = len(m.Type)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *EngineDescription) Size() (n int) {
var l int
_ = l
l = len(m.EngineVersion)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
}
}
if len(m.Plugins) > 0 {
for _, e := range m.Plugins {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *NodeDescription) Size() (n int) {
var l int
_ = l
l = len(m.Hostname)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.Platform != nil {
l = m.Platform.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.Resources != nil {
l = m.Resources.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.Engine != nil {
l = m.Engine.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *RaftMemberStatus) Size() (n int) {
var l int
_ = l
if m.Leader {
n += 2
}
if m.Reachability != 0 {
n += 1 + sovTypes(uint64(m.Reachability))
}
l = len(m.Message)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *NodeStatus) Size() (n int) {
var l int
_ = l
if m.State != 0 {
n += 1 + sovTypes(uint64(m.State))
}
l = len(m.Message)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Image) Size() (n int) {
var l int
_ = l
l = len(m.Reference)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Mount) Size() (n int) {
var l int
_ = l
if m.Type != 0 {
n += 1 + sovTypes(uint64(m.Type))
}
l = len(m.Source)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Target)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.ReadOnly {
n += 2
}
if m.BindOptions != nil {
l = m.BindOptions.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.VolumeOptions != nil {
l = m.VolumeOptions.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.TmpfsOptions != nil {
l = m.TmpfsOptions.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Mount_BindOptions) Size() (n int) {
var l int
_ = l
if m.Propagation != 0 {
n += 1 + sovTypes(uint64(m.Propagation))
}
return n
}
func (m *Mount_VolumeOptions) Size() (n int) {
var l int
_ = l
if m.NoCopy {
n += 2
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
}
}
if m.DriverConfig != nil {
l = m.DriverConfig.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Mount_TmpfsOptions) Size() (n int) {
var l int
_ = l
if m.SizeBytes != 0 {
n += 1 + sovTypes(uint64(m.SizeBytes))
}
if m.Mode != 0 {
n += 1 + sovTypes(uint64(m.Mode))
}
return n
}
func (m *RestartPolicy) Size() (n int) {
var l int
_ = l
if m.Condition != 0 {
n += 1 + sovTypes(uint64(m.Condition))
}
if m.Delay != nil {
l = m.Delay.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.MaxAttempts != 0 {
n += 1 + sovTypes(uint64(m.MaxAttempts))
}
if m.Window != nil {
l = m.Window.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *UpdateConfig) Size() (n int) {
var l int
_ = l
if m.Parallelism != 0 {
n += 1 + sovTypes(uint64(m.Parallelism))
}
l = m.Delay.Size()
n += 1 + l + sovTypes(uint64(l))
if m.FailureAction != 0 {
n += 1 + sovTypes(uint64(m.FailureAction))
}
if m.Monitor != nil {
l = m.Monitor.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.MaxFailureRatio != 0 {
n += 5
}
return n
}
func (m *UpdateStatus) Size() (n int) {
var l int
_ = l
if m.State != 0 {
n += 1 + sovTypes(uint64(m.State))
}
if m.StartedAt != nil {
l = m.StartedAt.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.CompletedAt != nil {
l = m.CompletedAt.Size()
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Message)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *ContainerStatus) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.PID != 0 {
n += 1 + sovTypes(uint64(m.PID))
}
if m.ExitCode != 0 {
n += 1 + sovTypes(uint64(m.ExitCode))
}
return n
}
func (m *TaskStatus) Size() (n int) {
var l int
_ = l
if m.Timestamp != nil {
l = m.Timestamp.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.State != 0 {
n += 1 + sovTypes(uint64(m.State))
}
l = len(m.Message)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Err)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.RuntimeStatus != nil {
n += m.RuntimeStatus.Size()
}
return n
}
func (m *TaskStatus_Container) Size() (n int) {
var l int
_ = l
if m.Container != nil {
l = m.Container.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *NetworkAttachmentConfig) Size() (n int) {
var l int
_ = l
l = len(m.Target)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Aliases) > 0 {
for _, s := range m.Aliases {
l = len(s)
n += 1 + l + sovTypes(uint64(l))
}
}
if len(m.Addresses) > 0 {
for _, s := range m.Addresses {
l = len(s)
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *IPAMConfig) Size() (n int) {
var l int
_ = l
if m.Family != 0 {
n += 1 + sovTypes(uint64(m.Family))
}
l = len(m.Subnet)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Range)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Gateway)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Reserved) > 0 {
for k, v := range m.Reserved {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
}
}
return n
}
func (m *PortConfig) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.Protocol != 0 {
n += 1 + sovTypes(uint64(m.Protocol))
}
if m.TargetPort != 0 {
n += 1 + sovTypes(uint64(m.TargetPort))
}
if m.PublishedPort != 0 {
n += 1 + sovTypes(uint64(m.PublishedPort))
}
return n
}
func (m *Driver) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Options) > 0 {
for k, v := range m.Options {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
}
}
return n
}
func (m *IPAMOptions) Size() (n int) {
var l int
_ = l
if m.Driver != nil {
l = m.Driver.Size()
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Configs) > 0 {
for _, e := range m.Configs {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *Peer) Size() (n int) {
var l int
_ = l
l = len(m.NodeID)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Addr)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *WeightedPeer) Size() (n int) {
var l int
_ = l
if m.Peer != nil {
l = m.Peer.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.Weight != 0 {
n += 1 + sovTypes(uint64(m.Weight))
}
return n
}
func (m *IssuanceStatus) Size() (n int) {
var l int
_ = l
if m.State != 0 {
n += 1 + sovTypes(uint64(m.State))
}
l = len(m.Err)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *AcceptancePolicy) Size() (n int) {
var l int
_ = l
if len(m.Policies) > 0 {
for _, e := range m.Policies {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *AcceptancePolicy_RoleAdmissionPolicy) Size() (n int) {
var l int
_ = l
if m.Role != 0 {
n += 1 + sovTypes(uint64(m.Role))
}
if m.Autoaccept {
n += 2
}
if m.Secret != nil {
l = m.Secret.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Size() (n int) {
var l int
_ = l
l = len(m.Data)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Alg)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *ExternalCA) Size() (n int) {
var l int
_ = l
if m.Protocol != 0 {
n += 1 + sovTypes(uint64(m.Protocol))
}
l = len(m.URL)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Options) > 0 {
for k, v := range m.Options {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v)))
n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize))
}
}
return n
}
func (m *CAConfig) Size() (n int) {
var l int
_ = l
if m.NodeCertExpiry != nil {
l = m.NodeCertExpiry.Size()
n += 1 + l + sovTypes(uint64(l))
}
if len(m.ExternalCAs) > 0 {
for _, e := range m.ExternalCAs {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *OrchestrationConfig) Size() (n int) {
var l int
_ = l
if m.TaskHistoryRetentionLimit != 0 {
n += 1 + sovTypes(uint64(m.TaskHistoryRetentionLimit))
}
return n
}
func (m *TaskDefaults) Size() (n int) {
var l int
_ = l
if m.LogDriver != nil {
l = m.LogDriver.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *DispatcherConfig) Size() (n int) {
var l int
_ = l
if m.HeartbeatPeriod != nil {
l = m.HeartbeatPeriod.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *RaftConfig) Size() (n int) {
var l int
_ = l
if m.SnapshotInterval != 0 {
n += 1 + sovTypes(uint64(m.SnapshotInterval))
}
if m.KeepOldSnapshots != 0 {
n += 1 + sovTypes(uint64(m.KeepOldSnapshots))
}
if m.LogEntriesForSlowFollowers != 0 {
n += 1 + sovTypes(uint64(m.LogEntriesForSlowFollowers))
}
if m.HeartbeatTick != 0 {
n += 1 + sovTypes(uint64(m.HeartbeatTick))
}
if m.ElectionTick != 0 {
n += 1 + sovTypes(uint64(m.ElectionTick))
}
return n
}
func (m *Placement) Size() (n int) {
var l int
_ = l
if len(m.Constraints) > 0 {
for _, s := range m.Constraints {
l = len(s)
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *JoinTokens) Size() (n int) {
var l int
_ = l
l = len(m.Worker)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Manager)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *RootCA) Size() (n int) {
var l int
_ = l
l = len(m.CAKey)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.CACert)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.CACertHash)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = m.JoinTokens.Size()
n += 1 + l + sovTypes(uint64(l))
return n
}
func (m *Certificate) Size() (n int) {
var l int
_ = l
if m.Role != 0 {
n += 1 + sovTypes(uint64(m.Role))
}
l = len(m.CSR)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = m.Status.Size()
n += 1 + l + sovTypes(uint64(l))
l = len(m.Certificate)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.CN)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *EncryptionKey) Size() (n int) {
var l int
_ = l
l = len(m.Subsystem)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.Algorithm != 0 {
n += 1 + sovTypes(uint64(m.Algorithm))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.LamportTime != 0 {
n += 1 + sovTypes(uint64(m.LamportTime))
}
return n
}
func (m *ManagerStatus) Size() (n int) {
var l int
_ = l
if m.RaftID != 0 {
n += 1 + sovTypes(uint64(m.RaftID))
}
l = len(m.Addr)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.Leader {
n += 2
}
if m.Reachability != 0 {
n += 1 + sovTypes(uint64(m.Reachability))
}
return n
}
func (m *SecretReference) Size() (n int) {
var l int
_ = l
l = len(m.SecretID)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.Mode != 0 {
n += 1 + sovTypes(uint64(m.Mode))
}
l = len(m.Target)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.SecretName)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
// sovTypes reports how many bytes the base-128 varint encoding of x
// occupies: one byte per started 7-bit group, minimum one byte (so
// sovTypes(0) == 1 and the maximum for a uint64 is 10).
func sovTypes(x uint64) (n int) {
	n = 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}
// sozTypes returns the varint-encoded size of x after the ZigZag
// transformation used for sint64 fields: the sign bit is folded into the
// low bit so small negative values stay small on the wire.
func sozTypes(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovTypes(zigzag)
}
// String renders this as a Go-syntax-like debug string, or "nil" for a
// nil receiver. Output shape: "&Version{Index:<v>,}".
func (this *Version) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Version{`,
		`Index:` + fmt.Sprintf("%v", this.Index) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this for debugging, or "nil" for a nil receiver.
// Label map keys are sorted first so the output is deterministic despite
// Go's randomized map iteration order.
func (this *Annotations) String() string {
	if this == nil {
		return "nil"
	}
	keysForLabels := make([]string, 0, len(this.Labels))
	for k, _ := range this.Labels {
		keysForLabels = append(keysForLabels, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
	mapStringForLabels := "map[string]string{"
	for _, k := range keysForLabels {
		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
	}
	mapStringForLabels += "}"
	s := strings.Join([]string{`&Annotations{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`Labels:` + mapStringForLabels + `,`,
		`}`,
	}, "")
	return s
}
func (this *Resources) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Resources{`,
`NanoCPUs:` + fmt.Sprintf("%v", this.NanoCPUs) + `,`,
`MemoryBytes:` + fmt.Sprintf("%v", this.MemoryBytes) + `,`,
`}`,
}, "")
return s
}
func (this *ResourceRequirements) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ResourceRequirements{`,
`Limits:` + strings.Replace(fmt.Sprintf("%v", this.Limits), "Resources", "Resources", 1) + `,`,
`Reservations:` + strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resources", "Resources", 1) + `,`,
`}`,
}, "")
return s
}
func (this *Platform) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Platform{`,
`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
`OS:` + fmt.Sprintf("%v", this.OS) + `,`,
`}`,
}, "")
return s
}
func (this *PluginDescription) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PluginDescription{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`}`,
}, "")
return s
}
func (this *EngineDescription) String() string {
if this == nil {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
s := strings.Join([]string{`&EngineDescription{`,
`EngineVersion:` + fmt.Sprintf("%v", this.EngineVersion) + `,`,
`Labels:` + mapStringForLabels + `,`,
`Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "PluginDescription", "PluginDescription", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *NodeDescription) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&NodeDescription{`,
`Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
`Platform:` + strings.Replace(fmt.Sprintf("%v", this.Platform), "Platform", "Platform", 1) + `,`,
`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`,
`Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`,
`}`,
}, "")
return s
}
func (this *RaftMemberStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RaftMemberStatus{`,
`Leader:` + fmt.Sprintf("%v", this.Leader) + `,`,
`Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`}`,
}, "")
return s
}
func (this *NodeStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&NodeStatus{`,
`State:` + fmt.Sprintf("%v", this.State) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`}`,
}, "")
return s
}
func (this *Image) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Image{`,
`Reference:` + fmt.Sprintf("%v", this.Reference) + `,`,
`}`,
}, "")
return s
}
func (this *Mount) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Mount{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
`BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`,
`VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`,
`TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`,
`}`,
}, "")
return s
}
func (this *Mount_BindOptions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Mount_BindOptions{`,
`Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`,
`}`,
}, "")
return s
}
func (this *Mount_VolumeOptions) String() string {
if this == nil {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
s := strings.Join([]string{`&Mount_VolumeOptions{`,
`NoCopy:` + fmt.Sprintf("%v", this.NoCopy) + `,`,
`Labels:` + mapStringForLabels + `,`,
`DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`,
`}`,
}, "")
return s
}
func (this *Mount_TmpfsOptions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Mount_TmpfsOptions{`,
`SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`,
`Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
`}`,
}, "")
return s
}
func (this *RestartPolicy) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RestartPolicy{`,
`Condition:` + fmt.Sprintf("%v", this.Condition) + `,`,
`Delay:` + strings.Replace(fmt.Sprintf("%v", this.Delay), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
`MaxAttempts:` + fmt.Sprintf("%v", this.MaxAttempts) + `,`,
`Window:` + strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
`}`,
}, "")
return s
}
func (this *UpdateConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&UpdateConfig{`,
`Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`,
`Delay:` + strings.Replace(strings.Replace(this.Delay.String(), "Duration", "docker_swarmkit_v11.Duration", 1), `&`, ``, 1) + `,`,
`FailureAction:` + fmt.Sprintf("%v", this.FailureAction) + `,`,
`Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
`MaxFailureRatio:` + fmt.Sprintf("%v", this.MaxFailureRatio) + `,`,
`}`,
}, "")
return s
}
func (this *UpdateStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&UpdateStatus{`,
`State:` + fmt.Sprintf("%v", this.State) + `,`,
`StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
`CompletedAt:` + strings.Replace(fmt.Sprintf("%v", this.CompletedAt), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`}`,
}, "")
return s
}
func (this *ContainerStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ContainerStatus{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`PID:` + fmt.Sprintf("%v", this.PID) + `,`,
`ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`,
`}`,
}, "")
return s
}
func (this *TaskStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&TaskStatus{`,
`Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
`State:` + fmt.Sprintf("%v", this.State) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`Err:` + fmt.Sprintf("%v", this.Err) + `,`,
`RuntimeStatus:` + fmt.Sprintf("%v", this.RuntimeStatus) + `,`,
`}`,
}, "")
return s
}
func (this *TaskStatus_Container) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&TaskStatus_Container{`,
`Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerStatus", "ContainerStatus", 1) + `,`,
`}`,
}, "")
return s
}
func (this *NetworkAttachmentConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&NetworkAttachmentConfig{`,
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
`Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
`Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
`}`,
}, "")
return s
}
func (this *IPAMConfig) String() string {
if this == nil {
return "nil"
}
keysForReserved := make([]string, 0, len(this.Reserved))
for k, _ := range this.Reserved {
keysForReserved = append(keysForReserved, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForReserved)
mapStringForReserved := "map[string]string{"
for _, k := range keysForReserved {
mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k])
}
mapStringForReserved += "}"
s := strings.Join([]string{`&IPAMConfig{`,
`Family:` + fmt.Sprintf("%v", this.Family) + `,`,
`Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`,
`Range:` + fmt.Sprintf("%v", this.Range) + `,`,
`Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`,
`Reserved:` + mapStringForReserved + `,`,
`}`,
}, "")
return s
}
func (this *PortConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PortConfig{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
`TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
`PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`,
`}`,
}, "")
return s
}
func (this *Driver) String() string {
if this == nil {
return "nil"
}
keysForOptions := make([]string, 0, len(this.Options))
for k, _ := range this.Options {
keysForOptions = append(keysForOptions, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
mapStringForOptions := "map[string]string{"
for _, k := range keysForOptions {
mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
}
mapStringForOptions += "}"
s := strings.Join([]string{`&Driver{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Options:` + mapStringForOptions + `,`,
`}`,
}, "")
return s
}
func (this *IPAMOptions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&IPAMOptions{`,
`Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`,
`Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "IPAMConfig", "IPAMConfig", 1) + `,`,
`}`,
}, "")
return s
}
func (this *Peer) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Peer{`,
`NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
`Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
`}`,
}, "")
return s
}
func (this *WeightedPeer) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&WeightedPeer{`,
`Peer:` + strings.Replace(fmt.Sprintf("%v", this.Peer), "Peer", "Peer", 1) + `,`,
`Weight:` + fmt.Sprintf("%v", this.Weight) + `,`,
`}`,
}, "")
return s
}
func (this *IssuanceStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&IssuanceStatus{`,
`State:` + fmt.Sprintf("%v", this.State) + `,`,
`Err:` + fmt.Sprintf("%v", this.Err) + `,`,
`}`,
}, "")
return s
}
func (this *AcceptancePolicy) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&AcceptancePolicy{`,
`Policies:` + strings.Replace(fmt.Sprintf("%v", this.Policies), "AcceptancePolicy_RoleAdmissionPolicy", "AcceptancePolicy_RoleAdmissionPolicy", 1) + `,`,
`}`,
}, "")
return s
}
func (this *AcceptancePolicy_RoleAdmissionPolicy) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy{`,
`Role:` + fmt.Sprintf("%v", this.Role) + `,`,
`Autoaccept:` + fmt.Sprintf("%v", this.Autoaccept) + `,`,
`Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "AcceptancePolicy_RoleAdmissionPolicy_Secret", "AcceptancePolicy_RoleAdmissionPolicy_Secret", 1) + `,`,
`}`,
}, "")
return s
}
func (this *AcceptancePolicy_RoleAdmissionPolicy_Secret) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy_Secret{`,
`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
`Alg:` + fmt.Sprintf("%v", this.Alg) + `,`,
`}`,
}, "")
return s
}
func (this *ExternalCA) String() string {
if this == nil {
return "nil"
}
keysForOptions := make([]string, 0, len(this.Options))
for k, _ := range this.Options {
keysForOptions = append(keysForOptions, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
mapStringForOptions := "map[string]string{"
for _, k := range keysForOptions {
mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
}
mapStringForOptions += "}"
s := strings.Join([]string{`&ExternalCA{`,
`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
`URL:` + fmt.Sprintf("%v", this.URL) + `,`,
`Options:` + mapStringForOptions + `,`,
`}`,
}, "")
return s
}
func (this *CAConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CAConfig{`,
`NodeCertExpiry:` + strings.Replace(fmt.Sprintf("%v", this.NodeCertExpiry), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
`ExternalCAs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalCAs), "ExternalCA", "ExternalCA", 1) + `,`,
`}`,
}, "")
return s
}
func (this *OrchestrationConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&OrchestrationConfig{`,
`TaskHistoryRetentionLimit:` + fmt.Sprintf("%v", this.TaskHistoryRetentionLimit) + `,`,
`}`,
}, "")
return s
}
func (this *TaskDefaults) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&TaskDefaults{`,
`LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`,
`}`,
}, "")
return s
}
func (this *DispatcherConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&DispatcherConfig{`,
`HeartbeatPeriod:` + strings.Replace(fmt.Sprintf("%v", this.HeartbeatPeriod), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
`}`,
}, "")
return s
}
func (this *RaftConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RaftConfig{`,
`SnapshotInterval:` + fmt.Sprintf("%v", this.SnapshotInterval) + `,`,
`KeepOldSnapshots:` + fmt.Sprintf("%v", this.KeepOldSnapshots) + `,`,
`LogEntriesForSlowFollowers:` + fmt.Sprintf("%v", this.LogEntriesForSlowFollowers) + `,`,
`HeartbeatTick:` + fmt.Sprintf("%v", this.HeartbeatTick) + `,`,
`ElectionTick:` + fmt.Sprintf("%v", this.ElectionTick) + `,`,
`}`,
}, "")
return s
}
func (this *Placement) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Placement{`,
`Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`,
`}`,
}, "")
return s
}
func (this *JoinTokens) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&JoinTokens{`,
`Worker:` + fmt.Sprintf("%v", this.Worker) + `,`,
`Manager:` + fmt.Sprintf("%v", this.Manager) + `,`,
`}`,
}, "")
return s
}
func (this *RootCA) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&RootCA{`,
`CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`,
`CACert:` + fmt.Sprintf("%v", this.CACert) + `,`,
`CACertHash:` + fmt.Sprintf("%v", this.CACertHash) + `,`,
`JoinTokens:` + strings.Replace(strings.Replace(this.JoinTokens.String(), "JoinTokens", "JoinTokens", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *Certificate) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Certificate{`,
`Role:` + fmt.Sprintf("%v", this.Role) + `,`,
`CSR:` + fmt.Sprintf("%v", this.CSR) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "IssuanceStatus", "IssuanceStatus", 1), `&`, ``, 1) + `,`,
`Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`,
`CN:` + fmt.Sprintf("%v", this.CN) + `,`,
`}`,
}, "")
return s
}
func (this *EncryptionKey) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&EncryptionKey{`,
`Subsystem:` + fmt.Sprintf("%v", this.Subsystem) + `,`,
`Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`LamportTime:` + fmt.Sprintf("%v", this.LamportTime) + `,`,
`}`,
}, "")
return s
}
func (this *ManagerStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ManagerStatus{`,
`RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`,
`Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
`Leader:` + fmt.Sprintf("%v", this.Leader) + `,`,
`Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`,
`}`,
}, "")
return s
}
func (this *SecretReference) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&SecretReference{`,
`SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`,
`Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
`SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
`}`,
}, "")
return s
}
// valueToStringTypes formats a pointer-like value for debug output:
// "nil" when the value is nil, otherwise "*" followed by the pointee's
// default formatting. Note reflect.Value.IsNil panics for non-nilable
// kinds, so v must be a pointer, map, chan, func, slice, or interface.
func valueToStringTypes(v interface{}) string {
	ptr := reflect.ValueOf(v)
	if ptr.IsNil() {
		return "nil"
	}
	deref := reflect.Indirect(ptr).Interface()
	return "*" + fmt.Sprintf("%v", deref)
}
// Unmarshal decodes the protobuf wire format in data into m.
// It walks tag/value pairs, decoding field 1 (Index) as a varint and
// skipping unknown fields so messages from newer writers stay readable.
func (m *Version) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 is end-group; Version is not a group message.
		if wireType == 4 {
			return fmt.Errorf("proto: Version: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
			}
			m.Index = 0
			// Varint payload for Index.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Index |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the tag and skip its whole payload.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *Annotations) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Annotations: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Annotations: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthTypes
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(data[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Labels == nil {
m.Labels = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthTypes
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(data[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Labels[mapkey] = mapvalue
} else {
var mapvalue string
m.Labels[mapkey] = mapvalue
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *Resources) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: Resources: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: NanoCPUs, varint-encoded int64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NanoCPUs", wireType)
			}
			m.NanoCPUs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.NanoCPUs |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: MemoryBytes, varint-encoded int64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType)
			}
			m.MemoryBytes = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.MemoryBytes |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *ResourceRequirements) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Limits, length-delimited embedded Resources message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the submessage, then recurse into its decoder.
			if m.Limits == nil {
				m.Limits = &Resources{}
			}
			if err := m.Limits.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Reservations, length-delimited embedded Resources message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Reservations == nil {
				m.Reservations = &Resources{}
			}
			if err := m.Reservations.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *Platform) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: Platform: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Architecture, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Architecture = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: OS, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.OS = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *PluginDescription) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: PluginDescription: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PluginDescription: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Type, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Type = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Name, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *EngineDescription) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: EngineDescription: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EngineDescription: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: EngineVersion, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EngineVersion", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EngineVersion = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Labels, map<string,string> — each occurrence is one
			// length-delimited map entry holding a key and (optionally) a value.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Consume the key's own field tag (value not used beyond advancing).
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthTypes
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(data[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Labels == nil {
				m.Labels = make(map[string]string)
			}
			if iNdEx < postIndex {
				// Entry still has bytes left: decode the value tag and string.
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthTypes
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Labels[mapkey] = mapvalue
			} else {
				// Value field absent on the wire: store the empty string.
				var mapvalue string
				m.Labels[mapkey] = mapvalue
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Plugins, repeated embedded PluginDescription message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a zero element, then decode directly into it.
			m.Plugins = append(m.Plugins, PluginDescription{})
			if err := m.Plugins[len(m.Plugins)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *NodeDescription) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: NodeDescription: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NodeDescription: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Hostname, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Hostname = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Platform, length-delimited embedded Platform message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the submessage, then recurse into its decoder.
			if m.Platform == nil {
				m.Platform = &Platform{}
			}
			if err := m.Platform.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Resources, length-delimited embedded Resources message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Resources == nil {
				m.Resources = &Resources{}
			}
			if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Engine, length-delimited embedded EngineDescription message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Engine", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Engine == nil {
				m.Engine = &EngineDescription{}
			}
			if err := m.Engine.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *RaftMemberStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: RaftMemberStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RaftMemberStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Leader, varint-encoded bool (nonzero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Leader = bool(v != 0)
		case 2:
			// Field 2: Reachability, varint-encoded enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType)
			}
			m.Reachability = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Message, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Message = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *NodeStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: State, varint-encoded enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
			}
			m.State = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.State |= (NodeStatus_State(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: Message, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Message = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *Image) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: Image: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Reference, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Reference = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *Mount) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: Mount: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Type, varint-encoded Mount_MountType enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Type |= (Mount_MountType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: Source, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Source = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Target, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Target = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: ReadOnly, varint-encoded bool (nonzero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ReadOnly = bool(v != 0)
		case 5:
			// Field 5: BindOptions, length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BindOptions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the submessage, then recurse into its decoder.
			if m.BindOptions == nil {
				m.BindOptions = &Mount_BindOptions{}
			}
			if err := m.BindOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: VolumeOptions, length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field VolumeOptions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.VolumeOptions == nil {
				m.VolumeOptions = &Mount_VolumeOptions{}
			}
			if err := m.VolumeOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			// Field 7: TmpfsOptions, length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOptions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TmpfsOptions == nil {
				m.TmpfsOptions = &Mount_TmpfsOptions{}
			}
			if err := m.TmpfsOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *Mount_BindOptions) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: BindOptions: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BindOptions: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Propagation, varint-encoded enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType)
			}
			m.Propagation = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Propagation |= (Mount_BindOptions_MountPropagation(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from data into m.
// NOTE(review): appears machine-generated (gogo/protobuf-style decoder) — do not hand-edit the logic.
func (m *Mount_VolumeOptions) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ("end group") is invalid at message top level.
			return fmt.Errorf("proto: VolumeOptions: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VolumeOptions: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: NoCopy, varint-encoded bool (nonzero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NoCopy", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NoCopy = bool(v != 0)
		case 2:
			// Field 2: Labels, map<string,string> — each occurrence is one
			// length-delimited map entry holding a key and (optionally) a value.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Consume the key's own field tag (value not used beyond advancing).
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthTypes
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(data[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Labels == nil {
				m.Labels = make(map[string]string)
			}
			if iNdEx < postIndex {
				// Entry still has bytes left: decode the value tag and string.
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthTypes
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Labels[mapkey] = mapvalue
			} else {
				// Value field absent on the wire: store the empty string.
				var mapvalue string
				m.Labels[mapkey] = mapvalue
			}
			iNdEx = postIndex
		case 3:
			// Field 3: DriverConfig, length-delimited embedded Driver message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.DriverConfig == nil {
				m.DriverConfig = &Driver{}
			}
			if err := m.DriverConfig.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field number: skip its payload so unknown fields are tolerated.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a Mount_TmpfsOptions message from protobuf wire format,
// merging decoded fields into m. Generated-style decoder: it reads each
// tag varint, dispatches on the field number, and skips unknown fields.
func (m *Mount_TmpfsOptions) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 (end-group) is invalid outside a group.
			return fmt.Errorf("proto: TmpfsOptions: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TmpfsOptions: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
			}
			m.SizeBytes = 0
			// Varint-decode SizeBytes (int64).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.SizeBytes |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
			}
			m.Mode = 0
			// Varint-decode Mode; accumulated directly as os.FileMode
			// (casttype from the .proto definition).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Mode |= (os.FileMode(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RestartPolicy message from protobuf wire format,
// merging decoded fields into m. Fields: Condition (varint enum),
// Delay (embedded Duration), MaxAttempts (varint), Window (embedded Duration).
func (m *RestartPolicy) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RestartPolicy: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RestartPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType)
			}
			m.Condition = 0
			// Varint-decode the RestartCondition enum value.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Condition |= (RestartPolicy_RestartCondition(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Duration message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the optional message before merging into it.
			if m.Delay == nil {
				m.Delay = &docker_swarmkit_v11.Duration{}
			}
			if err := m.Delay.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxAttempts", wireType)
			}
			m.MaxAttempts = 0
			// Varint-decode MaxAttempts (uint64).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.MaxAttempts |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Duration message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Window == nil {
				m.Window = &docker_swarmkit_v11.Duration{}
			}
			if err := m.Window.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UpdateConfig message from protobuf wire format,
// merging decoded fields into m. Fields: Parallelism (varint),
// Delay (embedded non-nullable Duration), FailureAction (varint enum),
// Monitor (embedded Duration pointer), MaxFailureRatio (fixed32 float).
func (m *UpdateConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType)
			}
			m.Parallelism = 0
			// Varint-decode Parallelism (uint64).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Parallelism |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Duration message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Delay is a non-nullable embedded field (value, not pointer),
			// so no nil check/allocation is needed before merging.
			if err := m.Delay.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field FailureAction", wireType)
			}
			m.FailureAction = 0
			// Varint-decode the FailureAction enum value.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.FailureAction |= (UpdateConfig_FailureAction(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Duration message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Monitor == nil {
				m.Monitor = &docker_swarmkit_v11.Duration{}
			}
			if err := m.Monitor.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Wire type 5 = fixed 32-bit; decode little-endian float32 bits.
			if wireType != 5 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxFailureRatio", wireType)
			}
			var v uint32
			if (iNdEx + 4) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += 4
			v = uint32(data[iNdEx-4])
			v |= uint32(data[iNdEx-3]) << 8
			v |= uint32(data[iNdEx-2]) << 16
			v |= uint32(data[iNdEx-1]) << 24
			m.MaxFailureRatio = float32(math.Float32frombits(v))
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UpdateStatus message from protobuf wire format,
// merging decoded fields into m. Fields: State (varint enum),
// StartedAt/CompletedAt (embedded Timestamp pointers), Message (string).
func (m *UpdateStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
			}
			m.State = 0
			// Varint-decode the UpdateState enum value.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.State |= (UpdateStatus_UpdateState(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Timestamp message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the optional message before merging into it.
			if m.StartedAt == nil {
				m.StartedAt = &docker_swarmkit_v1.Timestamp{}
			}
			if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Timestamp message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.CompletedAt == nil {
				m.CompletedAt = &docker_swarmkit_v1.Timestamp{}
			}
			if err := m.CompletedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Message = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ContainerStatus message from protobuf wire format,
// merging decoded fields into m. Fields: ContainerID (string),
// PID (varint int32), ExitCode (varint int32).
func (m *ContainerStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ContainerID = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType)
			}
			m.PID = 0
			// Varint-decode PID (int32).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.PID |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType)
			}
			m.ExitCode = 0
			// Varint-decode ExitCode (int32).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ExitCode |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a TaskStatus message from protobuf wire format,
// merging decoded fields into m. Fields: Timestamp (embedded message),
// State (varint enum), Message/Err (strings), and Container (oneof
// RuntimeStatus variant holding a ContainerStatus).
func (m *TaskStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Timestamp message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the optional message before merging into it.
			if m.Timestamp == nil {
				m.Timestamp = &docker_swarmkit_v1.Timestamp{}
			}
			if err := m.Timestamp.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
			}
			m.State = 0
			// Varint-decode the TaskState enum value.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.State |= (TaskState(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Message = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Err = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded ContainerStatus message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Decode into a fresh ContainerStatus and install it as the
			// active oneof variant of RuntimeStatus.
			v := &ContainerStatus{}
			if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			m.RuntimeStatus = &TaskStatus_Container{v}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a NetworkAttachmentConfig message from protobuf wire
// format, merging decoded fields into m. Fields: Target (string),
// Aliases and Addresses (repeated strings, appended per occurrence).
func (m *NetworkAttachmentConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkAttachmentConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkAttachmentConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Target = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
			}
			var stringLen uint64
			// Decode one repeated-string element's length prefix.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Aliases = append(m.Aliases, string(data[iNdEx:postIndex]))
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
			}
			var stringLen uint64
			// Decode one repeated-string element's length prefix.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Addresses = append(m.Addresses, string(data[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an IPAMConfig message from protobuf wire format,
// merging decoded fields into m. Fields: Family (varint enum),
// Subnet/Range/Gateway (strings), Reserved (map<string,string>, decoded
// one synthetic key/value entry message per occurrence).
func (m *IPAMConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IPAMConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IPAMConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Family", wireType)
			}
			m.Family = 0
			// Varint-decode the AddressFamily enum value.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Family |= (IPAMConfig_AddressFamily(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Subnet = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Range = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Gateway = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// Map entry: a length-delimited sub-message containing a
			// key field (tag 1) and an optional value field (tag 2).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Key sub-field tag, then the key string itself.
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthTypes
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(data[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Reserved == nil {
				m.Reserved = make(map[string]string)
			}
			if iNdEx < postIndex {
				// Value field present: decode its tag and string payload.
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthTypes
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Reserved[mapkey] = mapvalue
			} else {
				// Value field omitted on the wire: store the empty string.
				var mapvalue string
				m.Reserved[mapkey] = mapvalue
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PortConfig message from protobuf wire format,
// merging decoded fields into m. Fields: Name (string), Protocol
// (varint enum), TargetPort and PublishedPort (varint uint32).
func (m *PortConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PortConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
			}
			m.Protocol = 0
			// Varint-decode the Protocol enum value.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
			}
			m.TargetPort = 0
			// Varint-decode TargetPort (uint32).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.TargetPort |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType)
			}
			m.PublishedPort = 0
			// Varint-decode PublishedPort (uint32).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.PublishedPort |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a Driver message from protobuf wire format,
// merging decoded fields into m. Fields: Name (string), Options
// (map<string,string>, decoded one synthetic key/value entry per
// occurrence).
func (m *Driver) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Driver: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Driver: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			// Decode the string length prefix, then slice out the bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Map entry: a length-delimited sub-message containing a
			// key field (tag 1) and an optional value field (tag 2).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Key sub-field tag, then the key string itself.
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthTypes
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(data[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Options == nil {
				m.Options = make(map[string]string)
			}
			if iNdEx < postIndex {
				// Value field present: decode its tag and string payload.
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthTypes
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Options[mapkey] = mapvalue
			} else {
				// Value field omitted on the wire: store the empty string.
				var mapvalue string
				m.Options[mapkey] = mapvalue
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an IPAMOptions message from protobuf wire format,
// merging decoded fields into m. Fields: Driver (embedded message, tag 1)
// and Configs (repeated embedded IPAMConfig, tag 3 — tag 2 is unused in
// the .proto, so it falls through to the unknown-field skip path).
func (m *IPAMOptions) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IPAMOptions: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IPAMOptions: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
			}
			var msglen int
			// Decode the length prefix of the embedded Driver message.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the optional message before merging into it.
			if m.Driver == nil {
				m.Driver = &Driver{}
			}
			if err := m.Driver.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType)
			}
			var msglen int
			// Decode the length prefix of one repeated IPAMConfig element.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a fresh element and decode directly into it.
			m.Configs = append(m.Configs, &IPAMConfig{})
			if err := m.Configs[len(m.Configs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole entry.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *Peer) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Peer: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: NodeID — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.NodeID = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Addr — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Addr = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *WeightedPeer) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: WeightedPeer: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: WeightedPeer: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Peer — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Peer == nil {
				m.Peer = &Peer{}
			}
			if err := m.Peer.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Weight — int64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
			}
			m.Weight = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Weight |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *IssuanceStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IssuanceStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IssuanceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: State — enum (IssuanceStatus_State) encoded as varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
			}
			m.State = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.State |= (IssuanceStatus_State(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: Err — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Err = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m,
// appending to m.Policies for each repeated occurrence. Truncated input
// returns io.ErrUnexpectedEOF; malformed tags or lengths return an error.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *AcceptancePolicy) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AcceptancePolicy: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AcceptancePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Policies — repeated embedded message; each occurrence
			// appends a new element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Policies = append(m.Policies, &AcceptancePolicy_RoleAdmissionPolicy{})
			if err := m.Policies[len(m.Policies)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *AcceptancePolicy_RoleAdmissionPolicy) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RoleAdmissionPolicy: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RoleAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Role — enum (NodeRole) encoded as varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
			}
			m.Role = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Role |= (NodeRole(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: Autoaccept — bool, decoded as a varint and compared
			// against zero.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Autoaccept", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Autoaccept = bool(v != 0)
		case 3:
			// Field 3: Secret — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Secret == nil {
				m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{}
			}
			if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Secret: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Data — length-delimited bytes, copied into m.Data.
			// An explicitly-present empty value yields []byte{}, not nil.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
			if m.Data == nil {
				m.Data = []byte{}
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Alg — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Alg", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Alg = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m,
// including the Options map (each map entry arrives as its own embedded
// key/value message). Truncated input returns io.ErrUnexpectedEOF;
// malformed tags or lengths return an error.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *ExternalCA) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExternalCA: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExternalCA: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Protocol — enum (ExternalCA_CAProtocol) encoded as varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
			}
			m.Protocol = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Protocol |= (ExternalCA_CAProtocol(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: URL — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.URL = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Options — map<string,string> entry. The entry is a
			// length-delimited sub-message holding a key (tag 1) and
			// optionally a value (tag 2).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Decode the entry's key tag, then the key string itself.
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthTypes
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(data[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Options == nil {
				m.Options = make(map[string]string)
			}
			if iNdEx < postIndex {
				// Value present: decode its tag and string payload.
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthTypes
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Options[mapkey] = mapvalue
			} else {
				// Value omitted on the wire: store the empty string.
				var mapvalue string
				m.Options[mapkey] = mapvalue
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *CAConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CAConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CAConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: NodeCertExpiry — embedded Duration message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NodeCertExpiry", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.NodeCertExpiry == nil {
				m.NodeCertExpiry = &docker_swarmkit_v11.Duration{}
			}
			if err := m.NodeCertExpiry.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ExternalCAs — repeated embedded message; each
			// occurrence appends a new element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExternalCAs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ExternalCAs = append(m.ExternalCAs, &ExternalCA{})
			if err := m.ExternalCAs[len(m.ExternalCAs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *OrchestrationConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: OrchestrationConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: OrchestrationConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: TaskHistoryRetentionLimit — int64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TaskHistoryRetentionLimit", wireType)
			}
			m.TaskHistoryRetentionLimit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.TaskHistoryRetentionLimit |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *TaskDefaults) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TaskDefaults: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TaskDefaults: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: LogDriver — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.LogDriver == nil {
				m.LogDriver = &Driver{}
			}
			if err := m.LogDriver.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *DispatcherConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: DispatcherConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: DispatcherConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: HeartbeatPeriod — embedded Duration message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatPeriod", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.HeartbeatPeriod == nil {
				m.HeartbeatPeriod = &docker_swarmkit_v11.Duration{}
			}
			if err := m.HeartbeatPeriod.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m. All five
// fields are scalar varints. Truncated input returns io.ErrUnexpectedEOF;
// malformed tags or lengths return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *RaftConfig) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RaftConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RaftConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: SnapshotInterval — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterval", wireType)
			}
			m.SnapshotInterval = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.SnapshotInterval |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: KeepOldSnapshots — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeepOldSnapshots", wireType)
			}
			m.KeepOldSnapshots = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.KeepOldSnapshots |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: LogEntriesForSlowFollowers — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LogEntriesForSlowFollowers", wireType)
			}
			m.LogEntriesForSlowFollowers = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.LogEntriesForSlowFollowers |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: HeartbeatTick — uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTick", wireType)
			}
			m.HeartbeatTick = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.HeartbeatTick |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: ElectionTick — uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ElectionTick", wireType)
			}
			m.ElectionTick = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ElectionTick |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m,
// appending to m.Constraints for each repeated occurrence. Truncated input
// returns io.ErrUnexpectedEOF; malformed tags or lengths return an error.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *Placement) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Placement: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Placement: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Constraints — repeated string; each occurrence
			// appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Constraints = append(m.Constraints, string(data[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Truncated input returns io.ErrUnexpectedEOF; malformed tags or lengths
// return an error. Unknown fields are skipped.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *JoinTokens) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: JoinTokens: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: JoinTokens: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Worker — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Worker = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Manager — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Manager = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in data into m.
// Byte fields are copied out of data; an explicitly-present empty value
// yields []byte{}, not nil. Truncated input returns io.ErrUnexpectedEOF;
// malformed tags or lengths return an error.
// NOTE(review): protoc-gen-gogo generated pattern — prefer regenerating
// from the .proto over hand-editing.
func (m *RootCA) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	// Each iteration consumes one (tag, value) pair from the stream.
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key: a varint holding (field_number << 3 | wire_type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RootCA: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RootCA: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: CAKey — length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.CAKey = append(m.CAKey[:0], data[iNdEx:postIndex]...)
			if m.CAKey == nil {
				m.CAKey = []byte{}
			}
			iNdEx = postIndex
		case 2:
			// Field 2: CACert — length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.CACert = append(m.CACert[:0], data[iNdEx:postIndex]...)
			if m.CACert == nil {
				m.CACert = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: CACertHash — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CACertHash", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.CACertHash = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: JoinTokens — embedded message stored by value
			// (no nil check needed; unmarshal merges in place).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field JoinTokens", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.JoinTokens.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its entire payload and continue.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in data into m. Fields present
// in the input overwrite the corresponding fields of m; unknown fields are
// skipped for forward compatibility.
// NOTE(review): protoc-gen-gogo generated code — regenerate from the .proto
// source rather than editing by hand.
func (m *Certificate) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Certificate: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Role: varint-encoded NodeRole enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
			}
			m.Role = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Role |= (NodeRole(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// CSR: length-delimited bytes, copied into m.CSR.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.CSR = append(m.CSR[:0], data[iNdEx:postIndex]...)
			if m.CSR == nil {
				// Distinguish "present but empty" from "absent" (nil).
				m.CSR = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Status: length-delimited embedded message, decoded recursively.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Certificate: length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...)
			if m.Certificate == nil {
				m.Certificate = []byte{}
			}
			iNdEx = postIndex
		case 5:
			// CN: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CN", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.CN = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the key and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in data into m. Fields present
// in the input overwrite the corresponding fields of m; unknown fields are
// skipped.
// NOTE(review): protoc-gen-gogo generated code — regenerate from the .proto
// source rather than editing by hand.
func (m *EncryptionKey) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: EncryptionKey: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EncryptionKey: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Subsystem: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Subsystem", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Subsystem = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Algorithm: varint-encoded EncryptionKey_Algorithm enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType)
			}
			m.Algorithm = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Algorithm |= (EncryptionKey_Algorithm(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Key: length-delimited bytes, copied into m.Key.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
			if m.Key == nil {
				// Distinguish "present but empty" from "absent" (nil).
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// LamportTime: varint-encoded uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LamportTime", wireType)
			}
			m.LamportTime = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.LamportTime |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the key and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in data into m. Fields present
// in the input overwrite the corresponding fields of m; unknown fields are
// skipped.
// NOTE(review): protoc-gen-gogo generated code — regenerate from the .proto
// source rather than editing by hand.
func (m *ManagerStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ManagerStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ManagerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// RaftID: varint-encoded uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType)
			}
			m.RaftID = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.RaftID |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Addr: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Addr = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Leader: varint decoded into a temporary, then mapped to bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Leader = bool(v != 0)
		case 4:
			// Reachability: varint-encoded RaftMemberStatus_Reachability enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType)
			}
			m.Reachability = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the key and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in data into m. Fields present
// in the input overwrite the corresponding fields of m; unknown fields are
// skipped.
// NOTE(review): protoc-gen-gogo generated code — regenerate from the .proto
// source rather than editing by hand.
func (m *SecretReference) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SecretReference: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// SecretID: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SecretID = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Mode: varint-encoded SecretReference_Mode enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
			}
			m.Mode = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Mode |= (SecretReference_Mode(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Target: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Target = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// SecretName: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SecretName = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the key and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipTypes(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipTypes returns the number of bytes occupied by the next complete field
// (key plus value) at the start of data, allowing callers to skip unknown
// fields. It returns an error on truncated or malformed input.
// NOTE(review): protoc-gen-gogo generated code — regenerate from the .proto
// source rather than editing by hand.
func skipTypes(data []byte) (n int, err error) {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		// Decode the field key to learn the wire type of the value.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint value: scan to the terminating byte (high bit clear).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if data[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// Fixed 64-bit value.
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// Length-delimited value: varint length followed by that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthTypes
			}
			return iNdEx, nil
		case 3:
			// Start-group: skip nested fields recursively until the matching
			// end-group key (wire type 4) is found.
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipTypes(data[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			// End-group key: zero-length value.
			return iNdEx, nil
		case 5:
			// Fixed 32-bit value.
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	// Every switch arm above returns, so the loop body never falls through.
	panic("unreachable")
}
var (
	// ErrInvalidLengthTypes is returned when a decoded length or skip count
	// is negative, indicating corrupt input.
	ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowTypes is returned when a varint fails to terminate
	// within 64 bits of shift.
	ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
)
// init registers the compiled descriptor for types.proto with the
// generated-code registry so reflection-based consumers can find it.
func init() { proto.RegisterFile("types.proto", fileDescriptorTypes) }
var fileDescriptorTypes = []byte{
// 3603 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6f, 0x23, 0x47,
0x76, 0x17, 0x3f, 0x45, 0x3e, 0x52, 0x52, 0x4f, 0xcd, 0xec, 0x58, 0x43, 0x8f, 0x25, 0xba, 0xc7,
0xb3, 0x1e, 0x7b, 0x1d, 0xda, 0x96, 0x37, 0xc6, 0xac, 0x67, 0xb3, 0x76, 0x8b, 0xa4, 0x66, 0xb8,
0x23, 0x51, 0x44, 0x91, 0xd4, 0xc0, 0x08, 0x10, 0xa2, 0xd4, 0x5d, 0x22, 0xdb, 0x6a, 0x76, 0x33,
0xdd, 0x45, 0x69, 0x98, 0x20, 0xc0, 0x24, 0x87, 0x24, 0xd0, 0x29, 0xf7, 0x40, 0x58, 0x04, 0x09,
0x72, 0xcb, 0x39, 0x40, 0x4e, 0x3e, 0xfa, 0xb8, 0x41, 0x80, 0x60, 0xb1, 0x41, 0x84, 0x58, 0xf9,
0x07, 0x16, 0x08, 0x82, 0x3d, 0x24, 0x87, 0xa0, 0x3e, 0xba, 0xd9, 0xe4, 0x50, 0xf2, 0x4c, 0x76,
0x4f, 0x64, 0xbd, 0xfa, 0xbd, 0x57, 0xaf, 0xaa, 0x5e, 0xbd, 0xfa, 0xbd, 0x6a, 0x28, 0xb0, 0xc9,
0x88, 0x06, 0x95, 0x91, 0xef, 0x31, 0x0f, 0x21, 0xcb, 0x33, 0x8f, 0xa9, 0x5f, 0x09, 0x4e, 0x89,
0x3f, 0x3c, 0xb6, 0x59, 0xe5, 0xe4, 0xe3, 0xd2, 0x1d, 0x66, 0x0f, 0x69, 0xc0, 0xc8, 0x70, 0xf4,
0x61, 0xf4, 0x4f, 0xc2, 0x4b, 0x6f, 0x58, 0x63, 0x9f, 0x30, 0xdb, 0x73, 0x3f, 0x0c, 0xff, 0xa8,
0x8e, 0x5b, 0x7d, 0xaf, 0xef, 0x89, 0xbf, 0x1f, 0xf2, 0x7f, 0x52, 0xaa, 0x6f, 0xc2, 0xf2, 0x01,
0xf5, 0x03, 0xdb, 0x73, 0xd1, 0x2d, 0xc8, 0xd8, 0xae, 0x45, 0x9f, 0xaf, 0x27, 0xca, 0x89, 0x07,
0x69, 0x2c, 0x1b, 0xfa, 0xdf, 0x24, 0xa0, 0x60, 0xb8, 0xae, 0xc7, 0x84, 0xad, 0x00, 0x21, 0x48,
0xbb, 0x64, 0x48, 0x05, 0x28, 0x8f, 0xc5, 0x7f, 0x54, 0x85, 0xac, 0x43, 0x0e, 0xa9, 0x13, 0xac,
0x27, 0xcb, 0xa9, 0x07, 0x85, 0xad, 0x1f, 0x54, 0x5e, 0xf6, 0xb9, 0x12, 0x33, 0x52, 0xd9, 0x15,
0xe8, 0xba, 0xcb, 0xfc, 0x09, 0x56, 0xaa, 0xa5, 0x1f, 0x41, 0x21, 0x26, 0x46, 0x1a, 0xa4, 0x8e,
0xe9, 0x44, 0x0d, 0xc3, 0xff, 0x72, 0xff, 0x4e, 0x88, 0x33, 0xa6, 0xeb, 0x49, 0x21, 0x93, 0x8d,
0xcf, 0x92, 0x0f, 0x13, 0xfa, 0x97, 0x90, 0xc7, 0x34, 0xf0, 0xc6, 0xbe, 0x49, 0x03, 0xf4, 0x1e,
0xe4, 0x5d, 0xe2, 0x7a, 0x3d, 0x73, 0x34, 0x0e, 0x84, 0x7a, 0x6a, 0xbb, 0x78, 0x79, 0xb1, 0x99,
0x6b, 0x12, 0xd7, 0xab, 0xb6, 0xba, 0x01, 0xce, 0xf1, 0xee, 0xea, 0x68, 0x1c, 0xa0, 0xb7, 0xa1,
0x38, 0xa4, 0x43, 0xcf, 0x9f, 0xf4, 0x0e, 0x27, 0x8c, 0x06, 0xc2, 0x70, 0x0a, 0x17, 0xa4, 0x6c,
0x9b, 0x8b, 0xf4, 0xbf, 0x4a, 0xc0, 0xad, 0xd0, 0x36, 0xa6, 0x7f, 0x38, 0xb6, 0x7d, 0x3a, 0xa4,
0x2e, 0x0b, 0xd0, 0xef, 0x42, 0xd6, 0xb1, 0x87, 0x36, 0x93, 0x63, 0x14, 0xb6, 0xde, 0x5a, 0x34,
0xe7, 0xc8, 0x2b, 0xac, 0xc0, 0xc8, 0x80, 0xa2, 0x4f, 0x03, 0xea, 0x9f, 0xc8, 0x95, 0x10, 0x43,
0x7e, 0xa7, 0xf2, 0x8c, 0x8a, 0xbe, 0x03, 0xb9, 0x96, 0x43, 0xd8, 0x91, 0xe7, 0x0f, 0x91, 0x0e,
0x45, 0xe2, 0x9b, 0x03, 0x9b, 0x51, 0x93, 0x8d, 0xfd, 0x70, 0x57, 0x66, 0x64, 0xe8, 0x36, 0x24,
0x3d, 0x39, 0x50, 0x7e, 0x3b, 0x7b, 0x79, 0xb1, 0x99, 0xdc, 0x6f, 0xe3, 0xa4, 0x17, 0xe8, 0x8f,
0xe0, 0x46, 0xcb, 0x19, 0xf7, 0x6d, 0xb7, 0x46, 0x03, 0xd3, 0xb7, 0x47, 0xdc, 0x3a, 0xdf, 0x5e,
0x1e, 0x7c, 0xe1, 0xf6, 0xf2, 0xff, 0xd1, 0x96, 0x27, 0xa7, 0x5b, 0xae, 0xff, 0x45, 0x12, 0x6e,
0xd4, 0xdd, 0xbe, 0xed, 0xd2, 0xb8, 0xf6, 0x7d, 0x58, 0xa5, 0x42, 0xd8, 0x3b, 0x91, 0x41, 0xa5,
0xec, 0xac, 0x48, 0x69, 0x18, 0x69, 0x8d, 0xb9, 0x78, 0xf9, 0x78, 0xd1, 0xf4, 0x5f, 0xb2, 0xbe,
0x28, 0x6a, 0x50, 0x1d, 0x96, 0x47, 0x62, 0x12, 0xc1, 0x7a, 0x4a, 0xd8, 0xba, 0xbf, 0xc8, 0xd6,
0x4b, 0xf3, 0xdc, 0x4e, 0x7f, 0x73, 0xb1, 0xb9, 0x84, 0x43, 0xdd, 0xdf, 0x24, 0xf8, 0xfe, 0x33,
0x01, 0x6b, 0x4d, 0xcf, 0x9a, 0x59, 0x87, 0x12, 0xe4, 0x06, 0x5e, 0xc0, 0x62, 0x07, 0x25, 0x6a,
0xa3, 0x87, 0x90, 0x1b, 0xa9, 0xed, 0x53, 0xbb, 0x7f, 0x77, 0xb1, 0xcb, 0x12, 0x83, 0x23, 0x34,
0x7a, 0x04, 0x79, 0x3f, 0x8c, 0x89, 0xf5, 0xd4, 0xab, 0x04, 0xce, 0x14, 0x8f, 0x7e, 0x0f, 0xb2,
0x72, 0x13, 0xd6, 0xd3, 0x42, 0xf3, 0xfe, 0x2b, 0xad, 0x39, 0x56, 0x4a, 0xfa, 0x2f, 0x12, 0xa0,
0x61, 0x72, 0xc4, 0xf6, 0xe8, 0xf0, 0x90, 0xfa, 0x6d, 0x46, 0xd8, 0x38, 0x40, 0xb7, 0x21, 0xeb,
0x50, 0x62, 0x51, 0x5f, 0x4c, 0x32, 0x87, 0x55, 0x0b, 0x75, 0x79, 0x90, 0x13, 0x73, 0x40, 0x0e,
0x6d, 0xc7, 0x66, 0x13, 0x31, 0xcd, 0xd5, 0xc5, 0xbb, 0x3c, 0x6f, 0xb3, 0x82, 0x63, 0x8a, 0x78,
0xc6, 0x0c, 0x5a, 0x87, 0xe5, 0x21, 0x0d, 0x02, 0xd2, 0xa7, 0x62, 0xf6, 0x79, 0x1c, 0x36, 0xf5,
0x47, 0x50, 0x8c, 0xeb, 0xa1, 0x02, 0x2c, 0x77, 0x9b, 0x4f, 0x9b, 0xfb, 0xcf, 0x9a, 0xda, 0x12,
0x5a, 0x83, 0x42, 0xb7, 0x89, 0xeb, 0x46, 0xf5, 0x89, 0xb1, 0xbd, 0x5b, 0xd7, 0x12, 0x68, 0x05,
0xf2, 0xd3, 0x66, 0x52, 0xff, 0x59, 0x02, 0x80, 0x6f, 0xa0, 0x9a, 0xd4, 0x67, 0x90, 0x09, 0x18,
0x61, 0x72, 0xe3, 0x56, 0xb7, 0xde, 0x59, 0xe4, 0xf5, 0x14, 0x5e, 0xe1, 0x3f, 0x14, 0x4b, 0x95,
0xb8, 0x87, 0xc9, 0x79, 0x0f, 0x33, 0x02, 0x39, 0xeb, 0x5a, 0x0e, 0xd2, 0x35, 0xfe, 0x2f, 0x81,
0xf2, 0x90, 0xc1, 0x75, 0xa3, 0xf6, 0xa5, 0x96, 0x44, 0x1a, 0x14, 0x6b, 0x8d, 0x76, 0x75, 0xbf,
0xd9, 0xac, 0x57, 0x3b, 0xf5, 0x9a, 0x96, 0xd2, 0xef, 0x43, 0xa6, 0x31, 0x24, 0x7d, 0x8a, 0xee,
0xf2, 0x08, 0x38, 0xa2, 0x3e, 0x75, 0xcd, 0x30, 0xb0, 0xa6, 0x02, 0xfd, 0xe7, 0x79, 0xc8, 0xec,
0x79, 0x63, 0x97, 0xa1, 0xad, 0xd8, 0x29, 0x5e, 0xdd, 0xda, 0x58, 0x34, 0x05, 0x01, 0xac, 0x74,
0x26, 0x23, 0xaa, 0x4e, 0xf9, 0x6d, 0xc8, 0xca, 0x58, 0x51, 0xae, 0xab, 0x16, 0x97, 0x33, 0xe2,
0xf7, 0x29, 0x53, 0x8b, 0xae, 0x5a, 0xe8, 0x01, 0xe4, 0x7c, 0x4a, 0x2c, 0xcf, 0x75, 0x26, 0x22,
0xa4, 0x72, 0x32, 0xcd, 0x62, 0x4a, 0xac, 0x7d, 0xd7, 0x99, 0xe0, 0xa8, 0x17, 0x3d, 0x81, 0xe2,
0xa1, 0xed, 0x5a, 0x3d, 0x6f, 0x24, 0x73, 0x5e, 0xe6, 0xea, 0x00, 0x94, 0x5e, 0x6d, 0xdb, 0xae,
0xb5, 0x2f, 0xc1, 0xb8, 0x70, 0x38, 0x6d, 0xa0, 0x26, 0xac, 0x9e, 0x78, 0xce, 0x78, 0x48, 0x23,
0x5b, 0x59, 0x61, 0xeb, 0xdd, 0xab, 0x6d, 0x1d, 0x08, 0x7c, 0x68, 0x6d, 0xe5, 0x24, 0xde, 0x44,
0x4f, 0x61, 0x85, 0x0d, 0x47, 0x47, 0x41, 0x64, 0x6e, 0x59, 0x98, 0xfb, 0xfe, 0x35, 0x0b, 0xc6,
0xe1, 0xa1, 0xb5, 0x22, 0x8b, 0xb5, 0x4a, 0x7f, 0x96, 0x82, 0x42, 0xcc, 0x73, 0xd4, 0x86, 0xc2,
0xc8, 0xf7, 0x46, 0xa4, 0x2f, 0xf2, 0xb6, 0xda, 0x8b, 0x8f, 0x5f, 0x69, 0xd6, 0x95, 0xd6, 0x54,
0x11, 0xc7, 0xad, 0xe8, 0xe7, 0x49, 0x28, 0xc4, 0x3a, 0xd1, 0xfb, 0x90, 0xc3, 0x2d, 0xdc, 0x38,
0x30, 0x3a, 0x75, 0x6d, 0xa9, 0x74, 0xf7, 0xec, 0xbc, 0xbc, 0x2e, 0xac, 0xc5, 0x0d, 0xb4, 0x7c,
0xfb, 0x84, 0x87, 0xde, 0x03, 0x58, 0x0e, 0xa1, 0x89, 0xd2, 0x9b, 0x67, 0xe7, 0xe5, 0x37, 0xe6,
0xa1, 0x31, 0x24, 0x6e, 0x3f, 0x31, 0x70, 0xbd, 0xa6, 0x25, 0x17, 0x23, 0x71, 0x7b, 0x40, 0x7c,
0x6a, 0xa1, 0xef, 0x43, 0x56, 0x01, 0x53, 0xa5, 0xd2, 0xd9, 0x79, 0xf9, 0xf6, 0x3c, 0x70, 0x8a,
0xc3, 0xed, 0x5d, 0xe3, 0xa0, 0xae, 0xa5, 0x17, 0xe3, 0x70, 0xdb, 0x21, 0x27, 0x14, 0xbd, 0x03,
0x19, 0x09, 0xcb, 0x94, 0xee, 0x9c, 0x9d, 0x97, 0xbf, 0xf7, 0x92, 0x39, 0x8e, 0x2a, 0xad, 0xff,
0xe5, 0xdf, 0x6e, 0x2c, 0xfd, 0xd3, 0xdf, 0x6d, 0x68, 0xf3, 0xdd, 0xa5, 0xff, 0x4d, 0xc0, 0xca,
0xcc, 0x96, 0x23, 0x1d, 0xb2, 0xae, 0x67, 0x7a, 0x23, 0x99, 0xce, 0x73, 0xdb, 0x70, 0x79, 0xb1,
0x99, 0x6d, 0x7a, 0x55, 0x6f, 0x34, 0xc1, 0xaa, 0x07, 0x3d, 0x9d, 0xbb, 0x90, 0x3e, 0x79, 0xc5,
0x78, 0x5a, 0x78, 0x25, 0x7d, 0x0e, 0x2b, 0x96, 0x6f, 0x9f, 0x50, 0xbf, 0x67, 0x7a, 0xee, 0x91,
0xdd, 0x57, 0xa9, 0xba, 0xb4, 0xc8, 0x66, 0x4d, 0x00, 0x71, 0x51, 0x2a, 0x54, 0x05, 0xfe, 0x37,
0xb8, 0x8c, 0x4a, 0x07, 0x50, 0x8c, 0x47, 0x28, 0x7a, 0x0b, 0x20, 0xb0, 0xff, 0x88, 0x2a, 0x7e,
0x23, 0xd8, 0x10, 0xce, 0x73, 0x89, 0x60, 0x37, 0xe8, 0x5d, 0x48, 0x0f, 0x3d, 0x4b, 0xda, 0xc9,
0x6c, 0xdf, 0xe4, 0x77, 0xe2, 0x2f, 0x2f, 0x36, 0x0b, 0x5e, 0x50, 0xd9, 0xb1, 0x1d, 0xba, 0xe7,
0x59, 0x14, 0x0b, 0x80, 0x7e, 0x02, 0x69, 0x9e, 0x2a, 0xd0, 0x9b, 0x90, 0xde, 0x6e, 0x34, 0x6b,
0xda, 0x52, 0xe9, 0xc6, 0xd9, 0x79, 0x79, 0x45, 0x2c, 0x09, 0xef, 0xe0, 0xb1, 0x8b, 0x36, 0x21,
0x7b, 0xb0, 0xbf, 0xdb, 0xdd, 0xe3, 0xe1, 0x75, 0xf3, 0xec, 0xbc, 0xbc, 0x16, 0x75, 0xcb, 0x45,
0x43, 0x6f, 0x41, 0xa6, 0xb3, 0xd7, 0xda, 0x69, 0x6b, 0xc9, 0x12, 0x3a, 0x3b, 0x2f, 0xaf, 0x46,
0xfd, 0xc2, 0xe7, 0xd2, 0x0d, 0xb5, 0xab, 0xf9, 0x48, 0xae, 0xff, 0x4f, 0x12, 0x56, 0x30, 0xe7,
0xb7, 0x3e, 0x6b, 0x79, 0x8e, 0x6d, 0x4e, 0x50, 0x0b, 0xf2, 0xa6, 0xe7, 0x5a, 0x76, 0xec, 0x4c,
0x6d, 0x5d, 0x71, 0x09, 0x4e, 0xb5, 0xc2, 0x56, 0x35, 0xd4, 0xc4, 0x53, 0x23, 0x68, 0x0b, 0x32,
0x16, 0x75, 0xc8, 0xe4, 0xba, 0xdb, 0xb8, 0xa6, 0xb8, 0x34, 0x96, 0x50, 0xc1, 0x1c, 0xc9, 0xf3,
0x1e, 0x61, 0x8c, 0x0e, 0x47, 0x4c, 0xde, 0xc6, 0x69, 0x5c, 0x18, 0x92, 0xe7, 0x86, 0x12, 0xa1,
0x1f, 0x42, 0xf6, 0xd4, 0x76, 0x2d, 0xef, 0x54, 0x5d, 0xb8, 0xd7, 0xdb, 0x55, 0x58, 0xfd, 0x8c,
0xdf, 0xb3, 0x73, 0xce, 0xf2, 0x55, 0x6f, 0xee, 0x37, 0xeb, 0xe1, 0xaa, 0xab, 0xfe, 0x7d, 0xb7,
0xe9, 0xb9, 0xfc, 0xc4, 0xc0, 0x7e, 0xb3, 0xb7, 0x63, 0x34, 0x76, 0xbb, 0x98, 0xaf, 0xfc, 0xad,
0xb3, 0xf3, 0xb2, 0x16, 0x41, 0x76, 0x88, 0xed, 0x70, 0x12, 0x78, 0x07, 0x52, 0x46, 0xf3, 0x4b,
0x2d, 0x59, 0xd2, 0xce, 0xce, 0xcb, 0xc5, 0xa8, 0xdb, 0x70, 0x27, 0xd3, 0xc3, 0x34, 0x3f, 0xae,
0xfe, 0xef, 0x49, 0x28, 0x76, 0x47, 0x16, 0x61, 0x54, 0x46, 0x26, 0x2a, 0x43, 0x61, 0x44, 0x7c,
0xe2, 0x38, 0xd4, 0xb1, 0x83, 0xa1, 0x2a, 0x14, 0xe2, 0x22, 0xf4, 0xf0, 0x35, 0x16, 0x53, 0x91,
0x30, 0xb5, 0xa4, 0x5d, 0x58, 0x3d, 0x92, 0xce, 0xf6, 0x88, 0x29, 0x76, 0x37, 0x25, 0x76, 0xb7,
0xb2, 0xc8, 0x44, 0xdc, 0xab, 0x8a, 0x9a, 0xa3, 0x21, 0xb4, 0xf0, 0xca, 0x51, 0xbc, 0x89, 0x3e,
0x85, 0xe5, 0xa1, 0xe7, 0xda, 0xcc, 0xf3, 0x5f, 0x69, 0x1f, 0x42, 0x30, 0x7a, 0x1f, 0x6e, 0xf0,
0x1d, 0x0e, 0x5d, 0x12, 0xdd, 0xe2, 0xe6, 0x4a, 0xe2, 0xb5, 0x21, 0x79, 0xae, 0xc6, 0xc4, 0x5c,
0xac, 0x7f, 0x0a, 0x2b, 0x33, 0x3e, 0xf0, 0xdb, 0xbc, 0x65, 0x74, 0xdb, 0x75, 0x6d, 0x09, 0x15,
0x21, 0x57, 0xdd, 0x6f, 0x76, 0x1a, 0xcd, 0x2e, 0xa7, 0x1e, 0x45, 0xc8, 0xe1, 0xfd, 0xdd, 0xdd,
0x6d, 0xa3, 0xfa, 0x54, 0x4b, 0xea, 0xff, 0x1d, 0xad, 0xaf, 0xe2, 0x1e, 0xdb, 0xb3, 0xdc, 0xe3,
0x83, 0xab, 0xa7, 0xae, 0xd8, 0xc7, 0xb4, 0x11, 0x71, 0x90, 0x1f, 0x03, 0x88, 0x6d, 0xa4, 0x56,
0x8f, 0xb0, 0xeb, 0xea, 0x8b, 0x4e, 0x58, 0x39, 0xe2, 0xbc, 0x52, 0x30, 0x18, 0xfa, 0x02, 0x8a,
0xa6, 0x37, 0x1c, 0x39, 0x54, 0xe9, 0xa7, 0x5e, 0x45, 0xbf, 0x10, 0xa9, 0x18, 0x2c, 0xce, 0x81,
0xd2, 0xb3, 0x1c, 0xe8, 0xcf, 0x13, 0x50, 0x88, 0x39, 0x3c, 0x4b, 0x85, 0x8a, 0x90, 0xeb, 0xb6,
0x6a, 0x46, 0xa7, 0xd1, 0x7c, 0xac, 0x25, 0x10, 0x40, 0x56, 0x2c, 0x60, 0x4d, 0x4b, 0x72, 0xba,
0x56, 0xdd, 0xdf, 0x6b, 0xed, 0xd6, 0x05, 0x19, 0x42, 0xb7, 0x40, 0x0b, 0x97, 0xb0, 0xd7, 0xee,
0x18, 0x98, 0x4b, 0xd3, 0xe8, 0x26, 0xac, 0x45, 0x52, 0xa5, 0x99, 0x41, 0xb7, 0x01, 0x45, 0xc2,
0xa9, 0x89, 0xac, 0xfe, 0x27, 0xb0, 0x56, 0xf5, 0x5c, 0x46, 0x6c, 0x37, 0xa2, 0xb2, 0x5b, 0x7c,
0xde, 0x4a, 0xd4, 0xb3, 0x2d, 0x99, 0x6d, 0xb7, 0xd7, 0x2e, 0x2f, 0x36, 0x0b, 0x11, 0xb4, 0x51,
0xe3, 0x33, 0x0d, 0x1b, 0x16, 0x3f, 0x53, 0x23, 0xdb, 0x52, 0xc9, 0x73, 0xf9, 0xf2, 0x62, 0x33,
0xd5, 0x6a, 0xd4, 0x30, 0x97, 0xa1, 0x37, 0x21, 0x4f, 0x9f, 0xdb, 0xac, 0x67, 0xf2, 0xec, 0xca,
0xd7, 0x30, 0x83, 0x73, 0x5c, 0x50, 0xe5, 0xc9, 0xf4, 0x4f, 0x93, 0x00, 0x1d, 0x12, 0x1c, 0xab,
0xa1, 0x1f, 0x41, 0x3e, 0x2a, 0xe2, 0xaf, 0x2b, 0x26, 0x63, 0xfb, 0x15, 0xe1, 0xd1, 0x27, 0x61,
0xc4, 0x48, 0x8e, 0xbd, 0x58, 0x51, 0x8d, 0xb5, 0x88, 0xa6, 0xce, 0x12, 0x69, 0x7e, 0xd7, 0x50,
0xdf, 0x57, 0x1b, 0xc7, 0xff, 0xa2, 0xaa, 0xc8, 0xb7, 0x72, 0xce, 0x8a, 0xb9, 0xdd, 0x5b, 0x34,
0xc8, 0xdc, 0x82, 0x3e, 0x59, 0xc2, 0x53, 0xbd, 0x6d, 0x0d, 0x56, 0xfd, 0xb1, 0xcb, 0xbd, 0xee,
0x05, 0xa2, 0x5b, 0xb7, 0xe1, 0x8d, 0x26, 0x65, 0xa7, 0x9e, 0x7f, 0x6c, 0x30, 0x46, 0xcc, 0x01,
0x2f, 0xaa, 0x55, 0x92, 0x99, 0x12, 0xce, 0xc4, 0x0c, 0xe1, 0x5c, 0x87, 0x65, 0xe2, 0xd8, 0x24,
0xa0, 0xf2, 0x96, 0xce, 0xe3, 0xb0, 0xc9, 0x69, 0x31, 0xb1, 0x2c, 0x9f, 0x06, 0x01, 0x95, 0x65,
0x60, 0x1e, 0x4f, 0x05, 0xfa, 0xbf, 0x24, 0x01, 0x1a, 0x2d, 0x63, 0x4f, 0x99, 0xaf, 0x41, 0xf6,
0x88, 0x0c, 0x6d, 0x67, 0x72, 0xdd, 0x21, 0x9b, 0xe2, 0x2b, 0x86, 0x34, 0xb4, 0x23, 0x74, 0xb0,
0xd2, 0x15, 0x6c, 0x79, 0x7c, 0xe8, 0x52, 0x16, 0xb1, 0x65, 0xd1, 0xe2, 0x57, 0xb3, 0x4f, 0xdc,
0x68, 0x61, 0x65, 0x83, 0xbb, 0xde, 0x27, 0x8c, 0x9e, 0x92, 0x49, 0x78, 0x26, 0x54, 0x13, 0x3d,
0xe1, 0x2c, 0x9a, 0x17, 0xf7, 0xd4, 0x5a, 0xcf, 0x08, 0xee, 0xf1, 0x5d, 0xfe, 0x60, 0x05, 0x97,
0xa4, 0x23, 0xd2, 0x2e, 0x3d, 0x12, 0x37, 0xe5, 0xb4, 0xeb, 0xb5, 0x8a, 0xd8, 0x8f, 0x60, 0x65,
0x66, 0x9e, 0x2f, 0x95, 0x29, 0x8d, 0xd6, 0xc1, 0x0f, 0xb5, 0xb4, 0xfa, 0xf7, 0xa9, 0x96, 0xd5,
0xff, 0x2b, 0x01, 0xd0, 0xf2, 0xfc, 0x70, 0xd3, 0x16, 0x3f, 0x0b, 0xe5, 0xc4, 0x23, 0x93, 0xe9,
0x39, 0x2a, 0x3c, 0x17, 0xf2, 0xf4, 0xa9, 0x15, 0x4e, 0x7b, 0x05, 0x1c, 0x47, 0x8a, 0x68, 0x13,
0x0a, 0x72, 0xff, 0x7b, 0x23, 0xcf, 0x97, 0xf9, 0x68, 0x05, 0x83, 0x14, 0x71, 0x4d, 0x74, 0x1f,
0x56, 0x47, 0xe3, 0x43, 0xc7, 0x0e, 0x06, 0xd4, 0x92, 0x98, 0xb4, 0xc0, 0xac, 0x44, 0x52, 0x0e,
0xd3, 0x6b, 0x90, 0x0b, 0xad, 0xa3, 0x75, 0x48, 0x75, 0xaa, 0x2d, 0x6d, 0xa9, 0xb4, 0x76, 0x76,
0x5e, 0x2e, 0x84, 0xe2, 0x4e, 0xb5, 0xc5, 0x7b, 0xba, 0xb5, 0x96, 0x96, 0x98, 0xed, 0xe9, 0xd6,
0x5a, 0xa5, 0x34, 0xbf, 0x25, 0xf5, 0xbf, 0x4e, 0x40, 0x56, 0x72, 0xb6, 0x85, 0x33, 0x36, 0x60,
0x39, 0xac, 0x24, 0x24, 0x91, 0x7c, 0xf7, 0x6a, 0xd2, 0x57, 0x51, 0x1c, 0x4d, 0xee, 0x63, 0xa8,
0x57, 0xfa, 0x0c, 0x8a, 0xf1, 0x8e, 0xd7, 0xda, 0xc5, 0x3f, 0x86, 0x02, 0x0f, 0x94, 0x90, 0xfc,
0x6d, 0x41, 0x56, 0xf2, 0x4a, 0x95, 0x55, 0xae, 0x63, 0xa0, 0x0a, 0x89, 0x1e, 0xc2, 0xb2, 0x64,
0xad, 0xe1, 0x7b, 0xca, 0xc6, 0xf5, 0xe1, 0x88, 0x43, 0xb8, 0xfe, 0x39, 0xa4, 0x5b, 0x94, 0xfa,
0xe8, 0x1e, 0x2c, 0xbb, 0x9e, 0x45, 0xa7, 0x49, 0x54, 0x11, 0x6e, 0x8b, 0x36, 0x6a, 0x9c, 0x70,
0x5b, 0xb4, 0x61, 0xf1, 0xc5, 0xe3, 0x07, 0x34, 0x7c, 0x52, 0xe2, 0xff, 0xf5, 0x0e, 0x14, 0x9f,
0x51, 0xbb, 0x3f, 0x60, 0xd4, 0x12, 0x86, 0x3e, 0x80, 0xf4, 0x88, 0x46, 0xce, 0xaf, 0x2f, 0x0c,
0x1d, 0x4a, 0x7d, 0x2c, 0x50, 0xfc, 0x40, 0x9e, 0x0a, 0x6d, 0xf5, 0x8a, 0xa7, 0x5a, 0xfa, 0x3f,
0x24, 0x61, 0xb5, 0x11, 0x04, 0x63, 0xe2, 0x9a, 0xe1, 0x2d, 0xfb, 0x93, 0xd9, 0x5b, 0xf6, 0xc1,
0xc2, 0x19, 0xce, 0xa8, 0xcc, 0x56, 0xf9, 0x2a, 0x49, 0x26, 0xa3, 0x24, 0xa9, 0x7f, 0x93, 0x08,
0xcb, 0xfb, 0xfb, 0xb1, 0x73, 0x53, 0x5a, 0x3f, 0x3b, 0x2f, 0xdf, 0x8a, 0x5b, 0xa2, 0x5d, 0xf7,
0xd8, 0xf5, 0x4e, 0x5d, 0xf4, 0x36, 0x2f, 0xf7, 0x9b, 0xf5, 0x67, 0x5a, 0xa2, 0x74, 0xfb, 0xec,
0xbc, 0x8c, 0x66, 0x40, 0x98, 0xba, 0xf4, 0x94, 0x5b, 0x6a, 0xd5, 0x9b, 0x35, 0x7e, 0x1f, 0x26,
0x17, 0x58, 0x6a, 0x51, 0xd7, 0xb2, 0xdd, 0x3e, 0xba, 0x07, 0xd9, 0x46, 0xbb, 0xdd, 0x15, 0x05,
0xd8, 0x1b, 0x67, 0xe7, 0xe5, 0x9b, 0x33, 0x28, 0xde, 0xa0, 0x16, 0x07, 0x71, 0x82, 0xc8, 0x6f,
0xca, 0x05, 0x20, 0xce, 0x5d, 0xa8, 0xa5, 0x22, 0xfc, 0xdf, 0x92, 0xa0, 0x19, 0xa6, 0x49, 0x47,
0x8c, 0xf7, 0x2b, 0xd2, 0xdd, 0x81, 0xdc, 0x88, 0xff, 0xb3, 0x45, 0x11, 0xc1, 0xc3, 0xe2, 0xe1,
0xc2, 0x27, 0xde, 0x39, 0xbd, 0x0a, 0xf6, 0x1c, 0x6a, 0x58, 0x43, 0x3b, 0x08, 0x78, 0x71, 0x29,
0x64, 0x38, 0xb2, 0x54, 0xfa, 0x55, 0x02, 0x6e, 0x2e, 0x40, 0xa0, 0x8f, 0x20, 0xed, 0x7b, 0x4e,
0xb8, 0x3d, 0x77, 0xaf, 0x7a, 0x80, 0xe1, 0xaa, 0x58, 0x20, 0xd1, 0x06, 0x00, 0x19, 0x33, 0x8f,
0x88, 0xf1, 0xc5, 0xc6, 0xe4, 0x70, 0x4c, 0x82, 0x9e, 0x41, 0x36, 0xa0, 0xa6, 0x4f, 0x43, 0x3e,
0xf3, 0xf9, 0xff, 0xd7, 0xfb, 0x4a, 0x5b, 0x98, 0xc1, 0xca, 0x5c, 0xa9, 0x02, 0x59, 0x29, 0xe1,
0x11, 0x6d, 0x11, 0x46, 0x84, 0xd3, 0x45, 0x2c, 0xfe, 0xf3, 0x40, 0x21, 0x4e, 0x3f, 0x0c, 0x14,
0xe2, 0xf4, 0xf5, 0x9f, 0x25, 0x01, 0xea, 0xcf, 0x19, 0xf5, 0x5d, 0xe2, 0x54, 0x0d, 0x54, 0x8f,
0x65, 0x48, 0x39, 0xdb, 0xf7, 0x16, 0x3e, 0xcb, 0x45, 0x1a, 0x95, 0xaa, 0xb1, 0x20, 0x47, 0xde,
0x81, 0xd4, 0xd8, 0x77, 0xd4, 0x13, 0xaf, 0x20, 0x22, 0x5d, 0xbc, 0x8b, 0xb9, 0x0c, 0xd5, 0xa7,
0x19, 0x29, 0x75, 0xf5, 0xdb, 0x7c, 0x6c, 0x80, 0xdf, 0x7e, 0x56, 0xfa, 0x00, 0x60, 0xea, 0x35,
0xda, 0x80, 0x4c, 0x75, 0xa7, 0xdd, 0xde, 0xd5, 0x96, 0x64, 0x8d, 0x38, 0xed, 0x12, 0x62, 0xfd,
0xef, 0x13, 0x90, 0xab, 0x1a, 0xea, 0x56, 0xd9, 0x01, 0x4d, 0xe4, 0x12, 0x93, 0xfa, 0xac, 0x47,
0x9f, 0x8f, 0x6c, 0x7f, 0xa2, 0xd2, 0xc1, 0xf5, 0x2c, 0x7e, 0x95, 0x6b, 0x55, 0xa9, 0xcf, 0xea,
0x42, 0x07, 0x61, 0x28, 0x52, 0x35, 0xc5, 0x9e, 0x49, 0xc2, 0xe4, 0xbc, 0x71, 0xfd, 0x52, 0x48,
0xf6, 0x37, 0x6d, 0x07, 0xb8, 0x10, 0x1a, 0xa9, 0x92, 0x40, 0x3f, 0x80, 0x9b, 0xfb, 0xbe, 0x39,
0xa0, 0x01, 0x93, 0x83, 0x2a, 0x97, 0x3f, 0x87, 0xbb, 0x8c, 0x04, 0xc7, 0xbd, 0x81, 0x1d, 0x30,
0xcf, 0x9f, 0xf4, 0x7c, 0xca, 0xa8, 0xcb, 0xfb, 0x7b, 0xe2, 0x0b, 0x80, 0xaa, 0xc1, 0xef, 0x70,
0xcc, 0x13, 0x09, 0xc1, 0x21, 0x62, 0x97, 0x03, 0xf4, 0x06, 0x14, 0x39, 0x61, 0xab, 0xd1, 0x23,
0x32, 0x76, 0x58, 0x80, 0x7e, 0x04, 0xe0, 0x78, 0xfd, 0xde, 0x2b, 0x67, 0xf2, 0xbc, 0xe3, 0xf5,
0xe5, 0x5f, 0xfd, 0xf7, 0x41, 0xab, 0xd9, 0xc1, 0x88, 0x30, 0x73, 0x10, 0x3e, 0x2e, 0xa0, 0xc7,
0xa0, 0x0d, 0x28, 0xf1, 0xd9, 0x21, 0x25, 0xac, 0x37, 0xa2, 0xbe, 0xed, 0x59, 0xaf, 0xb4, 0xa4,
0x6b, 0x91, 0x56, 0x4b, 0x28, 0xe9, 0xbf, 0x4e, 0x00, 0x60, 0x72, 0x14, 0x12, 0x80, 0x1f, 0xc0,
0x8d, 0xc0, 0x25, 0xa3, 0x60, 0xe0, 0xb1, 0x9e, 0xed, 0x32, 0xea, 0x9f, 0x10, 0x47, 0x15, 0x88,
0x5a, 0xd8, 0xd1, 0x50, 0x72, 0xf4, 0x01, 0xa0, 0x63, 0x4a, 0x47, 0x3d, 0xcf, 0xb1, 0x7a, 0x61,
0xa7, 0xfc, 0x44, 0x91, 0xc6, 0x1a, 0xef, 0xd9, 0x77, 0xac, 0x76, 0x28, 0x47, 0xdb, 0xb0, 0xc1,
0x57, 0x80, 0xba, 0xcc, 0xb7, 0x69, 0xd0, 0x3b, 0xf2, 0xfc, 0x5e, 0xe0, 0x78, 0xa7, 0xbd, 0x23,
0xcf, 0x71, 0xbc, 0x53, 0xea, 0x87, 0xe5, 0x77, 0xc9, 0xf1, 0xfa, 0x75, 0x09, 0xda, 0xf1, 0xfc,
0xb6, 0xe3, 0x9d, 0xee, 0x84, 0x08, 0xce, 0x12, 0xa6, 0xd3, 0x66, 0xb6, 0x79, 0x1c, 0xb2, 0x84,
0x48, 0xda, 0xb1, 0xcd, 0x63, 0x74, 0x0f, 0x56, 0xa8, 0x43, 0x45, 0x11, 0x27, 0x51, 0x19, 0x81,
0x2a, 0x86, 0x42, 0x0e, 0xd2, 0x7f, 0x07, 0xf2, 0x2d, 0x87, 0x98, 0xe2, 0x43, 0x10, 0x2f, 0x89,
0x4d, 0xcf, 0xe5, 0x41, 0x60, 0xbb, 0x4c, 0x66, 0xc7, 0x3c, 0x8e, 0x8b, 0xf4, 0x9f, 0x00, 0xfc,
0xd4, 0xb3, 0xdd, 0x8e, 0x77, 0x4c, 0x5d, 0xf1, 0x66, 0xce, 0x59, 0xaf, 0xda, 0xca, 0x3c, 0x56,
0x2d, 0xc1, 0xc9, 0x89, 0x4b, 0xfa, 0xd4, 0x8f, 0x9e, 0x8e, 0x65, 0x93, 0x5f, 0x2e, 0x59, 0xec,
0x79, 0xac, 0x6a, 0xa0, 0x32, 0x64, 0x4d, 0xd2, 0x0b, 0x4f, 0x5e, 0x71, 0x3b, 0x7f, 0x79, 0xb1,
0x99, 0xa9, 0x1a, 0x4f, 0xe9, 0x04, 0x67, 0x4c, 0xf2, 0x94, 0x4e, 0xf8, 0xed, 0x6b, 0x12, 0x71,
0x5e, 0x84, 0x99, 0xa2, 0xbc, 0x7d, 0xab, 0x06, 0x3f, 0x0c, 0x38, 0x6b, 0x12, 0xfe, 0x8b, 0x3e,
0x82, 0xa2, 0x02, 0xf5, 0x06, 0x24, 0x18, 0x48, 0xae, 0xba, 0xbd, 0x7a, 0x79, 0xb1, 0x09, 0x12,
0xf9, 0x84, 0x04, 0x03, 0x0c, 0x12, 0xcd, 0xff, 0xa3, 0x3a, 0x14, 0xbe, 0xf2, 0x6c, 0xb7, 0xc7,
0xc4, 0x24, 0x54, 0x25, 0xbd, 0xf0, 0xfc, 0x4c, 0xa7, 0xaa, 0xca, 0x7b, 0xf8, 0x2a, 0x92, 0xe8,
0xff, 0x9a, 0x80, 0x02, 0xb7, 0x69, 0x1f, 0xd9, 0x26, 0xbf, 0x2d, 0x5f, 0x3f, 0xd3, 0xdf, 0x81,
0x94, 0x19, 0xf8, 0x6a, 0x6e, 0x22, 0xd5, 0x55, 0xdb, 0x18, 0x73, 0x19, 0xfa, 0x02, 0xb2, 0xb2,
0xb8, 0x50, 0x49, 0x5e, 0xff, 0xee, 0x7b, 0x5d, 0xb9, 0xa8, 0xf4, 0xc4, 0x5e, 0x4e, 0xbd, 0x13,
0xb3, 0x2c, 0xe2, 0xb8, 0x08, 0xdd, 0x86, 0xa4, 0xe9, 0x8a, 0xa0, 0x50, 0xdf, 0xd2, 0xaa, 0x4d,
0x9c, 0x34, 0x5d, 0xfd, 0x9f, 0x13, 0xb0, 0x52, 0x77, 0x4d, 0x7f, 0x22, 0x92, 0x24, 0xdf, 0x88,
0xbb, 0x90, 0x0f, 0xc6, 0x87, 0xc1, 0x24, 0x60, 0x74, 0x18, 0x3e, 0xd5, 0x47, 0x02, 0xd4, 0x80,
0x3c, 0x71, 0xfa, 0x9e, 0x6f, 0xb3, 0xc1, 0x50, 0x71, 0xe3, 0xc5, 0x89, 0x39, 0x6e, 0xb3, 0x62,
0x84, 0x2a, 0x78, 0xaa, 0x1d, 0xa6, 0xe2, 0x94, 0x70, 0x56, 0xa4, 0xe2, 0xb7, 0xa1, 0xe8, 0x90,
0x21, 0xa7, 0xc2, 0x3d, 0x5e, 0x72, 0x89, 0x79, 0xa4, 0x71, 0x41, 0xc9, 0x78, 0x19, 0xa9, 0xeb,
0x90, 0x8f, 0x8c, 0xa1, 0x35, 0x28, 0x18, 0xf5, 0x76, 0xef, 0xe3, 0xad, 0x87, 0xbd, 0xc7, 0xd5,
0x3d, 0x6d, 0x49, 0x31, 0x81, 0x7f, 0x4c, 0xc0, 0xca, 0x9e, 0x8c, 0x41, 0x45, 0x9c, 0xee, 0xc1,
0xb2, 0x4f, 0x8e, 0x58, 0x48, 0xed, 0xd2, 0x32, 0xb8, 0x78, 0x12, 0xe0, 0xd4, 0x8e, 0x77, 0x2d,
0xa6, 0x76, 0xb1, 0x0f, 0x45, 0xa9, 0x6b, 0x3f, 0x14, 0xa5, 0x7f, 0x2b, 0x1f, 0x8a, 0xf4, 0x5f,
0x26, 0x60, 0x4d, 0x5d, 0xd4, 0xe1, 0xc7, 0x11, 0xf4, 0x1e, 0xe4, 0xe5, 0x9d, 0x3d, 0x25, 0xa6,
0xe2, 0x7b, 0x85, 0xc4, 0x35, 0x6a, 0x38, 0x27, 0xbb, 0x1b, 0x16, 0xfa, 0x71, 0xec, 0x55, 0xf4,
0x0a, 0x7a, 0x38, 0x67, 0xbd, 0x32, 0x7d, 0x2a, 0xbd, 0xf2, 0x7b, 0xc9, 0x26, 0x14, 0x94, 0x03,
0xa2, 0x6c, 0x90, 0x75, 0x20, 0x48, 0x51, 0x93, 0x0c, 0xa9, 0x7e, 0x1f, 0xd2, 0xdc, 0x0c, 0x02,
0xc8, 0xb6, 0xbf, 0x6c, 0x77, 0xea, 0x7b, 0xb2, 0xf2, 0xda, 0x69, 0x88, 0x8f, 0x56, 0xcb, 0x90,
0xaa, 0x37, 0x0f, 0xb4, 0xe4, 0xfb, 0xbf, 0x4e, 0x41, 0x3e, 0xaa, 0xe8, 0xf9, 0x79, 0xe0, 0x34,
0x72, 0x49, 0xbe, 0xeb, 0x45, 0xf2, 0xa6, 0x20, 0x90, 0x79, 0x63, 0x77, 0x77, 0xbf, 0x6a, 0x74,
0xea, 0x35, 0xed, 0x0b, 0xc9, 0x33, 0x23, 0x80, 0xe1, 0x38, 0x1e, 0x8f, 0x68, 0x0b, 0xe9, 0x53,
0x9e, 0xf9, 0x42, 0xbd, 0x1e, 0x46, 0xa8, 0x90, 0x64, 0xbe, 0x03, 0x39, 0xa3, 0xdd, 0x6e, 0x3c,
0x6e, 0xd6, 0x6b, 0xda, 0xd7, 0x89, 0xd2, 0xf7, 0xce, 0xce, 0xcb, 0x37, 0xa6, 0xa6, 0x82, 0xc0,
0xee, 0xbb, 0xd4, 0x12, 0xa8, 0x6a, 0xb5, 0xde, 0xe2, 0xe3, 0xbd, 0x48, 0xce, 0xa3, 0x04, 0xbb,
0x12, 0x5f, 0x02, 0xf2, 0x2d, 0x5c, 0x6f, 0x19, 0x98, 0x8f, 0xf8, 0x75, 0x72, 0xce, 0xaf, 0x96,
0x4f, 0x47, 0xc4, 0xe7, 0x63, 0x6e, 0x84, 0x5f, 0xc4, 0x5e, 0xa4, 0xe4, 0x6b, 0xf1, 0xf4, 0x19,
0x83, 0x12, 0x6b, 0xc2, 0x47, 0x13, 0xcf, 0x3f, 0xc2, 0x4c, 0x6a, 0x6e, 0xb4, 0x36, 0x23, 0x3e,
0xe3, 0x56, 0x74, 0x58, 0xc6, 0xdd, 0x66, 0x53, 0xcc, 0x2e, 0x3d, 0x37, 0x3b, 0x3c, 0x76, 0x5d,
0x8e, 0xb9, 0x0f, 0xb9, 0xf0, 0x75, 0x48, 0xfb, 0x3a, 0x3d, 0xe7, 0x50, 0x35, 0x7c, 0xda, 0x12,
0x03, 0x3e, 0xe9, 0x76, 0xc4, 0x07, 0xbb, 0x17, 0x99, 0xf9, 0x01, 0x07, 0x63, 0x66, 0x71, 0x66,
0x5f, 0x8e, 0xa8, 0xf6, 0xd7, 0x19, 0xc9, 0x70, 0x22, 0x8c, 0xe4, 0xd9, 0xdc, 0x0e, 0xae, 0xff,
0x54, 0x7e, 0xdb, 0x7b, 0x91, 0x9d, 0xb3, 0x83, 0xe9, 0x57, 0xd4, 0x64, 0xd4, 0x9a, 0x3e, 0x86,
0x47, 0x5d, 0xef, 0xff, 0x01, 0xe4, 0xc2, 0x6c, 0x88, 0x36, 0x20, 0xfb, 0x6c, 0x1f, 0x3f, 0xad,
0x63, 0x6d, 0x49, 0xae, 0x4e, 0xd8, 0xf3, 0x4c, 0x5e, 0x27, 0x65, 0x58, 0xde, 0x33, 0x9a, 0xc6,
0xe3, 0x3a, 0x0e, 0x1f, 0xe3, 0x43, 0x80, 0x3a, 0xd2, 0x25, 0x4d, 0x0d, 0x10, 0xd9, 0xdc, 0xbe,
0xfb, 0xcd, 0xb7, 0x1b, 0x4b, 0xbf, 0xf8, 0x76, 0x63, 0xe9, 0x57, 0xdf, 0x6e, 0x24, 0x5e, 0x5c,
0x6e, 0x24, 0xbe, 0xb9, 0xdc, 0x48, 0xfc, 0xfc, 0x72, 0x23, 0xf1, 0x1f, 0x97, 0x1b, 0x89, 0xc3,
0xac, 0xa0, 0x9b, 0x9f, 0xfc, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0xf6, 0x7c, 0x35, 0x9d,
0x22, 0x00, 0x00,
}<|fim▁end|> | |
<|file_name|>destructured-fn-argument.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print a
// gdb-check:$1 = 1
// gdb-command:print b
// gdb-check:$2 = false
// gdb-command:continue
// gdb-command:print a
// gdb-check:$3 = 2
// gdb-command:print b
// gdb-check:$4 = 3
// gdb-command:print c
// gdb-check:$5 = 4
// gdb-command:continue
// gdb-command:print a
// gdb-check:$6 = 5
// gdb-command:print b
// gdb-check:$7 = {6, 7}
// gdb-command:continue
// gdb-command:print h
// gdb-check:$8 = 8
// gdb-command:print i
// gdb-check:$9 = {a = 9, b = 10}
// gdb-command:print j
// gdb-check:$10 = 11
// gdb-command:continue
// gdb-command:print k
// gdb-check:$11 = 12
// gdb-command:print l
// gdb-check:$12 = 13
// gdb-command:continue
// gdb-command:print m
// gdb-check:$13 = 14
// gdb-command:print n
// gdb-check:$14 = 16
// gdb-command:continue
// gdb-command:print o
// gdb-check:$15 = 18
// gdb-command:continue
// gdb-command:print p
// gdb-check:$16 = 19
// gdb-command:print q
// gdb-check:$17 = 20
// gdb-command:print r
// gdb-check:$18 = {a = 21, b = 22}
// gdb-command:continue
// gdb-command:print s
// gdb-check:$19 = 24
// gdb-command:print t
// gdb-check:$20 = 23
// gdb-command:continue
// gdb-command:print u
// gdb-check:$21 = 25
// gdb-command:print v
// gdb-check:$22 = 26
// gdb-command:print w
// gdb-check:$23 = 27
// gdb-command:print x
// gdb-check:$24 = 28
// gdb-command:print y
// gdb-check:$25 = 29
// gdb-command:print z
// gdb-check:$26 = 30
// gdb-command:print ae
// gdb-check:$27 = 31
// gdb-command:print oe
// gdb-check:$28 = 32
// gdb-command:print ue
// gdb-check:$29 = 33
// gdb-command:continue
// gdb-command:print aa
// gdb-check:$30 = {34, 35}
// gdb-command:continue
// gdb-command:print bb
// gdb-check:$31 = {36, 37}
// gdb-command:continue
// gdb-command:print cc
// gdb-check:$32 = 38
// gdb-command:continue
// gdb-command:print dd
// gdb-check:$33 = {40, 41, 42}
// gdb-command:continue
// gdb-command:print *ee
// gdb-check:$34 = {43, 44, 45}
// gdb-command:continue
// gdb-command:print *ff
// gdb-check:$35 = 46
// gdb-command:print gg
// gdb-check:$36 = {47, 48}
// gdb-command:continue
// gdb-command:print *hh
// gdb-check:$37 = 50
// gdb-command:continue
// gdb-command:print ii
// gdb-check:$38 = 51
// gdb-command:continue
// gdb-command:print *jj
// gdb-check:$39 = 52
// gdb-command:continue
// gdb-command:print kk
// gdb-check:$40 = 53
// gdb-command:print ll
// gdb-check:$41 = 54
// gdb-command:continue
// gdb-command:print mm
// gdb-check:$42 = 55
// gdb-command:print *nn
// gdb-check:$43 = 56
// gdb-command:continue
// gdb-command:print oo
// gdb-check:$44 = 57
// gdb-command:print pp
// gdb-check:$45 = 58
// gdb-command:print qq
// gdb-check:$46 = 59
// gdb-command:continue
// gdb-command:print rr
// gdb-check:$47 = 60
// gdb-command:print ss
// gdb-check:$48 = 61
// gdb-command:print tt
// gdb-check:$49 = 62
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print a
// lldb-check:[...]$0 = 1
// lldb-command:print b
// lldb-check:[...]$1 = false
// lldb-command:continue
// lldb-command:print a
// lldb-check:[...]$2 = 2
// lldb-command:print b
// lldb-check:[...]$3 = 3
// lldb-command:print c
// lldb-check:[...]$4 = 4
// lldb-command:continue
// lldb-command:print a
// lldb-check:[...]$5 = 5
// lldb-command:print b
// lldb-check:[...]$6 = (6, 7)
// lldb-command:continue
// lldb-command:print h
// lldb-check:[...]$7 = 8
// lldb-command:print i
// lldb-check:[...]$8 = Struct { a: 9, b: 10 }
// lldb-command:print j
// lldb-check:[...]$9 = 11
// lldb-command:continue
// lldb-command:print k
// lldb-check:[...]$10 = 12
// lldb-command:print l
// lldb-check:[...]$11 = 13
// lldb-command:continue
// lldb-command:print m
// lldb-check:[...]$12 = 14
// lldb-command:print n
// lldb-check:[...]$13 = 16
// lldb-command:continue
// lldb-command:print o
// lldb-check:[...]$14 = 18
// lldb-command:continue
// lldb-command:print p
// lldb-check:[...]$15 = 19
// lldb-command:print q
// lldb-check:[...]$16 = 20
// lldb-command:print r
// lldb-check:[...]$17 = Struct { a: 21, b: 22 }
// lldb-command:continue
// lldb-command:print s
// lldb-check:[...]$18 = 24
// lldb-command:print t
// lldb-check:[...]$19 = 23
// lldb-command:continue
// lldb-command:print u
// lldb-check:[...]$20 = 25
// lldb-command:print v
// lldb-check:[...]$21 = 26
// lldb-command:print w
// lldb-check:[...]$22 = 27
// lldb-command:print x
// lldb-check:[...]$23 = 28
// lldb-command:print y
// lldb-check:[...]$24 = 29
// lldb-command:print z
// lldb-check:[...]$25 = 30
// lldb-command:print ae
// lldb-check:[...]$26 = 31
// lldb-command:print oe
// lldb-check:[...]$27 = 32
// lldb-command:print ue
// lldb-check:[...]$28 = 33
// lldb-command:continue
// lldb-command:print aa
// lldb-check:[...]$29 = (34, 35)
// lldb-command:continue
// lldb-command:print bb
// lldb-check:[...]$30 = (36, 37)
// lldb-command:continue
// lldb-command:print cc
// lldb-check:[...]$31 = 38
// lldb-command:continue
// lldb-command:print dd
// lldb-check:[...]$32 = (40, 41, 42)
// lldb-command:continue
// lldb-command:print *ee
// lldb-check:[...]$33 = (43, 44, 45)
// lldb-command:continue
// lldb-command:print *ff
// lldb-check:[...]$34 = 46
// lldb-command:print gg
// lldb-check:[...]$35 = (47, 48)
// lldb-command:continue
// lldb-command:print *hh
// lldb-check:[...]$36 = 50
// lldb-command:continue
// lldb-command:print ii
// lldb-check:[...]$37 = 51
// lldb-command:continue
// lldb-command:print *jj
// lldb-check:[...]$38 = 52
// lldb-command:continue
// lldb-command:print kk
// lldb-check:[...]$39 = 53
// lldb-command:print ll
// lldb-check:[...]$40 = 54
// lldb-command:continue
// lldb-command:print mm
// lldb-check:[...]$41 = 55
// lldb-command:print *nn
// lldb-check:[...]$42 = 56
// lldb-command:continue
// lldb-command:print oo
// lldb-check:[...]$43 = 57
// lldb-command:print pp
// lldb-check:[...]$44 = 58
// lldb-command:print qq
// lldb-check:[...]$45 = 59
// lldb-command:continue
// lldb-command:print rr
// lldb-check:[...]$46 = 60
// lldb-command:print ss
// lldb-check:[...]$47 = 61
// lldb-command:print tt
// lldb-check:[...]$48 = 62
// lldb-command:continue
#![allow(unused_variables)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![omit_gdb_pretty_printer_section]
use self::Univariant::Unit;
struct Struct {
a: i64,
b: i32
}
enum Univariant {
Unit(i32)
}
struct TupleStruct (f64, isize);
fn simple_tuple((a, b): (isize, bool)) {
zzz(); // #break<|fim▁hole|>
fn nested_tuple((a, (b, c)): (isize, (u16, u16))) {
zzz(); // #break
}
fn destructure_only_first_level((a, b): (isize, (u32, u32))) {
zzz(); // #break
}
fn struct_as_tuple_element((h, i, j): (i16, Struct, i16)) {
zzz(); // #break
}
fn struct_pattern(Struct { a: k, b: l }: Struct) {
zzz(); // #break
}
fn ignored_tuple_element((m, _, n): (isize, u16, i32)) {
zzz(); // #break
}
fn ignored_struct_field(Struct { b: o, .. }: Struct) {
zzz(); // #break
}
fn one_struct_destructured_one_not((Struct { a: p, b: q }, r): (Struct, Struct)) {
zzz(); // #break
}
fn different_order_of_struct_fields(Struct { b: s, a: t }: Struct ) {
zzz(); // #break
}
fn complex_nesting(((u, v ), ((w, (x, Struct { a: y, b: z})), Struct { a: ae, b: oe }), ue ):
((i16, i32), ((i64, (i32, Struct, )), Struct ), u16))
{
zzz(); // #break
}
fn managed_box(&aa: &(isize, isize)) {
zzz(); // #break
}
fn borrowed_pointer(&bb: &(isize, isize)) {
zzz(); // #break
}
fn contained_borrowed_pointer((&cc, _): (&isize, isize)) {
zzz(); // #break
}
fn unique_pointer(box dd: Box<(isize, isize, isize)>) {
zzz(); // #break
}
fn ref_binding(ref ee: (isize, isize, isize)) {
zzz(); // #break
}
fn ref_binding_in_tuple((ref ff, gg): (isize, (isize, isize))) {
zzz(); // #break
}
fn ref_binding_in_struct(Struct { b: ref hh, .. }: Struct) {
zzz(); // #break
}
fn univariant_enum(Unit(ii): Univariant) {
zzz(); // #break
}
fn univariant_enum_with_ref_binding(Unit(ref jj): Univariant) {
zzz(); // #break
}
fn tuple_struct(TupleStruct(kk, ll): TupleStruct) {
zzz(); // #break
}
fn tuple_struct_with_ref_binding(TupleStruct(mm, ref nn): TupleStruct) {
zzz(); // #break
}
fn multiple_arguments((oo, pp): (isize, isize), qq : isize) {
zzz(); // #break
}
fn main() {
simple_tuple((1, false));
nested_tuple((2, (3, 4)));
destructure_only_first_level((5, (6, 7)));
struct_as_tuple_element((8, Struct { a: 9, b: 10 }, 11));
struct_pattern(Struct { a: 12, b: 13 });
ignored_tuple_element((14, 15, 16));
ignored_struct_field(Struct { a: 17, b: 18 });
one_struct_destructured_one_not((Struct { a: 19, b: 20 }, Struct { a: 21, b: 22 }));
different_order_of_struct_fields(Struct { a: 23, b: 24 });
complex_nesting(((25, 26), ((27, (28, Struct { a: 29, b: 30})), Struct { a: 31, b: 32 }), 33));
managed_box(&(34, 35));
borrowed_pointer(&(36, 37));
contained_borrowed_pointer((&38, 39));
unique_pointer(box() (40, 41, 42));
ref_binding((43, 44, 45));
ref_binding_in_tuple((46, (47, 48)));
ref_binding_in_struct(Struct { a: 49, b: 50 });
univariant_enum(Unit(51));
univariant_enum_with_ref_binding(Unit(52));
tuple_struct(TupleStruct(53.0, 54));
tuple_struct_with_ref_binding(TupleStruct(55.0, 56));
multiple_arguments((57, 58), 59);
fn nested_function(rr: isize, (ss, tt): (isize, isize)) {
zzz(); // #break
}
nested_function(60, (61, 62));
}
fn zzz() { () }<|fim▁end|> | } |
<|file_name|>manager.py<|end_file_name|><|fim▁begin|># Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift.common.utils import search_tree, remove_file, write_file
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print _("WARNING: Unable to increase file descriptor limit. "
"Running as non-root?")
<|fim▁hole|>
def command(func):
    """Mark a method as a publicly accessible manager command.

    The wrapper normalizes whatever the wrapped function returns into an
    exit status: 0 for a falsy (successful) result, 1 otherwise.

    :param func: function to expose as a command
    :returns: wrapping function that returns 0 or 1
    """
    # Flag read back by Manager.get_command()/list_commands().
    func.publicly_accessible = True

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        return 1 if func(*args, **kwargs) else 0
    return wrapped
def watch_server_pids(server_pids, interval=1, **kwargs):
    """Monitor a collection of server pids, yielding back those pids that
    aren't responding to signals.

    :param server_pids: a dict, lists of pids [int,...] keyed on
                        Server objects
    :param interval: maximum number of seconds to keep polling before
                     returning, a number
    """
    status = {}
    start = time.time()
    end = start + interval
    server_pids = dict(server_pids)  # take a private copy so we can prune it
    while True:
        for server, pids in server_pids.items():
            for pid in pids:
                try:
                    # let pid stop if it wants to (non-blocking reap, so a
                    # zombie child doesn't keep looking alive below)
                    os.waitpid(pid, os.WNOHANG)
                except OSError, e:
                    if e.errno not in (errno.ECHILD, errno.ESRCH):
                        raise  # else no such child/process
            # check running pids for server
            status[server] = server.get_running_pids(**kwargs)
            for pid in pids:
                # original pids no longer in running pids!
                if pid not in status[server]:
                    yield server, pid
            # update active pids list using running_pids
            server_pids[server] = status[server]
        if not [p for server, pids in status.items() for p in pids]:
            # no more running pids
            break
        if time.time() > end:
            break
        else:
            time.sleep(0.1)
class UnknownCommandError(Exception):
    """Raised when a requested manager command name does not resolve to a
    publicly accessible command."""
class Manager():
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
server_names = set()
for server in servers:
if server == 'all':
server_names.update(ALL_SERVERS)
elif server == 'main':
server_names.update(MAIN_SERVERS)
elif server == 'rest':
server_names.update(REST_SERVERS)
elif '*' in server:
# convert glob to regex
server_names.update([s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
else:
server_names.add(server)
self.servers = set()
for name in server_names:
self.servers.add(Server(name, run_dir))
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
for server in self.servers:
server.launch(**kwargs)
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print _('\nuser quit')
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print _('No %s running') % server
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
# keep track of the pids yeiled back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print _("%s (%s) appears to have stopped") % (server, killed_pid)
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all proccesses have been stopped
return 0
# reached interval n watch_pids w/o killing all servers
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
print _('Waited %s seconds for %s to die; giving up') % (
kill_wait, server)
return 1
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.servers:
m = Manager([server.server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
@command
def force_reload(self, **kwargs):
"""alias for reload
"""
return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
try:
f = getattr(self, cmd)
except AttributeError:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
:returns: a list of string tuples (cmd, help), the method names who are
decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
class Server():
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
if '-' not in server:
server = '%s-server' % server
self.server = server.lower()
self.type = server.rsplit('-', 1)[0]
self.cmd = 'swift-%s' % server
self.procs = []
self.run_dir = run_dir
def __str__(self):
return self.server
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
try:
return self.server == other.server
except AttributeError:
return False
def get_pid_file_name(self, conf_file):
"""Translate conf_file to a corresponding pid_file
:param conf_file: an conf_file for this server, a string
:returns: the pid_file for this conf_file
"""
return conf_file.replace(
os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
'%s-server' % self.type, self.server, 1).rsplit(
'.conf', 1)[0] + '.pid'
def get_conf_file_name(self, pid_file):
"""Translate pid_file to a corresponding conf_file
:param pid_file: a pid_file for this server, a string
:returns: the conf_file for this pid_file
"""
if self.server in STANDALONE_SERVERS:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1)\
.rsplit('.pid', 1)[0] + '.conf'
else:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
self.server, '%s-server' % self.type, 1).rsplit(
'.pid', 1)[0] + '.conf'
def conf_files(self, **kwargs):
"""Get conf files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of conf files
"""
if self.server in STANDALONE_SERVERS:
found_conf_files = search_tree(SWIFT_DIR, self.server + '*',
'.conf')
else:
found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type,
'.conf')
number = kwargs.get('number')
if number:
try:
conf_files = [found_conf_files[number - 1]]
except IndexError:
conf_files = []
else:
conf_files = found_conf_files
if not conf_files:
# maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'):
print _('Unable to locate config %sfor %s') % (
('number %s ' % number if number else ''), self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'):
if found_conf_files:
print _('Found configs:')
for i, conf_file in enumerate(found_conf_files):
print ' %d) %s' % (i + 1, conf_file)
return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of pid files
"""
pid_files = search_tree(self.run_dir, '%s*' % self.server, '.pid')
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_file) in conf_files]
return pid_files
def iter_pid_files(self, **kwargs):
"""Generator, yields (pid_file, pids)
"""
for pid_file in self.pid_files(**kwargs):
yield pid_file, int(open(pid_file).read().strip())
def signal_pids(self, sig, **kwargs):
"""Send a signal to pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
try:
if sig != signal.SIG_DFL:
print _('Signal %s pid: %s signal: %s') % (self.server,
pid, sig)
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
# pid does not exist
if kwargs.get('verbose'):
print _("Removing stale pid file %s") % pid_file
remove_file(pid_file)
elif e.errno == errno.EPERM:
print _("No permission to signal PID %d") % pid
else:
# process exists
pids[pid] = pid_file
return pids
def get_running_pids(self, **kwargs):
"""Get running pids
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop
def kill_running_pids(self, **kwargs):
"""Kill running pids
:param graceful: if True, attempt SIGHUP on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
return self.signal_pids(sig, **kwargs)
def status(self, pids=None, **kwargs):
"""Display status of server
:param: pids, if not supplied pids will be populated automatically
:param: number, if supplied will only lookup the nth server
:returns: 1 if server is not running, 0 otherwise
"""
if pids is None:
pids = self.get_running_pids(**kwargs)
if not pids:
number = kwargs.get('number', 0)
if number:
kwargs['quiet'] = True
conf_files = self.conf_files(**kwargs)
if conf_files:
print _("%s #%d not running (%s)") % (self.server, number,
conf_files[0])
else:
print _("No %s running") % self.server
return 1
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
return 0
def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
"""Launch a subprocess for this server.
:param conf_file: path to conf_file to use as first arg
:param once: boolean, add once argument to command
:param wait: boolean, if true capture stdout with a pipe
:param daemon: boolean, if true ask server to log to console
:returns : the pid of the spawned process
"""
args = [self.cmd, conf_file]
if once:
args.append('once')
if not daemon:
# ask the server to log to console
args.append('verbose')
# figure out what we're going to do with stdio
if not daemon:
# do nothing, this process is open until the spawns close anyway
re_out = None
re_err = None
else:
re_err = subprocess.STDOUT
if wait:
# we're going to need to block on this...
re_out = subprocess.PIPE
else:
re_out = open(os.devnull, 'w+b')
proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
pid_file = self.get_pid_file_name(conf_file)
write_file(pid_file, proc.pid)
self.procs.append(proc)
return proc.pid
def wait(self, **kwargs):
"""
wait on spawned procs to start
"""
status = 0
for proc in self.procs:
# wait for process to close its stdout
output = proc.stdout.read()
if output:
print output
start = time.time()
# wait for process to die (output may just be a warning)
while time.time() - start < WARNING_WAIT:
time.sleep(0.1)
if proc.poll() is not None:
status += proc.returncode
break
return status
def interact(self, **kwargs):
"""
wait on spawned procs to terminate
"""
status = 0
for proc in self.procs:
# wait for process to terminate
proc.communicate()
if proc.returncode:
status += 1
return status
def launch(self, **kwargs):
"""
Collect conf files and attempt to spawn the processes for this server
"""
conf_files = self.conf_files(**kwargs)
if not conf_files:
return []
pids = self.get_running_pids(**kwargs)
already_started = False
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
# for legacy compat you can't start other servers if one server is
# already running (unless -n specifies which one you want), this
# restriction could potentially be lifted, and launch could start
# any unstarted instances
if conf_file in conf_files:
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
elif not kwargs.get('number', 0):
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, pid_file)
if already_started:
print _("%s already started...") % self.server
return []
if self.server not in START_ONCE_SERVERS:
kwargs['once'] = False
pids = {}
for conf_file in conf_files:
if kwargs.get('once'):
msg = _('Running %s once') % self.server
else:
msg = _('Starting %s') % self.server
print '%s...(%s)' % (msg, conf_file)
try:
pid = self.spawn(conf_file, **kwargs)
except OSError, e:
if e.errno == errno.ENOENT:
# TODO: should I check if self.cmd exists earlier?
print _("%s does not exist") % self.cmd
break
pids[pid] = conf_file
return pids
def stop(self, **kwargs):
"""Send stop signals to pids for this server
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.kill_running_pids(**kwargs)<|fim▁end|> | os.environ['PYTHON_EGG_CACHE'] = '/tmp' |
<|file_name|>status_change.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package github
import (
"sync"
"k8s.io/kubernetes/pkg/util/sets"
)
// StatusChange keeps track of issue/commit for status changes.
//
// It records which head commit each pull-request currently points to,
// and which commit SHAs have had a status change since the last poll.
type StatusChange struct {
	heads        map[int]string      // Pull-Request ID -> head-sha
	pullRequests map[string]sets.Int // head-sha -> Pull-Request IDs
	changed      sets.String         // SHA of commits whose status changed
	mutex        sync.Mutex          // guards all fields above
}
// NewStatusChange creates a new status change tracker.
func NewStatusChange() *StatusChange {
	sc := &StatusChange{}
	sc.heads = make(map[int]string)
	sc.pullRequests = make(map[string]sets.Int)
	sc.changed = sets.NewString()
	return sc
}
// UpdatePullRequestHead updates the head commit for a pull-request.
//
// The pull-request is detached from its previous head (if any) and
// registered under the new one.
func (s *StatusChange) UpdatePullRequestHead(pullRequestID int, newHead string) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if oldHead, has := s.heads[pullRequestID]; has {
		// BUG FIX: only detach this pull-request from the old head.
		// Deleting the whole map entry (as before) also dropped any
		// other pull-requests that share the same head sha.
		if prs, ok := s.pullRequests[oldHead]; ok {
			prs.Delete(pullRequestID)
			if prs.Len() == 0 {
				delete(s.pullRequests, oldHead)
			}
		}
	}
	s.heads[pullRequestID] = newHead

	if _, has := s.pullRequests[newHead]; !has {
		s.pullRequests[newHead] = sets.NewInt()
	}
	s.pullRequests[newHead].Insert(pullRequestID)
}
// CommitStatusChanged must be called when the status for this commit has changed
func (s *StatusChange) CommitStatusChanged(commit string) {
s.mutex.Lock()
defer s.mutex.Unlock()
<|fim▁hole|>func (s *StatusChange) PopChangedPullRequests() []int {
s.mutex.Lock()
defer s.mutex.Unlock()
changedPullRequests := sets.NewInt()
for _, commit := range s.changed.List() {
if pullRequests, has := s.pullRequests[commit]; has {
changedPullRequests = changedPullRequests.Union(pullRequests)
}
}
s.changed = sets.NewString()
return changedPullRequests.List()
}<|fim▁end|> | s.changed.Insert(commit)
}
// PopChangedPullRequests returns the list of issues changed since last call |
<|file_name|>env_utils.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Various utility functions used by this plugin"""
import subprocess
from os import environ, devnull
from os.path import expanduser
from .constants import PLATFORM
from .window_utils import get_pref
class NodeNotFoundError(OSError):
def __init__(self, original_exception, node_path):
msg = "Node.js was not found in the default path"
OSError.__init__(self, msg + (": %s" % original_exception))
self.node_path = node_path
class NodeRuntimeError(RuntimeError):
def __init__(self, stdout, stderr):
msg = "Node.js encountered a runtime error"
RuntimeError.__init__(self, msg + (": %s\n%s" % (stderr, stdout)))
self.stdout = stdout
self.stderr = stderr
class NodeSyntaxError(RuntimeError):
def __init__(self, stdout, stderr):
msg = "Node.js encountered a runtime syntax error"
RuntimeError.__init__(self, msg + (": %s\n%s" % (stderr, stdout)))
self.stdout = stdout
self.stderr = stderr
def get_node_path():
"""Gets the node.js path specified in this plugin's settings file"""
node = get_pref("node_path").get(PLATFORM)
return expanduser(node)
def run_command(args):
"""Runs a command in a shell and returns the output"""
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"env": environ,
}
if PLATFORM == "windows":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_args["startupinfo"] = startupinfo
popen_args["stdin"] = open(devnull, 'wb')
stdout, stderr = subprocess.Popen(args, **popen_args).communicate()
if stderr:
if b"ExperimentalWarning" in stderr:
# Don't treat node experimental warnings as actual errors.
return stdout
elif b"SyntaxError" in stderr:
raise NodeSyntaxError(
stdout.decode('utf-8'), stderr.decode('utf-8'))
else:
raise NodeRuntimeError(
stdout.decode('utf-8'), stderr.decode('utf-8'))
return stdout
def run_node_command(args):
"""Runs a node command in a shell and returns the output"""
node_path = get_node_path()
try:<|fim▁hole|> except OSError as err:
if node_path in err.strerror or \
"No such file or directory" in err.strerror or \
"The system cannot find the file specified" in err.strerror:
raise NodeNotFoundError(err, node_path)
else:
raise err
return stdout<|fim▁end|> | stdout = run_command([node_path] + args) |
<|file_name|>bigFrustrationWorld.py<|end_file_name|><|fim▁begin|>dimensions(8,8)
wall((2,0),(2,4))
wall((2,4),(4,4))
wall((2,6),(6,6))
wall((6,6),(6,0))<|fim▁hole|>wall((6,2),(4,2))
initialRobotLoc(1.0, 1.0)<|fim▁end|> | |
<|file_name|>thompson.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
from collections import defaultdict
import numpy
class ThompsonAgent:
    def __init__(self, seed=None):
        # Per-arm counters of observed successes and failures.
        self._succeeds = defaultdict(int)
        self._fails = defaultdict(int)
        # Dedicated RNG instance so sampling is reproducible for a given seed.
        self._np_random = numpy.random.RandomState(seed)
def choose(self, arms, features=None):
return max(arms, key=lambda arm: self._score(arm))
def _score(self, arm):<|fim▁hole|>
def update(self, arm, reward, arms=None, features=None):
if reward > 0:
self._succeeds[arm] += 1
else:
self._fails[arms] += 1<|fim▁end|> | return self._np_random.beta(
self._succeeds[arm] + 0.5,
self._fails[arm] + 0.5) |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// library of matrix and vector functions
/// A 3-component (x, y, z) vector of `f32`s.
pub struct Vec3{
    pub x: f32,
    pub y: f32,
    pub z: f32
}
// associated functions of Vec3
impl Vec3{
// used for constructing Vec3 nicely
pub fn new(x:f32,y:f32,z:f32) -> Vec3 {
Vec3{x: x, y: y, z: z}
}<|fim▁hole|>
/// A 3x3 matrix, indexed as `values[row][col]` (the row-major
/// convention used by `MatXVec3`).
pub struct Mat3{
    pub values: [[f32;3];3]
}
impl Mat3{
    /// Constructs a zero-initialized 3x3 matrix.
    pub fn new_empty() -> Mat3 {
        // `[[0.0; 3]; 3]` expands to three rows of three zeros.
        Mat3 { values: [[0.0; 3]; 3] }
    }
}
pub fn dot(vec1:&Vec3,vec2:&Vec3) -> f32{
(vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z)
}
// matrix multiplication of a 3x3 matrix with a 3 dimensional vector (1x3 matrix)
// returns the result as a vec3
#[allow(non_snake_case)]
pub fn MatXVec3(mat: &Mat3,vec: &Vec3) -> Vec3{
let mut result = Vec3::new(0.0,0.0,0.0);
// construct vectors from the matrix
let mval = mat.values;
let mvec1 = Vec3::new(mval[0][0],mval[0][1],mval[0][2]);
let mvec2 = Vec3::new(mval[1][0],mval[1][1],mval[1][2]);
let mvec3 = Vec3::new(mval[2][0],mval[2][1],mval[2][2]);
// matrix multiplication is just a bunch of dot products
result.x = dot(&mvec1,vec);
result.y = dot(&mvec2,vec);
result.z = dot(&mvec3,vec);
// return resulting vector
result
}
// Matrix multiplication of a 3x3 matrix with a 3x3 matrix
// returns the result as a Mat3
#[allow(non_snake_case)]
pub fn MatXMat3 (mat1: &Mat3,mat2: &Mat3) -> Mat3{
let mut result = Mat3::new_empty();
// go by row
for i in 0..3{
// go by column
for j in 0..3{
let mut sum :f32 = 0.0;
// get the result of the row,column pair (i,j)
for k in 0..3{
sum = sum + (mat1.values[i][k] * mat2.values[k][j]);
}
result.values[j][i] = sum;
}
}
result
}<|fim▁end|> | } |
<|file_name|>configureStore.js<|end_file_name|><|fim▁begin|>import { createStore, applyMiddleware, compose } from 'redux'
import logger from 'redux-logger'
import thunkMiddleware from 'redux-thunk'
import rootReducer from './reducers'
const configureStore = () => {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
return createStore(rootReducer, /* preloadedState, */ composeEnhancers(
applyMiddleware(thunkMiddleware)
// applyMiddleware(logger)
));<|fim▁hole|><|fim▁end|> | }
export default configureStore |
<|file_name|>0004_auto_20170428_0228.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-28 02:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webcore', '0003_auto_20170427_1825'),
]
operations = [
migrations.RemoveField(
model_name='postad',
name='user',
),
migrations.DeleteModel(
name='PostAd',
),<|fim▁hole|><|fim▁end|> | ] |
<|file_name|>bounding_sphere_ball.rs<|end_file_name|><|fim▁begin|>use crate::bounding_volume::{BoundingSphere, HasBoundingVolume};
use crate::math::{Isometry, Point};
use crate::shape::Ball;
use na::RealField;
impl<N: RealField> HasBoundingVolume<N, BoundingSphere<N>> for Ball<N> {
#[inline]
fn bounding_volume(&self, m: &Isometry<N>) -> BoundingSphere<N> {
let bv: BoundingSphere<N> = self.local_bounding_volume();
bv.transform_by(m)
}
#[inline]
fn local_bounding_volume(&self) -> BoundingSphere<N> {<|fim▁hole|>}<|fim▁end|> | BoundingSphere::new(Point::origin(), self.radius)
} |
<|file_name|>pysix_moves.py<|end_file_name|><|fim▁begin|># copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# astroid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""Astroid hooks for six.moves."""
import sys
from textwrap import dedent
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
def six_moves_transform_py2():
return AstroidBuilder(MANAGER).string_build(dedent('''
import urllib as _urllib
import urllib2 as _urllib2
import urlparse as _urlparse
class Moves(object):
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
from StringIO import StringIO
from cStringIO import StringIO as cStringIO
from UserDict import UserDict
from UserList import UserList
from UserString import UserString
import __builtin__ as builtins
import thread as _thread
import dummy_thread as _dummy_thread
import ConfigParser as configparser
import copy_reg as copyreg
from itertools import (imap as map,
ifilter as filter,
ifilterfalse as filterfalse,
izip_longest as zip_longest,
izip as zip)
import htmlentitydefs as html_entities
import HTMLParser as html_parser<|fim▁hole|> import Cookie as http_cookies
import Queue as queue
import repr as reprlib
from pipes import quote as shlex_quote
import SocketServer as socketserver
import SimpleXMLRPCServer as xmlrpc_server
import xmlrpclib as xmlrpc_client
import _winreg as winreg
import robotparser as urllib_robotparser
input = raw_input
intern = intern
range = xrange
xrange = xrange
reduce = reduce
reload_module = reload
class UrllibParse(object):
ParseResult = _urlparse.ParseResult
SplitResult = _urlparse.SplitResult
parse_qs = _urlparse.parse_qs
parse_qsl = _urlparse.parse_qsl
urldefrag = _urlparse.urldefrag
urljoin = _urlparse.urljoin
urlparse = _urlparse.urlparse
urlsplit = _urlparse.urlsplit
urlunparse = _urlparse.urlunparse
urlunsplit = _urlparse.urlunsplit
quote = _urllib.quote
quote_plus = _urllib.quote_plus
unquote = _urllib.unquote
unquote_plus = _urllib.unquote_plus
urlencode = _urllib.urlencode
splitquery = _urllib.splitquery
splittag = _urllib.splittag
splituser = _urllib.splituser
uses_fragment = _urlparse.uses_fragment
uses_netloc = _urlparse.uses_netloc
uses_params = _urlparse.uses_params
uses_query = _urlparse.uses_query
uses_relative = _urlparse.uses_relative
class UrllibError(object):
URLError = _urllib2.URLError
HTTPError = _urllib2.HTTPError
ContentTooShortError = _urllib.ContentTooShortError
class DummyModule(object):
pass
class UrllibRequest(object):
urlopen = _urllib2.urlopen
install_opener = _urllib2.install_opener
build_opener = _urllib2.build_opener
pathname2url = _urllib.pathname2url
url2pathname = _urllib.url2pathname
getproxies = _urllib.getproxies
Request = _urllib2.Request
OpenerDirector = _urllib2.OpenerDirector
HTTPDefaultErrorHandler = _urllib2.HTTPDefaultErrorHandler
HTTPRedirectHandler = _urllib2.HTTPRedirectHandler
HTTPCookieProcessor = _urllib2.HTTPCookieProcessor
ProxyHandler = _urllib2.ProxyHandler
BaseHandler = _urllib2.BaseHandler
HTTPPasswordMgr = _urllib2.HTTPPasswordMgr
HTTPPasswordMgrWithDefaultRealm = _urllib2.HTTPPasswordMgrWithDefaultRealm
AbstractBasicAuthHandler = _urllib2.AbstractBasicAuthHandler
HTTPBasicAuthHandler = _urllib2.HTTPBasicAuthHandler
ProxyBasicAuthHandler = _urllib2.ProxyBasicAuthHandler
AbstractDigestAuthHandler = _urllib2.AbstractDigestAuthHandler
HTTPDigestAuthHandler = _urllib2.HTTPDigestAuthHandler
ProxyDigestAuthHandler = _urllib2.ProxyDigestAuthHandler
HTTPHandler = _urllib2.HTTPHandler
HTTPSHandler = _urllib2.HTTPSHandler
FileHandler = _urllib2.FileHandler
FTPHandler = _urllib2.FTPHandler
CacheFTPHandler = _urllib2.CacheFTPHandler
UnknownHandler = _urllib2.UnknownHandler
HTTPErrorProcessor = _urllib2.HTTPErrorProcessor
urlretrieve = _urllib.urlretrieve
urlcleanup = _urllib.urlcleanup
proxy_bypass = _urllib.proxy_bypass
urllib_parse = UrllibParse()
urllib_error = UrllibError()
urllib = DummyModule()
urllib.request = UrllibRequest()
urllib.parse = UrllibParse()
urllib.error = UrllibError()
moves = Moves()
'''))
def six_moves_transform_py3():
return AstroidBuilder(MANAGER).string_build(dedent('''
class Moves(object):
import _io
cStringIO = _io.StringIO
filter = filter
from itertools import filterfalse
input = input
from sys import intern
map = map
range = range
from imp import reload as reload_module
from functools import reduce
from shlex import quote as shlex_quote
from io import StringIO
from collections import UserDict, UserList, UserString
xrange = range
zip = zip
from itertools import zip_longest
import builtins
import configparser
import copyreg
import _dummy_thread
import http.cookiejar as http_cookiejar
import http.cookies as http_cookies
import html.entities as html_entities
import html.parser as html_parser
import http.client as http_client
import http.server
BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server
import pickle as cPickle
import queue
import reprlib
import socketserver
import _thread
import winreg
import xmlrpc.server as xmlrpc_server
import xmlrpc.client as xmlrpc_client
import urllib.robotparser as urllib_robotparser
import email.mime.multipart as email_mime_multipart
import email.mime.nonmultipart as email_mime_nonmultipart
import email.mime.text as email_mime_text
import email.mime.base as email_mime_base
import urllib.parse as urllib_parse
import urllib.error as urllib_error
import tkinter
import tkinter.dialog as tkinter_dialog
import tkinter.filedialog as tkinter_filedialog
import tkinter.scrolledtext as tkinter_scrolledtext
import tkinter.simpledialog as tkinder_simpledialog
import tkinter.tix as tkinter_tix
import tkinter.ttk as tkinter_ttk
import tkinter.constants as tkinter_constants
import tkinter.dnd as tkinter_dnd
import tkinter.colorchooser as tkinter_colorchooser
import tkinter.commondialog as tkinter_commondialog
import tkinter.filedialog as tkinter_tkfiledialog
import tkinter.font as tkinter_font
import tkinter.messagebox as tkinter_messagebox
import urllib.request
import urllib.robotparser as urllib_robotparser
import urllib.parse as urllib_parse
import urllib.error as urllib_error
moves = Moves()
'''))
if sys.version_info[0] == 2:
TRANSFORM = six_moves_transform_py2
else:
TRANSFORM = six_moves_transform_py3
register_module_extender(MANAGER, 'six', TRANSFORM)<|fim▁end|> | import httplib as http_client
import cookielib as http_cookiejar |
<|file_name|>AngularFire.js<|end_file_name|><|fim▁begin|>import Firebase from 'firebase/firebase';
export class AngularFire {
ref: Firebase;
constructor(ref: Firebase) {
this.ref = ref;
}
asArray() {
return new FirebaseArray(this.ref);
}
}
/*
FirebaseArray
*/
export class FirebaseArray {
ref: Firebase;
error: any;
list: Array;
constructor(ref: Firebase) {
this.ref = ref;
this.list = [];
// listen for changes at the Firebase instance
this.ref.on('child_added', this.created.bind(this), this.error);
this.ref.on('child_moved', this.moved.bind(this), this.error);
this.ref.on('child_changed', this.updated.bind(this), this.error);
this.ref.on('child_removed', this.removed.bind(this), this.error);
// determine when initial load is completed
// ref.once('value', function() { resolve(null); }, resolve);
}<|fim▁hole|>
getItem(recOrIndex: any) {
var item = recOrIndex;
if(typeof(recOrIndex) === "number") {
item = this.getRecord(recOrIndex);
}
return item;
}
getChild(recOrIndex: any) {
var item = this.getItem(recOrIndex);
return this.ref.child(item._key);
}
add(rec: any) {
this.ref.push(rec);
}
remove(recOrIndex: any) {
this.getChild(recOrIndex).remove();
}
save(recOrIndex: any) {
var item = this.getItem(recOrIndex);
this.getChild(recOrIndex).update(item);
}
keyify(snap) {
var item = snap.val();
item._key = snap.key();
return item;
}
created(snap) {
debugger;
var addedValue = this.keyify(snap);
this.list.push(addedValue);
}
moved(snap) {
var key = snap.key();
this.spliceOut(key);
}
updated(snap) {
var key = snap.key();
var indexToUpdate = this.indexFor(key);
this.list[indexToUpdate] = this.keyify(snap);
}
removed(snap) {
var key = snap.key();
this.spliceOut(key);
}
bulkUpdate(items) {
this.ref.update(items);
}
spliceOut(key) {
var i = this.indexFor(key);
if( i > -1 ) {
return this.list.splice(i, 1)[0];
}
return null;
}
indexFor(key) {
var record = this.getRecord(key);
return this.list.indexOf(record);
}
getRecord(key) {
return this.list.find((item) => key === item._key);
}
}<|fim▁end|> | |
<|file_name|>position.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%! from data import to_rust_ident %>
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import ALL_SIZES, PHYSICAL_SIDES, LOGICAL_SIDES %>
<% data.new_style_struct("Position", inherited=False) %>
// "top" / "left" / "bottom" / "right"
% for side in PHYSICAL_SIDES:
${helpers.predefined_type(side, "LengthOrPercentageOrAuto",
"computed::LengthOrPercentageOrAuto::Auto",
spec="https://www.w3.org/TR/CSS2/visuren.html#propdef-%s" % side,
animation_value_type="ComputedValue",
allow_quirks=True)}
% endfor
// offset-* logical properties, map to "top" / "left" / "bottom" / "right"
% for side in LOGICAL_SIDES:
${helpers.predefined_type("offset-%s" % side, "LengthOrPercentageOrAuto",
"computed::LengthOrPercentageOrAuto::Auto",
spec="https://drafts.csswg.org/css-logical-props/#propdef-offset-%s" % side,
animation_value_type="ComputedValue", logical=True)}
% endfor
${helpers.predefined_type("z-index", "IntegerOrAuto",
"Either::Second(Auto)",
spec="https://www.w3.org/TR/CSS2/visuren.html#z-index",
flags="CREATES_STACKING_CONTEXT",
animation_value_type="ComputedValue")}
// CSS Flexible Box Layout Module Level 1
// http://www.w3.org/TR/css3-flexbox/
// Flex container properties
${helpers.single_keyword("flex-direction", "row row-reverse column column-reverse",
spec="https://drafts.csswg.org/css-flexbox/#flex-direction-property",
extra_prefixes="webkit", animation_value_type="none")}
${helpers.single_keyword("flex-wrap", "nowrap wrap wrap-reverse",
spec="https://drafts.csswg.org/css-flexbox/#flex-wrap-property",
extra_prefixes="webkit", animation_value_type="none")}
% if product == "servo":
// FIXME: Update Servo to support the same Syntax as Gecko.
${helpers.single_keyword("justify-content", "flex-start stretch flex-end center space-between space-around",
extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-align/#propdef-justify-content",
animation_value_type="none")}
% else:
${helpers.predefined_type(name="justify-content",
type="AlignJustifyContent",
initial_value="specified::AlignJustifyContent::normal()",
spec="https://drafts.csswg.org/css-align/#propdef-justify-content",
extra_prefixes="webkit",
animation_value_type="none")}
% endif
% if product == "servo":
// FIXME: Update Servo to support the same Syntax as Gecko.
${helpers.single_keyword("align-content", "stretch flex-start flex-end center space-between space-around",
extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-align/#propdef-align-content",
animation_value_type="none")}
${helpers.single_keyword("align-items",
"stretch flex-start flex-end center baseline",
extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-flexbox/#align-items-property",
animation_value_type="discrete")}
% else:
${helpers.predefined_type(name="align-content",
type="AlignJustifyContent",
initial_value="specified::AlignJustifyContent::normal()",
spec="https://drafts.csswg.org/css-align/#propdef-align-content",
extra_prefixes="webkit",
animation_value_type="none")}
${helpers.predefined_type(name="align-items",
type="AlignItems",
initial_value="specified::AlignItems::normal()",
spec="https://drafts.csswg.org/css-align/#propdef-align-items",
extra_prefixes="webkit",
animation_value_type="discrete")}
${helpers.predefined_type(name="justify-items",
type="JustifyItems",
initial_value="specified::JustifyItems::auto()",
spec="https://drafts.csswg.org/css-align/#propdef-justify-items",
animation_value_type="none")}
% endif
// Flex item properties
${helpers.predefined_type("flex-grow", "Number",
"0.0", "parse_non_negative",
spec="https://drafts.csswg.org/css-flexbox/#flex-grow-property",
extra_prefixes="webkit",
animation_value_type="ComputedValue")}
${helpers.predefined_type("flex-shrink", "Number",
"1.0", "parse_non_negative",
spec="https://drafts.csswg.org/css-flexbox/#flex-shrink-property",
extra_prefixes="webkit",
animation_value_type="ComputedValue")}
// https://drafts.csswg.org/css-align/#align-self-property
% if product == "servo":
// FIXME: Update Servo to support the same syntax as Gecko.
${helpers.single_keyword("align-self", "auto stretch flex-start flex-end center baseline",
need_clone=True,
extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-flexbox/#propdef-align-self",
animation_value_type="none")}
% else:
${helpers.predefined_type(name="align-self",
type="AlignJustifySelf",
initial_value="specified::AlignJustifySelf::auto()",
spec="https://drafts.csswg.org/css-align/#align-self-property",
extra_prefixes="webkit",
animation_value_type="none")}
${helpers.predefined_type(name="justify-self",
type="AlignJustifySelf",
initial_value="specified::AlignJustifySelf::auto()",
spec="https://drafts.csswg.org/css-align/#justify-self-property",
animation_value_type="none")}
% endif
// https://drafts.csswg.org/css-flexbox/#propdef-order
${helpers.predefined_type("order", "Integer", "0",
extra_prefixes="webkit",
animation_value_type="ComputedValue",
spec="https://drafts.csswg.org/css-flexbox/#order-property")}
// FIXME: Gecko doesn't support content value yet.
// FIXME: This property should be animatable.
${helpers.predefined_type("flex-basis",
"LengthOrPercentageOrAuto" if product == "gecko" else
"LengthOrPercentageOrAutoOrContent",
"computed::LengthOrPercentageOrAuto::Auto" if product == "gecko" else
"computed::LengthOrPercentageOrAutoOrContent::Auto",
"parse_non_negative",
spec="https://drafts.csswg.org/css-flexbox/#flex-basis-property",
extra_prefixes="webkit",
animation_value_type="ComputedValue" if product == "gecko" else "none")}
% for (size, logical) in ALL_SIZES:
<%
spec = "https://drafts.csswg.org/css-box/#propdef-%s"
if logical:
spec = "https://drafts.csswg.org/css-logical-props/#propdef-%s"
%>
// width, height, block-size, inline-size
${helpers.predefined_type("%s" % size,
"LengthOrPercentageOrAuto",
"computed::LengthOrPercentageOrAuto::Auto",
"parse_non_negative",
spec=spec % size,
allow_quirks=not logical,
animation_value_type="ComputedValue", logical = logical)}
% if product == "gecko":
% for min_max in ["min", "max"]:
<%
MinMax = min_max.title()
initial = "None" if "max" == min_max else "Auto"
%>
// min-width, min-height, min-block-size, min-inline-size,
// max-width, max-height, max-block-size, max-inline-size
//
// Keyword values are only valid in the inline direction; they must
// be replaced with auto/none in block.
<%helpers:longhand name="${min_max}-${size}" spec="${spec % ('%s-%s' % (min_max, size))}"
animation_value_type="ComputedValue"
logical="${logical}" predefined_type="${MinMax}Length">
use std::fmt;
use style_traits::ToCss;
use values::HasViewportPercentage;
use values::specified::{AllowQuirks, ${MinMax}Length};
impl HasViewportPercentage for SpecifiedValue {
fn has_viewport_percentage(&self) -> bool {<|fim▁hole|> }
}
pub mod computed_value {
pub type T = ::values::computed::${MinMax}Length;
}
#[derive(PartialEq, Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(${MinMax}Length);
#[inline]
pub fn get_initial_value() -> computed_value::T {
use values::computed::${MinMax}Length;
${MinMax}Length::${initial}
}
fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
% if logical:
let ret = ${MinMax}Length::parse(context, input);
% else:
let ret = ${MinMax}Length::parse_quirky(context, input, AllowQuirks::Yes);
% endif
// Keyword values don't make sense in the block direction; don't parse them
% if "block" in size:
if let Ok(${MinMax}Length::ExtremumLength(..)) = ret {
return Err(())
}
% endif
ret.map(SpecifiedValue)
}
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.0.to_css(dest)
}
}
impl ToComputedValue for SpecifiedValue {
type ComputedValue = computed_value::T;
#[inline]
fn to_computed_value(&self, context: &Context) -> computed_value::T {
use values::computed::${MinMax}Length;
let computed = self.0.to_computed_value(context);
// filter out keyword values in the block direction
% if logical:
% if "block" in size:
if let ${MinMax}Length::ExtremumLength(..) = computed {
return get_initial_value()
}
% endif
% else:
if let ${MinMax}Length::ExtremumLength(..) = computed {
<% is_height = "true" if "height" in size else "false" %>
if ${is_height} != context.style().writing_mode.is_vertical() {
return get_initial_value()
}
}
% endif
computed
}
#[inline]
fn from_computed_value(computed: &computed_value::T) -> Self {
SpecifiedValue(ToComputedValue::from_computed_value(computed))
}
}
</%helpers:longhand>
% endfor
% else:
// servo versions (no keyword support)
${helpers.predefined_type("min-%s" % size,
"LengthOrPercentage",
"computed::LengthOrPercentage::Length(Au(0))",
"parse_non_negative",
spec=spec % ("min-%s" % size),
animation_value_type="ComputedValue",
logical=logical,
allow_quirks=not logical)}
${helpers.predefined_type("max-%s" % size,
"LengthOrPercentageOrNone",
"computed::LengthOrPercentageOrNone::None",
"parse_non_negative",
spec=spec % ("min-%s" % size),
animation_value_type="ComputedValue",
logical=logical,
allow_quirks=not logical)}
% endif
% endfor
${helpers.single_keyword("box-sizing",
"content-box border-box",
extra_prefixes="moz webkit",
spec="https://drafts.csswg.org/css-ui/#propdef-box-sizing",
animation_value_type="none")}
${helpers.single_keyword("object-fit", "fill contain cover none scale-down",
products="gecko", animation_value_type="none",
spec="https://drafts.csswg.org/css-images/#propdef-object-fit")}
${helpers.predefined_type("object-position",
"Position",
"computed::Position::zero()",
products="gecko",
boxed="True",
spec="https://drafts.csswg.org/css-images-3/#the-object-position",
animation_value_type="ComputedValue")}
% for kind in ["row", "column"]:
${helpers.predefined_type("grid-%s-gap" % kind,
"LengthOrPercentage",
"computed::LengthOrPercentage::Length(Au(0))",
spec="https://drafts.csswg.org/css-grid/#propdef-grid-%s-gap" % kind,
animation_value_type="ComputedValue",
products="gecko")}
% for range in ["start", "end"]:
${helpers.predefined_type("grid-%s-%s" % (kind, range),
"GridLine",
"Default::default()",
animation_value_type="none",
spec="https://drafts.csswg.org/css-grid/#propdef-grid-%s-%s" % (kind, range),
products="gecko",
boxed=True)}
% endfor
// NOTE: According to the spec, this should handle multiple values of `<track-size>`,
// but gecko supports only a single value
${helpers.predefined_type("grid-auto-%ss" % kind,
"TrackSize",
"Default::default()",
animation_value_type="none",
spec="https://drafts.csswg.org/css-grid/#propdef-grid-auto-%ss" % kind,
products="gecko",
boxed=True)}
% endfor
<%helpers:longhand name="grid-auto-flow"
spec="https://drafts.csswg.org/css-grid/#propdef-grid-auto-flow"
products="gecko"
animation_value_type="none">
use std::fmt;
use style_traits::ToCss;
use values::HasViewportPercentage;
use values::computed::ComputedValueAsSpecified;
pub type SpecifiedValue = computed_value::T;
pub mod computed_value {
#[derive(PartialEq, Clone, Eq, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AutoFlow {
Row,
Column,
}
#[derive(PartialEq, Clone, Eq, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct T {
pub autoflow: AutoFlow,
pub dense: bool,
}
}
no_viewport_percentage!(SpecifiedValue);
impl ComputedValueAsSpecified for SpecifiedValue {}
impl ToCss for computed_value::T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str(match self.autoflow {
computed_value::AutoFlow::Column => "column",
computed_value::AutoFlow::Row => "row"
})?;
if self.dense { dest.write_str(" dense")?; }
Ok(())
}
}
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T {
autoflow: computed_value::AutoFlow::Row,
dense: false
}
}
/// [ row | column ] || dense
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
use self::computed_value::AutoFlow;
let mut value = None;
let mut dense = false;
while !input.is_exhausted() {
match_ignore_ascii_case! { &input.expect_ident()?,
"row" if value.is_none() => {
value = Some(AutoFlow::Row);
continue
},
"column" if value.is_none() => {
value = Some(AutoFlow::Column);
continue
},
"dense" if !dense => {
dense = true;
continue
},
_ => return Err(())
}
}
if value.is_some() || dense {
Ok(computed_value::T {
autoflow: value.unwrap_or(AutoFlow::Row),
dense: dense,
})
} else {
Err(())
}
}
</%helpers:longhand><|fim▁end|> | self.0.has_viewport_percentage() |
<|file_name|>page.js<|end_file_name|><|fim▁begin|>var http = require('supertest');
var shared = require('../shared');
var server = require('../app');
var app;
describe('v2 user#page', function () {
before(function (done) {
server(function (data) {
app = data;
done();
});
});
it('should show user index', function (done) {
var req = http(app);<|fim▁hole|> '仍然很懒',
'最近创建的话题',
'无话题',
'最近参与的话题',
'无话题'
];
texts.forEach(function (text) {
res.text.should.containEql(text);
});
done(err);
});
});
});<|fim▁end|> | req.get('/user/page/' + shared.user.username)
.expect(200, function (err, res) {
var texts = [
'注册时间', |
<|file_name|>sst_file_writer.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include "rocksdb/sst_file_writer.h"
#include <vector>
#include "db/dbformat.h"
#include "rocksdb/table.h"
#include "table/block_based_table_builder.h"
#include "table/sst_file_writer_collectors.h"
#include "util/file_reader_writer.h"
#include "util/sync_point.h"
namespace rocksdb {
const std::string ExternalSstFilePropertyNames::kVersion =
"rocksdb.external_sst_file.version";
const std::string ExternalSstFilePropertyNames::kGlobalSeqno =
"rocksdb.external_sst_file.global_seqno";
#ifndef ROCKSDB_LITE
const size_t kFadviseTrigger = 1024 * 1024; // 1MB
struct SstFileWriter::Rep {
Rep(const EnvOptions& _env_options, const Options& options,
const Comparator* _user_comparator, ColumnFamilyHandle* _cfh,
bool _invalidate_page_cache)
: env_options(_env_options),
ioptions(options),
mutable_cf_options(options),
internal_comparator(_user_comparator),
cfh(_cfh),
invalidate_page_cache(_invalidate_page_cache),
last_fadvise_size(0) {}
std::unique_ptr<WritableFileWriter> file_writer;
std::unique_ptr<TableBuilder> builder;
EnvOptions env_options;
ImmutableCFOptions ioptions;
MutableCFOptions mutable_cf_options;
InternalKeyComparator internal_comparator;
ExternalSstFileInfo file_info;
InternalKey ikey;
std::string column_family_name;
ColumnFamilyHandle* cfh;
// If true, We will give the OS a hint that this file pages is not needed
// everytime we write 1MB to the file
bool invalidate_page_cache;
// the size of the file during the last time we called Fadvise to remove
// cached pages from page cache.
uint64_t last_fadvise_size;
};
SstFileWriter::SstFileWriter(const EnvOptions& env_options,
const Options& options,
const Comparator* user_comparator,
ColumnFamilyHandle* column_family,
bool invalidate_page_cache)
: rep_(new Rep(env_options, options, user_comparator, column_family,
invalidate_page_cache)) {
rep_->file_info.file_size = 0;
}
SstFileWriter::~SstFileWriter() {
if (rep_->builder) {
// User did not call Finish() or Finish() failed, we need to
// abandon the builder.
rep_->builder->Abandon();
}
delete rep_;
}
Status SstFileWriter::Open(const std::string& file_path) {
Rep* r = rep_;
Status s;
std::unique_ptr<WritableFile> sst_file;
s = r->ioptions.env->NewWritableFile(file_path, &sst_file, r->env_options);
if (!s.ok()) {
return s;
}
CompressionType compression_type;
if (r->ioptions.bottommost_compression != kDisableCompressionOption) {
compression_type = r->ioptions.bottommost_compression;
} else if (!r->ioptions.compression_per_level.empty()) {
// Use the compression of the last level if we have per level compression
compression_type = *(r->ioptions.compression_per_level.rbegin());
} else {
compression_type = r->mutable_cf_options.compression;
}
std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
int_tbl_prop_collector_factories;
// SstFileWriter properties collector to add SstFileWriter version.
int_tbl_prop_collector_factories.emplace_back(
new SstFileWriterPropertiesCollectorFactory(2 /* version */,
0 /* global_seqno*/));
// User collector factories
auto user_collector_factories =
r->ioptions.table_properties_collector_factories;
for (size_t i = 0; i < user_collector_factories.size(); i++) {
int_tbl_prop_collector_factories.emplace_back(
new UserKeyTablePropertiesCollectorFactory(
user_collector_factories[i]));
}
int unknown_level = -1;
uint32_t cf_id;
if (r->cfh != nullptr) {
// user explicitly specified that this file will be ingested into cfh,
// we can persist this information in the file.
cf_id = r->cfh->GetID();
r->column_family_name = r->cfh->GetName();
} else {
r->column_family_name = "";
cf_id = TablePropertiesCollectorFactory::Context::kUnknownColumnFamily;
}
TableBuilderOptions table_builder_options(
r->ioptions, r->internal_comparator, &int_tbl_prop_collector_factories,
compression_type, r->ioptions.compression_opts,
nullptr /* compression_dict */, false /* skip_filters */,
r->column_family_name, unknown_level);
r->file_writer.reset(
new WritableFileWriter(std::move(sst_file), r->env_options));
// TODO(tec) : If table_factory is using compressed block cache, we will
// be adding the external sst file blocks into it, which is wasteful.
r->builder.reset(r->ioptions.table_factory->NewTableBuilder(
table_builder_options, cf_id, r->file_writer.get()));
r->file_info.file_path = file_path;
r->file_info.file_size = 0;
r->file_info.num_entries = 0;
r->file_info.sequence_number = 0;
r->file_info.version = 2;
return s;
}
Status SstFileWriter::Add(const Slice& user_key, const Slice& value) {
Rep* r = rep_;
if (!r->builder) {
return Status::InvalidArgument("File is not opened");
}
if (r->file_info.num_entries == 0) {
r->file_info.smallest_key.assign(user_key.data(), user_key.size());
} else {
if (r->internal_comparator.user_comparator()->Compare(
user_key, r->file_info.largest_key) <= 0) {
// Make sure that keys are added in order
return Status::InvalidArgument("Keys must be added in order");
}
}
// TODO(tec) : For external SST files we could omit the seqno and type.
r->ikey.Set(user_key, 0 /* Sequence Number */,
ValueType::kTypeValue /* Put */);
r->builder->Add(r->ikey.Encode(), value);
// update file info
r->file_info.num_entries++;
r->file_info.largest_key.assign(user_key.data(), user_key.size());
r->file_info.file_size = r->builder->FileSize();
InvalidatePageCache(false /* closing */);
return Status::OK();
}
Status SstFileWriter::Finish(ExternalSstFileInfo* file_info) {
Rep* r = rep_;
if (!r->builder) {
return Status::InvalidArgument("File is not opened");
}
if (r->file_info.num_entries == 0) {
return Status::InvalidArgument("Cannot create sst file with no entries");
}
Status s = r->builder->Finish();
r->file_info.file_size = r->builder->FileSize();
if (s.ok()) {
s = r->file_writer->Sync(r->ioptions.use_fsync);
InvalidatePageCache(true /* closing */);
if (s.ok()) {
s = r->file_writer->Close();
}
}
if (!s.ok()) {
r->ioptions.env->DeleteFile(r->file_info.file_path);
}
if (file_info != nullptr) {
*file_info = r->file_info;
}<|fim▁hole|> r->builder.reset();
return s;
}
void SstFileWriter::InvalidatePageCache(bool closing) {
Rep* r = rep_;
if (r->invalidate_page_cache == false) {
// Fadvise disabled
return;
}
uint64_t bytes_since_last_fadvise =
r->builder->FileSize() - r->last_fadvise_size;
if (bytes_since_last_fadvise > kFadviseTrigger || closing) {
TEST_SYNC_POINT_CALLBACK("SstFileWriter::InvalidatePageCache",
&(bytes_since_last_fadvise));
// Tell the OS that we dont need this file in page cache
r->file_writer->InvalidateCache(0, 0);
r->last_fadvise_size = r->builder->FileSize();
}
}
uint64_t SstFileWriter::FileSize() {
return rep_->file_info.file_size;
}
#endif // !ROCKSDB_LITE
} // namespace rocksdb<|fim▁end|> | |
<|file_name|>sale.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Vauxoo C.A.
# Planified by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from openerp.osv import osv, fields
import openerp.tools as tools
from openerp.tools.translate import _
from tools import config
import openerp.netsvc as netsvc
import decimal_precision as dp
class sale_order_line(osv.Model):
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False,
packaging=False, fiscal_position=False, flag=False,
context=None):
'''
Overridden the method of product line sales, to replace the unit price calculation and selection of the cost structure
that handles the product, and later to filter the prices for the product selected
'''
if context is None:
context = {}
price_obj = self.pool.get('product.pricelist')
product_obj = self.pool.get('product.product')
product_brw = product and product_obj.browse(
cr, uid, product, context=context)
res = super(
sale_order_line, self).product_id_change(cr, uid, ids, pricelist,
product, qty=qty,
uom=uom, qty_uos=qty_uos,
uos=uos, name=name,
partner_id=partner_id,
lang=lang, update_tax=update_tax,
date_order=date_order,
packaging=packaging, fiscal_position=fiscal_position,
flag=flag, context=context)
res.get('value', False) and product_brw and\
product_brw.uom_id and\
res.get('value', False).update({'product_uom': product_brw.uom_id.id})
if context.get('price_change', False):
price = price_obj.price_get(cr, uid, [context.get(
'price_change', False)], product, qty, context=context)
res.get('value', {}).update({'price_unit': round(
price.get(context.get('price_change', False)), 2)})
res.get('value', False) and\
product_brw and product_brw.categ_id and\
res.get('value', False).update({'categ_id': product_brw.categ_id.id})
res.get('value', False) and 'price_unit' in res.get(
'value', False) and res['value'].pop('price_unit')
return res
def price_unit(self, cr, uid, ids, price_list, product_id, qty,
context=None):
'''
Calculating the amount of model _compute_price method product.uom
'''
if context is None:
context = {}
res = {'value': {}}
if price_list and product_id and qty:
price_obj = self.pool.get('product.pricelist')
price = price_obj.price_get(cr, uid, [price_list], product_id, qty,
context=context)
res['value'].update({'price_unit': round(
price.get(price_list), 2)})
return res
#
_inherit = 'sale.order.line'
_columns = {
'product_id': fields.many2one('product.product', 'Product',
domain=[('sale_ok', '=', True)], change_default=True),
'price_list_ids': fields.many2one('product.pricelist', 'Select Price'),
'cost_structure_id': fields.many2one('cost.structure',
'Cost Structure'),
'categ_id': fields.many2one('product.category', 'Category',
help='Category by product selected'),
}
class sale_order(osv.Model):
_inherit = 'sale.order'
def _price_status(self, cr, uid, ids, field_name, arg, context=None):
'''
Check That the products sold are not sold at a price less than or greater than the price rago allocated in the product.
Failure to comply with this will print a message informing the product that is not complying with this requirement
'''
if context is None:
context = {}
if not ids:
return {}
res = {}
product = []
context.update({'query': False})
pricelist_obj = self.pool.get('product.pricelist')
for order in len(ids) == 1 and\
self.browse(cr, uid, ids, context=context) or []:
for line in order.order_line:
price_compute = line.product_id and [pricelist_obj.price_get(
cr, uid, [i.price_list_id and i.price_list_id.id],
line.product_id.id, line.product_uom_qty,
context=context).get(i.price_list_id.id)\
for i in line.product_id.price_list_item_ids or\
line.product_id.category_item_ids]
property_cost_structure = line and line.product_id and\
line.product_id.property_cost_structure and\
line.product_id.property_cost_structure.id or False
if property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) <\
round(i, 2)]):
product.append(
u'Intenta vender el producto %s a un precio menor al\
estimado para su venta' % line.product_id.name)
res[order.id] = {'status_bool': True}
elif property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) > round(i, 2)]):
product.append(
u'Intenta vender el producto %s a un precio mayor al\
estimado para su venta' % line.product_id.name)
res[order.id] = {'status_bool': True}
elif not property_cost_structure:
product.append(
u'El producto %s no tiene una estructura de costo'\
% line.product_id.name)
res[order.id] = {'status_bool': True}
if product:
res[order.id] = '\n'.join(product)
else:
res[order.id] = {'status_bool': False}
product = []
res[order.id] = '\n'.join(product)
return res
_columns = {
'status_price': fields.function(_price_status, method=True,
type="text", store=True, string='Status Price'),
'status_bool': fields.function(_price_status, method=True,
type="boolean", string='Status Price'),
}
_defaults = {
'status_bool': False
}
def price_unit_confirm(self, cr, uid, ids, context=None):
'''
Workflow condition does not allow the sale process if at least one product is being sold in the price range set out in its cost structure
'''
if context is None:
context = {}
product = []
context.update({'query': False})
sale_brw = self.browse(cr, uid, ids and ids[0], context=context)
pricelist_obj = self.pool.get('product.pricelist')
for line in len(ids) == 1 and sale_brw.order_line or []:
property_cost_structure = line and line.product_id and\
line.product_id.property_cost_structure and\
line.product_id.property_cost_structure.id or False
price_compute = line.product_id and [pricelist_obj.price_get(
cr, uid, [i.price_list_id and i.price_list_id.id],
line.product_id.id, line.product_uom_qty,
context=context).get(i.price_list_id.id)\
for i in line.product_id.price_list_item_ids or\
line.product_id.category_item_ids]
if property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) < round(i, 2)]):
product.append(<|fim▁hole|> elif property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) > round(i, 2)]):
product.append(
u'Intenta vender el producto %s a un precio mayor\
al estimado para su venta' % line.product_id.name)
elif not property_cost_structure:
product.append(
u'The product %s has not a cost structure' %\
line.product_id.name)
if len(product) > 0:
raise osv.except_osv(_('Error'), _('\n'.join(product)))
return True<|fim▁end|> | u'Intenta vender el producto %s a un precio menor\
al estimado para su venta' % line.product_id.name)
|
<|file_name|>transaction.ts<|end_file_name|><|fim▁begin|>// okay, this is where we put our transaction logic
// the actual logic behind it is fairly simple
// okay this how I think transactions work. So to input a transation you sign it with your private key
// which generates the string and can be verified from that. and it's given a number or amount
// and then the message has a shas
// so the output goes to the public key of the recipient which can only be unlocked with their
// private key, and an amount
// the input is the id of the recipient - their public key, and a idnex of a transactoin where they
// received the keys in the first place and a signature proving it's actually the thing
import * as CryptoJS from 'crypto-js';
class TxOut {
public address: string;
public amount: number;
constructor(address: string, amount: number) {
this.address = address;
this.amount = amount;
}
}
class TxIn {
public txOutId: string;
public txOutIndex: number;
public signature: string;
constructor(txoutid: string, txoutindex: number, signature: string) {
this.txOutId = txoutid;
this.txOutIndex = txoutindex;
this.signature = signature;
}
}
// a transactino consists of a transaction id and a list of public inputs and outputs
// which I assume are parrallel arrays
class Transaction {
public id: string;
public txIns: TxIn[];
public txOuts: TxOut[];
constructor(id: string, txIns: TxIn[], txOuts: TxOut[]) {
this.id = id;
this.txIns = txIns;
this.txOuts = txOuts;
}
}
class UnspentTxOut {
public readonly txOutId: string;
public readonly txOutIndex: number;
public readonly address: string;
public readonly amount: number;
constructor(txOutId: string, txOutIndex: number, address: string, amount: number) {
this.txOutId = txOutId;
this.txOutIndex = txOutIndex;
this.address = address;
this.amount = amount;
}
}
// this is a list which holds all unspent outputs inthe blockchain
// so basically everyone's balances is entirely public, as expected
// wecould perhaps hash it with the private key or something if we want
// to make it private and to be honest it probably does that realistically!
// although it's only a fairly small level of privacy really as once found once
// the entire thing will collapse, and perhaps the key can be recovered!
var unspentTxOuts: UnspentTxOut[] = [];
// the transaction id is calculated forom a hash of all the contents but not the signatures
// of the txids as they will be added onto later in the transation
const getTransactionId = function(transaction: Transaction): string {
const txInContent: string = transaction.txIns
.map(function(txIn: TxIn) {
return txIn.txOutId + txIn.txOutIndex;
})
.reduce(function(a,b) {
return a + b;
},"");
const txOutContent: string = transaction.txOuts
.map(function(txOut: TxOut) {
return txOut.address + txOut.amount;
})
.reduce(function(a, b) {
return a + b;
},"");
return CryptoJS.SHA256(txInContent + txOutContent).toString();
}
// we also need a transation signature above the transactoin id
// this will just include the hash of the transactino in the first place
// we only really sign the hash as if the contents is changed, so must the hash be
// making it invalid
// i.e. its' really difficult to figure uot both a signature and a hash of it to make it work
// as the problem is basically that of finding the hash collision, which is difficult!
const signTxIn = function(transaction: Transaction, txInIndex: number,
privateKey: string, aUnspentTxOuts: UnspentTxOut[]): string {
const txIn: TxIn = transaction.txIns[txInIndex];
const dataToSign = transaction.id;<|fim▁hole|> const referencedAddress = referencedUnspentTxOut.address;
const key = ec.keyFromPrivate(privateKey, 'hex');
const signature: string = toHexString(key.sign(dataToSign).toDER());
return signature;
}
// what's going on with the unspents? well, apprenly a transactoin must refer to an unspenct transaction
// output so that's our balance as such really, so it's just a list of things which can be updated
// fromthe current blockchain. let's write that
// so, every time a new block is added to the chain, we need to udate our list of unspent
// transactoin outputs since we're spending things and shuffling it around
// so first we need to get the transactoins, then see what are consumed and the update the resulting
// we also now need to lay out our transaction validatoin rules, which isn't so bad or difficult
// or horrendous generally, but it could be
const isValidTransactionStructure = function(transaction:Transaction) {
if (typeof transaction.id !== 'string') {
console.log('invalid transaction id');
return false;
}
if (typeof transaction.txIns !==TxIn[]) {
console.log('invalid transaction inputs');
return false;
}
if (typeof transaction.txOuts !== TxOut[]) {
console.log('invalid transaction outputs');
return false;
}
if (!transaction.txIns
.map(isValidTxInStructure)
.reduce((a, b) => (a && b), true)) {
return false;
}
//we also need the transaction id to be valid
// we also need the signatures to be valid
}
// and the general for multiple, using functional programming style instead of proper standard loop
// which makes way more sense, but probably not to mathematicians
const isValidTransactionsStructure = (transactions: Transaction[]): boolean => {
return transactions
.map(isValidTransactionStructure)
.reduce((a, b) => (a && b), true);
};
// we need to be ale to sign input transactions which couldcause issues!
const hasDuplicates = function(txIns: TxIn[]): boolean {
const groups = _.countBy(txIns, function(txIn) {
return txIn.txOutId + txIn.txOutId:
});
return groups.map(function(value, key) {
if(value > 1) {
console.log('duplicate txIn: ' + key);
return true;
} else {
return false;
}
})
.includes(true);
}
// this gets a transaction given a string orsomething. surely we need the thing
// or it I honestly don't know, shouldn't thie be querying the blockchain???
// who even knows?
const getCoinbaseTransaction = function(address: string, blockIndex: number): Transaction {
const t = new Transaction();
const txIn: TxIn = new TxIn();
txIn.signature = "";
txIn.txOutId = "";
txIn.txOutIndex = blockIndex;
t.txIns = [txIn];
t.txOuts = [new TxOut(address, 'COINBASE_AMOUNT')];
t.id = getTransactionId(t);
return t;
}
const validateTxIn = function(txIn: TxIn, transaction: Transaction, aUnspentTxOuts: UnspentTxOut[]): boolean {
const referencedUTxOut: UnspentTxOut = aUnspentTxOuts.find(function(uTxO) {
return UTxO.txOutId === txIn.txOutId && uTxO.txOut ===txIn.txOutId;
});
}
const isValidAddress = function(address: string): boolean {
const address_regexp = '^[a-fA-F0-9]+$';
if(address.length !==130) {
console.log('invalid public ket length');
return false;
}
else if (address.match(address_regexp) === null) {
console.log('public key fails regex match');
return false;
} else if (!address.startsWith('04')) {
console.log('public key must start with 04');
return false;
}
return true;
}
const tohexString = function(byteArray): string {
return Array.from(byteArray, function(byte:any) {
return ('0' + (byte & 0xFF).toString(16)).slice(-2);
}).join('');
}
const processTransactions = function(transactions: Transaction[], aUnspentTxOuts: UnspentTxOut[], blockIndex: number) {
if(!isValidTransactionsStructure(transactions)) {
return null;
}
if(!validateBlockTransactions(transactions, aUnspentTxOuts, blockIndex)) {
console.log('invalid block transactions');
return null;
}
return updateUnspentTxOuts(transactions, aUnspentTxOuts);
}
const getPublicKey = function(aPriveyKey: string): string {
return ec.keyFromPrivate(aPrivateKey, 'hex').getPublic().encode('hex');
}
const updateUnspentTxOuts = function(newTransactions: Transaction[], aUnspentTxOuts: UnspentTxOut[]): UnspentTxOut[] {
const newUnspentTxOuts: UnspentTxOut[] = newTransactions
.map(function(t){
return t.txOuts.map(function(txOut,index){
return new UnspentTxOut(t.id, index, txOut.address, txOut.amount;
});
})
.reduce(function(a,b){
return a.concat(b);
},[]);
const consumedTxOuts: UnspentTxOut[] = newTransactions
.map(function(t){
return t.txIns;
})
.reduce(function(a,b){
return a.concat(b);
},[]);
const resultingUnspentTxOuts = aUnspentTxOuts
.filter(function(uTxO) {
return !findUnspentTxOut(uTxO.txOutId, uTxO.txOutIndex, consumedTxOuts);
})
.concat(newUnspentTxOuts);
return resultingUnspentTxOuts;
}<|fim▁end|> | const referencedUnspentTxOut: UnspentTxOut = findUnspentTxOut(txIn.txOutId,txIn.txOutIndex, aUnspentTxOuts); |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>module.exports = LoadsModels
const resolve = require.resolve
<|fim▁hole|>
/*
load(
require('./create_root_portfolio.js'),
{ uri: resolve('./create_root_portfolio.js'), id: 'portfolios/root' }
)
*/
}<|fim▁end|> | function LoadsModels (models) {
const load = models.load.bind(models) |
<|file_name|>test_ical.py<|end_file_name|><|fim▁begin|># This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import datetime
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import WgerTestCase
from wger.utils.helpers import (
make_token,
next_weekday
)
# TODO: parse the generated calendar files with the icalendar library
class IcalToolsTestCase(WgerTestCase):
"""
Tests some tools used for iCal generation
"""
def test_next_weekday(self):
"""
Test the next weekday function
"""
start_date = datetime.date(2013, 12, 5)
# Find next monday
self.assertEqual(next_weekday(start_date, 0), datetime.date(2013, 12, 9))
# Find next wednesday
self.assertEqual(next_weekday(start_date, 2), datetime.date(2013, 12, 11))
# Find next saturday
self.assertEqual(next_weekday(start_date, 5), datetime.date(2013, 12, 7))
class WorkoutICalExportTestCase(WgerTestCase):
"""
Tests exporting the ical file for a workout
"""
def export_ical_token(self):
"""
Helper function that checks exporing an ical file using tokens for access
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:workout:ical', kwargs={'pk': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-workout-3.ics')
# Approximate size
self.assertGreater(len(response.content), 540)
self.assertLess(len(response.content), 560)
def export_ical_token_wrong(self):
"""
Helper function that checks exporing an ical file using a wrong token
"""
uid = 'AB'
token = 'abc-11223344556677889900'
response = self.client.get(reverse('manager:workout:ical', kwargs={'pk': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_ical(self, fail=False):
"""
Helper function
"""
response = self.client.get(reverse('manager:workout:ical', kwargs={'pk': 3}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-workout-3.ics')
# Approximate size
self.assertGreater(len(response.content), 540)
self.assertLess(len(response.content), 560)
def test_export_ical_anonymous(self):
"""
Tests exporting a workout as an ical file as an anonymous user
"""
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_owner(self):
"""
Tests exporting a workout as an ical file as the owner user
"""
self.user_login('test')
self.export_ical(fail=False)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_other(self):
"""
Tests exporting a workout as an ical file as a logged user not owning the data
"""
self.user_login('admin')
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()
class ScheduleICalExportTestCase(WgerTestCase):
"""
Tests exporting the ical file for a schedule
"""
def export_ical_token(self):
"""
Helper function that checks exporing an ical file using tokens for access
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:ical', kwargs={'pk': 2,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-schedule-2.ics')
# Approximate size
self.assertGreater(len(response.content), 1650)
self.assertLess(len(response.content), 1670)
def export_ical_token_wrong(self):
"""
Helper function that checks exporing an ical file using a wrong token
"""
uid = 'AB'
token = 'abc-11223344556677889900'
response = self.client.get(reverse('manager:schedule:ical', kwargs={'pk': 2,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_ical(self, fail=False):
"""
Helper function
"""
response = self.client.get(reverse('manager:schedule:ical', kwargs={'pk': 2}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-schedule-2.ics')
# Approximate size
self.assertGreater(len(response.content), 1650)
self.assertLess(len(response.content), 1670)
def test_export_ical_anonymous(self):
"""
Tests exporting a schedule as an ical file as an anonymous user
"""
self.export_ical(fail=True)<|fim▁hole|> def test_export_ical_owner(self):
"""
Tests exporting a schedule as an ical file as the owner user
"""
self.user_login('admin')
self.export_ical(fail=False)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_other(self):
"""
Tests exporting a schedule as an ical file as a logged user not owning the data
"""
self.user_login('test')
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()<|fim▁end|> | self.export_ical_token()
self.export_ical_token_wrong()
|
<|file_name|>mus_musculus.js<|end_file_name|><|fim▁begin|>Genoverse.Genomes.mus_musculus = {
"1": {
"size": 195471971,
"bands": [
{
"id": "A1",
"start": 2973781,
"end": 8840440,
"type": "gpos100"
},
{
"id": "A2",
"start": 8840441,
"end": 12278389,
"type": "gneg"
},
{
"id": "A3",
"start": 12278390,
"end": 20136559,
"type": "gpos33"
},
{
"id": "A4",
"start": 20136560,
"end": 22101101,
"type": "gneg"
},
{
"id": "A5",
"start": 22101102,
"end": 30941542,
"type": "gpos100"
},
{
"id": "B",
"start": 30941543,
"end": 43219933,
"type": "gneg"
},
{
"id": "C1.1",
"start": 43219934,
"end": 54516051,
"type": "gpos66"
},
{
"id": "C1.2",
"start": 54516052,
"end": 55989458,
"type": "gneg"
},
{
"id": "C1.3",
"start": 55989459,
"end": 59427408,
"type": "gpos66"
},
{
"id": "C2",
"start": 59427409,
"end": 65321034,
"type": "gneg"
},
{
"id": "C3",
"start": 65321035,
"end": 74652611,
"type": "gpos33"
},
{
"id": "C4",
"start": 74652612,
"end": 80055103,
"type": "gneg"
},
{
"id": "C5",
"start": 80055104,
"end": 87422136,
"type": "gpos33"
},
{
"id": "cenp",
"start": 991261,
"end": 1982520,
"type": "acen"
},
{
"id": "cenq",
"start": 1982521,
"end": 2973780,
"type": "acen"
},
{
"id": "D",
"start": 87422137,
"end": 99700527,
"type": "gneg"
},
{
"id": "E1.1",
"start": 99700528,
"end": 102647341,
"type": "gpos33"
},
{
"id": "E1.2",
"start": 102647342,
"end": 103629611,
"type": "gneg"
},
{
"id": "E2.1",
"start": 103629612,
"end": 112470053,
"type": "gpos100"
},
{
"id": "E2.2",
"start": 112470054,
"end": 113943460,
"type": "gneg"
},
{
"id": "E2.3",
"start": 113943461,
"end": 125730714,
"type": "gpos100"
},
{
"id": "E3",
"start": 125730715,
"end": 128677528,
"type": "gneg"
},
{
"id": "E4",
"start": 128677529,
"end": 139482511,
"type": "gpos66"
},
{
"id": "F",
"start": 139482512,
"end": 147340680,
"type": "gneg"
},
{
"id": "G1",
"start": 147340681,
"end": 151760902,
"type": "gpos100"
},
{
"id": "G2",
"start": 151760903,
"end": 152743172,
"type": "gneg"
},
{
"id": "G3",
"start": 152743173,
"end": 157163393,
"type": "gpos100"
},
{
"id": "H1",
"start": 157163394,
"end": 160110206,
"type": "gneg"
},
{
"id": "H2.1",
"start": 160110207,
"end": 164039291,
"type": "gpos33"
},
{
"id": "H2.2",
"start": 164039292,
"end": 165512698,
"type": "gneg"
},
{
"id": "H2.3",
"start": 165512699,
"end": 169932918,
"type": "gpos33"
},
{
"id": "H3",
"start": 169932919,
"end": 175826546,
"type": "gneg"
},
{
"id": "H4",
"start": 175826547,
"end": 181720173,
"type": "gpos33"
},
{
"id": "H5",
"start": 181720174,
"end": 188104936,
"type": "gneg"
},
{
"id": "H6",
"start": 188104937,
"end": 195471971,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 991260,
"type": "tip"
}
]
},
"2": {
"size": 182113224,
"bands": [
{
"id": "A1",
"start": 3006028,
"end": 14080919,
"type": "gpos100"
},
{
"id": "A2",
"start": 14080920,
"end": 16427738,
"type": "gneg"
},
{
"id": "A3",
"start": 16427739,
"end": 29100566,
"type": "gpos33"
},
{
"id": "B",
"start": 29100567,
"end": 48344489,
"type": "gneg"
},
{
"id": "C1.1",
"start": 48344490,
"end": 60547952,
"type": "gpos100"
},
{
"id": "C1.2",
"start": 60547953,
"end": 61017316,
"type": "gneg"
},
{
"id": "C1.3",
"start": 61017317,
"end": 68527140,
"type": "gpos100"
},
{
"id": "C2",
"start": 68527141,
"end": 71812688,
"type": "gneg"
},
{
"id": "C3",
"start": 71812689,
"end": 81199967,
"type": "gpos66"
},
{
"id": "cenp",
"start": 1002010,
"end": 2004018,
"type": "acen"
},
{
"id": "cenq",
"start": 2004019,
"end": 3006027,
"type": "acen"
},
{
"id": "D",
"start": 81199968,
"end": 88709791,
"type": "gneg"
},
{
"id": "E1",
"start": 88709792,
"end": 101382619,
"type": "gpos100"
},
{
"id": "E2",
"start": 101382620,
"end": 105137530,
"type": "gneg"
},
{
"id": "E3",
"start": 105137531,
"end": 113116719,
"type": "gpos33"
},
{
"id": "E4",
"start": 113116720,
"end": 115932902,
"type": "gneg"
},
{
"id": "E5",
"start": 115932903,
"end": 123912089,
"type": "gpos66"
},
{
"id": "F1",
"start": 123912090,
"end": 131891278,
"type": "gneg"
},
{
"id": "F2",
"start": 131891279,
"end": 134707461,
"type": "gpos33"
},
{
"id": "F3",
"start": 134707462,
"end": 141278557,
"type": "gneg"
},
{
"id": "G1",
"start": 141278558,
"end": 146910925,
"type": "gpos100"
},
{
"id": "G2",
"start": 146910926,
"end": 147849652,
"type": "gneg"
},
{
"id": "G3",
"start": 147849653,
"end": 152543293,
"type": "gpos100"
},
{
"id": "H1",
"start": 152543294,
"end": 159114388,
"type": "gneg"
},
{
"id": "H2",
"start": 159114389,
"end": 163338664,
"type": "gpos33"
},
{
"id": "H3",
"start": 163338665,
"end": 173664671,
"type": "gneg"
},
{
"id": "H4",
"start": 173664672,
"end": 182113224,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1002009,
"type": "tip"
}
]
},
"3": {
"size": 160039680,
"bands": [
{
"id": "A1",
"start": 3008269,
"end": 18541181,
"type": "gpos100"
},
{
"id": "A2",
"start": 18541182,
"end": 20492885,
"type": "gneg"
},
{
"id": "A3",
"start": 20492886,
"end": 35618586,
"type": "gpos66"
},
{
"id": "B",
"start": 35618587,
"end": 46840881,
"type": "gneg"
},
{
"id": "C",
"start": 46840882,
"end": 56599398,
"type": "gpos100"
},
{
"id": "cenp",
"start": 1002757,
"end": 2005512,
"type": "acen"
},
{
"id": "cenq",
"start": 2005513,
"end": 3008268,
"type": "acen"
},
{
"id": "D",
"start": 56599399,
"end": 60990731,
"type": "gneg"
},
{
"id": "E1",
"start": 60990732,
"end": 69773396,
"type": "gpos33"
},
{
"id": "E2",
"start": 69773397,
"end": 72700951,
"type": "gneg"
},
{
"id": "E3",
"start": 72700952,
"end": 83923246,
"type": "gpos100"
},
{
"id": "F1",
"start": 83923247,
"end": 93193837,
"type": "gneg"
},
{
"id": "F2.1",
"start": 93193838,
"end": 97585169,
"type": "gpos33"
},
{
"id": "F2.2",
"start": 97585170,
"end": 106367835,
"type": "gneg"
},
{
"id": "F2.3",
"start": 106367836,
"end": 108319539,
"type": "gpos33"
},
{
"id": "F3",
"start": 108319540,
"end": 115150501,
"type": "gneg"
},
{
"id": "G1",
"start": 115150502,
"end": 126860721,
"type": "gpos100"
},
{
"id": "G2",
"start": 126860722,
"end": 128812424,
"type": "gneg"
},
{
"id": "G3",
"start": 128812425,
"end": 138570942,
"type": "gpos66"
},
{
"id": "H1",
"start": 138570943,
"end": 143938126,
"type": "gneg"
},
{
"id": "H2",
"start": 143938127,
"end": 148329459,
"type": "gpos33"
},
{
"id": "H3",
"start": 148329460,
"end": 154184569,
"type": "gneg"
},
{
"id": "H4",
"start": 154184570,
"end": 160039680,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1002756,
"type": "tip"
}
]
},
"4": {
"size": 156508116,
"bands": [
{
"id": "A1",
"start": 3016925,
"end": 14882673,
"type": "gpos100"
},
{
"id": "A2",
"start": 14882674,
"end": 17763190,
"type": "gneg"
},
{
"id": "A3",
"start": 17763191,
"end": 28325088,
"type": "gpos100"
},
{
"id": "A4",
"start": 28325089,
"end": 30245433,
"type": "gneg"
},
{
"id": "A5",
"start": 30245434,
"end": 43687847,
"type": "gpos66"
},
{
"id": "B1",
"start": 43687848,
"end": 51849313,
"type": "gneg"
},
{
"id": "B2",
"start": 51849314,
"end": 55209917,
"type": "gpos33"
},
{
"id": "B3",
"start": 55209918,
"end": 63371383,
"type": "gneg"
},
{
"id": "C1",
"start": 63371384,
"end": 69612504,
"type": "gpos33"
},
{
"id": "C2",
"start": 69612505,
"end": 72012935,
"type": "gneg"
},
{
"id": "C3",
"start": 72012936,
"end": 84015092,
"type": "gpos100"
},
{
"id": "C4",
"start": 84015093,
"end": 89776127,
"type": "gneg"
},
{
"id": "C5",
"start": 89776128,
"end": 97457507,
"type": "gpos66"
},
{
"id": "C6",
"start": 97457508,
"end": 105618973,
"type": "gneg"
},
{
"id": "C7",
"start": 105618974,
"end": 110899922,
"type": "gpos66"
},
{
"id": "cenp",
"start": 1005642,
"end": 2011283,
"type": "acen"
},
{
"id": "cenq",
"start": 2011284,
"end": 3016924,
"type": "acen"
},
{
"id": "D1",
"start": 110899923,
"end": 117621129,
"type": "gneg"
},
{
"id": "D2.1",
"start": 117621130,
"end": 120501647,
"type": "gpos33"
},
{
"id": "D2.2",
"start": 120501648,
"end": 131063544,
"type": "gneg"
},
{
"id": "D2.3",
"start": 131063545,
"end": 133944061,
"type": "gpos33"
},
{
"id": "D3",
"start": 133944062,
"end": 141625441,
"type": "gneg"
},
{
"id": "E1",
"start": 141625442,
"end": 147866562,
"type": "gpos100"
},
{
"id": "E2",
"start": 147866563,
"end": 156508116,
"type": "gneg"
},
{
"id": "tip",
"start": 1,
"end": 1005641,
"type": "tip"
}
]
},
"5": {
"size": 151834684,
"bands": [
{
"id": "A1",
"start": 2986183,
"end": 14895174,
"type": "gpos100"
},
{
"id": "A2",
"start": 14895175,
"end": 16336642,
"type": "gneg"
},
{
"id": "A3",
"start": 16336643,
"end": 25465943,
"type": "gpos66"
},
{
"id": "B1",
"start": 25465944,
"end": 33634265,
"type": "gneg"
},
{
"id": "B2",
"start": 33634266,
"end": 35556222,
"type": "gpos33"
},
{
"id": "B3",
"start": 35556223,
"end": 50451397,
"type": "gneg"
},
{
"id": "C1",
"start": 50451398,
"end": 58619719,
"type": "gpos33"
},
{
"id": "C2",
"start": 58619720,
"end": 61022166,
"type": "gneg"
},
{
"id": "C3.1",
"start": 61022167,
"end": 71592935,
"type": "gpos100"
},
{
"id": "C3.2",
"start": 71592936,
"end": 73514894,
"type": "gneg"
},
{
"id": "C3.3",
"start": 73514895,
"end": 77839299,
"type": "gpos66"
},
{
"id": "cenp",
"start": 995395,
"end": 1990788,
"type": "acen"
},
{
"id": "cenq",
"start": 1990789,
"end": 2986182,
"type": "acen"
},
{
"id": "D",
"start": 77839300,
"end": 81683215,
"type": "gneg"
},
{
"id": "E1",
"start": 81683216,
"end": 91293005,
"type": "gpos100"
},
{
"id": "E2",
"start": 91293006,
"end": 93695452,
"type": "gneg"
},
{
"id": "E3",
"start": 93695453,
"end": 99461326,
"type": "gpos33"
},
{
"id": "E4",
"start": 99461327,
"end": 101863775,
"type": "gneg"
},
{
"id": "E5",
"start": 101863776,
"end": 107629649,
"type": "gpos33"
},
{
"id": "F",
"start": 107629650,
"end": 124927270,
"type": "gneg"
},
{
"id": "G1.1",
"start": 124927271,
"end": 126849229,
"type": "gpos33"
},
{
"id": "G1.2",
"start": 126849230,
"end": 127810207,
"type": "gneg"
},
{
"id": "G1.3",
"start": 127810208,
"end": 130693144,
"type": "gpos33"
},
{
"id": "G2",
"start": 130693145,
"end": 146068809,
"type": "gneg"
},
{
"id": "G3",
"start": 146068810,
"end": 151834684,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 995394,
"type": "tip"
}
]
},
"6": {
"size": 149736546,
"bands": [
{
"id": "A1",
"start": 3004405,
"end": 16637393,
"type": "gpos100"
},
{
"id": "A2",
"start": 16637394,
"end": 21530744,
"type": "gneg"
},
{
"id": "A3.1",
"start": 21530745,
"end": 27402766,
"type": "gpos100"
},
{
"id": "A3.2",
"start": 27402767,
"end": 28381436,
"type": "gneg"
},
{
"id": "A3.3",
"start": 28381437,
"end": 34253457,
"type": "gpos100"
},
{
"id": "B1",
"start": 34253458,
"end": 41593484,
"type": "gneg"
},
{
"id": "B2.1",
"start": 41593485,
"end": 44529494,
"type": "gpos66"
},
{
"id": "B2.2",
"start": 44529495,
"end": 45997500,
"type": "gneg"
},
{
"id": "B2.3",
"start": 45997501,
"end": 50890851,
"type": "gpos66"
},
{
"id": "B3",
"start": 50890852,
"end": 62634894,
"type": "gneg"
},
{
"id": "C1",
"start": 62634895,
"end": 74378937,
"type": "gpos100"
},
{
"id": "C2",
"start": 74378938,
"end": 76825612,
"type": "gneg"
},
{
"id": "C3",
"start": 76825613,
"end": 86122980,
"type": "gpos66"
},
{
"id": "cenp",
"start": 1001469,
"end": 2002936,
"type": "acen"
},
{
"id": "cenq",
"start": 2002937,
"end": 3004404,
"type": "acen"
},
{
"id": "D1",
"start": 86122981,
"end": 94441677,
"type": "gneg"
},
{
"id": "D2",
"start": 94441678,
"end": 95909682,
"type": "gpos33"
},
{
"id": "D3",
"start": 95909683,
"end": 103249709,
"type": "gneg"
},
{
"id": "E1",
"start": 103249710,
"end": 108632395,
"type": "gpos100"
},
{
"id": "E2",
"start": 108632396,
"end": 109611066,
"type": "gneg"
},
{
"id": "E3",
"start": 109611067,
"end": 116951092,
"type": "gpos100"
},
{
"id": "F1",
"start": 116951093,
"end": 122823113,
"type": "gneg"
},
{
"id": "F2",
"start": 122823114,
"end": 125269789,
"type": "gpos33"
},
{
"id": "F3",
"start": 125269790,
"end": 132120481,
"type": "gneg"
},
{
"id": "G1",
"start": 132120482,
"end": 139460507,
"type": "gpos66"
},
{
"id": "G2",
"start": 139460508,
"end": 142885854,
"type": "gneg"
},
{
"id": "G3",
"start": 142885855,
"end": 149736546,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1001468,
"type": "tip"
}
]
},
"7": {
"size": 145441459,
"bands": [
{
"id": "A1",
"start": 2860683,
"end": 15202939,
"type": "gpos100"
},
{
"id": "A2",
"start": 15202940,
"end": 18243527,
"type": "gneg"
},
{
"id": "A3",
"start": 18243528,
"end": 28378820,
"type": "gpos33"
},
{
"id": "B1",
"start": 28378821,
"end": 34459996,
"type": "gneg"
},
{
"id": "B2",
"start": 34459997,
"end": 37500585,
"type": "gpos33"
},
{
"id": "B3",
"start": 37500585,
"end": 47635877,
"type": "gneg"
},
{
"id": "B4",
"start": 47635878,
"end": 54223818,
"type": "gpos33"
},
{
"id": "B5",
"start": 54223819,
"end": 60811759,
"type": "gneg"
},
{
"id": "C",
"start": 60811760,
"end": 71453817,
"type": "gpos100"
},
{
"id": "cenp",
"start": 953561,
"end": 1907121,
"type": "acen"
},
{
"id": "cenq",
"start": 1907122,
"end": 2860682,
"type": "acen"
},
{
"id": "D1",
"start": 71453818,
"end": 77028228,
"type": "gneg"
},
{
"id": "D2",
"start": 77028229,
"end": 80575581,
"type": "gpos66"
},
{
"id": "D3",
"start": 80575582,
"end": 90204109,
"type": "gneg"
},
{
"id": "E1",
"start": 90204110,
"end": 99832638,
"type": "gpos100"
},
{
"id": "E2",
"start": 99832639,
"end": 102366461,
"type": "gneg"
},
{
"id": "E3",
"start": 102366462,
"end": 111488225,
"type": "gpos33"
},
{
"id": "F1",
"start": 111488226,
"end": 118582930,
"type": "gneg"
},
{
"id": "F2",
"start": 118582931,
"end": 123143812,
"type": "gpos33"
},
{
"id": "F3",
"start": 123143813,
"end": 137333224,
"type": "gneg"
},
{
"id": "F4",
"start": 137333225,
"end": 140880576,
"type": "gpos33"
},
{
"id": "F5",
"start": 140880577,
"end": 145441459,
"type": "gneg"
},
{
"id": "tip",
"start": 1,
"end": 953560,
"type": "tip"
}
]
},
"8": {
"size": 129401213,
"bands": [
{
"id": "A1.1",
"start": 2946767,
"end": 15940728,
"type": "gpos100"
},
{
"id": "A1.2",
"start": 15940729,
"end": 16878419,
"type": "gneg"
},
{
"id": "A1.3",
"start": 16878420,
"end": 20160333,
"type": "gpos33"
},
{
"id": "A2",
"start": 20160334,
"end": 29537233,
"type": "gneg"
},
{
"id": "A3",
"start": 29537234,
"end": 33756838,
"type": "gpos33"
},
{
"id": "A4",
"start": 33756839,
"end": 44071427,
"type": "gneg"
},
{
"id": "B1.1",
"start": 44071428,
"end": 48291032,
"type": "gpos66"
},
{
"id": "B1.2",
"start": 48291033,
"end": 50166412,
"type": "gneg"
},
{
"id": "B1.3",
"start": 50166413,
"end": 55792551,
"type": "gpos66"
},
{
"id": "B2",
"start": 55792552,
"end": 59543311,
"type": "gneg"
},
{
"id": "B3.1",
"start": 59543312,
"end": 67044831,
"type": "gpos100"
},
{
"id": "B3.2",
"start": 67044832,
"end": 67982520,
"type": "gneg"
},
{
"id": "B3.3",
"start": 67982521,
"end": 74546350,
"type": "gpos100"
},
{
"id": "C1",
"start": 74546351,
"end": 80172490,
"type": "gneg"
},
{
"id": "C2",
"start": 80172491,
"end": 84860939,
"type": "gpos33"
},
{
"id": "C3",
"start": 84860940,
"end": 90018235,
"type": "gneg"
},
{
"id": "C4",
"start": 90018236,
"end": 91424769,
"type": "gpos33"
},
{
"id": "C5",
"start": 91424770,
"end": 95644374,
"type": "gneg"
},
{
"id": "cenp",
"start": 982256,
"end": 1964510,
"type": "acen"
},
{
"id": "cenq",
"start": 1964511,
"end": 2946766,
"type": "acen"
},
{
"id": "D1",
"start": 95644375,
"end": 103145894,
"type": "gpos100"
},
{
"id": "D2",
"start": 103145895,
"end": 104083583,
"type": "gneg"
},
{
"id": "D3",
"start": 104083584,
"end": 110647414,
"type": "gpos33"
},
{
"id": "E1",
"start": 110647414,
"end": 123775073,
"type": "gneg"
},
{
"id": "E2",
"start": 123775074,
"end": 129401213,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 982255,
"type": "tip"
}
]
},
"9": {
"size": 124595110,
"bands": [
{
"id": "A1",
"start": 3012548,
"end": 14412120,
"type": "gpos100"
},
{
"id": "A2",
"start": 14412121,
"end": 19526099,
"type": "gneg"
},
{
"id": "A3",
"start": 19526100,
"end": 24175170,
"type": "gpos33"
},
{
"id": "A4",
"start": 24175171,
"end": 38122383,
"type": "gneg"
},
{
"id": "A5.1",
"start": 38122384,
"end": 44166176,
"type": "gpos66"
},
{
"id": "A5.2",
"start": 44166177,
"end": 46490712,
"type": "gneg"
},
{
"id": "A5.3",
"start": 46490713,
"end": 54859040,
"type": "gpos66"
},
{
"id": "B",
"start": 54859041,
"end": 63227368,
"type": "gneg"
},
{
"id": "C",
"start": 63227369,
"end": 69736068,
"type": "gpos33"
},
{
"id": "cenp",
"start": 1004183,
"end": 2008364,
"type": "acen"
},
{
"id": "cenq",
"start": 2008365,
"end": 3012547,
"type": "acen"
},
{
"id": "D",
"start": 69736069,
"end": 77639490,
"type": "gneg"
},
{
"id": "E1",
"start": 77639491,
"end": 82753467,
"type": "gpos33"
},
{
"id": "E2",
"start": 82753468,
"end": 84613096,
"type": "gneg"
},
{
"id": "E3.1",
"start": 84613097,
"end": 91121796,
"type": "gpos100"
},
{
"id": "E3.2",
"start": 91121797,
"end": 91586703,
"type": "gneg"
},
{
"id": "E3.3",
"start": 91586704,
"end": 100884845,
"type": "gpos100"
},
{
"id": "E4",
"start": 100884846,
"end": 101814660,
"type": "gpos66"
},
{
"id": "F1",
"start": 101814661,
"end": 108323360,
"type": "gneg"
},
{
"id": "F2",
"start": 108323361,
"end": 111112803,
"type": "gpos33"
},
{
"id": "F3",
"start": 111112804,
"end": 119946038,
"type": "gneg"
},
{
"id": "F4",
"start": 119946039,
"end": 124595110,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1004182,
"type": "tip"
}
]
},
"10": {
"size": 130694993,
"bands": [
{
"id": "A1",
"start": 3016195,
"end": 12822904,
"type": "gpos100"
},
{
"id": "A2",
"start": 12822905,
"end": 17754791,
"type": "gneg"
},
{
"id": "A3",
"start": 17754792,
"end": 23673055,
"type": "gpos33"
},
{
"id": "A4",
"start": 23673056,
"end": 33536827,
"type": "gneg"
},
{
"id": "B1",
"start": 33536828,
"end": 41427846,
"type": "gpos100"
},
{
"id": "B2",
"start": 41427847,
"end": 48332487,
"type": "gneg"
},
{
"id": "B3",
"start": 48332488,
"end": 56223505,
"type": "gpos100"
},
{
"id": "B4",
"start": 56223506,
"end": 64114524,
"type": "gneg"
},
{
"id": "B5.1",
"start": 64114525,
"end": 68060033,
"type": "gpos100"
},
{
"id": "B5.2",
"start": 68060034,
"end": 68553222,
"type": "gneg"
},
{
"id": "B5.3",
"start": 68553223,
"end": 74964674,
"type": "gpos100"
},
{
"id": "C1",
"start": 74964675,
"end": 89267145,
"type": "gneg"
},
{
"id": "C2",
"start": 89267146,
"end": 96171787,
"type": "gpos33"
},
{
"id": "C3",
"start": 96171788,
"end": 99130918,
"type": "gneg"
},
{
"id": "cenp",
"start": 1005399,
"end": 2010796,
"type": "acen"
},
{
"id": "cenq",
"start": 2010797,
"end": 3016194,
"type": "acen"
},
{
"id": "D1",
"start": 99130919,
"end": 111953823,
"type": "gpos100"
},
{
"id": "D2",
"start": 111953824,
"end": 124776728,
"type": "gneg"
},
{
"id": "D3",
"start": 124776729,
"end": 130694993,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1005398,
"type": "tip"
}
]
},
"11": {
"size": 122082543,
"bands": [
{
"id": "A1",
"start": 3005877,
"end": 13046988,
"type": "gpos100"
},
{
"id": "A2",
"start": 13046989,
"end": 17240663,
"type": "gneg"
},
{
"id": "A3.1",
"start": 17240664,
"end": 21900302,
"type": "gpos100"
},
{
"id": "A3.2",
"start": 21900303,
"end": 25628014,
"type": "gneg"
},
{
"id": "A3.3",
"start": 25628015,
"end": 30287653,
"type": "gpos100"
},
{
"id": "A4",
"start": 30287654,
"end": 36345184,
"type": "gneg"
},
{
"id": "A5",
"start": 36345185,
"end": 43334642,
"type": "gpos100"
},
{
"id": "B1.1",
"start": 43334643,
"end": 47994281,
"type": "gneg"
},
{
"id": "B1.2",
"start": 47994282,
"end": 49858137,
"type": "gpos33"
},
{
"id": "B1.3",
"start": 49858138,
"end": 60109343,
"type": "gneg"
},
{
"id": "B2",
"start": 60109344,
"end": 62905126,
"type": "gpos33"
},
{
"id": "B3",
"start": 62905127,
"end": 70826512,
"type": "gneg"
},
{
"id": "B4",
"start": 70826513,
"end": 74088260,
"type": "gpos33"
},
{
"id": "B5",
"start": 74088261,
"end": 82009646,
"type": "gneg"
},
{
"id": "C",
"start": 82009647,
"end": 90396996,
"type": "gpos100"
},
{
"id": "cenp",
"start": 1001959,
"end": 2003917,
"type": "acen"
},
{
"id": "cenq",
"start": 2003918,
"end": 3005876,
"type": "acen"
},
{
"id": "D",
"start": 90396997,
"end": 102512058,
"type": "gneg"
},
{
"id": "E1",
"start": 102512059,
"end": 110433444,
"type": "gpos66"
},
{
"id": "E2",
"start": 110433445,
"end": 122082543,
"type": "gneg"
},
{
"id": "tip",
"start": 1,
"end": 1001958,
"type": "tip"
}
]
},
"12": {
"size": 120129022,
"bands": [
{
"id": "A1.1",
"start": 2972080,
"end": 17601321,
"type": "gpos100"
},
{
"id": "A1.2",
"start": 17601322,
"end": 21121586,
"type": "gneg"
},
{
"id": "A1.3",
"start": 21121587,
"end": 25961949,
"type": "gpos66"
},
{
"id": "A2",
"start": 25961949,
"end": 31682378,
"type": "gneg"
},
{
"id": "A3",
"start": 31682379,
"end": 39162941,
"type": "gpos33"
},
{
"id": "B1",
"start": 39162942,
"end": 44003304,
"type": "gneg"
},
{
"id": "B2",
"start": 44003305,
"end": 44883370,
"type": "gpos33"
},
{
"id": "B3",
"start": 44883371,
"end": 51923898,
"type": "gneg"
},
{
"id": "C1",
"start": 51923899,
"end": 66004956,
"type": "gpos100"
},
{
"id": "C2",
"start": 66004957,
"end": 71285352,
"type": "gneg"
},
{
"id": "C3",
"start": 71285353,
"end": 80966079,
"type": "gpos100"
},
{
"id": "cenp",
"start": 990694,
"end": 1981386,
"type": "acen"
},
{
"id": "cenq",
"start": 1981387,
"end": 2972079,
"type": "acen"
},
{
"id": "D1",
"start": 80966080,
"end": 85366410,
"type": "gneg"
},
{
"id": "D2",
"start": 85366411,
"end": 88446642,
"type": "gpos33"
},
{
"id": "D3",
"start": 88446643,
"end": 95487170,
"type": "gneg"
},
{
"id": "E",
"start": 95487171,
"end": 106047964,
"type": "gpos100"
},
{
"id": "F1",
"start": 106047965,
"end": 114408591,
"type": "gneg"
},
{
"id": "F2",
"start": 114408592,
"end": 120129022,
"type": "gpos66"
},
{
"id": "tip",
"start": 1,
"end": 990693,
"type": "tip"
}
]
},
"13": {
"size": 120421639,
"bands": [
{
"id": "A1",
"start": 3003426,
"end": 16286532,
"type": "gpos100"
},
{
"id": "A2",
"start": 16286533,
"end": 21221846,
"type": "gneg"
},
{
"id": "A3.1",
"start": 21221847,
"end": 29611877,
"type": "gpos66"
},
{
"id": "A3.2",
"start": 29611878,
"end": 33066596,
"type": "gneg"
},
{
"id": "A3.3",
"start": 33066597,
"end": 41456629,
"type": "gpos33"
},
{
"id": "A4",
"start": 41456630,
"end": 44417817,
"type": "gneg"
},
{
"id": "A5",
"start": 44417818,
"end": 52807849,
"type": "gpos33"
},
{
"id": "B1",
"start": 52807850,
"end": 59223756,
"type": "gneg"
},
{
"id": "B2",
"start": 59223757,
"end": 61691412,
"type": "gpos33"
},
{
"id": "B3",
"start": 61691413,
"end": 69587913,
"type": "gneg"
},
{
"id": "C1",
"start": 69587914,
"end": 78471477,
"type": "gpos33"
},
{
"id": "C2",
"start": 78471478,
"end": 80939133,
"type": "gneg"
},
{
"id": "C3",
"start": 80939134,
"end": 94758010,
"type": "gpos100"
},
{
"id": "cenp",
"start": 1001142,
"end": 2002283,
"type": "acen"
},
{
"id": "cenq",
"start": 2002284,
"end": 3003425,
"type": "acen"
},
{
"id": "D1",
"start": 94758011,
"end": 106602762,
"type": "gneg"
},
{
"id": "D2.1",
"start": 106602763,
"end": 110551012,
"type": "gpos33"
},
{
"id": "D2.2",
"start": 110551013,
"end": 116473388,
"type": "gneg"
},
{
"id": "D2.3",
"start": 116473389,
"end": 120421639,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1001141,
"type": "tip"
}
]
},
"14": {
"size": 124902244,
"bands": [
{
"id": "A1",
"start": 2992989,
"end": 14988268,
"type": "gpos100"
},
{
"id": "A2",
"start": 14988269,
"end": 19484749,
"type": "gneg"
},
{
"id": "A3",
"start": 19484750,
"end": 29976538,
"type": "gpos33"
},
{
"id": "B",
"start": 29976539,
"end": 43465980,
"type": "gneg"
},
{
"id": "C1",<|fim▁hole|> "end": 51959333,
"type": "gpos100"
},
{
"id": "C2",
"start": 51959334,
"end": 54956987,
"type": "gneg"
},
{
"id": "C3",
"start": 54956988,
"end": 59953076,
"type": "gpos66"
},
{
"id": "cenp",
"start": 997663,
"end": 1995325,
"type": "acen"
},
{
"id": "cenq",
"start": 1995326,
"end": 2992988,
"type": "acen"
},
{
"id": "D1",
"start": 59953077,
"end": 68946037,
"type": "gneg"
},
{
"id": "D2",
"start": 68946038,
"end": 72942909,
"type": "gpos33"
},
{
"id": "D3",
"start": 72942910,
"end": 84933525,
"type": "gneg"
},
{
"id": "E1",
"start": 84933526,
"end": 88930397,
"type": "gpos66"
},
{
"id": "E2.1",
"start": 88930398,
"end": 98922576,
"type": "gpos100"
},
{
"id": "E2.2",
"start": 98922577,
"end": 99921795,
"type": "gneg"
},
{
"id": "E2.3",
"start": 99921795,
"end": 107415929,
"type": "gpos100"
},
{
"id": "E3",
"start": 107415930,
"end": 110913192,
"type": "gneg"
},
{
"id": "E4",
"start": 110913193,
"end": 120905371,
"type": "gpos100"
},
{
"id": "E5",
"start": 120905372,
"end": 124902244,
"type": "gneg"
},
{
"id": "tip",
"start": 1,
"end": 997662,
"type": "tip"
}
]
},
"15": {
"size": 104043685,
"bands": [
{
"id": "A1",
"start": 3015906,
"end": 16500319,
"type": "gpos100"
},
{
"id": "A2",
"start": 16500320,
"end": 24292137,
"type": "gneg"
},
{
"id": "B1",
"start": 24292138,
"end": 29792243,
"type": "gpos33"
},
{
"id": "B2",
"start": 29792244,
"end": 32083955,
"type": "gneg"
},
{
"id": "B3.1",
"start": 32083956,
"end": 43084168,
"type": "gpos100"
},
{
"id": "B3.2",
"start": 43084169,
"end": 44917537,
"type": "gneg"
},
{
"id": "B3.3",
"start": 44917538,
"end": 49959301,
"type": "gpos66"
},
{
"id": "C",
"start": 49959302,
"end": 53626039,
"type": "gneg"
},
{
"id": "cenp",
"start": 1005302,
"end": 2010603,
"type": "acen"
},
{
"id": "cenq",
"start": 2010604,
"end": 3015905,
"type": "acen"
},
{
"id": "D1",
"start": 53626040,
"end": 66459622,
"type": "gpos100"
},
{
"id": "D2",
"start": 66459623,
"end": 68751333,
"type": "gneg"
},
{
"id": "D3",
"start": 68751334,
"end": 77459835,
"type": "gpos66"
},
{
"id": "E1",
"start": 77459836,
"end": 83876626,
"type": "gneg"
},
{
"id": "E2",
"start": 83876627,
"end": 87085022,
"type": "gpos33"
},
{
"id": "E3",
"start": 87085023,
"end": 95793524,
"type": "gneg"
},
{
"id": "F1",
"start": 95793525,
"end": 101293631,
"type": "gpos66"
},
{
"id": "F2",
"start": 101293632,
"end": 102210316,
"type": "gneg"
},
{
"id": "F3",
"start": 102210317,
"end": 104043685,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 1005301,
"type": "tip"
}
]
},
"16": {
"size": 98207768,
"bands": [
{
"id": "A1",
"start": 2996602,
"end": 15432649,
"type": "gpos100"
},
{
"id": "A2",
"start": 15432650,
"end": 16367961,
"type": "gneg"
},
{
"id": "A3",
"start": 16367962,
"end": 20576864,
"type": "gpos33"
},
{
"id": "B1",
"start": 20576865,
"end": 26188738,
"type": "gneg"
},
{
"id": "B2",
"start": 26188739,
"end": 32268266,
"type": "gpos33"
},
{
"id": "B3",
"start": 32268267,
"end": 38347794,
"type": "gneg"
},
{
"id": "B4",
"start": 38347795,
"end": 44894979,
"type": "gpos33"
},
{
"id": "B5",
"start": 44894980,
"end": 53780444,
"type": "gneg"
},
{
"id": "C1.1",
"start": 53780445,
"end": 57989348,
"type": "gpos66"
},
{
"id": "C1.2",
"start": 57989349,
"end": 58924660,
"type": "gneg"
},
{
"id": "C1.3",
"start": 58924661,
"end": 66874813,
"type": "gpos66"
},
{
"id": "C2",
"start": 66874814,
"end": 70616061,
"type": "gneg"
},
{
"id": "C3.1",
"start": 70616062,
"end": 79033870,
"type": "gpos100"
},
{
"id": "C3.2",
"start": 79033871,
"end": 79501525,
"type": "gneg"
},
{
"id": "C3.3",
"start": 79501526,
"end": 91660583,
"type": "gpos100"
},
{
"id": "C4",
"start": 91660584,
"end": 98207768,
"type": "gneg"
},
{
"id": "cenp",
"start": 998868,
"end": 1997734,
"type": "acen"
},
{
"id": "cenq",
"start": 1997735,
"end": 2996601,
"type": "acen"
},
{
"id": "tip",
"start": 1,
"end": 998867,
"type": "tip"
}
]
},
"17": {
"size": 94987271,
"bands": [
{
"id": "A1",
"start": 2991014,
"end": 13943085,
"type": "gpos100"
},
{
"id": "A2",
"start": 13943086,
"end": 16121691,
"type": "gneg"
},
{
"id": "A3.1",
"start": 16121692,
"end": 17428856,
"type": "gpos33"
},
{
"id": "A3.2",
"start": 17428857,
"end": 21786070,
"type": "gneg"
},
{
"id": "A3.3",
"start": 21786071,
"end": 31371942,
"type": "gpos66"
},
{
"id": "B1",
"start": 31371943,
"end": 40086370,
"type": "gneg"
},
{
"id": "B2",
"start": 40086371,
"end": 41393535,
"type": "gpos33"
},
{
"id": "B3",
"start": 41393536,
"end": 45750749,
"type": "gneg"
},
{
"id": "C",
"start": 45750750,
"end": 55772342,
"type": "gpos66"
},
{
"id": "cenp",
"start": 997005,
"end": 1994009,
"type": "acen"
},
{
"id": "cenq",
"start": 1994010,
"end": 2991013,
"type": "acen"
},
{
"id": "D",
"start": 55772343,
"end": 60129556,
"type": "gneg"
},
{
"id": "E1.1",
"start": 60129557,
"end": 67972542,
"type": "gpos100"
},
{
"id": "E1.2",
"start": 67972543,
"end": 68843984,
"type": "gneg"
},
{
"id": "E1.3",
"start": 68843985,
"end": 73201199,
"type": "gpos100"
},
{
"id": "E2",
"start": 73201200,
"end": 78429856,
"type": "gneg"
},
{
"id": "E3",
"start": 78429857,
"end": 82787070,
"type": "gpos33"
},
{
"id": "E4",
"start": 82787071,
"end": 88887170,
"type": "gneg"
},
{
"id": "E5",
"start": 88887171,
"end": 94987271,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 997004,
"type": "tip"
}
]
},
"18": {
"size": 90702639,
"bands": [
{
"id": "A1",
"start": 2997707,
"end": 19406145,
"type": "gpos100"
},
{
"id": "A2",
"start": 19406146,
"end": 29531091,
"type": "gneg"
},
{
"id": "B1",
"start": 29531092,
"end": 35437309,
"type": "gpos66"
},
{
"id": "B2",
"start": 35437310,
"end": 37124800,
"type": "gneg"
},
{
"id": "B3",
"start": 37124801,
"end": 45562255,
"type": "gpos100"
},
{
"id": "C",
"start": 45562256,
"end": 49780983,
"type": "gneg"
},
{
"id": "cenp",
"start": 999236,
"end": 1998471,
"type": "acen"
},
{
"id": "cenq",
"start": 1998472,
"end": 2997706,
"type": "acen"
},
{
"id": "D1",
"start": 49780984,
"end": 53999710,
"type": "gpos100"
},
{
"id": "D2",
"start": 53999711,
"end": 54421582,
"type": "gneg"
},
{
"id": "D3",
"start": 54421583,
"end": 60749673,
"type": "gpos100"
},
{
"id": "E1",
"start": 60749674,
"end": 67921510,
"type": "gneg"
},
{
"id": "E2",
"start": 67921511,
"end": 75093346,
"type": "gpos33"
},
{
"id": "E3",
"start": 75093347,
"end": 83530801,
"type": "gneg"
},
{
"id": "E4",
"start": 83530802,
"end": 90702639,
"type": "gpos33"
},
{
"id": "tip",
"start": 1,
"end": 999235,
"type": "tip"
}
]
},
"19": {
"size": 61431566,
"bands": [
{
"id": "A",
"start": 3004360,
"end": 16680093,
"type": "gpos100"
},
{
"id": "B",
"start": 16680094,
"end": 25630388,
"type": "gneg"
},
{
"id": "C1",
"start": 25630389,
"end": 34987514,
"type": "gpos66"
},
{
"id": "C2",
"start": 34987515,
"end": 38242166,
"type": "gneg"
},
{
"id": "C3",
"start": 38242167,
"end": 47599292,
"type": "gpos66"
},
{
"id": "cenp",
"start": 1001454,
"end": 2002906,
"type": "acen"
},
{
"id": "cenq",
"start": 2002907,
"end": 3004359,
"type": "acen"
},
{
"id": "D1",
"start": 47599293,
"end": 51667607,
"type": "gneg"
},
{
"id": "D2",
"start": 51667608,
"end": 58990576,
"type": "gpos33"
},
{
"id": "D3",
"start": 58990577,
"end": 61431566,
"type": "gneg"
},
{
"id": "tip",
"start": 1,
"end": 1001453,
"type": "tip"
}
]
},
"X": {
"size": 171031299,
"bands": [
{
"id": "A1.1",
"start": 3078866,
"end": 15772338,
"type": "gpos100"
},
{
"id": "A1.2",
"start": 15772339,
"end": 18236766,
"type": "gneg"
},
{
"id": "A1.3",
"start": 18236767,
"end": 21194079,
"type": "gpos33"
},
{
"id": "A2",
"start": 21194080,
"end": 28094478,
"type": "gneg"
},
{
"id": "A3.1",
"start": 28094479,
"end": 33516219,
"type": "gpos66"
},
{
"id": "A3.2",
"start": 33516220,
"end": 34501990,
"type": "gneg"
},
{
"id": "A3.3",
"start": 34501991,
"end": 39923731,
"type": "gpos66"
},
{
"id": "A4",
"start": 39923732,
"end": 47809901,
"type": "gneg"
},
{
"id": "A5",
"start": 47809902,
"end": 56188956,
"type": "gpos66"
},
{
"id": "A6",
"start": 56188957,
"end": 63089355,
"type": "gneg"
},
{
"id": "A7.1",
"start": 63089356,
"end": 69496866,
"type": "gpos66"
},
{
"id": "A7.2",
"start": 69496868,
"end": 70975524,
"type": "gneg"
},
{
"id": "A7.3",
"start": 70975525,
"end": 77383036,
"type": "gpos66"
},
{
"id": "B",
"start": 77383037,
"end": 82311892,
"type": "gneg"
},
{
"id": "C1",
"start": 82311893,
"end": 91183833,
"type": "gpos100"
},
{
"id": "C2",
"start": 91183834,
"end": 92169603,
"type": "gneg"
},
{
"id": "C3",
"start": 92169604,
"end": 101041544,
"type": "gpos100"
},
{
"id": "cenp",
"start": 1026289,
"end": 2052577,
"type": "acen"
},
{
"id": "cenq",
"start": 2052578,
"end": 3078865,
"type": "acen"
},
{
"id": "D",
"start": 101041545,
"end": 109913485,
"type": "gneg"
},
{
"id": "E1",
"start": 109913486,
"end": 120264082,
"type": "gpos100"
},
{
"id": "E2",
"start": 120264084,
"end": 121249853,
"type": "gneg"
},
{
"id": "E3",
"start": 121249854,
"end": 135050651,
"type": "gpos100"
},
{
"id": "F1",
"start": 135050652,
"end": 141458162,
"type": "gneg"
},
{
"id": "F2",
"start": 141458163,
"end": 148851447,
"type": "gpos33"
},
{
"id": "F3",
"start": 148851448,
"end": 156244730,
"type": "gneg"
},
{
"id": "F4",
"start": 156244731,
"end": 163638014,
"type": "gpos33"
},
{
"id": "F5",
"start": 163638015,
"end": 171031299,
"type": "gneg"
},
{
"id": "tip",
"start": 1,
"end": 1026288,
"type": "tip"
}
]
},
"Y": {
"size": 91744698,
"bands": [
{
"id": "A1",
"start": 5,
"end": 20642552,
"type": "gpos100"
},
{
"id": "A2",
"start": 20642557,
"end": 32684047,
"type": "gpos66"
},
{
"id": "B",
"start": 32684053,
"end": 45298941,
"type": "gpos33"
},
{
"id": "C1",
"start": 45298947,
"end": 54473414,
"type": "gpos100"
},
{
"id": "C2",
"start": 54473420,
"end": 61927667,
"type": "gpos33"
},
{
"id": "C3",
"start": 61927673,
"end": 72248949,
"type": "gpos100"
},
{
"id": "D",
"start": 72248955,
"end": 83143629,
"type": "gpos33"
},
{
"id": "E",
"start": 83143635,
"end": 91744698,
"type": "gpos66"
}
]
},
"MT": {
"size": 16299,
"bands": [
{
"start": 1,
"end": 16299
}
]
}
};<|fim▁end|> | "start": 43465981, |
<|file_name|>tdAddBasic1.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: tdAddBasic1.py $
"""
VirtualBox Validation Kit - Additions Basics #1.
"""
__copyright__ = \
"""
Copyright (C) 2010-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard Python imports.
import os;
import sys;
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import base;
from testdriver import vbox;
from testdriver import vboxcon;
# Sub test driver imports.
sys.path.append(os.path.dirname(os.path.abspath(__file__))); # For sub-test drivers.
from tdAddGuestCtrl import SubTstDrvAddGuestCtrl;
class tdAddBasic1(vbox.TestDriver): # pylint: disable=R0902
"""
Additions Basics #1.
"""
## @todo
# - More of the settings stuff can e and need to be generalized!
#
def __init__(self):
vbox.TestDriver.__init__(self);
self.oTestVmSet = self.oTestVmManager.getStandardVmSet('nat');
self.asTestsDef = ['guestprops', 'stdguestprops', 'guestcontrol'];
self.asTests = self.asTestsDef;
self.addSubTestDriver(SubTstDrvAddGuestCtrl(self));
#
# Overridden methods.
#
def showUsage(self):
rc = vbox.TestDriver.showUsage(self);
reporter.log('');
reporter.log('tdAddBasic1 Options:');
reporter.log(' --tests <s1[:s2[:]]>');
reporter.log(' Default: %s (all)' % (':'.join(self.asTestsDef)));
reporter.log(' --quick');
reporter.log(' Same as --virt-modes hwvirt --cpu-counts 1.');
return rc;
def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
if asArgs[iArg] == '--tests':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of tests');
self.asTests = asArgs[iArg].split(':');
for s in self.asTests:
if s not in self.asTestsDef:
raise base.InvalidOption('The "--tests" value "%s" is not valid; valid values are: %s' \
% (s, ' '.join(self.asTestsDef)));
elif asArgs[iArg] == '--quick':
self.parseOption(['--virt-modes', 'hwvirt'], 0);
self.parseOption(['--cpu-counts', '1'], 0);
else:
return vbox.TestDriver.parseOption(self, asArgs, iArg);
return iArg + 1;
def actionConfig(self):
if not self.importVBoxApi(): # So we can use the constant below.
return False;
eNic0AttachType = vboxcon.NetworkAttachmentType_NAT;
sGaIso = self.getGuestAdditionsIso();
return self.oTestVmSet.actionConfig(self, eNic0AttachType = eNic0AttachType, sDvdImage = sGaIso);
def actionExecute(self):
return self.oTestVmSet.actionExecute(self, self.testOneCfg);
#
# Test execution helpers.
#
def testOneCfg(self, oVM, oTestVm):
"""
Runs the specified VM thru the tests.
Returns a success indicator on the general test execution. This is not
the actual test result.
"""
fRc = False;
self.logVmInfo(oVM);
oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(oTestVm.sVmName, fCdWait = True, \
sFileCdWait = 'AUTORUN.INF');
if oSession is not None:
self.addTask(oSession);
# Do the testing.
reporter.testStart('Install');
fRc, oTxsSession = self.testInstallAdditions(oSession, oTxsSession, oTestVm);
reporter.testDone();
fSkip = not fRc;
reporter.testStart('Guest Properties');<|fim▁hole|> reporter.testStart('Guest Control');
if not fSkip:
(fRc2, oTxsSession) = self.aoSubTstDrvs[0].testIt(oTestVm, oSession, oTxsSession);
fRc = fRc2 and fRc;
reporter.testDone(fSkip);
## @todo Save an restore test.
## @todo Reset tests.
## @todo Final test: Uninstallation.
# Cleanup.
self.removeTask(oTxsSession);
#self.terminateVmBySession(oSession)
return fRc;
def testInstallAdditions(self, oSession, oTxsSession, oTestVm):
"""
Tests installing the guest additions
"""
if oTestVm.isWindows():
fRc = self.testWindowsInstallAdditions(oSession, oTxsSession, oTestVm);
else:
reporter.error('Guest Additions installation not implemented for %s yet! (%s)' % \
(oTestVm.sKind, oTestVm.sVmName));
fRc = False;
#
# Verify installation of Guest Additions using commmon bits.
#
if fRc is True:
#
# Wait for the GAs to come up.
#
## @todo need to signed up for a OnAdditionsStateChanged and wait runlevel to
# at least reach Userland.
#
# Check if the additions are operational.
#
try: oGuest = oSession.o.console.guest;
except:
reporter.errorXcpt('Getting IGuest failed.');
return (False, oTxsSession);
# Check the additionsVersion attribute. It must not be empty.
reporter.testStart('IGuest::additionsVersion');
fRc = self.testIGuest_additionsVersion(oGuest);
reporter.testDone();
reporter.testStart('IGuest::additionsRunLevel');
self.testIGuest_additionsRunLevel(oGuest, oTestVm);
reporter.testDone();
## @todo test IAdditionsFacilities.
return (fRc, oTxsSession);
def testWindowsInstallAdditions(self, oSession, oTxsSession, oTestVm):
"""
Installs the Windows guest additions using the test execution service.
Since this involves rebooting the guest, we will have to create a new TXS session.
"""
asLogFile = [];
# Delete relevant log files.
if oTestVm.sKind in ('WindowsNT4',):
sWinDir = 'C:/WinNT/';
else:
sWinDir = 'C:/Windows/';
asLogFile = [sWinDir+'setupapi.log', sWinDir+'setupact.log', sWinDir+'setuperr.log'];
for sFile in asLogFile:
self.txsRmFile(oSession, oTxsSession, sFile);
# Install the public signing key.
if oTestVm.sKind not in ('WindowsNT4', 'Windows2000', 'WindowsXP', 'Windows2003'):
## TODO
pass;
#
# The actual install.
# Enable installing the optional auto-logon modules (VBoxGINA/VBoxCredProv) + (Direct)3D support.
# Also tell the installer to produce the appropriate log files.
#
fRc = self.txsRunTest(oTxsSession, 'VBoxWindowsAdditions.exe', 5 * 60 * 1000, \
'${CDROM}/VBoxWindowsAdditions.exe', ('${CDROM}/VBoxWindowsAdditions.exe', '/S', '/l', '/with_autologon'));
# For testing the installation (D)3D stuff ('/with_d3d') we need to boot up in safe mode.
#
# Reboot the VM and reconnect the TXS session.
#
if fRc is True:
(fRc, oTxsSession) = self.txsRebootAndReconnectViaTcp(oSession, oTxsSession, cMsTimeout = 3 * 60000);
# Add the Windows Guest Additions installer files to the files we want to download
# from the guest.
sGuestAddsDir = 'C:/Program Files/Oracle/VirtualBox Guest Additions/';
asLogFile.append(sGuestAddsDir + 'install.log');
# Note: There won't be a install_ui.log because of the silent installation.
asLogFile.append(sGuestAddsDir + 'install_drivers.log');
asLogFile.append('C:/Windows/setupapi.log');
asLogFile.append('C:/Windows/setupapi.dev.log');
#
# Download log files.
# Ignore errors as all files above might not be present (or in different locations)
# on different Windows guests.
#
self.txsDownloadFiles(oSession, oTxsSession, asLogFile, fIgnoreErrors = True);
return (fRc, oTxsSession);
def testIGuest_additionsVersion(self, oGuest):
"""
Returns False if no version string could be obtained, otherwise True
even though errors are logged.
"""
try:
sVer = oGuest.additionsVersion;
except:
reporter.errorXcpt('Getting the additions version failed.');
return False;
reporter.log('IGuest::additionsVersion="%s"' % (sVer,));
if sVer.strip() == '':
reporter.error('IGuest::additionsVersion is empty.');
return False;
if sVer != sVer.strip():
reporter.error('IGuest::additionsVersion is contains spaces: "%s".' % (sVer,));
asBits = sVer.split('.');
if len(asBits) < 3:
reporter.error('IGuest::additionsVersion does not contain at least tree dot separated fields: "%s" (%d).'
% (sVer, len(asBits)));
## @todo verify the format.
return True;
    def testIGuest_additionsRunLevel(self, oGuest, oTestVm):
        """
        Do run level tests.

        Compares IGuest::additionsRunLevel against the level expected for the
        test VM (Desktop for VMs logged onto a desktop, Userland otherwise).
        Mismatches are recorded via reporter.error() but the method still
        returns True; False is only returned when the property read fails.
        """
        # Pick the expected run level based on the VM's login configuration.
        if oTestVm.isLoggedOntoDesktop():
            eExpectedRunLevel = vboxcon.AdditionsRunLevelType_Desktop;
        else:
            eExpectedRunLevel = vboxcon.AdditionsRunLevelType_Userland;

        ## @todo Insert wait for the desired run level.
        try:
            iLevel = oGuest.additionsRunLevel;
        except:
            reporter.errorXcpt('Getting the additions run level failed.');
            return False;
        reporter.log('IGuest::additionsRunLevel=%s' % (iLevel,));

        if iLevel != eExpectedRunLevel:
            # reporter.error() records the failure in the test results; we
            # intentionally keep going and return True (see docstring).
            reporter.error('Expected runlevel %d, found %d instead' % (eExpectedRunLevel, iLevel));
        return True;
def testGuestProperties(self, oSession, oTxsSession, oTestVm):
"""
Test guest properties.
"""
_ = oSession; _ = oTxsSession; _ = oTestVm;
return True;
if __name__ == '__main__':
sys.exit(tdAddBasic1().main(sys.argv));<|fim▁end|> | if not fSkip:
fRc = self.testGuestProperties(oSession, oTxsSession, oTestVm) and fRc;
reporter.testDone(fSkip);
|
<|file_name|>CS_Cholinc.cpp<|end_file_name|><|fim▁begin|>// CS_Cholinc.cpp
//
// 2007/10/16
//---------------------------------------------------------
#include "NDGLib_headers.h"
#include "CS_Type.h"
#define TRACE_CHOL 0
///////////////////////////////////////////////////////////
//
// Spa : buffer for storing sparse column info
//
///////////////////////////////////////////////////////////
//---------------------------------------------------------
class Spa
//---------------------------------------------------------
{
public:
Spa(int n) : length(0), m_status(0) { resize(n); }<|fim▁hole|> void scale_add(int j, CSd& A, int k, double alpha);
public:
int length, m_status;
IVec indices, bitmap;
DVec values;
};
//---------------------------------------------------------
bool Spa::resize(int n)
//---------------------------------------------------------
{
  // (Re)allocate the sparse-column accumulator for an (n,n) system.
  // Returns false (and sets m_status = -1) on allocation failure.
  length = 0;
  indices.resize(n);
  bitmap.resize(n);
  values.resize(n);

  if (!indices.ok() || !bitmap.ok() || !values.ok()) {
    m_status = -1;
    return false;
  }

  bitmap.fill(-1);    // initialize bitmap: -1 means "slot unused"
  m_status = 0;
  return true;
}
//---------------------------------------------------------
void Spa::set(CSd& A, int j)
//---------------------------------------------------------
{
  // Load column A(:,j) into the accumulator: dense values/bitmap keyed
  // by row index, plus a compact list of the occupied rows in indices[].
  assert(j < A.n);

  int count = 0;
  for (int p = A.P[j]; p < A.P[j+1]; ++p)
  {
    int    row = A.I[p];
    double Aij = A.X[p];
    assert( row >= j );      // A must be lower triangular

    indices[count] = row;
    values [row  ] = Aij;
    bitmap [row  ] = j;      // mark row as present for column j
    ++count;
  }
  length = count;
}
//---------------------------------------------------------
void Spa::scale_add(int j, CSd& A, int k, double alpha)
//---------------------------------------------------------
{
  // Accumulate  spa(:,j) += alpha * A(j:end, k), recording fill-in.
  // Entries above the diagonal of column j are skipped; new rows are
  // appended to indices[] and flagged in bitmap[].
  assert(k < A.n);

#if (TRACE_CHOL>=5)
  umMSG(1, "spa::scale_add: updating column %d with column %d\n",j,k);
  umMSG(1, "spa::scale_add: colptr %d to %d-1\n",A.P[k],A.P[k+1]);
#endif

  // NOTE: removed the unused local 'next' and the outer i/ip/Aik
  // declarations that were shadowed by the loop-scoped variables.
  for (int ip = A.P[k]; ip < A.P[k+1]; ++ip)
  {
    int i = A.I[ip];
    if (i < j) continue;            // only rows at/below the diagonal
    double Aik = A.X[ip];
    if ((this->bitmap)[i] < j)
    {
      // Entry (i,j) not yet present in the accumulator: fill-in.
#if (TRACE_CHOL>=3)
      umMSG(1, "fill in (%d,%d)\n",i,j);
#endif
      bitmap [ i    ] = j;
      values [ i    ] = 0.0;
      indices[length] = i;
      length++;
    }
    values[i] += alpha*Aik;
#if (TRACE_CHOL>=5)
    umMSG(1, "spa::scale_add: A(%d,%d) -= %lg * %lg ==> %lg\n", i,j, alpha, Aik, values[i]);
#endif
  }
}
///////////////////////////////////////////////////////////
//
// RowList : linked lists for mapping row dependencies
//
///////////////////////////////////////////////////////////
//---------------------------------------------------------
class RowList
//---------------------------------------------------------
{
  // Pool-backed singly linked lists, one per matrix row. rowlist[i] heads
  // a chain (threaded through next[]) of (column index, value) nodes for
  // row i. The factorization uses this to find all columns k with a
  // nonzero in row j when updating column j.
public:
  RowList(int n);
  ~RowList() {}

  // Allocate and initialize the node pool; returns 0 on success, -1 on
  // allocation failure (also recorded in m_status).
  int create(int n);
  // Prepend entry (column j, value v) to row i's list, growing the pool
  // if the free list is exhausted; returns -1 on allocation failure.
  int add(int i, int j, double v);
  bool ok() const { return (m_status != 0) ? false : true; }

  int    getfirst (int rl) { return rowlist [ rl ]; }  // head node of row rl (-1 if empty)
  int    getnext  (int rl) { return next   [ rl ]; }   // follow the chain (-1 terminates)
  int    getcolind(int rl) { return colind[ rl ]; }    // column index stored at node rl
  double getvalue (int rl) { return values[ rl ]; }    // value stored at node rl

protected:
  IVec rowlist, next, colind;                 // list heads, chain links, column indices
  DVec values;                                // node values, parallel to colind
  int  rowlist_size, freelist, next_expansion; // pool size, free-node head, growth increment
  int  m_status;                              // 0 = ok, -1 = allocation failure
};
//---------------------------------------------------------
RowList::RowList(int n)
//---------------------------------------------------------
  : rowlist_size(0), freelist(0), next_expansion(0), m_status(0)
{
  // allocate initial rowlist structure; create() reports failure through
  // m_status rather than throwing, so callers must check ok() afterwards.
  m_status = create(n);
}
//---------------------------------------------------------
int RowList::create(int n)
//---------------------------------------------------------
{
  // Allocate list heads for n rows plus an initial pool of 1000 nodes,
  // and thread all pool slots onto the free list.
  freelist       = 0;
  rowlist_size   = 1000;
  next_expansion = 1000;

  rowlist.resize(n);              // system is (n,n)
  next.resize  (rowlist_size);    // rowlist_size will grow on demand
  colind.resize(rowlist_size);
  values.resize(rowlist_size);

  if (!rowlist.ok() || !next.ok() || !colind.ok() || !values.ok()) {
    m_status = -1;
    return -1;
  }

  rowlist.fill(-1);               // -1 indicates: no list for row[i]
  for (int i=0; i+1<rowlist_size; ++i) {
    next[i] = i+1;                // chain slot i onto slot i+1
  }
  next[rowlist_size-1] = -1;      // terminate the free list
  return 0;
}
//---------------------------------------------------------
int RowList::add(int i, int j, double v)
//---------------------------------------------------------
{
  // Prepend node (j, v) to row i's list. Grows the pool by ~25% (at least
  // next_expansion slots) when the free list is exhausted.
  // Returns 0 on success, -1 on allocation failure.
  if ( -1 == freelist )
  {
    // Expand storage for row info. NOTE: removed the unused local 'ii'
    // that was shadowed by the loop variable below.
    int inc = next_expansion;
    next_expansion = (int) floor(1.25 * (double) next_expansion);
    int nlen = rowlist_size+inc;

    next.realloc(nlen);   if (!next.ok())   { return -1; }
    colind.realloc(nlen); if (!colind.ok()) { return -1; }
    values.realloc(nlen); if (!values.ok()) { return -1; }

    // Thread the new slots onto the (empty) free list.
    freelist = rowlist_size;
    for (int ii=rowlist_size; ii<nlen-1; ++ii) {
      next[ii] = ii+1;      // initialize new entries
    }
    next[ nlen-1 ] = -1;    // set end marker
    rowlist_size = nlen;    // update current size
  }

  // Pop a slot from the free list and push (j, v) onto row i's chain.
  int rl   = freelist;
  freelist = next[ freelist ];

  next  [ rl ] = rowlist[ i ];
  colind[ rl ] = j;
  values[ rl ] = v;
  rowlist[ i ] = rl;
  return 0;
}
///////////////////////////////////////////////////////////
//
// Incomplete Cholesky factorization
//
// This is a left-looking column-column code using
// row lists. Performs a drop-tolerance incomplete
// factorization with or without diagonal modification
// to maintain rowsums.
//
///////////////////////////////////////////////////////////
// FIXME: (2007/09/21) "modified" option not yet working
// based on taucs_dccs_factor_llt
//---------------------------------------------------------
CSd& CS_Cholinc
(
  CSd&   A,        // [in] symmetric matrix; only tril(A) must be stored
  double droptol,  // [in] relative drop tolerance (vs. column 2-norm)
  int    modified  // [in] non-zero => adjust diagonal for dropped mass (disabled)
)
//---------------------------------------------------------
{
  // Left-looking drop-tolerance incomplete Cholesky: returns a new
  // heap-allocated lower-triangular factor L (caller owns it; errors are
  // reported via umWARNING and an early return of the partial L).
  if (modified) {
    umWARNING("CS_Cholinc", "\"modified\" option not yet working");
    modified = 0;
  }

  CSd *pL = new CSd("L", OBJ_temp); CSd& L = *pL;

  // Input must be symmetric with only the lower triangle represented.
  if (! (A.get_shape() & sp_SYMMETRIC)) { umWARNING("CS_Cholinc", "matrix must be symmetric"); return L; }
  if (! (A.get_shape() & sp_LOWER )) { umWARNING("CS_Cholinc", "tril(A) must be represented\n"); return L; }

  int n = A.num_cols();
  umMSG(1, " ==> CS_Cholinc: n=%d droptol=%0.1e modified? %d\n", n, droptol, modified);

  // Avoid frequent L.realloc() with large inital alloc.
  // Heuristic: smaller droptol => more fill-in => larger initial estimate.
  // TODO: tune initial allocation for incomplete factor:
  int Lnnz = A.size();
  if (droptol>=9.9e-3) { Lnnz = 1*Lnnz; } // L.nnz = 1.0*A.nnz
  else if (droptol>=9.9e-4) { Lnnz = (3*Lnnz)/2; } // L.nnz = 1.5*A.nnz
  else if (droptol>=9.9e-5) { Lnnz = (9*Lnnz)/5; } // L.nnz = 1.8*A.nnz
  else if (droptol>=9.9e-6) { Lnnz = 2*Lnnz; } // L.nnz = 2.0*A.nnz
  else { Lnnz = (5*Lnnz)/2; } // L.nnz = 2.5*A.nnz

  int init_Lnnz = Lnnz;
  L.resize(n,n,Lnnz, 1, 0);
  if (!L.ok()) { return L; }

  // factor is lower triangular
  L.set_shape(sp_TRIANGULAR | sp_LOWER);

  int next=0, Aj_nnz, i,j,k,ip; double Lkj,pivot,v,norm;
  double flops = 0.0, Lj_nnz=0.0;

  Spa spa(n);          // allocate buffer for sparse columns
  RowList rowlist(n);  // allocate initial rowlist structure
  DVec dropped(n);     // allocate buffer for dropped values
                       // NOTE(review): assumes DVec zero-initializes -- confirm.
  if (!spa.ok() || !rowlist.ok() || !dropped.ok()) {
    umWARNING("CS_Cholinc", "out of memory");
    return L;
  }

  umLOG(1, " ==> CS_Cholinc: (n=%d) ", n);
  for (j=0; j<n; ++j)
  {
    if (! (j%2000)) {umLOG(1, ".");}  // progress dots every 2000 columns

    spa.set(A,j); // load colum j into the accumulation buffer

    // Left-looking update: subtract L(j,k)*L(:,k) for every prior column k
    // that has a nonzero in row j (found via the row lists).
    for (int rl=rowlist.getfirst(j); rl != -1; rl=rowlist.getnext(rl)) {
      k = rowlist.getcolind(rl);
      Lkj = rowlist.getvalue(rl);
      spa.scale_add(j,L,k, -(Lkj) ); // L_*j -= L_kj * L_*k
    }

    //-----------------------------------
    // insert the j'th column of L
    //-----------------------------------
    if ( next+(spa.length) > Lnnz )
    {
      // Grow L's storage by ~25% (at least 8192 or one full column).
      int inc = std::max((int)floor(1.25*(double)Lnnz), std::max(8192, spa.length));
      Lnnz += inc;
      if (!L.realloc(Lnnz)) {
        return L;
      }
    }

    L.P[j] = next;

    // 2-norm of the accumulated column; used as droptol scaling.
    norm = 0.0;
    for (ip=0; ip < spa.length; ++ip) {
      i = (spa.indices)[ip];
      v = (spa.values)[i];
      norm += v*v;
    }
    norm = sqrt(norm);

    Aj_nnz = A.P[j+1] - A.P[j];

    // Tally mass that WILL be dropped (for the "modified" rowsum option).
    for (ip=0; ip < spa.length; ++ip) {
      i = (spa.indices)[ip];
      v = (spa.values )[i ];
      //###################################################
      // FIXME (a): test if L(i,j) is in pattern of A
      //###################################################
      //if (i==j || fabs(v) > droptol * norm)
      if (i==j || fabs(v) > droptol * norm || ip < Aj_nnz)
      {
        // nothing
      }
      else {
        dropped[i] -= v;
        dropped[j] -= v;
      }
    }

    // Pivot = sqrt of the (possibly rowsum-corrected) diagonal entry.
    if (modified) {
      pivot = sqrt((spa.values)[j] - dropped[j]);
    } else {
      pivot = sqrt((spa.values)[j]);
    }
#if (TRACE_CHOL>=2)
    umMSG(1, "pivot=%.4e, sqrt=%.4e\n", (spa.values)[j], pivot);
#endif
    // Diagnostics only: factorization continues even with a zero/small
    // pivot (the division below would then produce inf/nan values).
    if (0.0 == pivot) {
      umLOG(1, " ==> CS_Cholinc: zero pivot in column %d\n",j);
      umLOG(1, " ==> CS_Cholinc: Ajj in spa = %lg dropped[j] = %lg Aj_nnz=%d\n", (spa.values)[j],dropped[j],Aj_nnz);
    } else if (fabs(pivot) < 1e-12) {
      umLOG(1, " ==> CS_Cholinc: small pivot in column %d (%le)\n",j,pivot);
    }

    //-----------------------------------------------------
    // 1st pass: find the diagonal entry for column j then
    // store entry L(j,j) first in each compressed column:
    //-----------------------------------------------------
    for (ip=0; ip < spa.length; ++ip)
    {
      i = (spa.indices)[ip];
      v = (spa.values )[i ];
      if (i==j)
      {
        // must include diagonal entry in the droptol factor
        if (modified) v = (spa.values)[j] - dropped[j];
        v /= pivot;
        L.I[next] = i;
        L.X[next] = v;
        next++;
        if (rowlist.add(i,j,v) == -1) {
          return L;
        }
        break;
      }
    }

    //-----------------------------------------------------
    // 2nd pass: build column L(:,j) applying droptol
    // criteria to manage fill-in below the diagonal
    //-----------------------------------------------------
    for (ip=0; ip < spa.length; ++ip)
    {
      i = (spa.indices)[ip];
      v = (spa.values )[i ];
      if (i==j) continue; // diagonal was set above
      //###################################################
      // FIXME (b): test if L(i,j) is in pattern of A
      //###################################################
      //if (modified && i==j) v = (spa.values)[j] - dropped[j];
      if (i==j || fabs(v) > droptol*norm || ip < Aj_nnz)
      {
        // include this entry in the droptol factor
        v /= pivot;
        L.I[next] = i;
        L.X[next] = v;
        next++;
        if (rowlist.add(i,j,v) == -1) {
          return L;
        }
      }
    }

    L.P[j+1] = next;                    // set column count
    Lj_nnz = (double)(L.P[j+1]-L.P[j]); // accumulate flop count
    flops += 2.0 * Lj_nnz * Lj_nnz;
  }

  L.P[n] = next; // finalize column counts
  umLOG(1, "\n");
  //umMSG(1, " ==> CS_Cholinc: nnz(L) = %d (init: %d), flops=%.1le\n", L.P[n],init_Lnnz,flops);
  umMSG(1, " ==> CS_Cholinc: nnz(L) = %d (init: %d)\n", L.P[n],init_Lnnz);

  // resize allocation
  L.realloc(0);
  return L;
}
bool ok() const { return (m_status != 0) ? false : true; }
bool resize(int n);
void set(CSd& A, int j); |
<|file_name|>config.go<|end_file_name|><|fim▁begin|>// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"os"
"path/filepath"
"regexp"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
)
// Config is the top-level packager configuration, deserialized from the
// TOML config file by ReadConfig (which also applies defaults/validation).
type Config struct {
	LocalOnly bool
	Port      int    // server port; defaults to 8080 when unset (see ReadConfig)
	CertFile  string // This must be the full certificate chain.
	KeyFile   string // Just for the first cert, obviously.
	CSRFile   string // Certificate Signing Request.
	// When set, both CertFile and NewCertFile will be read/write. CertFile and
	// NewCertFile will be set when both are valid and that once CertFile becomes
	// invalid, NewCertFile will replace it (CertFile = NewCertFile) and NewCertFile
	// will be set to empty. This will also apply to disk copies as well (which
	// we may require to be some sort of shared filesystem, if multiple replicas of
	// ammpackager are running).
	NewCertFile string // The new full certificate chain replacing the expired one.

	OCSPCache               string // path to the OCSP cache file; parent dir must exist (required)
	ForwardedRequestHeaders []string
	URLSet                  []URLSet // at least one entry is required (see ReadConfig)
	ACMEConfig              *ACMEConfig
}
// URLSet pairs an optional Fetch pattern with a required Sign pattern; see
// ValidateFetchURLPattern and ValidateSignURLPattern for the constraints.
type URLSet struct {
	Fetch *URLPattern
	Sign  *URLPattern
}

// URLPattern restricts which URLs may be fetched and/or signed. Several
// fields are only legal for one of the two roles (enforced by the
// Validate*URLPattern functions).
type URLPattern struct {
	Scheme                 []string // Fetch only; defaults to http+https
	DomainRE               string   // Fetch only; regexp alternative to Domain
	Domain                 string   // exact domain; required for Sign
	PathRE                 *string  // defaults to ".*" (ValidateURLPattern)
	PathExcludeRE          []string // each must be a valid regexp
	QueryRE                *string  // defaults to "" (ValidateURLPattern)
	ErrorOnStatefulHeaders bool     // Sign only (rejected in Fetch patterns)
	MaxLength              int      // defaults to 2000 (ValidateURLPattern)
	SamePath               *bool    // Fetch only; defaults to true
}
// ACMEConfig holds ACME (automatic certificate renewal) settings for the
// production and development environments.
type ACMEConfig struct {
	Production  *ACMEServerConfig
	Development *ACMEServerConfig
}

// ACMEServerConfig configures one ACME CA endpoint.
// NOTE: the AccountURL/DiscoURL comments were previously swapped; fixed.
type ACMEServerConfig struct {
	// ACME Account URL. If non-empty, we will auto-renew cert via ACME.
	AccountURL string
	// ACME Directory Resource URL.
	DiscoURL string
	// Email address registered with ACME CA.
	EmailAddress string
	// Key Identifier from ACME CA. Used for External Account Binding.
	EABKid string
	// MAC Key from ACME CA. Used for External Account Binding. Should be in
	// Base64 URL Encoding without padding format.
	EABHmac string
	// See: https://letsencrypt.org/docs/challenge-types/
	// For non-wildcard domains, only one of HttpChallengePort, HttpWebRootDir or
	// TlsChallengePort needs to be present.
	// HttpChallengePort means AmpPackager will respond to HTTP challenges via this port.
	// HttpWebRootDir means AmpPackager will deposit challenge token in this directory.
	// TlsChallengePort means AmpPackager will respond to TLS challenges via this port.
	// For wildcard domains, DnsProvider must be set to one of the support LEGO configs:
	// https://go-acme.github.io/lego/dns/
	HttpChallengePort int    // ACME HTTP challenge port.
	HttpWebRootDir    string // ACME HTTP web root directory where challenge token will be deposited.
	TlsChallengePort  int    // ACME TLS challenge port.
	DnsProvider       string // ACME DNS Provider used for challenge.
}
// TODO(twifkak): Extract default values into a function separate from the one
// that does the parsing and validation. This would make signer_test and
// validation_test less brittle.

var emptyRegexp = ""
var defaultPathRegexp = ".*"

// ValidateURLPattern validates the regexp fields of pattern and fills in
// defaults: PathRE -> ".*", QueryRE -> "", MaxLength -> 2000. It is shared
// by both the Sign and Fetch validators.
func ValidateURLPattern(pattern *URLPattern) error {
	if pattern.PathRE == nil {
		pattern.PathRE = &defaultPathRegexp
	} else if _, err := regexp.Compile(*pattern.PathRE); err != nil {
		// Wrap the compile error so the user can see what is wrong with
		// the pattern (previously the cause was silently discarded).
		return errors.Wrap(err, "PathRE must be a valid regexp")
	}
	for _, exclude := range pattern.PathExcludeRE {
		if _, err := regexp.Compile(exclude); err != nil {
			return errors.Wrapf(err, "PathExcludeRE contains invalid regexp %q", exclude)
		}
	}
	if pattern.QueryRE == nil {
		pattern.QueryRE = &emptyRegexp
	} else if _, err := regexp.Compile(*pattern.QueryRE); err != nil {
		return errors.Wrap(err, "QueryRE must be a valid regexp")
	}
	if pattern.MaxLength == 0 {
		pattern.MaxLength = 2000
	}
	return nil
}
// ValidateSignURLPattern checks that pattern is a legal Sign pattern:
// Domain is required; Scheme, DomainRE and SamePath are not allowed.
// Delegates the shared regexp/default handling to ValidateURLPattern.
func ValidateSignURLPattern(pattern *URLPattern) error {
	if pattern == nil {
		return errors.New("This section must be specified")
	}
	// Same checks (and order) as before, expressed as a single switch.
	switch {
	case pattern.Scheme != nil:
		return errors.New("Scheme not allowed here")
	case pattern.Domain == "":
		return errors.New("Domain must be specified")
	case pattern.DomainRE != "":
		return errors.New("DomainRE not allowed here")
	case pattern.SamePath != nil:
		return errors.New("SamePath not allowed here")
	}
	return ValidateURLPattern(pattern)
}
var allowedFetchSchemes = map[string]bool{"http": true, "https": true}
func ValidateFetchURLPattern(pattern *URLPattern) error {
if pattern == nil {
return nil
}
if len(pattern.Scheme) == 0 {
// Default Scheme to the list of keys in allowedFetchSchemes.
pattern.Scheme = make([]string, len(allowedFetchSchemes))
i := 0
for scheme := range allowedFetchSchemes {
pattern.Scheme[i] = scheme
i++
}
} else {
for _, scheme := range pattern.Scheme {
if !allowedFetchSchemes[scheme] {
return errors.Errorf("Scheme contains invalid value %q", scheme)
}
}
}
if pattern.Domain == "" && pattern.DomainRE == "" {
return errors.New("Domain or DomainRE must be specified")
}
if pattern.Domain != "" && pattern.DomainRE != "" {
return errors.New("Only one of Domain or DomainRE should be specified")
}
if pattern.SamePath == nil {
// Default SamePath to true.
pattern.SamePath = new(bool)
*pattern.SamePath = true
}
if pattern.ErrorOnStatefulHeaders {
return errors.New("ErrorOnStatefulHeaders not allowed here")
}
if err := ValidateURLPattern(pattern); err != nil {
return err
}
return nil
}<|fim▁hole|> return errors.Errorf("ForwardedRequestHeaders must not %s", msg)
}
}
return nil
}
// ReadConfig reads the config file specified at --config and validates it.
// It parses configBytes as TOML, applies defaults (Port=8080, plus the
// per-URLPattern defaults) and returns an error for any missing required
// field. Note it also touches the filesystem: the OCSPCache parent
// directory must already exist.
func ReadConfig(configBytes []byte) (*Config, error) {
	tree, err := toml.LoadBytes(configBytes)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse TOML")
	}
	config := Config{}
	if err = tree.Unmarshal(&config); err != nil {
		return nil, errors.Wrapf(err, "failed to unmarshal TOML")
	}
	// TODO(twifkak): Return an error if the TOML includes any fields that aren't part of the Config struct.

	// Defaults and required-field checks.
	if config.Port == 0 {
		config.Port = 8080
	}
	if config.CertFile == "" {
		return nil, errors.New("must specify CertFile")
	}
	if config.KeyFile == "" {
		return nil, errors.New("must specify KeyFile")
	}
	if config.OCSPCache == "" {
		return nil, errors.New("must specify OCSPCache")
	}
	if len(config.ForwardedRequestHeaders) > 0 {
		if err := ValidateForwardedRequestHeaders(config.ForwardedRequestHeaders); err != nil {
			return nil, err
		}
	}
	// The cache file itself may not exist yet, but its directory must.
	ocspDir := filepath.Dir(config.OCSPCache)
	if stat, err := os.Stat(ocspDir); os.IsNotExist(err) || !stat.Mode().IsDir() {
		return nil, errors.Errorf("OCSPCache parent directory must exist: %s", ocspDir)
	}
	// TODO(twifkak): Verify OCSPCache is writable by the current user.
	if len(config.URLSet) == 0 {
		return nil, errors.New("must specify one or more [[URLSet]]")
	}
	for i := range config.URLSet {
		if config.URLSet[i].Fetch != nil {
			if err := ValidateFetchURLPattern(config.URLSet[i].Fetch); err != nil {
				return nil, errors.Wrapf(err, "parsing URLSet.%d.Fetch", i)
			}
		}
		if err := ValidateSignURLPattern(config.URLSet[i].Sign); err != nil {
			return nil, errors.Wrapf(err, "parsing URLSet.%d.Sign", i)
		}
	}
	return &config, nil
}
func ValidateForwardedRequestHeaders(hs []string) error {
for _, h := range hs {
if msg := haveInvalidForwardedRequestHeader(h); msg != "" { |
<|file_name|>scheduler.py<|end_file_name|><|fim▁begin|># Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import logging
import Queue
from threading import Thread, Lock
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooMachineError, CuckooGuestError
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.objects import File
from lib.cuckoo.common.utils import create_folder
from lib.cuckoo.core.database import Database, TASK_COMPLETED, TASK_REPORTED
from lib.cuckoo.core.guest import GuestManager
from lib.cuckoo.core.plugins import list_plugins, RunAuxiliary, RunProcessing
from lib.cuckoo.core.plugins import RunSignatures, RunReporting
from lib.cuckoo.core.resultserver import ResultServer
log = logging.getLogger(__name__)

machinery = None              # machine-manager plugin instance, set by Scheduler.initialize()
machine_lock = Lock()         # serializes machine acquisition across AnalysisManager threads
latest_symlink_lock = Lock()  # guards swapping the storage/analyses/latest symlink
active_analysis_count = 0     # number of AnalysisManager threads currently running
class CuckooDeadMachine(Exception):
"""Exception thrown when a machine turns dead.
<|fim▁hole|>
class AnalysisManager(Thread):
"""Analysis Manager.
This class handles the full analysis process for a given task. It takes
care of selecting the analysis machine, preparing the configuration and
interacting with the guest agent and analyzer components to launch and
complete the analysis and store, process and report its results.
"""
    def __init__(self, task, error_queue):
        """@param task: task object containing the details for the analysis.
        @param error_queue: queue used to report fatal errors to the scheduler.
        """
        Thread.__init__(self)
        # NOTE(review): this assigns the *class* attribute Thread.daemon,
        # not self.daemon -- confirm that is intended.
        Thread.daemon = True

        self.task = task
        self.errors = error_queue
        self.cfg = Config()
        self.storage = ""      # per-task results folder (set by init_storage)
        self.binary = ""       # path of the stored sample copy (set by store_file)
        self.machine = None    # acquired analysis machine (set by acquire_machine)
def init_storage(self):
"""Initialize analysis storage folder."""
self.storage = os.path.join(CUCKOO_ROOT,
"storage",
"analyses",
str(self.task.id))
# If the analysis storage folder already exists, we need to abort the
# analysis or previous results will be overwritten and lost.
if os.path.exists(self.storage):
log.error("Analysis results folder already exists at path \"%s\","
" analysis aborted", self.storage)
return False
# If we're not able to create the analysis storage folder, we have to
# abort the analysis.
try:
create_folder(folder=self.storage)
except CuckooOperationalError:
log.error("Unable to create analysis folder %s", self.storage)
return False
return True
    def check_file(self):
        """Checks the integrity of the file to be analyzed.

        Recomputes the target's SHA256 and compares it against the hash
        recorded in the database at submission time; returns False when the
        file was modified (or replaced) after submission.
        """
        sample = Database().view_sample(self.task.sample_id)

        sha256 = File(self.task.target).get_sha256()
        # A changed file would make results attributable to the wrong sample.
        if sha256 != sample.sha256:
            log.error("Target file has been modified after submission: \"%s\"", self.task.target)
            return False

        return True
    def store_file(self):
        """Store a copy of the file being analyzed.

        Copies the sample to storage/binaries/<sha256> (reusing any existing
        copy of the same sample) and links it into the analysis folder as
        "binary". Returns False only for fatal errors; a failed symlink/copy
        into the analysis folder is logged but not fatal.
        """
        if not os.path.exists(self.task.target):
            log.error("The file to analyze does not exist at path \"%s\", "
                      "analysis aborted", self.task.target)
            return False

        sha256 = File(self.task.target).get_sha256()
        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

        if os.path.exists(self.binary):
            # Same sample was submitted before: reuse the stored copy.
            log.info("File already exists at \"%s\"", self.binary)
        else:
            # TODO: do we really need to abort the analysis in case we are not
            # able to store a copy of the file?
            try:
                shutil.copy(self.task.target, self.binary)
            except (IOError, shutil.Error) as e:
                log.error("Unable to store file from \"%s\" to \"%s\", "
                          "analysis aborted", self.task.target, self.binary)
                return False

        try:
            # Link (or copy, on platforms without symlink support) the stored
            # binary into the analysis folder for convenience.
            new_binary_path = os.path.join(self.storage, "binary")

            if hasattr(os, "symlink"):
                os.symlink(self.binary, new_binary_path)
            else:
                shutil.copy(self.binary, new_binary_path)
        except (AttributeError, OSError) as e:
            log.error("Unable to create symlink/copy from \"%s\" to "
                      "\"%s\": %s", self.binary, self.storage, e)

        return True
    def acquire_machine(self):
        """Acquire an analysis machine from the pool of available ones.

        Blocks, polling once per second, until a machine matching the task's
        machine id / platform / tags is free. The result is stored in
        self.machine. May raise whatever machinery.acquire() raises
        (e.g. CuckooOperationalError, handled by launch_analysis).
        """
        machine = None

        # Start a loop to acquire the a machine to run the analysis on.
        while True:
            machine_lock.acquire()

            # In some cases it's possible that we enter this loop without
            # having any available machines. We should make sure this is not
            # such case, or the analysis task will fail completely.
            if not machinery.availables():
                machine_lock.release()
                time.sleep(1)
                continue

            # If the user specified a specific machine ID, a platform to be
            # used or machine tags acquire the machine accordingly.
            try:
                machine = machinery.acquire(machine_id=self.task.machine,
                                            platform=self.task.platform,
                                            tags=self.task.tags)
            finally:
                # Always release the lock, even if acquire() raised.
                machine_lock.release()

            # If no machine is available at this moment, wait for one second
            # and try again.
            if not machine:
                log.debug("Task #%d: no machine available yet", self.task.id)
                time.sleep(1)
            else:
                log.info("Task #%d: acquired machine %s (label=%s)",
                         self.task.id, machine.name, machine.label)
                break

        self.machine = machine
def build_options(self):
"""Generate analysis options.
@return: options dict.
"""
options = {}
options["id"] = self.task.id
options["ip"] = self.machine.resultserver_ip
options["port"] = self.machine.resultserver_port
options["category"] = self.task.category
options["target"] = self.task.target
options["package"] = self.task.package
options["options"] = self.task.options
options["enforce_timeout"] = self.task.enforce_timeout
options["clock"] = self.task.clock
options["terminate_processes"] = self.cfg.cuckoo.terminate_processes
if not self.task.timeout or self.task.timeout == 0:
options["timeout"] = self.cfg.timeouts.default
else:
options["timeout"] = self.task.timeout
if self.task.category == "file":
options["file_name"] = File(self.task.target).get_name()
options["file_type"] = File(self.task.target).get_type()
return options
    def launch_analysis(self):
        """Start analysis.

        Full lifecycle for one attempt: prepare storage and the sample copy,
        acquire a machine, start auxiliary modules and the guest, wait for
        completion, then tear everything down. Returns True on success,
        False on failure; raises CuckooDeadMachine when the machine died so
        the caller retries on another machine.
        """
        succeeded = False
        dead_machine = False

        log.info("Starting analysis of %s \"%s\" (task=%d)",
                 self.task.category.upper(), self.task.target, self.task.id)

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        # At this point we can tell the ResultServer about it.
        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)
            # NOTE(review): execution continues after this failure (the guest
            # is still started below, on a released machine) -- confirm this
            # is intended rather than a missing "return False".

        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        try:
            # Mark the selected analysis machine in the database as started.
            guest_log = Database().guest_start(self.task.id,
                                               self.machine.name,
                                               self.machine.label,
                                               machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)

            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform)
            # Start the analysis.
            guest.start_analysis(options)

            guest.wait_for_completion()
            succeeded = True
        except CuckooMachineError as e:
            # Machine-level failure: mark dead so we retry on another one.
            log.error(str(e), extra={"task_id": self.task.id})
            dead_machine = True
        except CuckooGuestError as e:
            # Guest-level failure: the machine itself is still usable.
            log.error(str(e), extra={"task_id": self.task.id})
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = os.path.join(self.storage, "memory.dmp")
                    machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")
                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            Database().guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                Database().guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)
            except CuckooMachineError as e:
                log.error("Unable to release machine %s, reason %s. "
                          "You might need to restore it manually.",
                          self.machine.label, e)

        return succeeded
    def process_results(self):
        """Process the analysis results and generate the enabled reports.

        Runs the processing -> signatures -> reporting pipeline, then
        optionally (per configuration) removes the submitted file and/or the
        stored binary copy. Always returns True; cleanup failures are only
        logged.
        """
        results = RunProcessing(task_id=self.task.id).run()
        RunSignatures(results=results).run()
        RunReporting(task_id=self.task.id, results=results).run()

        # If the target is a file and the user enabled the option,
        # delete the original copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_original:
            if not os.path.exists(self.task.target):
                log.warning("Original file does not exist anymore: \"%s\": "
                            "File not found.", self.task.target)
            else:
                try:
                    os.remove(self.task.target)
                except OSError as e:
                    log.error("Unable to delete original file at path "
                              "\"%s\": %s", self.task.target, e)

        # If the target is a file and the user enabled the delete copy of
        # the binary option, then delete the copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_bin_copy:
            if not os.path.exists(self.binary):
                log.warning("Copy of the original file does not exist anymore: \"%s\": File not found", self.binary)
            else:
                try:
                    os.remove(self.binary)
                except OSError as e:
                    log.error("Unable to delete the copy of the original file at path \"%s\": %s", self.binary, e)

        log.info("Task #%d: reports generation completed (path=%s)",
                 self.task.id, self.storage)

        return True
    def run(self):
        """Run manager thread.

        Drives one task end-to-end: retries launch_analysis() on dead
        machines, updates the task status in the database, optionally runs
        result processing, and refreshes the "latest" symlink.
        """
        global active_analysis_count
        # NOTE(review): this counter is updated from several threads without
        # a lock; += / -= are not atomic in CPython, so counts may
        # occasionally drift -- confirm that is acceptable here.
        active_analysis_count += 1
        try:
            while True:
                try:
                    success = self.launch_analysis()
                except CuckooDeadMachine:
                    # Machine died: retry the whole analysis on another one.
                    continue

                break

            Database().set_status(self.task.id, TASK_COMPLETED)

            log.debug("Released database task #%d with status %s",
                      self.task.id, success)

            if self.cfg.cuckoo.process_results:
                self.process_results()
                Database().set_status(self.task.id, TASK_REPORTED)

            # We make a symbolic link ("latest") which links to the latest
            # analysis - this is useful for debugging purposes. This is only
            # supported under systems that support symbolic links.
            if hasattr(os, "symlink"):
                latest = os.path.join(CUCKOO_ROOT, "storage",
                                      "analyses", "latest")

                # First we have to remove the existing symbolic link, then we
                # have to create the new one.
                # Deal with race conditions using a lock.
                latest_symlink_lock.acquire()
                try:
                    if os.path.exists(latest):
                        os.remove(latest)

                    os.symlink(self.storage, latest)
                except OSError as e:
                    log.warning("Error pointing latest analysis symlink: %s" % e)
                finally:
                    latest_symlink_lock.release()

            log.info("Task #%d: analysis procedure completed", self.task.id)
        except:
            # Never let an unexpected error kill the thread silently.
            log.exception("Failure in AnalysisManager.run")

        active_analysis_count -= 1
class Scheduler:
"""Tasks Scheduler.
This class is responsible for the main execution loop of the tool. It
prepares the analysis machines and keep waiting and loading for new
analysis tasks.
Whenever a new task is available, it launches AnalysisManager which will
take care of running the full analysis process and operating with the
assigned analysis machine.
"""
    def __init__(self, maxcount=None):
        """@param maxcount: optional limit on the total number of analyses;
        overrides cuckoo.conf's max_analysis_count (None = use config)."""
        self.running = True        # main-loop flag, cleared by stop()
        self.cfg = Config()
        self.db = Database()
        self.maxcount = maxcount
        self.total_analysis_count = 0
    def initialize(self):
        """Initialize the machine manager.

        Loads the configured machinery plugin into the module-level
        `machinery` global, feeds it its own config file and verifies that
        at least one machine is available. Raises CuckooCriticalError on any
        unrecoverable setup problem.
        """
        global machinery

        machinery_name = self.cfg.cuckoo.machinery

        log.info("Using \"%s\" machine manager", machinery_name)

        # Get registered class name. Only one machine manager is imported,
        # therefore there should be only one class in the list.
        plugin = list_plugins("machinery")[0]
        # Initialize the machine manager.
        machinery = plugin()

        # Find its configuration file.
        conf = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)

        if not os.path.exists(conf):
            raise CuckooCriticalError("The configuration file for machine "
                                      "manager \"{0}\" does not exist at path:"
                                      " {1}".format(machinery_name, conf))

        # Provide a dictionary with the configuration options to the
        # machine manager instance.
        machinery.set_options(Config(machinery_name))

        # Initialize the machine manager.
        try:
            machinery.initialize(machinery_name)
        except CuckooMachineError as e:
            raise CuckooCriticalError("Error initializing machines: %s" % e)

        # At this point all the available machines should have been identified
        # and added to the list. If none were found, Cuckoo needs to abort the
        # execution.
        if not len(machinery.machines()):
            raise CuckooCriticalError("No machines available.")
        else:
            log.info("Loaded %s machine/s", len(machinery.machines()))

        # NOTE(review): message below reads "a MySQL a PostgreSQL" -- it is
        # missing an "or"; fix the string in a behavior-affecting change.
        if len(machinery.machines()) > 1 and self.db.engine.name == "sqlite":
            log.warning("As you've configured Cuckoo to execute parallel "
                        "analyses, we recommend you to switch to a MySQL "
                        "a PostgreSQL database as SQLite might cause some "
                        "issues.")

        if len(machinery.machines()) > 4 and self.cfg.cuckoo.process_results:
            log.warning("When running many virtual machines it is recommended "
                        "to process the results in a separate process.py to "
                        "increase throughput and stability. Please read the "
                        "documentation about the `Processing Utility`.")
def stop(self):
    """Stop the scheduler loop and shut down the machinery.

    Clearing ``self.running`` makes the start() loop terminate on its
    next iteration.
    """
    self.running = False
    # Shutdown machine manager (used to kill machines that still alive).
    machinery.shutdown()
def start(self):
"""Start scheduler."""
self.initialize()
log.info("Waiting for analysis tasks.")
# Message queue with threads to transmit exceptions (used as IPC).
errors = Queue.Queue()
# Command-line overrides the configuration file.
if self.maxcount is None:
self.maxcount = self.cfg.cuckoo.max_analysis_count
# This loop runs forever.
while self.running:
time.sleep(1)
# If not enough free disk space is available, then we print an
# error message and wait another round (this check is ignored
# when the freespace configuration variable is set to zero).
if self.cfg.cuckoo.freespace:
# Resolve the full base path to the analysis folder, just in
# case somebody decides to make a symbolic link out of it.
dir_path = os.path.join(CUCKOO_ROOT, "storage", "analyses")
# TODO: Windows support
if hasattr(os, "statvfs"):
dir_stats = os.statvfs(dir_path)
# Calculate the free disk space in megabytes.
space_available = dir_stats.f_bavail * dir_stats.f_frsize
space_available /= 1024 * 1024
if space_available < self.cfg.cuckoo.freespace:
log.error("Not enough free disk space! (Only %d MB!)",
space_available)
continue
# Have we limited the number of concurrently executing machines?
if self.cfg.cuckoo.max_machines_count > 0:
# Are too many running?
if len(machinery.running()) >= self.cfg.cuckoo.max_machines_count:
continue
# If no machines are available, it's pointless to fetch for
# pending tasks. Loop over.
if not machinery.availables():
continue
# Exits if max_analysis_count is defined in the configuration
# file and has been reached.
if self.maxcount and self.total_analysis_count >= self.maxcount:
if active_analysis_count <= 0:
self.stop()
else:
# Fetch a pending analysis task.
#TODO: this fixes only submissions by --machine, need to add other attributes (tags etc.)
for machine in self.db.get_available_machines():
task = self.db.fetch(machine=machine.name)
if task:
log.debug("Processing task #%s", task.id)
self.total_analysis_count += 1
# Initialize and start the analysis manager.
analysis = AnalysisManager(task, errors)
analysis.start()
# Deal with errors.
try:
raise errors.get(block=False)
except Queue.Empty:
pass<|fim▁end|> | When this exception has been thrown, the analysis task will start again,
and will try to use another machine, when available.
"""
pass |
<|file_name|>grabbing.rs<|end_file_name|><|fim▁begin|>#[cfg(target_os = "android")]
#[macro_use]
extern crate android_glue;
extern crate glutin;
use glutin::{Event, ElementState};
mod support;
#[cfg(target_os = "android")]
android_start!(main);
<|fim▁hole|> window.set_title("glutin - Cursor grabbing test");
let _ = unsafe { window.make_current() };
let context = support::load(&window);
let mut grabbed = false;
for event in window.wait_events() {
match event {
Event::KeyboardInput(ElementState::Pressed, _, _) => {
if grabbed {
grabbed = false;
window.set_cursor_state(glutin::CursorState::Normal)
.ok().expect("could not ungrab mouse cursor");
} else {
grabbed = true;
window.set_cursor_state(glutin::CursorState::Grab)
.ok().expect("could not grab mouse cursor");
}
},
Event::Closed => break,
a @ Event::MouseMoved(_, _) => {
println!("{:?}", a);
},
_ => (),
}
context.draw_frame((0.0, 1.0, 0.0, 1.0));
let _ = window.swap_buffers();
}
}<|fim▁end|> | fn main() {
let window = glutin::WindowBuilder::new().build().unwrap(); |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Discussion XBlock
"""
import logging
import six
from six.moves import urllib
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from django.utils.translation import get_language_bidi, get_language
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Scope, String, UNIQUE_ID
from web_fragments.fragment import Fragment
from xblockutils.resources import ResourceLoader
from xblockutils.studio_editable import StudioEditableXBlockMixin
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.xblock_builtin import get_css_dependencies, get_js_dependencies
from xmodule.raw_module import RawDescriptor
from xmodule.xml_module import XmlParserMixin
log = logging.getLogger(__name__)
loader = ResourceLoader(__name__) # pylint: disable=invalid-name
def _(text):
"""
A noop underscore function that marks strings for extraction.
"""
return text
@XBlock.needs('user') # pylint: disable=abstract-method
@XBlock.needs('i18n')
class DiscussionXBlock(XBlock, StudioEditableXBlockMixin, XmlParserMixin):
"""
Provides a discussion forum that is inline with other content in the courseware.
"""
# Discussion blocks do not count towards course completion.
completion_mode = XBlockCompletionMode.EXCLUDED
# Stable identifier tying this block to a forum topic; auto-generated.
discussion_id = String(scope=Scope.settings, default=UNIQUE_ID)
display_name = String(
    display_name=_("Display Name"),
    help=_("The display name for this component."),
    default="Discussion",
    scope=Scope.settings
)
discussion_category = String(
    display_name=_("Category"),
    default=_("Week 1"),
    help=_(
        "A category name for the discussion. "
        "This name appears in the left pane of the discussion forum for the course."
    ),
    scope=Scope.settings
)
discussion_target = String(
    display_name=_("Subcategory"),
    default="Topic-Level Student-Visible Label",
    help=_(
        "A subcategory name for the discussion. "
        "This name appears in the left pane of the discussion forum for the course."
    ),
    scope=Scope.settings
)
# Ordering hint; set by import code, not editable in Studio.
sort_key = String(scope=Scope.settings)
# Fields exposed in the Studio editing dialog (StudioEditableXBlockMixin).
editable_fields = ["display_name", "discussion_category", "discussion_target"]
has_author_view = True  # Tells Studio to use author_view
# support for legacy OLX format - consumed by XmlParserMixin.load_metadata
metadata_translations = dict(RawDescriptor.metadata_translations)
metadata_translations['id'] = 'discussion_id'
metadata_translations['for'] = 'discussion_target'
@property
def course_key(self):
"""<|fim▁hole|>
NB: The goal is to move this XBlock out of edx-platform, and so we use
scope_ids.usage_id instead of runtime.course_id so that the code will
continue to work with workbench-based testing.
"""
return getattr(self.scope_ids.usage_id, 'course_key', None)
@property
def django_user(self):
    """Django user currently interacting with the XBlock.

    Returns ``None`` when the runtime does not provide a 'user' service.
    """
    service = self.runtime.service(self, 'user')
    return service._django_user if service else None  # pylint: disable=protected-access
@staticmethod
def get_translation_content():
    # Build the static path of the JS translation catalog for the
    # currently active language.
    try:
        return 'js/i18n/{lang}/djangojs.js'.format(
            lang=get_language(),
        )
    except IOError:
        # NOTE(review): str.format() does not raise IOError, so this
        # English fallback looks unreachable as written — presumably it
        # was meant to guard a filesystem lookup; confirm before
        # relying on the fallback behaviour.
        return 'js/i18n/en/djangojs.js'
@staticmethod
def vendor_js_dependencies():
    """
    Returns list of vendor JS files that this XBlock depends on.

    The helper function that it uses to obtain the list of vendor JS files
    works in conjunction with the Django pipeline to ensure that in development mode
    the files are loaded individually, but in production just the single bundle is loaded.
    """
    vendor_dependencies = get_js_dependencies('discussion_vendor')
    # These base utilities must be available before the bundled vendor
    # files, so they are prepended to the pipeline-provided list.
    base_vendor_dependencies = [
        'edx-ui-toolkit/js/utils/global-loader.js',
        'edx-ui-toolkit/js/utils/string-utils.js',
        'edx-ui-toolkit/js/utils/html-utils.js',
        'js/vendor/URI.min.js',
        'js/vendor/jquery.leanModal.js'
    ]
    return base_vendor_dependencies + vendor_dependencies
@staticmethod
def js_dependencies():
    """
    Returns list of JS files that this XBlock depends on.

    The helper function that it uses to obtain the list of JS files
    works in conjunction with the Django pipeline to ensure that in development mode
    the files are loaded individually, but in production just the single bundle is loaded.
    """
    # The 'discussion' bundle is defined in the platform's pipeline config.
    return get_js_dependencies('discussion')
@staticmethod
def css_dependencies():
    """Return the list of CSS files that this XBlock depends on.

    Works with the Django pipeline so that development mode serves the
    files individually while production serves the single bundle. The
    right-to-left stylesheet bundle is selected for RTL languages.
    """
    if get_language_bidi():
        bundle = 'style-inline-discussion-rtl'
    else:
        bundle = 'style-inline-discussion'
    return get_css_dependencies(bundle)
def add_resource_urls(self, fragment):
    """
    Adds URLs for JS and CSS resources that this XBlock depends on to `fragment`.

    Arguments:
        fragment (Fragment): the web fragment being assembled; resource
            URLs are appended in place.
    """
    # Add js translations catalog
    fragment.add_javascript_url(staticfiles_storage.url(self.get_translation_content()))
    # Head dependencies
    for vendor_js_file in self.vendor_js_dependencies():
        fragment.add_resource_url(staticfiles_storage.url(vendor_js_file), "application/javascript", "head")
    for css_file in self.css_dependencies():
        fragment.add_css_url(staticfiles_storage.url(css_file))
    # Body dependencies
    for js_file in self.js_dependencies():
        fragment.add_javascript_url(staticfiles_storage.url(js_file))
def has_permission(self, permission):
    """
    Encapsulates lms specific functionality, as `has_permission` is not
    importable outside of lms context, namely in tests.

    :param str permission: the forum permission name to check for the
        current Django user in this block's course
    :rtype: bool
    """
    # normal import causes the xmodule_assets command to fail due to circular import - hence importing locally
    from lms.djangoapps.discussion.django_comment_client.permissions import has_permission
    return has_permission(self.django_user, permission, self.course_key)
def student_view(self, context=None):
    """
    Renders student view for LMS.

    Builds a web fragment containing the inline discussion template,
    all JS/CSS dependencies, and (for anonymous users) a sign-in /
    register prompt.
    """
    fragment = Fragment()
    self.add_resource_urls(fragment)
    login_msg = ''
    # Anonymous users get a sign-in/register message instead of the
    # posting controls; the query string routes them back to enrollment.
    if not self.django_user.is_authenticated:
        qs = urllib.parse.urlencode({
            'course_id': self.course_key,
            'enrollment_action': 'enroll',
            'email_opt_in': False,
        })
        login_msg = Text(_(u"You are not signed in. To view the discussion content, {sign_in_link} or "
                           u"{register_link}, and enroll in this course.")).format(
            sign_in_link=HTML(u'<a href="{url}">{sign_in_label}</a>').format(
                sign_in_label=_('sign in'),
                url='{}?{}'.format(reverse('signin_user'), qs),
            ),
            # NOTE(review): the register link prefixes the reversed URL
            # with an extra "/" — confirm this does not yield "//...".
            register_link=HTML(u'<a href="/{url}">{register_label}</a>').format(
                register_label=_('register'),
                url='{}?{}'.format(reverse('register_user'), qs),
            ),
        )
    # NOTE: the incoming `context` argument is intentionally replaced by
    # the template context below.
    context = {
        'discussion_id': self.discussion_id,
        'display_name': self.display_name if self.display_name else _("Discussion"),
        'user': self.django_user,
        'course_id': self.course_key,
        'discussion_category': self.discussion_category,
        'discussion_target': self.discussion_target,
        'can_create_thread': self.has_permission("create_thread"),
        'can_create_comment': self.has_permission("create_comment"),
        'can_create_subcomment': self.has_permission("create_sub_comment"),
        'login_msg': login_msg,
    }
    fragment.add_content(self.runtime.render_template('discussion/_discussion_inline.html', context))
    fragment.initialize_js('DiscussionInlineBlock')
    return fragment
def author_view(self, context=None):  # pylint: disable=unused-argument
    """
    Renders author view for Studio.

    Delegates to studio_view_fragment(); enabled via has_author_view.
    """
    return self.studio_view_fragment()
def preview_view(self, context=None):  # pylint: disable=unused-argument
    """
    Renders preview inside Studio. This is used when DiscussionXBlock is embedded
    by another XBlock.

    Delegates to studio_view_fragment(), same as author_view.
    """
    return self.studio_view_fragment()
def studio_view_fragment(self):
    """
    Returns a fragment for rendering this block in Studio.

    The Studio template only needs the discussion id; no JS/CSS
    dependencies are attached here.
    """
    fragment = Fragment()
    fragment.add_content(self.runtime.render_template(
        'discussion/_discussion_inline_studio.html',
        {'discussion_id': self.discussion_id}
    ))
    return fragment
def student_view_data(self):
    """
    Returns a JSON representation of the student_view of this XBlock.

    Only the forum topic id is needed by API consumers.
    """
    return {'topic_id': self.discussion_id}
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
    """
    Parses OLX into XBlock.

    This method is overridden here to allow parsing legacy OLX, coming from discussion XModule.
    XBlock stores all the associated data, fields and children in a XML element inlined into vertical XML file
    XModule stored only minimal data on the element included into vertical XML and used a dedicated "discussion"
    folder in OLX to store fields and children. Also, some info was put into "policy.json" file.

    If no external data sources are found (file in "discussion" folder), it is exactly equivalent to base method
    XBlock.parse_xml. Otherwise this method parses file in "discussion" folder (known as definition_xml), applies
    policy.json and updates fields accordingly.
    """
    block = super(DiscussionXBlock, cls).parse_xml(node, runtime, keys, id_generator)
    # Map legacy attribute names (e.g. "for", "id") onto current fields.
    cls._apply_translations_to_node_attributes(block, node)
    # Merge any legacy definition file / policy.json into the block.
    cls._apply_metadata_and_policy(block, node, runtime)
    return block
@classmethod
def _apply_translations_to_node_attributes(cls, block, node):
    """
    Applies metadata translations for attributes stored on an inlined XML element.

    For each legacy attribute name present on ``node`` (e.g. "id",
    "for"), copies its value onto the corresponding current field of
    ``block`` as declared in ``metadata_translations``.
    """
    for old_attr, target_attr in six.iteritems(cls.metadata_translations):
        if old_attr in node.attrib and hasattr(block, target_attr):
            setattr(block, target_attr, node.attrib[old_attr])
@classmethod
def _apply_metadata_and_policy(cls, block, node, runtime):
"""
Attempt to load definition XML from "discussion" folder in OLX, than parse it and update block fields
"""
if node.get('url_name') is None:
return # Newer/XBlock XML format - no need to load an additional file.
try:
definition_xml, _ = cls.load_definition_xml(node, runtime, block.scope_ids.def_id)
except Exception as err: # pylint: disable=broad-except
log.info(
u"Exception %s when trying to load definition xml for block %s - assuming XBlock export format",
err,
block
)
return
metadata = cls.load_metadata(definition_xml)
cls.apply_policy(metadata, runtime.get_policy(block.scope_ids.usage_id))
for field_name, value in six.iteritems(metadata):
if field_name in block.fields:
setattr(block, field_name, value)<|fim▁end|> | :return: int course id |
<|file_name|>TCDContent.java<|end_file_name|><|fim▁begin|>package org.techniche.technothlon.katana.tcd;
import android.content.Context;
import android.content.SharedPreferences;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.AsyncTask;
import android.os.Looper;
import android.util.Log;
import android.widget.TextView;
import android.widget.Toast;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.techniche.technothlon.katana.R;
import org.techniche.technothlon.katana.db.TCDDatabase;
import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
/**
* Helper class for providing sample content for user interfaces created by
* Android template wizards.
* <p/>
* TODO: Replace all uses of this class before publishing your app.
*/
public class TCDContent {
/**
* An array of sample (dummy) items.
*/
public static List<TCDQuestionMini> ITEMS = new ArrayList<TCDQuestionMini>();
/**
* A map of sample (dummy) items, by ID.
*/
public static Map<String, TCDQuestion> ITEM_MAP = new HashMap<String, TCDQuestion>();
private static String url = "http://localhost/technothlon/technocoupdoeil_app_gateway/android/?technocoupdoeil=fjalkfq2045rudacnavsofu0aswd988q29ra&lastFetchId=";
/**
 * Fetches new questions from the remote gateway and stores them in the
 * local database, resuming from the last fetch id kept in shared
 * preferences. Shows a toast describing the outcome.
 *
 * Return codes: 0 = success, 1 = no network, 2 = I/O failure,
 * 3 = malformed JSON response.
 */
private static int download(Context context) {
    SharedPreferences sharedPref = context.getSharedPreferences(
            context.getString(R.string.preference_file_key), Context.MODE_PRIVATE);
    long lastFetchID = sharedPref.getLong(context.getString(R.string.tcd_fetch_id), 0);
    Log.d("Pref - log", lastFetchID + " from shared pref");
    ConnectivityManager connMgr = (ConnectivityManager)
            context.getSystemService(Context.CONNECTIVITY_SERVICE);
    NetworkInfo networkInfo = connMgr.getActiveNetworkInfo();
    if (networkInfo != null && networkInfo.isConnected()) {
        try {
            JSONObject json = new JSONObject(downloadUrl(url + lastFetchID));
            if (json.getString("status").equals("success")) {
                // Insert every returned question and remember the new
                // fetch id so the next sync is incremental.
                TCDDatabase db = new TCDDatabase(context);
                JSONArray questions = json.getJSONArray("questions");
                lastFetchID = json.getLong("lastFetchId");
                int count = json.getInt("questions_count"), lastID;
                for (int i = 0; i < count; i++) {
                    JSONObject q = questions.getJSONObject(i);
                    JSONObject links = q.getJSONObject("links");
                    lastID = q.getInt("uniqueId");
                    db.insert(
                            lastID,
                            q.getString("id"),
                            q.getString("color"),
                            q.getString("title"),
                            q.getString("question"),
                            links.getString("facebook"),
                            links.getString("google"),
                            links.getString("tumblr"),
                            links.getString("answer"),
                            q.getString("by"),
                            q.getString("time"),
                            q.getString("answer")
                    );
                    Log.d("Database - log", lastID + " loaded in database");
                }
                db.close();
                SharedPreferences.Editor edit = sharedPref.edit();
                edit.putLong(context.getString(R.string.tcd_fetch_id), lastFetchID);
                edit.commit();
            } else if (json.getString("status").equals("reset")) {
                // Server asked for a full reload: wipe local cache and
                // re-download from fetch id 0. Note: the recursive
                // call's return code is discarded; the outer call still
                // shows "Sync Completed." and returns 0.
                TCDDatabase db = new TCDDatabase(context);
                db.reset();
                db.close();
                SharedPreferences.Editor edit = sharedPref.edit();
                edit.putLong(context.getString(R.string.tcd_fetch_id), 0);
                edit.commit();
                download(context);
            }
            // Toasts are shown from a helper thread with its own Looper.
            final Context ct = context;
            new Thread() {
                @Override
                public void run() {
                    Looper.prepare();
                    Toast.makeText(ct, "Sync Completed.", Toast.LENGTH_SHORT).show();
                    Looper.loop();
                }
            }.start();
            return 0;
        } catch (JSONException e) {
            e.printStackTrace();
            final Context ct = context;
            new Thread() {
                @Override
                public void run() {
                    Looper.prepare();
                    Toast.makeText(ct, "Sync Failed.", Toast.LENGTH_SHORT).show();
                    Looper.loop();
                }
            }.start();
            return 3;
        } catch (IOException e) {
            e.printStackTrace();
            final Context ct = context;
            new Thread() {
                @Override
                public void run() {
                    Looper.prepare();
                    Toast.makeText(ct, "Sync Failed.", Toast.LENGTH_SHORT).show();
                    Looper.loop();
                }
            }.start();
            return 2;
        }
    } else {
        final Context ct = context;
        new Thread() {
            @Override
            public void run() {
                Looper.prepare();
                Toast.makeText(ct, "No network connection available.", Toast.LENGTH_SHORT).show();
                Looper.loop();
            }
        }.start();
        return 1;
    }
}
/**
 * Performs an HTTP GET on {@code myurl} and returns the response body
 * as a single string (line separators removed by readTextResponse).
 *
 * @throws IOException on connection or read failure
 */
private static String downloadUrl(String myurl) throws IOException {
    InputStream is = null;
    try {
        URL url = new URL(myurl);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setReadTimeout(10000 /* milliseconds */);
        conn.setConnectTimeout(15000 /* milliseconds */);
        conn.setRequestMethod("GET");
        conn.setDoInput(true);
        // Starts the query
        conn.connect();
        int response = conn.getResponseCode();
        Log.d("TCD latest downloads", "The response is: " + response);
        int size = conn.getContentLength();
        Log.d("TCD latest downloads", "The content-length is: " + size);
        is = conn.getInputStream();
        // Convert the InputStream into a string
        return readTextResponse(is);
        // Makes sure that the InputStream is closed after the app is
        // finished using it.
    } finally {
        if (is != null) {
            is.close();
        }
    }
}
/**
 * Reads the whole stream as text and concatenates its lines.
 * Note: line separators are dropped, because BufferedReader.readLine()
 * strips them and nothing re-adds them.
 */
private static String readTextResponse(InputStream inputStream) throws IOException {
    BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream));
    StringBuilder body = new StringBuilder();
    for (String line = reader.readLine(); line != null; line = reader.readLine()) {
        body.append(line);
    }
    return body.toString();
}
/**
 * Reloads the in-memory question collections (ITEMS / ITEM_MAP) from
 * the local SQLite cache, ordered newest first.
 *
 * When items are already loaded this acts as a refresh: new questions
 * are prepended so the newest stays on top (see addItem()).
 */
public static void load(Context context) {
    // Idiomatic form of the original "ITEMS.isEmpty() ? false : true".
    boolean update = !ITEMS.isEmpty();
    TCDDatabase helper = new TCDDatabase(context);
    SQLiteDatabase db = helper.getReadableDatabase();
    assert db != null;
    // Newest questions first: sort by time, then by row id, descending.
    Cursor c = db.rawQuery("SELECT * FROM " + TCDDatabase.Contracts.NAME + " ORDER BY " + TCDDatabase.Contracts.FIELD_TIME + " DESC, " + TCDDatabase.Contracts.FIELD_ID + " DESC", null);
    Log.d("DB", c.getCount() + " object in database");
    c.moveToFirst();
    while (!c.isAfterLast()) {
        addItem(new TCDQuestion(
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_ID)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_DISPLAY_ID)),
                c.getInt(c.getColumnIndex(TCDDatabase.Contracts.FIELD_COLOR)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_TITLE)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_QUESTION)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_FACEBOOK)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_GOOGLE)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_TUMBLR)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_ANSWER_URL)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_BY)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_ANSWER)),
                c.getString(c.getColumnIndex(TCDDatabase.Contracts.FIELD_TIME))
        ), update);
        c.moveToNext();
    }
    c.close();
    db.close();
}
/**
 * Registers a question in the in-memory collections, skipping
 * duplicates. On refresh ({@code update == true}) the mini entry is
 * prepended so newly synced questions appear first.
 */
private static void addItem(TCDQuestion item, boolean update) {
    if (!ITEM_MAP.containsKey(item.uniqueId)) {
        if (update) ITEMS.add(0, (new TCDQuestionMini(item.uniqueId)));
        else ITEMS.add((new TCDQuestionMini(item.uniqueId)));
        ITEM_MAP.put(item.uniqueId, item);
    }
}
/**
 * Background task that syncs questions from the server and reloads the
 * in-memory cache. params[0] must be a {@link Context}.
 * Subclasses receive the download() return code via finished().
 */
public abstract static class TCDLoader extends AsyncTask<Object, Integer, Integer> {
    @Override
    protected Integer doInBackground(Object[] params) {
        // 4 = download never ran / threw before returning a code.
        int d = 4;
        try {
            d = download((Context) params[0]);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Always reload from the local cache, even on failed sync.
            load((Context) params[0]);
        }
        return d;
    }
    @Override
    protected void onPostExecute(Integer o) {
        finished(o);
    }
    /** Called on the UI thread with the download() result code. */
    public abstract void finished(int result);
}
/**
* A dummy item representing a piece of content.
*/
public static class TCDQuestion {
// Display id shown to the user (distinct from uniqueId).
public String id;
public String question;
// Social / answer links for this question.
public String facebook;
public String google;
public String tumblr;
public String answer_url;
public String by;
public String answer;
public String title;
// Publication timestamp parsed in the constructor.
public java.util.Date date = null;
public String dateString = "";
// Background drawable resolved from the server-provided color code.
public int color = R.drawable.tcd_background_1;
public String uniqueId;
// Cached human-readable age string (see getStatus()).
private String status;
// Once true, getStatus() returns the cached date string without recomputing.
private boolean ret = false;
String answer_url, String by, String answer, String status) {
this.uniqueId = uniqueId;
this.id = id;
this.title = title;
this.question = question;
this.facebook = facebook;
this.google = google;
this.tumblr = tumblr;
this.answer_url = answer_url;
this.by = by;
this.color = getBackground(color);
this.answer = answer;
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
try {
this.date = sdf.parse(status);
} catch (ParseException e) {
e.printStackTrace();<|fim▁hole|> sdf = new SimpleDateFormat("yyyy-MM-dd");
assert this.date != null;
this.dateString = sdf.format(this.date);
this.status = getStatus();
}
/**
 * Maps the server-provided color code (multiples of 10) to one of the
 * six background drawables; unknown codes fall back to background 1.
 */
private int getBackground(int color) {
    switch (color) {
        case 10:
            return R.drawable.tcd_background_2;
        case 20:
            return R.drawable.tcd_background_3;
        case 30:
            return R.drawable.tcd_background_4;
        case 40:
            return R.drawable.tcd_background_5;
        case 50:
            return R.drawable.tcd_background_6;
        default:
            return R.drawable.tcd_background_1;
    }
}
/**
 * Returns a human-readable age for the question ("about N minutes
 * ago", "yesterday", ...). Once the age exceeds four days the plain
 * date string is cached permanently (ret flag) and returned thereafter.
 */
public String getStatus() {
    if (ret) return status;
    long seconds = Math.abs(((new Date()).getTime() - date.getTime()) / 1000);
    if (seconds < 60) status = "about " + seconds + " seconds ago";
    else if (seconds < 3600) status = "about " + (seconds / 60) + " minutes ago";
    else if (seconds < 86400) status = "about " + (seconds / 3600) + " hours ago";
    else if (seconds < 172800) status = "yesterday";
    else if (seconds < 345600) status = (seconds / 86400) + " days ago";
    else {
        // Older than 4 days: freeze on the formatted date.
        ret = true;
        status = dateString;
    }
    return status;
}
}
/** View-holder carrying the text views of one question list row. */
public static class TCDHolder {
    public TextView id, title, question, status;
}
/** Lightweight list entry holding only a question's unique id;
 * the full data lives in ITEM_MAP. */
public static class TCDQuestionMini {
    public String id;
    public TCDQuestionMini(String id) {
        this.id = id;
    }
}
}<|fim▁end|> | } |
<|file_name|>image_tags.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from django import template
FMT = 'JPEG'  # output format passed to PIL Image.save()
EXT = 'jpg'   # file extension used for generated resized images
QUAL = 75     # NOTE(review): defined but not referenced in the visible code
register = template.Library()  # Django template-filter registry for this module
def resized_path(path, size, method):
    """Return the path where the resized variant of ``path`` is stored.

    The resized file lives next to the original and is named
    ``<stem>_<method>_<size>.<EXT>``; the original extension is replaced
    by the module-wide output extension.
    """
    directory, filename = os.path.split(path)
    stem, _old_ext = filename.rsplit('.', 1)
    return os.path.join(directory, '%s_%s_%s.%s' % (stem, method, size, EXT))
def scale(imagefield, size, method='scale'):
    """
    Template filter used to scale an image
    that will fit inside the defined area.

    Returns the url of the resized image. The resized file is created
    on first use and cached on disk next to the original (see
    resized_path); subsequent calls reuse it.

    {% load image_tags %}
    {{ profile.picture|scale:"48x48" }}
    """
    # imagefield can be a dict with "path" and "url" keys
    if imagefield.__class__.__name__ == 'dict':
        imagefield = type('imageobj', (object,), imagefield)
    image_path = resized_path(imagefield.path, size, method)
    # Only generate the resized file if it is not cached on disk yet.
    if not os.path.exists(image_path):
        try:
            import Image
        except ImportError:
            try:
                from PIL import Image
            except ImportError:
                raise ImportError('Cannot import the Python Image Library.')
        image = Image.open(imagefield.path)
        # normalize image mode
        if image.mode != 'RGB':
            image = image.convert('RGB')
        # parse size string 'WIDTHxHEIGHT'
        width, height = [int(i) for i in size.split('x')]
        # use PIL methods to edit images
        if method == 'scale':
            # thumbnail() preserves aspect ratio within the bounding box.
            image.thumbnail((width, height), Image.ANTIALIAS)
            image.save(image_path, FMT)
        elif method == 'crop':
            try:
                import ImageOps
            except ImportError:
                from PIL import ImageOps
            # fit() crops to exactly width x height.
            ImageOps.fit(image, (width, height), Image.ANTIALIAS
                         ).save(image_path, FMT)
    return resized_path(imagefield.url, size, method)
def crop(imagefield, size):
    """
    Template filter used to crop an image
    to make it fill the defined area.

    Thin wrapper around scale() with method='crop'.

    {% load image_tags %}
    {{ profile.picture|crop:"48x48" }}
    """
    return scale(imagefield, size, 'crop')
<|fim▁hole|><|fim▁end|> |
register.filter('scale', scale)
register.filter('crop', crop) |
<|file_name|>config_vlan.py<|end_file_name|><|fim▁begin|># Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.command_processing.base_command_processor import BaseCommandProcessor
class ConfigVlanCommandProcessor(BaseCommandProcessor):
def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
    # args[0] is the VLAN object this processor configures.
    super(ConfigVlanCommandProcessor, self).init(switch_configuration, terminal_controller, logger, piping_processor)
    self.vlan = args[0]
def get_prompt(self):
    # Prompt shown while in VLAN configuration mode, e.g. "sw1(config-vlan)#".
    return self.switch_configuration.name + "(config-vlan)#"
def do_name(self, *args):
self.vlan.name = (args[0][:32])<|fim▁hole|>
def do_exit(self):
self.is_done = True<|fim▁end|> | |
<|file_name|>nib_length.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.7
"""
Print the number of bases in a nib file.
usage: %prog nib_file
"""
<|fim▁hole|>
nib = seq_nib.NibFile( file( sys.argv[1] ) )
print nib.length<|fim▁end|> | from bx.seq import nib as seq_nib
import sys |
<|file_name|>skeleton.py<|end_file_name|><|fim▁begin|>import sys
from aimes.emgr.utils import *
__author__ = "Matteo Turilli"
__copyright__ = "Copyright 2015, The AIMES Project"
__license__ = "MIT"
# -----------------------------------------------------------------------------
def write_skeleton_conf(cfg, scale, cores, uniformity, fout):
'''Write a skeleton configuration file with the set number/type/duration of
tasks and stages.
'''
substitutes = dict()
substitutes['SCALE'] = scale
substitutes['CORES'] = cores[-1]
if substitutes['CORES'] > 1:
substitutes['TASK_TYPE'] = 'parallel'
elif substitutes['CORES'] == 1:
substitutes['TASK_TYPE'] = 'serial'
else:
print "ERROR: invalid number of cores per task: '%s'." % cores
sys.exit(1)
if uniformity == 'uniform':
substitutes['UNIFORMITY_DURATION'] = "%s %s" % \
(uniformity, cfg['skeleton_task_duration']['max'])
# TODO: Calculate stdev and avg.<|fim▁hole|> (uniformity, cfg['skeleton_task_duration']['avg'],
cfg['skeleton_task_duration']['stdev'])
else:
print "ERROR: invalid task uniformity '%s' specified." % uniformity
sys.exit(1)
write_template(cfg['skeleton_template'], substitutes, fout)<|fim▁end|> | elif uniformity == 'gauss':
substitutes['UNIFORMITY_DURATION'] = "%s [%s, %s]" % \ |
<|file_name|>saturating_square.rs<|end_file_name|><|fim▁begin|>use malachite_base::num::basic::integers::PrimitiveInt;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base_test_util::generators::{signed_gen, unsigned_gen};
#[test]
fn test_saturating_square() {
fn test<T: PrimitiveInt>(x: T, out: T) {
assert_eq!(x.saturating_square(), out);
let mut x = x;
x.saturating_square_assign();
assert_eq!(x, out);
}
test::<u8>(0, 0);
test::<i16>(1, 1);
test::<u32>(2, 4);
test::<i64>(3, 9);
test::<u128>(10, 100);
test::<isize>(123, 15129);
test::<u32>(1000, 1000000);
test::<i16>(-1, 1);
test::<i32>(-2, 4);
test::<i64>(-3, 9);
test::<i128>(-10, 100);
test::<isize>(-123, 15129);
test::<i32>(-1000, 1000000);
test::<u16>(1000, u16::MAX);
test::<i16>(-1000, i16::MAX);
}<|fim▁hole|> let mut square = x;
square.saturating_square_assign();
assert_eq!(square, x.saturating_square());
assert_eq!(square, x.saturating_pow(2));
assert!(square >= x);
if square < T::MAX {
assert_eq!(square, x.square());
}
});
}
// Property tests for saturating_square on a signed type: the assign and
// non-assign forms agree, squaring equals saturating_pow(2), and when the
// result is strictly inside (MIN, MAX) it matches the exact square.
fn saturating_square_properties_helper_signed<T: PrimitiveSigned>() {
    signed_gen::<T>().test_properties(|x| {
        let mut square = x;
        square.saturating_square_assign();
        assert_eq!(square, x.saturating_square());
        assert_eq!(square, x.saturating_pow(2));
        // No saturation occurred, so the exact square must match.
        if square > T::MIN && square < T::MAX {
            assert_eq!(square, x.square());
        }
    });
}
#[test]
fn saturating_square_properties() {
    // Run the property helpers across every primitive unsigned and
    // signed type via the apply_fn_to_* macros.
    apply_fn_to_unsigneds!(saturating_square_properties_helper_unsigned);
    apply_fn_to_signeds!(saturating_square_properties_helper_signed);
}
fn saturating_square_properties_helper_unsigned<T: PrimitiveUnsigned>() {
unsigned_gen::<T>().test_properties(|x| { |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import logging
log = logging.getLogger(__name__)
def has_bin(arg):
"""
Helper function checks whether args contains binary data<|fim▁hole|> :return: (bool)
"""
if type(arg) is list or type(arg) is tuple:
return reduce(lambda has_binary, item: has_binary or has_bin(item), arg, False)
if type(arg) is bytearray or hasattr(arg, 'read'):
return True
if type(arg) is dict:
return reduce(lambda has_binary, item: has_binary or has_bin(item), [v for k, v in arg.items()], False)
return False<|fim▁end|> | :param args: list | tuple | bytearray | dict |
<|file_name|>InvalidTimeValueError.ts<|end_file_name|><|fim▁begin|>export class InvalidTimeValueError extends Error {<|fim▁hole|><|fim▁end|> | constructor(unit: string, providedValue: number) {
super(`Cannot create a valid time with provided ${unit} value: ${providedValue}`);
}
} |
<|file_name|>spanmap.rs<|end_file_name|><|fim▁begin|>use system::*;
use span::Span;
use objectheap::StaticObjectHeap;
// Addressing bits: 48
// Page bits: 48 - 12 = 36
// 36 / 3: 12
// Each radix-tree level indexes 12 bits of the page number
// (3 levels x 12 bits = 36 page bits; see the comment above).
pub const INDEX_BITS: usize = 12;
pub const INDEX_SIZE: usize = 1 << INDEX_BITS;

// Leaf level: stores the span value (a *mut Span cast to usize) per page.
struct SpanMapLevel1 {
    pml: [usize; INDEX_SIZE],
}

// Middle level: pointers to lazily-allocated leaf tables.
struct SpanMapLevel2 {
    pml: [*mut SpanMapLevel1; INDEX_SIZE],
}

// Root level: pointers to lazily-allocated middle tables.
struct SpanMapLevel3 {
    pml: [*mut SpanMapLevel2; INDEX_SIZE],
}

// Three-level radix tree mapping page-aligned addresses to span pointers.
pub struct SpanMap {
    pml3: SpanMapLevel3,
}

// Static allocators used to lazily create level-2/level-1 tables.
static mut PML2_ALLOCATOR: StaticObjectHeap<SpanMapLevel2> = StaticObjectHeap::new();
static mut PML1_ALLOCATOR: StaticObjectHeap<SpanMapLevel1> = StaticObjectHeap::new();
// Splits an address into (level-3, level-2, level-1) radix-tree indices,
// taking 12 bits each from the page number (address >> PAGE_BITS, where
// PAGE_BITS comes from the `system` module).
#[inline]
fn indices_from_address(uptr: usize) -> (usize, usize, usize) {
    let pml1i = (uptr >> PAGE_BITS) & 0xfff;
    let pml2i = (uptr >> (PAGE_BITS + INDEX_BITS)) & 0xfff;
    let pml3i = (uptr >> (PAGE_BITS + 2 * INDEX_BITS)) & 0xfff;
    (pml3i, pml2i, pml1i)
}
impl SpanMap {
    /// Creates an empty map with all root entries null.
    /// `const fn` so it can initialize a `static`.
    pub const fn new() -> SpanMap {
        SpanMap { pml3: SpanMapLevel3 { pml: [0_usize as *mut SpanMapLevel2; INDEX_SIZE] } }
    }
    /// Ensures intermediate tables exist for addresses `ptr..=ptr + n`,
    /// allocating zeroed level-2/level-1 tables on demand.
    ///
    /// NOTE(review): the loop strides by `1 << INDEX_BITS` raw address
    /// units, not by a full level-1 table's address span — confirm the
    /// intended units of `n` before calling with n > 0 (set_span only
    /// ever passes n = 0, which performs exactly one iteration).
    fn ensure(&mut self, ptr: usize, n: usize) {
        let mut key = ptr;
        loop {
            if key > ptr + n {
                break;
            }
            unsafe {
                let (pml3i, pml2i, _) = indices_from_address(key);
                // Allocate the level-2 table for this root slot if missing.
                if self.pml3.pml[pml3i].is_null() {
                    self.pml3.pml[pml3i] = PML2_ALLOCATOR.zero_allocate()
                }
                let pml2 = self.pml3.pml[pml3i];
                // Allocate the level-1 (leaf) table if missing.
                if (*pml2).pml[pml2i].is_null() {
                    (*pml2).pml[pml2i] = PML1_ALLOCATOR.zero_allocate()
                }
            }
            key += 1 << INDEX_BITS;
        }
    }
    /// Looks up the span value stored for `ptr`'s page.
    /// Returns 0 when no entry (or no intermediate table) exists.
    pub fn span(&self, ptr: usize) -> usize {
        let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
        unsafe {
            let l2 = self.pml3.pml[pml3i];
            if !l2.is_null() {
                let l1 = (*l2).pml[pml2i];
                if !l1.is_null() {
                    return (*l1).pml[pml1i];
                }
            }
            0
        }
    }
    /// Stores `value` for `ptr`'s page, creating intermediate tables
    /// as needed.
    pub fn set_span(&mut self, ptr: usize, value: usize) {
        self.ensure(ptr, 0);
        let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
        unsafe {
            (*(*self.pml3.pml[pml3i]).pml[pml2i]).pml[pml1i] = value;
        }
    }
}
extern crate libc;
// Global mutex guarding all access to SPANMAP (and, transitively, the table
// allocators it uses).
static mut SPANMAP_MUTEX: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
// The single process-wide span map; outside of tests it is only touched
// through the locked wrapper functions `span` / `set_span` below.
static mut SPANMAP: SpanMap = SpanMap::new();
/// Thread-safe lookup: returns the `Span` registered for `ptr`, or a null
/// pointer if none was ever registered. Serialized by the global mutex.
pub fn span(ptr: usize) -> *mut Span {
    unsafe {
        libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
        let r = SPANMAP.span(ptr);
        // Unlock before converting/returning; the read result is already copied.
        libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
        r as *mut Span
    }
}
/// Thread-safe registration: records `value` as the `Span` for `ptr`.
/// Serialized by the global mutex; may allocate radix-tree tables.
pub fn set_span(ptr: usize, value: *mut Span) {
    unsafe {
        libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
        SPANMAP.set_span(ptr, value as usize);
        libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trip test: values written over a grid of addresses read back
    // exactly, and addresses outside the written grid read back as 0.
    #[test]
    fn spanmap() {
        let mut sp = SpanMap::new();
        // Write a distinct, recomputable value (the negated address) at a
        // grid of addresses spanning several level-1/level-2 slots.
        for i in 10..20 {
            for j in 20..30 {
                let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
                let v = (-(p as isize)) as usize;
                sp.set_span(p, v);
            }
        }
        // Every stored value must read back unchanged.
        for i in 10..20 {
            for j in 20..30 {
                let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
                let v = (-(p as isize)) as usize;
                assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
            }
        }
        // Addresses below the written grid were never set: expect 0.
        for i in 0..10 {
            for j in 0..20 {
                let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
                let v = 0;
                assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
            }
        }
        // Addresses above the written grid were never set either: expect 0.
        for i in 20..30 {
            for j in 30..40 {
                let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
                let v = 0;
                assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
            }
        }
    }
}
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>mod common;
mod color;
mod math;
mod ppm;
mod renderer;
mod shade_record;
mod shape;
mod tracer;
mod view_plane;
mod world;
use std::cell::RefCell;
use std::rc::Rc;
use color::RGBColor;
use renderer::Renderer;<|fim▁hole|>use world::World;
/// Entry point: builds the scene, ray-traces it onto a view plane, and
/// writes the result out as a PPM image named "output".
fn main() {
    // 300x300 view plane; the last argument is the mid-grey background color.
    // NOTE(review): the meaning of the two 1.0 arguments (presumably pixel
    // size and gamma) is not visible here — confirm against ViewPlane::new.
    let mut view_plane = ViewPlane::new(300, 300, 1.0, 1.0, RGBColor::new(0.5 , 0.5, 0.5));
    // Scene shared between this function and the tracer via Rc<RefCell<_>>.
    let world = Rc::new(RefCell::new(World::new()));
    let tracer = Box::new(MultipleObjects::new(world.clone())) as Box<Tracer>;
    let renderer = Renderer::new(tracer);
    // Populate the scene before rendering.
    world.borrow().build();
    renderer.render_world(&mut view_plane);
    // Dump the rendered pixel buffer to disk in PPM format.
    ppm::write("output", view_plane.width(), view_plane.height(), view_plane.pixels());
}<|fim▁end|> | use tracer::{Tracer, MultipleObjects};
use view_plane::ViewPlane; |
<|file_name|>client_fi.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fi_FI" version="2.1">
<context>
<name>FileSystem</name>
<message>
<location filename="../src/libsync/filesystem.cpp" line="273"/>
<source>The destination file has an unexpected size or modification time</source>
<translation>Kohdetiedostolla on odottamaton koko tai muokkausaika</translation>
</message>
</context>
<context>
<name>FolderWizardSourcePage</name>
<message>
<location filename="../src/gui/folderwizardsourcepage.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/folderwizardsourcepage.ui" line="33"/>
<source>Pick a local folder on your computer to sync</source>
<translation>Valitse synkronoitava paikalliskansio tietokoneelta</translation>
</message>
<message>
<location filename="../src/gui/folderwizardsourcepage.ui" line="44"/>
<source>&Choose...</source>
<translation>&Valitse...</translation>
</message>
</context>
<context>
<name>FolderWizardTargetPage</name>
<message>
<location filename="../src/gui/folderwizardtargetpage.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/folderwizardtargetpage.ui" line="131"/>
<source>Select a remote destination folder</source>
<translation>Valitse etäkohdekansio</translation>
</message>
<message>
<location filename="../src/gui/folderwizardtargetpage.ui" line="143"/>
<source>Create Folder</source>
<translation>Luo kansio</translation>
</message>
<message>
<location filename="../src/gui/folderwizardtargetpage.ui" line="163"/>
<source>Refresh</source>
<translation>Päivitä</translation>
</message>
<message>
<location filename="../src/gui/folderwizardtargetpage.ui" line="177"/>
<source>Folders</source>
<translation>Kansiot</translation>
</message>
<message>
<location filename="../src/gui/folderwizardtargetpage.ui" line="110"/>
<source>TextLabel</source>
<translation>TextLabel</translation>
</message>
</context>
<context>
<name>NotificationWidget</name>
<message>
<location filename="../src/gui/notificationwidget.ui" line="20"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/notificationwidget.ui" line="56"/>
<source>Lorem ipsum dolor sit amet</source>
<translation>Lorem ipsum dolor sit amet</translation>
</message>
<message>
<location filename="../src/gui/notificationwidget.ui" line="69"/>
<source>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod temporm </source>
<translation>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod temporm </translation>
</message>
<message>
<location filename="../src/gui/notificationwidget.ui" line="89"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
</context>
<context>
<name>OCC::AccountSettings</name>
<message>
<location filename="../src/gui/accountsettings.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.ui" line="55"/>
<source>...</source>
<translation>...</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.ui" line="76"/>
<source>Storage space: ...</source>
<translation>Tallennustila: ...</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.ui" line="146"/>
<source>Unchecked folders will be <b>removed</b> from your local file system and will not be synchronized to this computer anymore</source>
<translation>Ilman valintaa olevat kansiot <b>poistetaan</b> paikallisesta tiedostojärjestelmästä, eikä niitä synkronoida enää jatkossa tämän tietokoneen kanssa</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.ui" line="193"/>
<source>Apply</source>
<translation>Toteuta</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.ui" line="180"/>
<location filename="../src/gui/accountsettings.cpp" line="352"/>
<location filename="../src/gui/accountsettings.cpp" line="690"/>
<source>Cancel</source>
<translation>Peruuta</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.ui" line="42"/>
<source>Connected with <server> as <user></source>
<translation>Yhdistetty palvelimeen <server> käyttäen tunnusta <user></translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="127"/>
<source>No account configured.</source>
<translation>Tiliä ei ole määritelty.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="141"/>
<source>Add new</source>
<translation>Lisää uusi</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="149"/>
<source>Remove</source>
<translation>Poista</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="153"/>
<source>Account</source>
<translation>Tili</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="221"/>
<source>Choose what to sync</source>
<translation>Valitse synkronoitavat tiedot</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="229"/>
<source>Remove folder sync connection</source>
<translation>Poista kansion synkronointiyhteys</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="294"/>
<source>Folder creation failed</source>
<translation>Kansion luominen epäonnistui</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="295"/>
<source><p>Could not create local folder <i>%1</i>.</source>
<translation><p>Paikallisen kansion <i>%1</i> luominen epäonnistui.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="345"/>
<source>Confirm Folder Sync Connection Removal</source>
<translation>Vahvista kansion synkronointiyhteyden poisto</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="351"/>
<source>Remove Folder Sync Connection</source>
<translation>Poista kansion synkronointiyhteys</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="433"/>
<source>Sync Running</source>
<translation>Synkronointi meneillään</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="434"/>
<source>The syncing operation is running.<br/>Do you want to terminate it?</source>
<translation>Synkronointioperaatio on meneillään.<br/>Haluatko keskeyttää sen?</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="501"/>
<source>%1 in use</source>
<translation>%1 käytössä</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="521"/>
<source>%1 as <i>%2</i></source>
<translation>%1 käyttäjänä <i>%2</i></translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="527"/>
<source>The server version %1 is old and unsupported! Proceed at your own risk.</source>
<translation>Palvelimen versio %1 on vanha ja sen tuki on loppunut! Jatka omalla vastuulla.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="529"/>
<source>Connected to %1.</source>
<translation>Yhteys muodostettu kohteeseen %1.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="531"/>
<source>Server %1 is temporarily unavailable.</source>
<translation>Palvelin %1 ei ole juuri nyt saatavilla.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="533"/>
<source>Signed out from %1.</source>
<translation>Kirjauduttu ulos kohteesta %1.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="535"/>
<source>No connection to %1 at %2.</source>
<translation>Ei yhteyttä kohteeseen %1 osoitteessa %2.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="558"/>
<source>Log in</source>
<translation>Kirjaudu sisään</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="642"/>
<source>There are new folders that were not synchronized because they are too big: </source>
<translation>Havaittiin uusia kansioita, joita ei synkronoitu niiden suuren koon vuoksi:</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="682"/>
<source>Confirm Account Removal</source>
<translation>Vahvista tilin poistaminen</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="683"/>
<source><p>Do you really want to remove the connection to the account <i>%1</i>?</p><p><b>Note:</b> This will <b>not</b> delete any files.</p></source>
<translation><p>Haluatko varmasti poistaa tilin <i>%1</i>?</p><p><b>Huomio:</b> Tämä toimenpide <b>ei</b> poista mitään tiedostoja.</p></translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="689"/>
<source>Remove connection</source>
<translation>Poista yhteys</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="217"/>
<source>Open folder</source>
<translation>Avaa kansio</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="145"/>
<location filename="../src/gui/accountsettings.cpp" line="560"/>
<source>Log out</source>
<translation>Kirjaudu ulos</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="226"/>
<source>Resume sync</source>
<translation>Palauta synkronointi</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="226"/>
<source>Pause sync</source>
<translation>Keskeytä synkronointi</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="346"/>
<source><p>Do you really want to stop syncing the folder <i>%1</i>?</p><p><b>Note:</b> This will <b>not</b> delete any files.</p></source>
<translation><p>Haluatko varmasti lopettaa kansion <i>%1</i> synkronoinnin?</p><p><b>Huomio:</b> Tämä toimenpide <b>ei</b> poista mitään tiedostoja.</p></translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="488"/>
<source>%1 (%3%) of %2 in use. Some folders, including network mounted or shared folders, might have different limits.</source>
<translation>%1/%2 (%3 %) käytössä. Jotkin kansiot, mukaan lukien verkkojaot ja jaetut kansiot, voivat sisältää eri rajoitukset.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="489"/>
<source>%1 of %2 in use</source>
<translation>%1/%2 käytössä</translation>
</message>
<message>
                <location filename="../src/gui/accountsettings.cpp" line="498"/>
        <source>Currently there is no storage usage information available.</source>
        <translation>Tallennustilan käyttötietoja ei ole juuri nyt saatavilla.</translation>
</message>
<message>
<location filename="../src/gui/accountsettings.cpp" line="541"/>
<source>No %1 connection configured.</source>
<translation>%1-yhteyttä ei ole määritelty.</translation>
</message>
</context>
<context>
<name>OCC::AccountState</name>
<message>
<location filename="../src/gui/accountstate.cpp" line="112"/>
<source>Signed out</source>
<translation>Kirjauduttu ulos</translation>
</message>
<message>
<location filename="../src/gui/accountstate.cpp" line="114"/>
<source>Disconnected</source>
<translation>Yhteys katkaistu</translation>
</message>
<message>
<location filename="../src/gui/accountstate.cpp" line="116"/>
<source>Connected</source>
<translation>Yhdistetty</translation>
</message>
<message>
<location filename="../src/gui/accountstate.cpp" line="118"/>
<source>Service unavailable</source>
<translation>Palvelu ei ole käytettävissä</translation>
</message>
<message>
<location filename="../src/gui/accountstate.cpp" line="120"/>
<source>Network error</source>
<translation>Verkkovirhe</translation>
</message>
<message>
<location filename="../src/gui/accountstate.cpp" line="122"/>
<source>Configuration error</source>
<translation>Asetusvirhe</translation>
</message>
<message>
<location filename="../src/gui/accountstate.cpp" line="124"/>
<source>Unknown account state</source>
<translation>Tuntematon tilin tila</translation>
</message>
</context>
<context>
<name>OCC::ActivityItemDelegate</name>
<message>
<location filename="../src/gui/activityitemdelegate.cpp" line="145"/>
<source>%1 on %2</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/activityitemdelegate.cpp" line="147"/>
<source>%1 on %2 (disconnected)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::ActivitySettings</name>
<message>
<location filename="../src/gui/activitywidget.cpp" line="516"/>
<location filename="../src/gui/activitywidget.cpp" line="571"/>
<source>Server Activity</source>
<translation>Palvelimen toimet</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="522"/>
<source>Sync Protocol</source>
<translation>Synkronointiprotokolla</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="530"/>
<source>List of ignored or erroneous files</source>
<translation>Luettelo ohitettavista tai virheellisistä tiedostoista</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="534"/>
<source>Copy</source>
<translation>Kopioi</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="535"/>
<source>Copy the activity list to the clipboard.</source>
<translation>Kopioi toimilista leikepöydälle.</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="577"/>
<source>Not Synced</source>
<translation>Ei synkronoitu</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="580"/>
<source>Not Synced (%1)</source>
<extracomment>%1 is the number of not synced files.</extracomment>
<translation>Ei synkronoitu (%1)</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="596"/>
<source>The server activity list has been copied to the clipboard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="600"/>
<source>The sync activity list has been copied to the clipboard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="603"/>
<source>The list of unsynched items has been copied to the clipboard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="608"/>
<source>Copied to clipboard</source>
<translation>Kopioitu leikepöydälle</translation>
</message>
</context>
<context>
<name>OCC::ActivityWidget</name>
<message>
<location filename="../src/gui/activitywidget.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.ui" line="26"/>
<location filename="../src/gui/activitywidget.ui" line="65"/>
<location filename="../src/gui/activitywidget.ui" line="88"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="135"/>
<source>Server Activities</source>
<translation>Palvelimen toimet</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="90"/>
<source>Copy</source>
<translation>Kopioi</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="91"/>
<source>Copy the activity list to the clipboard.</source>
<translation>Kopioi toimilista leikepöydälle.</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="139"/>
<source>Action Required: Notifications</source>
<translation>Toimenpiteitä vaaditaan: ilmoitukset</translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="144"/>
<source><br/>Account %1 does not have activities enabled.</source>
<translation><br/>Tilillä %1 ei ole toimia käytössä.</translation>
</message>
<message numerus="yes">
<location filename="../src/gui/activitywidget.cpp" line="351"/>
<source>You received %n new notification(s) from %2.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/gui/activitywidget.cpp" line="359"/>
<source>You received %n new notification(s) from %1 and %2.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="361"/>
<source>You received new notifications from %1, %2 and other accounts.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/activitywidget.cpp" line="365"/>
<source>%1 Notifications - Action Required</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::AddCertificateDialog</name>
<message>
<location filename="../src/gui/addcertificatedialog.ui" line="17"/>
<source>SSL client certificate authentication</source>
<translation>SSL-asiakkaan varmenteen tunnistautuminen</translation>
</message>
<message>
<location filename="../src/gui/addcertificatedialog.ui" line="23"/>
<source>This server probably requires a SSL client certificate.</source>
<translation>Tämä palvelin vaatii luultavasti SSL-asiakasvarmenteen.</translation>
</message>
<message>
<location filename="../src/gui/addcertificatedialog.ui" line="35"/>
<source>Certificate :</source>
<translation>Varmenne:</translation>
</message>
<message>
<location filename="../src/gui/addcertificatedialog.ui" line="51"/>
<source>Browse...</source>
<translation>Selaa...</translation>
</message>
<message>
<location filename="../src/gui/addcertificatedialog.ui" line="60"/>
<source>Certificate password :</source>
<translation>Varmenteen salasana:</translation>
</message>
<message>
<location filename="../src/gui/addcertificatedialog.cpp" line="37"/>
<source>Select a certificate</source>
<translation>Valitse varmenne</translation>
</message>
<message>
<location filename="../src/gui/addcertificatedialog.cpp" line="37"/>
<source>Certificate files (*.p12 *.pfx)</source>
<translation>Varmennetiedostot (*.p12 *.pfx)</translation>
</message>
</context>
<context>
<name>OCC::AuthenticationDialog</name>
<message>
<location filename="../src/gui/authenticationdialog.cpp" line="29"/>
<source>Authentication Required</source>
<translation>Tunnistautuminen vaaditaan</translation>
</message>
<message>
<location filename="../src/gui/authenticationdialog.cpp" line="31"/>
<source>Enter username and password for '%1' at %2.</source>
<translation>Anna käyttäjätunnus ja salasana kohteeseen '%1' osoitteessa %2.</translation>
</message>
<message>
<location filename="../src/gui/authenticationdialog.cpp" line="35"/>
<source>&User:</source>
<translation>K&äyttäjä:</translation>
</message>
<message>
<location filename="../src/gui/authenticationdialog.cpp" line="36"/>
<source>&Password:</source>
<translation>&Salasana:</translation>
</message>
</context>
<context>
<name>OCC::CleanupPollsJob</name>
<message>
<location filename="../src/libsync/owncloudpropagator.cpp" line="779"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoittaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::ConnectionValidator</name>
<message>
<location filename="../src/libsync/connectionvalidator.cpp" line="65"/>
<source>No ownCloud account configured</source>
<translation>ownCloud-tiliä ei ole määritelty</translation>
</message>
<message>
<location filename="../src/libsync/connectionvalidator.cpp" line="127"/>
<source>The configured server for this client is too old</source>
<translation>Määritelty palvelin on ohjelmistoversioltaan liian vanha tälle asiakasohjelmistolle</translation>
</message>
<message>
<location filename="../src/libsync/connectionvalidator.cpp" line="128"/>
<source>Please update to the latest server and restart the client.</source>
<translation>Päivitä uusimpaan palvelinversioon ja käynnistä asiakasohjelmisto uudelleen.</translation>
</message>
<message>
<location filename="../src/libsync/connectionvalidator.cpp" line="148"/>
<source>Authentication error: Either username or password are wrong.</source>
<translation>Tunnistautumisvirhe: käyttäjätunnus tai salasana on väärin.</translation>
</message>
<message>
<location filename="../src/libsync/connectionvalidator.cpp" line="160"/>
<source>timeout</source>
<translation>aikakatkaisu</translation>
</message>
<message>
<location filename="../src/libsync/connectionvalidator.cpp" line="192"/>
<source>The provided credentials are not correct</source>
<translation>Annetut tilitiedot eivät ole oikein</translation>
</message>
</context>
<context>
<name>OCC::DeleteJob</name>
<message>
<location filename="../src/libsync/propagateremotedelete.cpp" line="42"/>
<source>Connection timed out</source>
<translation>Yhteys aikakatkaistiin</translation>
</message>
</context>
<context>
<name>OCC::DiscoveryMainThread</name>
<message>
<location filename="../src/libsync/discoveryphase.cpp" line="540"/>
<source>Aborted by the user</source>
<translation>Keskeytetty käyttäjän toimesta</translation>
</message>
</context>
<context>
<name>OCC::Folder</name>
<message>
<location filename="../src/gui/folder.cpp" line="129"/>
<source>Local folder %1 does not exist.</source>
<translation>Paikallista kansiota %1 ei ole olemassa.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="132"/>
<source>%1 should be a folder but is not.</source>
<translation>Kohteen %1 pitäisi olla kansio, mutta se ei kuitenkaan ole kansio.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="135"/>
<source>%1 is not readable.</source>
<translation>%1 ei ole luettavissa.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="381"/>
<source>%1: %2</source>
<extracomment>this displays an error string (%2) for a file %1</extracomment>
<translation>%1: %2</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="491"/>
<source>%1 has been removed.</source>
<comment>%1 names a file.</comment>
<translation>%1 on poistettu.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="498"/>
<source>%1 has been downloaded.</source>
<comment>%1 names a file.</comment>
<translation>%1 on ladattu.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="505"/>
<source>%1 has been updated.</source>
<comment>%1 names a file.</comment>
<translation>%1 on päivitetty.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="512"/>
<source>%1 has been renamed to %2.</source>
<comment>%1 and %2 name files.</comment>
        <translation>%1 on nimetty uudelleen muotoon %2.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="519"/>
<source>%1 has been moved to %2.</source>
<translation>%1 on siirretty kohteeseen %2.</translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="489"/>
<source>%1 and %n other file(s) have been removed.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="496"/>
<source>%1 and %n other file(s) have been downloaded.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="503"/>
<source>%1 and %n other file(s) have been updated.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="510"/>
<source>%1 has been renamed to %2 and %n other file(s) have been renamed.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="517"/>
<source>%1 has been moved to %2 and %n other file(s) have been moved.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="524"/>
<source>%1 has and %n other file(s) have sync conflicts.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="526"/>
<source>%1 has a sync conflict. Please check the conflict file!</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location filename="../src/gui/folder.cpp" line="531"/>
<source>%1 and %n other file(s) could not be synced due to errors. See the log for details.</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="533"/>
<source>%1 could not be synced due to an error. See the log for details.</source>
<translation>Kohdetta %1 ei voi synkronoida virheen vuoksi. Katso tarkemmat tiedot lokista.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="539"/>
<source>Sync Activity</source>
<translation>Synkronointiaktiviteetti</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="747"/>
<source>Could not read system exclude file</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="958"/>
<source>A new folder larger than %1 MB has been added: %2.
Please go in the settings to select it if you wish to download it.</source>
<translation>Uusi kansio, joka on suurempi kuin %1 Mt, on lisätty: %2.
Siirry asetuksiin valitaksesi sen, jos haluat ladata kyseisen kansion.</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="976"/>
<source>This sync would remove all the files in the sync folder '%1'.
This might be because the folder was silently reconfigured, or that all the files were manually removed.
Are you sure you want to perform this operation?</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="980"/>
<source>Remove All Files?</source>
<translation>Poistetaanko kaikki tiedostot?</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="982"/>
<source>Remove all files</source>
<translation>Poista kaikki tiedostot</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="983"/>
<source>Keep files</source>
<translation>Säilytä tiedostot</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="1001"/>
<source>This sync would reset the files to an erlier time in the sync folder '%1'.
This might be because a backup was restored on the server.
Continuing the sync as normal will cause all your files to be overwritten by an older file in an earlier state. Do you want to keep your local most recent files as conflict files?</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="1006"/>
<source>Backup detected</source>
        <translation>Varmuuskopio havaittu</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="1008"/>
<source>Normal Synchronisation</source>
<translation>Normaali synkronointi</translation>
</message>
<message>
<location filename="../src/gui/folder.cpp" line="1009"/>
<source>Keep Local Files as Conflict</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::FolderMan</name>
<message>
<location filename="../src/gui/folderman.cpp" line="265"/>
<source>Could not reset folder state</source>
<translation>Kansion tilaa ei voitu alustaa</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="266"/>
<source>An old sync journal '%1' was found, but could not be removed. Please make sure that no application is currently using it.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="927"/>
<source> (backup)</source>
<translation> (varmuuskopio)</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="932"/>
<source> (backup %1)</source>
<translation> (varmuuskopio %1)</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1139"/>
<source>Undefined State.</source>
<translation>Määrittelemätön tila.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1142"/>
<source>Waiting to start syncing.</source>
<translation>Odotetaan synkronoinnin aloitusta.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1145"/>
<source>Preparing for sync.</source>
<translation>Valmistellaan synkronointia.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1148"/>
<source>Sync is running.</source>
<translation>Synkronointi on meneillään.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1151"/>
<source>Last Sync was successful.</source>
<translation>Viimeisin synkronointi suoritettiin onnistuneesti.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1156"/>
<source>Last Sync was successful, but with warnings on individual files.</source>
<translation>Viimeisin synkronointi onnistui, mutta yksittäisten tiedostojen kanssa ilmeni varoituksia.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1159"/>
<source>Setup Error.</source>
<translation>Asetusvirhe.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1162"/>
<source>User Abort.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1165"/>
<source>Sync is paused.</source>
<translation>Synkronointi on keskeytetty.</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1171"/>
<source>%1 (Sync is paused)</source>
<translation>%1 (Synkronointi on keskeytetty)</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1179"/>
<source>No valid folder selected!</source>
<translation>Kelvollista kansiota ei ole valittu!</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1190"/>
<source>The selected path is not a folder!</source>
<translation>Valittu polku ei ole kansio!</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1194"/>
<source>You have no permission to write to the selected folder!</source>
<translation>Sinulla ei ole kirjoitusoikeutta valittuun kansioon!</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1209"/>
<source>The local folder %1 is already used in a folder sync connection. Please pick another one!</source>
<translation>Paikallinen kansio %1 on jo käytössä kansion synkronointiyhteydessä. Valitse toinen kansio!</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1214"/>
<source>The local folder %1 already contains a folder used in a folder sync connection. Please pick another one!</source>
<translation>Paikallinen kansio %1 sisältää kansion, jota käytetään kansion synkronointiyhteydessä. Valitse toinen kansio!</translation>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1221"/>
<source>The local folder %1 is a symbolic link. The link target already contains a folder used in a folder sync connection. Please pick another one!</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1228"/>
<source>The local folder %1 is already contained in a folder used in a folder sync connection. Please pick another one!</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderman.cpp" line="1234"/>
<source>The local folder %1 is a symbolic link. The link target is already contained in a folder used in a folder sync connection. Please pick another one!</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::FolderStatusDelegate</name>
<message>
<location filename="../src/gui/folderstatusdelegate.cpp" line="45"/>
<source>Add Folder Sync Connection</source>
<translation>Lisää kansion synkronointiyhteys</translation>
</message>
<message>
<location filename="../src/gui/folderstatusdelegate.cpp" line="241"/>
<source>Synchronizing with local folder</source>
<translation>Synkronoidaan paikallisen kansion kanssa</translation>
</message>
<message>
<location filename="../src/gui/folderstatusdelegate.cpp" line="285"/>
<source>File</source>
<translation>Tiedosto</translation>
</message>
</context>
<context>
<name>OCC::FolderStatusModel</name>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="127"/>
<source>You need to be connected to add a folder</source>
<translation>Yhteyden tulee olla muodostettu, jotta voit lisätä kansion</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="137"/>
<source>Click this button to add a folder to synchronize.</source>
<translation>Napsauta valitaksesi synkronoitavan kansion.</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="148"/>
<source>%1 (%2)</source>
<extracomment>Example text: "File.txt (23KB)"</extracomment>
<translation>%1 (%2)</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="167"/>
<source>Error while loading the list of folders from the server.</source>
<translation>Virhe ladatessa kansiolistausta palvelimelta.</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="205"/>
<source>Signed out</source>
<translation>Kirjauduttu ulos</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="132"/>
<source>Adding folder is disabled because you are already syncing all your files. If you want to sync multiple folders, please remove the currently configured root folder.</source>
<translation>Kansion lisääminen on poistettu käytöstä, koska synkronoit jo kaikki tiedostot. Jos haluat synkronoida useita kansioita, poista nykyisen juurikansion synkronointiyhteys.</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="170"/>
<source>Fetching folder list from server...</source>
<translation>Haetaan kansioluetteloa palvelimelta...</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="841"/>
<source>Checking for changes in '%1'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="876"/>
<source>, '%1'</source>
<extracomment>Build a list of file names</extracomment>
<translation>, '%1'</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="879"/>
<source>'%1'</source>
<extracomment>Argument is a file name</extracomment>
<translation>'%1'</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="904"/>
<source>Syncing %1</source>
<extracomment>Example text: "Syncing 'foo.txt', 'bar.txt'"</extracomment>
<translation>Synkronoidaan %1</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="906"/>
<location filename="../src/gui/folderstatusmodel.cpp" line="916"/>
<source>, </source>
<translation>, </translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="910"/>
<source>download %1/s</source>
<extracomment>Example text: "download 24Kb/s" (%1 is replaced by 24Kb (translated))</extracomment>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="912"/>
<source>u2193 %1/s</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="919"/>
<source>upload %1/s</source>
<extracomment>Example text: "upload 24Kb/s" (%1 is replaced by 24Kb (translated))</extracomment>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="921"/>
<source>u2191 %1/s</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="926"/>
<source>%1 %2 (%3 of %4)</source>
<extracomment>Example text: "uploading foobar.png (2MB of 2MB)"</extracomment>
<translation>%1 %2 (%3/%4)</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="930"/>
<source>%1 %2</source>
<extracomment>Example text: "uploading foobar.png"</extracomment>
<translation>%1 %2</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="947"/>
<source>%5 left, %1 of %2, file %3 of %4</source>
<extracomment>Example text: "5 minutes left, 12 MB of 345 MB, file 6 of 7"</extracomment>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="953"/>
<source>file %1 of %2</source>
<translation>tiedosto %1/%2</translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="993"/>
<source>Waiting...</source>
<translation>Odotetaan...</translation>
</message>
<message numerus="yes">
<location filename="../src/gui/folderstatusmodel.cpp" line="995"/>
<source>Waiting for %n other folder(s)...</source>
<translation><numerusform>Odotetaan %n muuta kansiota...</numerusform><numerusform>Odotetaan %n muuta kansiota...</numerusform></translation>
</message>
<message>
<location filename="../src/gui/folderstatusmodel.cpp" line="1001"/>
<source>Preparing to sync...</source>
<translation>Valmistaudutaan synkronointiin...</translation>
</message>
</context>
<context>
<name>OCC::FolderWizard</name>
<message>
<location filename="../src/gui/folderwizard.cpp" line="542"/>
<source>Add Folder Sync Connection</source>
<translation>Lisää kansion synkronointiyhteys</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="544"/>
<source>Add Sync Connection</source>
<translation>Lisää synkronointiyhteys</translation>
</message>
</context>
<context>
<name>OCC::FolderWizardLocalPath</name>
<message>
<location filename="../src/gui/folderwizard.cpp" line="65"/>
<source>Click to select a local folder to sync.</source>
<translation>Napsauta valitaksesi synkronoitavan paikalliskansion.</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="69"/>
<source>Enter the path to the local folder.</source>
        <translation>Anna paikallisen kansion polku.</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="126"/>
<source>Select the source folder</source>
<translation>Valitse lähdekansio</translation>
</message>
</context>
<context>
<name>OCC::FolderWizardRemotePath</name>
<message>
<location filename="../src/gui/folderwizard.cpp" line="176"/>
<source>Create Remote Folder</source>
<translation>Luo etäkansio</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="177"/>
<source>Enter the name of the new folder to be created below '%1':</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="206"/>
<source>Folder was successfully created on %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="217"/>
<source>Authentication failed accessing %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="219"/>
<source>Failed to create the folder on %1. Please check manually.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="226"/>
<source>Failed to list a folder. Error: %1</source>
<translation>Kansion listaaminen epäonnistui. Virhe: %1</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="307"/>
<source>Choose this to sync the entire account</source>
<translation>Valitse tämä synkronoidaksesi koko tilin</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="436"/>
<source>This folder is already being synced.</source>
<translation>Tätä kansiota synkronoidaan jo.</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="438"/>
<source>You are already syncing <i>%1</i>, which is a parent folder of <i>%2</i>.</source>
<translation>Synkronoit jo kansiota <i>%1</i>, ja se on kansion <i>%2</i> yläkansio.</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="442"/>
<source>You are already syncing all your files. Syncing another folder is <b>not</b> supported. If you want to sync multiple folders, please remove the currently configured root folder sync.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::FolderWizardSelectiveSync</name>
<message>
<location filename="../src/gui/folderwizard.cpp" line="480"/>
<source>Choose What to Sync: You can optionally deselect remote subfolders you do not wish to synchronize.</source>
<translation>Päätä mitä synkronoidaan: voit valinnaisesti jättää valitsematta etäkansioita, joita et halua synkronoitavan.</translation>
</message>
</context>
<context>
<name>OCC::FormatWarningsWizardPage</name>
<message>
<location filename="../src/gui/folderwizard.cpp" line="47"/>
<source><b>Warning:</b> %1</source>
<translation><b>Varoitus:</b> %1</translation>
</message>
<message>
<location filename="../src/gui/folderwizard.cpp" line="49"/>
<source><b>Warning:</b></source>
<translation><b>Varoitus:</b></translation>
</message>
</context>
<context>
<name>OCC::GETFileJob</name>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="150"/>
<source>No E-Tag received from server, check Proxy/Gateway</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="157"/>
<source>We received a different E-Tag for resuming. Retrying next time.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="184"/>
<source>Server returned wrong content-range</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="292"/>
<source>Connection Timeout</source>
<translation>Yhteys aikakatkaistiin</translation>
</message>
</context>
<context>
<name>OCC::GeneralSettings</name>
<message>
<location filename="../src/gui/generalsettings.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="20"/>
<source>General Settings</source>
<translation>Yleisasetukset</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="40"/>
<source>For System Tray</source>
<translation>Ilmoitusalueelle</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="53"/>
<source>Advanced</source>
<translation>Lisäasetukset</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="101"/>
<source>MB</source>
<translation>Mt</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="26"/>
<source>&Launch on System Startup</source>
<translation>&Käynnistä järjestelmän käynnistyessä</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="33"/>
<source>Show &Desktop Notifications</source>
<translation>&Näytä työpöytäilmoitukset</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="43"/>
<source>Use &Monochrome Icons</source>
<translation>Käytä &mustavalkoisia kuvakkeita</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="59"/>
<source>Edit &Ignored Files</source>
<translation>Muokkaa &ohitettavia tiedostoja</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="81"/>
<source>Ask &confirmation before downloading folders larger than</source>
        <translation>Kysy &vahvistus ennen kuin ladataan kansioita, jotka ovat suurempia kuin</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="129"/>
<source>S&how crash reporter</source>
<translation>N&äytä kaatumisraportoija</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="156"/>
<location filename="../src/gui/generalsettings.ui" line="162"/>
<source>About</source>
<translation>Tietoja</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="172"/>
<source>Updates</source>
<translation>Päivitykset</translation>
</message>
<message>
<location filename="../src/gui/generalsettings.ui" line="197"/>
<source>&Restart && Update</source>
<translation>&Käynnistä uudelleen && päivitä</translation>
</message>
</context>
<context>
<name>OCC::HttpCredentialsGui</name>
<message>
<location filename="../src/gui/creds/httpcredentialsgui.cpp" line="34"/>
<source>Please enter %1 password:
User: %2
Account: %3
</source>
<translation>Anna %1-salasana:
Käyttäjä: %2
Tili: %3
</translation>
</message>
<message>
<location filename="../src/gui/creds/httpcredentialsgui.cpp" line="40"/>
<source>Reading from keychain failed with error: '%1'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/creds/httpcredentialsgui.cpp" line="45"/>
<source>Enter Password</source>
<translation>Anna salasana</translation>
</message>
</context>
<context>
<name>OCC::IgnoreListEditor</name>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="14"/>
<source>Ignored Files Editor</source>
<translation>Ohitettavien tiedostojen muokkain</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="20"/>
<source>Global Ignore Settings</source>
<translation>Yleiset ohitusasetukset</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="26"/>
<source>Sync hidden files</source>
<translation>Synkronoi piilotiedostot</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="36"/>
<source>Files Ignored by Patterns</source>
<translation>Kaavojen perusteella ohitettavat tiedostot</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="55"/>
<source>Add</source>
<translation>Lisää</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="75"/>
<source>Pattern</source>
<translation>Kaava</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="80"/>
<source>Allow Deletion</source>
<translation>Salli poistaminen</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.ui" line="45"/>
<source>Remove</source>
<translation>Poista</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.cpp" line="40"/>
<source>Files or folders matching a pattern will not be synchronized.
Items where deletion is allowed will be deleted if they prevent a directory from being removed. This is useful for meta data.</source>
<translation>Kaavaa vastaavat tiedostot ja kansiot jätetään synkronoimatta.
Kohteet, joiden poisto on sallittu, poistetaan, jos ne estävät kansion poistamisen. Tämä on hyödyllistä metatietojen osalta.</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.cpp" line="110"/>
<source>Could not open file</source>
<translation>Tiedoston avaaminen ei onnistunut</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.cpp" line="111"/>
<source>Cannot write changes to '%1'.</source>
<translation>Muutoksien kirjoittaminen kohteeseen '%1' epäonnistui.</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.cpp" line="138"/>
<source>Add Ignore Pattern</source>
<translation>Lisää ohituskaava</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.cpp" line="139"/>
<source>Add a new ignore pattern:</source>
<translation>Lisää uusi ohituskaava:</translation>
</message>
<message>
<location filename="../src/gui/ignorelisteditor.cpp" line="46"/>
<source>This entry is provided by the system at '%1' and cannot be modified in this view.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::LogBrowser</name>
<message>
<location filename="../src/gui/logbrowser.cpp" line="59"/>
<source>Log Output</source>
<translation>Loki</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="71"/>
<source>&Search:</source>
<translation>&Etsi:</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="79"/>
<source>&Find</source>
<translation>&Etsi</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="97"/>
<source>Clear</source>
<translation>Tyhjennä</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="98"/>
<source>Clear the log display.</source>
<translation>Tyhjennä lokinäyttö.</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="104"/>
<source>S&ave</source>
<translation>&Tallenna</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="105"/>
<source>Save the log file to a file on disk for debugging.</source>
<translation>Tallenna loki tiedostoon virheenetsintää varten.</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="184"/>
<source>Save log file</source>
<translation>Tallenna lokitiedosto</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="194"/>
<source>Error</source>
<translation>Virhe</translation>
</message>
<message>
<location filename="../src/gui/logbrowser.cpp" line="194"/>
<source>Could not write to log file %1</source>
<translation>Lokitiedostoon %1 kirjoittaminen epäonnistui</translation>
</message>
</context>
<context>
<name>OCC::Logger</name>
<message>
<location filename="../src/libsync/logger.cpp" line="190"/>
<source>Error</source>
<translation>Virhe</translation>
</message>
<message>
<location filename="../src/libsync/logger.cpp" line="191"/>
<source><nobr>File '%1'<br/>cannot be opened for writing.<br/><br/>The log output can <b>not</b> be saved!</nobr></source>
        <translation><nobr>Tiedostoa '%1'<br/>ei voida avata kirjoittamista varten.<br/><br/>Lokitulostusta <b>ei</b> pystytä tallentamaan!</nobr></translation>
</message>
</context>
<context>
<name>OCC::MoveJob</name>
<message>
<location filename="../src/libsync/propagateremotemove.cpp" line="48"/>
<source>Connection timed out</source>
<translation>Yhteys aikakatkaistiin</translation>
</message>
</context>
<context>
<name>OCC::NSISUpdater</name>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="331"/>
<source>New Version Available</source>
<translation>Uusi versio saatavilla</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="337"/>
<source><p>A new version of the %1 Client is available.</p><p><b>%2</b> is available for download. The installed version is %3.</p></source>
<translation><p>Uusi versio %1-asiakasohjelmistosta on saatavilla.</p><p><b>%2</b> on ladattavissa. Asennettu versio on %3.</p></translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="350"/>
<source>Skip this version</source>
<translation>Ohita tämä versio</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="351"/>
<source>Skip this time</source>
<translation>Ohita tämän kerran</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="352"/>
<source>Get update</source>
<translation>Päivitä</translation>
</message>
</context>
<context>
<name>OCC::NetworkSettings</name>
<message>
<location filename="../src/gui/networksettings.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="23"/>
<source>Proxy Settings</source>
<translation>Välityspalvelimen asetukset</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="29"/>
<source>No Proxy</source>
<translation>Ei välityspalvelinta</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="42"/>
<source>Use system proxy</source>
<translation>Käytä järjestelmän välityspalvelinta</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="52"/>
<source>Specify proxy manually as</source>
<translation>Määritä välityspalvelin käsin</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="80"/>
<source>Host</source>
<translation>Isäntä</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="100"/>
<source>:</source>
<translation>:</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="131"/>
<source>Proxy server requires authentication</source>
<translation>Välityspalvelin vaatii tunnistautumisen</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="190"/>
<source>Download Bandwidth</source>
<translation>Latauskaista</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="196"/>
<location filename="../src/gui/networksettings.ui" line="275"/>
<source>Limit to</source>
<translation>Rajoita</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="241"/>
<location filename="../src/gui/networksettings.ui" line="320"/>
<source>KBytes/s</source>
<translation>kilotavua/s</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="203"/>
<location filename="../src/gui/networksettings.ui" line="292"/>
<source>No limit</source>
<translation>Ei rajoitusta</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="250"/>
<location filename="../src/gui/networksettings.ui" line="282"/>
<source>Limit to 3/4 of estimated bandwidth</source>
<translation>Rajoita 3/4:aan arvioidusta kaistanleveydestä</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="269"/>
<source>Upload Bandwidth</source>
<translation>Lähetyskaista</translation>
</message>
<message>
<location filename="../src/gui/networksettings.ui" line="253"/>
<location filename="../src/gui/networksettings.ui" line="285"/>
<source>Limit automatically</source>
<translation>Rajoita automaattisesti</translation>
</message>
<message>
<location filename="../src/gui/networksettings.cpp" line="34"/>
<source>Hostname of proxy server</source>
<translation>Välityspalvelinkoneen nimi</translation>
</message>
<message>
<location filename="../src/gui/networksettings.cpp" line="35"/>
<source>Username for proxy server</source>
<translation>Välityspalvelimen käyttäjätunnus</translation>
</message>
<message>
<location filename="../src/gui/networksettings.cpp" line="36"/>
<source>Password for proxy server</source>
<translation>Välityspalvelimen salasana</translation>
</message>
<message>
<location filename="../src/gui/networksettings.cpp" line="38"/>
<source>HTTP(S) proxy</source>
<translation>HTTP(S)-välityspalvelin</translation>
</message>
<message>
<location filename="../src/gui/networksettings.cpp" line="39"/>
<source>SOCKS5 proxy</source>
<translation>SOCKS5-välityspalvelin</translation>
</message>
<message>
<location filename="../src/gui/networksettings.cpp" line="133"/>
<source>Qt >= 5.4 is required in order to use the bandwidth limit</source>
<translation>Qt >= 5.4 vaaditaan kaistanrajoituksen käyttöä varten</translation>
</message>
</context>
<context>
<name>OCC::NotificationWidget</name>
<message>
<location filename="../src/gui/notificationwidget.cpp" line="50"/>
<source>Created at %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/notificationwidget.cpp" line="99"/>
<source>Closing in a few seconds...</source>
<translation>Suljetaan muutamassa sekunnissa...</translation>
</message>
<message>
<location filename="../src/gui/notificationwidget.cpp" line="133"/>
<source>%1 request failed at %2</source>
<extracomment>The second parameter is a time, such as 'failed at 09:58pm'</extracomment>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/notificationwidget.cpp" line="139"/>
<source>'%1' selected at %2</source>
<extracomment>The second parameter is a time, such as 'selected at 09:58pm'</extracomment>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::OCUpdater</name>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="103"/>
<source>New %1 Update Ready</source>
<translation>Uusi %1-päivitys valmiina</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="104"/>
<source>A new update for %1 is about to be installed. The updater may ask
for additional privileges during the process.</source>
<translation>Uusi %1-päivitys on valmiina asennettavaksi. Päivitysohjelma saattaa pyytää lisäoikeuksia päivityksen aikana.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="141"/>
<source>Downloading version %1. Please wait...</source>
<translation>Ladataan versiota %1. Odota hetki...</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="145"/>
<source>Could not download update. Please click <a href='%1'>here</a> to download the update manually.</source>
<translation>Päivityksen lataaminen epäonnistui. Napsauta <a href='%1'>tästä</a> ladataksesi päivityksen manuaalisesti.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="147"/>
<source>Could not check for new updates.</source>
<translation>Päivitysten tarkistus epäonnistui.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="143"/>
<source>%1 version %2 available. Restart application to start the update.</source>
<translation>%1-versio %2 saatavilla. Käynnistä sovellus uudelleen aloittaaksesi päivityksen.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="149"/>
<source>New %1 version %2 available. Please use the system's update tool to install it.</source>
<translation>Uusi %1-versio %2 on saatavilla. Käytä järjestelmän päivitystyökalua asentaaksesi sen.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="151"/>
<source>Checking update server...</source>
<translation>Tarkistetaan päivityspalvelinta...</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="153"/>
<source>Update status is unknown: Did not check for new updates.</source>
<translation>Päivityksen tila on tuntematon: uusia päivityksiä ei tarkistettu.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="157"/>
<source>No updates available. Your installation is at the latest version.</source>
<translation>Päivityksiä ei ole saatavilla. Käytössäsi on uusin versio.</translation>
</message>
<message>
<location filename="../src/gui/updater/ocupdater.cpp" line="177"/>
<source>Update Check</source>
<translation>Päivitystarkistus</translation>
</message>
</context>
<context>
<name>OCC::OwncloudAdvancedSetupPage</name>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="50"/>
<source>Connect to %1</source>
<translation>Muodosta yhteys - %1</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="51"/>
<source>Setup local folder options</source>
<translation>Aseta paikallisen kansion valinnat</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="60"/>
<source>Connect...</source>
<translation>Yhdistä...</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="139"/>
<source>%1 folder '%2' is synced to local folder '%3'</source>
<translation>%1-kansio '%2' on synkronoitu paikalliseen kansioon '%3'</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="142"/>
<source>Sync the folder '%1'</source>
<translation>Synkronoi kansio '%1'</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="147"/>
<source><p><small><strong>Warning:</strong> The local folder is not empty. Pick a resolution!</small></p></source>
<translation><p><small><strong>Varoitus:</strong> Paikallinen kansio ei ole tyhjä. Valitse jatkotoimenpide!</small></p></translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="246"/>
<source>Local Sync Folder</source>
<translation>Paikallinen synkronointikansio</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="285"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.cpp" line="306"/>
<source>(%1)</source>
<translation>(%1)</translation>
</message>
</context>
<context>
<name>OCC::OwncloudConnectionMethodDialog</name>
<message>
<location filename="../src/gui/wizard/owncloudconnectionmethoddialog.ui" line="14"/>
<source>Connection failed</source>
<translation>Yhteys epäonnistui</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudconnectionmethoddialog.ui" line="43"/>
<source><html><head/><body><p>Failed to connect to the secure server address specified. How do you wish to proceed?</p></body></html></source>
<translation><html><head/><body><p>Yhteys määritettyyn palvelimen salattuun osoitteeseen epäonnistui. Miten haluat edetä?</p></body></html></translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudconnectionmethoddialog.ui" line="55"/>
<source>Select a different URL</source>
<translation>Valitse eri verkko-osoite</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudconnectionmethoddialog.ui" line="62"/>
<source>Retry unencrypted over HTTP (insecure)</source>
<translation>Yritä uudelleen salaamattomana HTTP:n yli (turvaton!)</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudconnectionmethoddialog.ui" line="69"/>
<source>Configure client-side TLS certificate</source>
<translation>Määritä asiakaspuolen TLS-varmenteen asetukset</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudconnectionmethoddialog.cpp" line="37"/>
<source><html><head/><body><p>Failed to connect to the secure server address <em>%1</em>. How do you wish to proceed?</p></body></html></source>
<translation><html><head/><body><p>Yhteys palvelimen salattuun osoitteeseen <em>%1</em> epäonnistui. Miten haluat edetä?</p></body></html></translation>
</message>
</context>
<context>
<name>OCC::OwncloudHttpCredsPage</name>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.cpp" line="51"/>
<source>&Email</source>
<translation>&Sähköpostiosoite</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.cpp" line="61"/>
<source>Connect to %1</source>
<translation>Muodosta yhteys - %1</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.cpp" line="62"/>
<source>Enter user credentials</source>
<translation>Anna käyttäjätiedot</translation>
</message>
</context>
<context>
<name>OCC::OwncloudSetupPage</name>
<message>
<location filename="../src/gui/wizard/owncloudsetuppage.cpp" line="51"/>
<source>Connect to %1</source>
<translation>Muodosta yhteys - %1</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetuppage.cpp" line="52"/>
<source>Setup %1 server</source>
<translation>%1-palvelimen asetuksien määritys</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetuppage.cpp" line="131"/>
<source>This url is NOT secure as it is not encrypted.
It is not advisable to use it.</source>
<translation>Tämä osoite ei ole turvallinen, koska sitä ei ole salattu.
Osoitteen käyttäminen ei ole suositeltavaa.</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetuppage.cpp" line="135"/>
<source>This url is secure. You can use it.</source>
<translation>Tämä osoite on turvallinen. Voit käyttää sitä.</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetuppage.cpp" line="174"/>
<source>&Next ></source>
<translation>&Seuraava ></translation>
</message>
</context>
<context>
<name>OCC::OwncloudSetupWizard</name>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="145"/>
<source><font color="green">Successfully connected to %1: %2 version %3 (%4)</font><br/><br/></source>
<translation><font color="green">Muodostettu yhteys onnistuneesti kohteeseen %1: %2 versio %3 (%4)</font><br/><br/></translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="169"/>
<source>Failed to connect to %1 at %2:<br/>%3</source>
<translation>Yhteys %1iin osoitteessa %2 epäonnistui:<br/>%3</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="181"/>
<source>Timeout while trying to connect to %1 at %2.</source>
<translation>Aikakatkaisu yritettäessä yhteyttä kohteeseen %1 osoitteessa %2.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="192"/>
<source>Trying to connect to %1 at %2...</source>
<translation>Yritetään yhdistää kohteeseen %1 osoitteessa %2...</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="239"/>
<source>The authenticated request to the server was redirected to '%1'. The URL is bad, the server is misconfigured.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="261"/>
<source>There was an invalid response to an authenticated webdav request</source>
<translation>Todennettuun webdav-pyyntöön saatiin virheellinen vastaus</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="252"/>
<source>Access forbidden by server. To verify that you have proper access, <a href="%1">click here</a> to access the service with your browser.</source>
<translation>Palvelin esti käyttämisen. Vahvista käyttöoikeutesi palvelimeen <a href="%1">napsauttamalla tästä</a> ja kirjaudu palveluun selaimella.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="303"/>
<source>Local sync folder %1 already exists, setting it up for sync.<br/><br/></source>
<translation>Paikallinen kansio %1 on jo olemassa, asetetaan se synkronoitavaksi.<br/><br/></translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="305"/>
<source>Creating local sync folder %1...</source>
<translation>Luodaan paikallista synkronointikansiota %1...</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="309"/>
<source>ok</source>
<translation>ok</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="311"/>
<source>failed.</source>
<translation>epäonnistui.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="313"/>
<source>Could not create local folder %1</source>
<translation>Paikalliskansion %1 luonti epäonnistui</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="338"/>
<source>No remote folder specified!</source>
<translation>Etäkansiota ei määritelty!</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="344"/>
<source>Error: %1</source>
<translation>Virhe: %1</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="357"/>
<source>creating folder on ownCloud: %1</source>
<translation>luodaan kansio ownCloudiin: %1</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="373"/>
<source>Remote folder %1 created successfully.</source>
<translation>Etäkansio %1 luotiin onnistuneesti.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="375"/>
<source>The remote folder %1 already exists. Connecting it for syncing.</source>
<translation>Etäkansio %1 on jo olemassa. Otetaan siihen yhteyttä tiedostojen täsmäystä varten.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="377"/>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="379"/>
<source>The folder creation resulted in HTTP error code %1</source>
<translation>Kansion luonti aiheutti HTTP-virhekoodin %1</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="381"/>
<source>The remote folder creation failed because the provided credentials are wrong!<br/>Please go back and check your credentials.</p></source>
<translation>Etäkansion luominen epäonnistui, koska antamasi tunnus/salasana ei täsmää!<br/>Palaa takaisin ja tarkista tunnus ja salasana.</p></translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="384"/>
<source><p><font color="red">Remote folder creation failed probably because the provided credentials are wrong.</font><br/>Please go back and check your credentials.</p></source>
<translation><p><font color="red">Pilvipalvelun etäkansion luominen ei onnistunut, koska tunnistautumistietosi ovat todennäköisesti virheelliset.</font><br/>Palaa takaisin ja tarkista käyttäjätunnus ja salasana.</p></translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="389"/>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="390"/>
<source>Remote folder %1 creation failed with error <tt>%2</tt>.</source>
<translation>Etäkansion %1 luonti epäonnistui, virhe <tt>%2</tt>.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="406"/>
<source>A sync connection from %1 to remote directory %2 was set up.</source>
<translation>Täsmäysyhteys kansiosta %1 etäkansioon %2 on asetettu.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="411"/>
<source>Successfully connected to %1!</source>
<translation>Yhteys kohteeseen %1 muodostettiin onnistuneesti!</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="418"/>
<source>Connection to %1 could not be established. Please check again.</source>
<translation>Yhteyttä osoitteeseen %1 ei voitu muodostaa. Ole hyvä ja tarkista uudelleen.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="431"/>
<source>Folder rename failed</source>
<translation>Kansion nimen muuttaminen epäonnistui</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="432"/>
<source>Can't remove and back up the folder because the folder or a file in it is open in another program. Please close the folder or file and hit retry or cancel the setup.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/owncloudsetupwizard.cpp" line="474"/>
<source><font color="green"><b>Local sync folder %1 successfully created!</b></font></source>
<translation><font color="green"><b>Paikallinen synkronointikansio %1 luotu onnistuneesti!</b></font></translation>
</message>
</context>
<context>
<name>OCC::OwncloudWizard</name>
<message>
<location filename="../src/gui/wizard/owncloudwizard.cpp" line="71"/>
<source>%1 Connection Wizard</source>
<translation>%1-yhteysavustaja</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudwizard.cpp" line="80"/>
<source>Skip folders configuration</source>
<translation>Ohita kansioiden määritykset</translation>
</message>
</context>
<context>
<name>OCC::OwncloudWizardResultPage</name>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.cpp" line="38"/>
<source>Everything set up!</source>
<translation>Kaikki valmiina!</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.cpp" line="42"/>
<source>Open Local Folder</source>
<translation>Avaa paikallinen kansio</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.cpp" line="50"/>
<source>Open %1 in Browser</source>
<translation>Avaa %1 selaimessa</translation>
</message>
</context>
<context>
<name>OCC::PUTFileJob</name>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="103"/>
<source>Connection Timeout</source>
<translation>Yhteys aikakatkaistiin</translation>
</message>
</context>
<context>
<name>OCC::PollJob</name>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="160"/>
<source>Invalid JSON reply from the poll URL</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::PropagateDirectory</name>
<message>
<location filename="../src/libsync/owncloudpropagator.cpp" line="718"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::PropagateDownloadFileQNAM</name>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="327"/>
<source>File %1 can not be downloaded because of a local file name clash!</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="376"/>
<source>The download would reduce free disk space below %1</source>
<translation>Lataaminen laskisi vapaan levytilan määrän alle rajan %1</translation>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="381"/>
<source>Free space on disk is less than %1</source>
<translation>Levyllä on vapaata tilaa vähemmän kuin %1</translation>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="496"/>
<source>File was deleted from server</source>
<translation>Tiedosto poistettiin palvelimelta</translation>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="545"/>
<source>The file could not be downloaded completely.</source>
<translation>Tiedostoa ei voitu ladata täysin.</translation>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="552"/>
<source>The downloaded file is empty despite the server announced it should have been %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="711"/>
<source>File %1 cannot be saved because of a local file name clash!</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="759"/>
<source>File has changed since discovery</source>
<translation>Tiedosto on muuttunut löytymisen jälkeen</translation>
</message>
<message>
<location filename="../src/libsync/propagatedownload.cpp" line="809"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::PropagateItemJob</name>
<message>
<location filename="../src/libsync/owncloudpropagator.cpp" line="125"/>
<source>; Restoration Failed: %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/owncloudpropagator.cpp" line="148"/>
<source>Continue blacklisting:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/owncloudpropagator.cpp" line="241"/>
<source>A file or folder was removed from a read only share, but restoring failed: %1</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::PropagateLocalMkdir</name>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="155"/>
<source>could not delete file %1, error: %2</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="163"/>
<source>Attention, possible case sensitivity clash with %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="169"/>
<source>could not create folder %1</source>
<translation>kansiota %1 ei voitu luoda</translation>
</message>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="181"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::PropagateLocalRemove</name>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="72"/>
<source>Error removing '%1': %2;</source>
<translation>Virhe poistaessa '%1': %2;</translation>
</message>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="98"/>
<source>Could not remove folder '%1'</source>
<translation>Ei voitu poistaa kansiota '%1'</translation>
</message>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="116"/>
<source>Could not remove %1 because of a local file name clash</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::PropagateLocalRename</name>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="215"/>
<source>File %1 can not be renamed to %2 because of a local file name clash</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagatorjobs.cpp" line="245"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::PropagateRemoteDelete</name>
<message>
<location filename="../src/libsync/propagateremotedelete.cpp" line="94"/>
<source>The file has been removed from a read only share. It was restored.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateremotedelete.cpp" line="115"/>
<source>Wrong HTTP code returned by server. Expected 204, but received "%1 %2".</source>
<translation>HTTP-palvelin palautti väärän koodin. Odotettiin koodia 204, vastaanotettiin "%1 %2".</translation>
</message>
</context>
<context>
<name>OCC::PropagateRemoteMkdir</name>
<message>
<location filename="../src/libsync/propagateremotemkdir.cpp" line="97"/>
<source>Wrong HTTP code returned by server. Expected 201, but received "%1 %2".</source>
<translation>HTTP-palvelin palautti väärän koodin. Odotettiin koodia 201, vastaanotettiin "%1 %2".</translation>
</message>
<message>
<location filename="../src/libsync/propagateremotemkdir.cpp" line="148"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::PropagateRemoteMove</name>
<message>
<location filename="../src/libsync/propagateremotemove.cpp" line="87"/>
<source>This folder must not be renamed. It is renamed back to its original name.</source>
<translation>Tätä kansiota ei tule nimetä uudelleen. Se nimetään takaisin alkuperäiselle nimelleen.</translation>
</message>
<message>
<location filename="../src/libsync/propagateremotemove.cpp" line="89"/>
<source>This folder must not be renamed. Please name it back to Shared.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateremotemove.cpp" line="127"/>
<source>The file was renamed but is part of a read only share. The original file was restored.</source>
<translation>Tiedosto nimettiin uudelleen, mutta se on osa "vain luku"-jakoa. Alkuperäinen tiedosto palautettiin.</translation>
</message>
<message>
<location filename="../src/libsync/propagateremotemove.cpp" line="144"/>
<source>Wrong HTTP code returned by server. Expected 201, but received "%1 %2".</source>
<translation>HTTP-palvelin palautti väärän koodin. Odotettiin koodia 201, vastaanotettiin "%1 %2".</translation>
</message>
<message>
<location filename="../src/libsync/propagateremotemove.cpp" line="175"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::PropagateUploadFileQNAM</name>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="297"/>
<source>File Removed</source>
<translation>Tiedosto poistettu</translation>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="309"/>
<source>Local file changed during syncing. It will be resumed.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="321"/>
<location filename="../src/libsync/propagateupload.cpp" line="710"/>
<source>Local file changed during sync.</source>
<translation>Paikallinen tiedosto muuttui synkronoinnin aikana.</translation>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="637"/>
<source>Forcing job abort on HTTP connection reset with Qt < 5.4.2.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="645"/>
<source>The file was edited locally but is part of a read only share. It is restored and your edit is in the conflict file.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="676"/>
<source>Poll URL missing</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="699"/>
<source>The local file was removed during sync.</source>
<translation>Paikallinen tiedosto poistettiin synkronoinnin aikana.</translation>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="725"/>
<source>The server did not acknowledge the last chunk. (No e-tag was present)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/propagateupload.cpp" line="800"/>
<source>Error writing metadata to the database</source>
<translation>Virhe kirjoitettaessa metadataa tietokantaan</translation>
</message>
</context>
<context>
<name>OCC::ProtocolWidget</name>
<message>
<location filename="../src/gui/protocolwidget.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.ui" line="20"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="54"/>
<source>Time</source>
<translation>Aika</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="55"/>
<source>File</source>
<translation>Tiedosto</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="56"/>
<source>Folder</source>
<translation>Kansio</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="57"/>
<source>Action</source>
<translation>Toiminto</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="58"/>
<source>Size</source>
<translation>Koko</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="78"/>
<source>Local sync protocol</source>
<translation>Paikallinen synkronointiprotokolla</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="80"/>
<source>Copy</source>
<translation>Kopioi</translation>
</message>
<message>
<location filename="../src/gui/protocolwidget.cpp" line="81"/>
<source>Copy the activity list to the clipboard.</source>
<translation>Kopioi toimilista leikepöydälle.</translation>
</message>
</context>
<context>
<name>OCC::ProxyAuthDialog</name>
<message>
<location filename="../src/gui/proxyauthdialog.ui" line="20"/>
<source>Proxy authentication required</source>
<translation>Välityspalvelin vaatii tunnistautumisen</translation>
</message>
<message>
<location filename="../src/gui/proxyauthdialog.ui" line="26"/>
<source>Username:</source>
<translation>Käyttäjätunnus:</translation>
</message>
<message>
<location filename="../src/gui/proxyauthdialog.ui" line="36"/>
<source>Proxy:</source>
<translation>Välityspalvelin:</translation>
</message>
<message>
<location filename="../src/gui/proxyauthdialog.ui" line="43"/>
<source>The proxy server needs a username and password.</source>
<translation>Välityspalvelin vaatii käyttäjätunnuksen ja salasanan.</translation>
</message>
<message>
<location filename="../src/gui/proxyauthdialog.ui" line="50"/>
<source>Password:</source>
<translation>Salasana:</translation>
</message>
<message>
<location filename="../src/gui/proxyauthdialog.ui" line="74"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
</context>
<context>
<name>OCC::SelectiveSyncDialog</name>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="399"/>
<source>Unchecked folders will be <b>removed</b> from your local file system and will not be synchronized to this computer anymore</source>
<translation>Ilman valintaa olevat kansiot <b>poistetaan</b> paikallisesta tiedostojärjestelmästä, eikä niitä synkronoida enää jatkossa tämän tietokoneen kanssa</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="416"/>
<source>Choose What to Sync: Select remote subfolders you wish to synchronize.</source>
<translation>Päätä mitä synkronoidaan: valitse etäkansiot, jotka haluat synkronoida.</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="417"/>
<source>Choose What to Sync: Deselect remote subfolders you do not wish to synchronize.</source>
<translation>Päätä mitä synkronoidaan: jätä valitsematta etäkansiot, joita et halua synkronoitavan.</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="423"/>
<source>Choose What to Sync</source>
<translation>Valitse synkronoitavat tiedot</translation>
</message>
</context>
<context>
<name>OCC::SelectiveSyncTreeView</name>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="60"/>
<source>Loading ...</source>
<translation>Ladataan...</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="73"/>
<source>Name</source>
<translation>Nimi</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="74"/>
<source>Size</source>
<translation>Koko</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="201"/>
<location filename="../src/gui/selectivesyncdialog.cpp" line="245"/>
<source>No subfolders currently on the server.</source>
<translation>Palvelimella ei ole alihakemistoja juuri nyt.</translation>
</message>
<message>
<location filename="../src/gui/selectivesyncdialog.cpp" line="247"/>
<source>An error occurred while loading the list of sub folders.</source>
<translation>Alikansioluetteloa ladatessa tapahtui virhe.</translation>
</message>
</context>
<context>
<name>OCC::SettingsDialog</name>
<message>
<location filename="../src/gui/settingsdialog.ui" line="14"/>
<source>Settings</source>
<translation>Asetukset</translation>
</message>
<message>
<location filename="../src/gui/settingsdialog.cpp" line="87"/>
<source>Activity</source>
<translation>Toimet</translation>
</message>
<message>
<location filename="../src/gui/settingsdialog.cpp" line="96"/>
<source>General</source>
<translation>Yleiset</translation>
</message>
<message>
<location filename="../src/gui/settingsdialog.cpp" line="102"/>
<source>Network</source>
<translation>Verkko</translation>
</message>
<message>
<location filename="../src/gui/settingsdialog.cpp" line="199"/>
<source>Account</source>
<translation>Tili</translation>
</message>
</context>
<context>
<name>OCC::SettingsDialogMac</name>
<message>
<location filename="../src/gui/settingsdialogmac.cpp" line="69"/>
<source>%1</source>
<translation>%1</translation>
</message>
<message>
<location filename="../src/gui/settingsdialogmac.cpp" line="73"/>
<source>Activity</source>
<translation>Toimet</translation>
</message>
<message>
<location filename="../src/gui/settingsdialogmac.cpp" line="87"/>
<source>General</source>
<translation>Yleiset</translation>
</message>
<message>
<location filename="../src/gui/settingsdialogmac.cpp" line="91"/>
<source>Network</source>
<translation>Verkko</translation>
</message>
<message>
<location filename="../src/gui/settingsdialogmac.cpp" line="120"/>
<source>Account</source>
<translation>Tili</translation>
</message>
</context>
<context>
<name>OCC::ShareDialog</name>
<message>
<location filename="../src/gui/sharedialog.ui" line="54"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/sharedialog.ui" line="28"/>
<source>share label</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/sharedialog.ui" line="14"/>
<source>Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/sharedialog.ui" line="47"/>
<source>ownCloud Path:</source>
<translation>ownCloud-polku:</translation>
</message>
<message>
<location filename="../src/gui/sharedialog.cpp" line="98"/>
<source>%1 Sharing</source>
<translation>%1 - Jakaminen</translation>
</message>
<message>
<location filename="../src/gui/sharedialog.cpp" line="73"/>
<source>%1</source>
<translation>%1</translation>
</message>
<message>
<location filename="../src/gui/sharedialog.cpp" line="95"/>
<source>Folder: %2</source>
<translation>Kansio: %2</translation>
</message>
<message>
<location filename="../src/gui/sharedialog.cpp" line="101"/>
<source>The server does not allow sharing</source>
<translation>Palvelin ei salli jakamista</translation>
</message>
<message>
<location filename="../src/gui/sharedialog.cpp" line="113"/>
<source>Retrieving maximum possible sharing permissions from server...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/sharedialog.cpp" line="169"/>
<source>The file can not be shared because it was shared without sharing permission.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::ShareLinkWidget</name>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="14"/>
<source>Share NewDocument.odt</source>
<translation>Jaa UusiAsiakirja.odt</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="25"/>
<source>Share link</source>
<translation>Jaa linkki</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="77"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="114"/>
<source>Set &password </source>
<translation>Aseta &amp;salasana </translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="128"/>
<source>Set &expiration date</source>
<translation>Aseta &vanhenemispäivä</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="152"/>
<source>Set password</source>
<translation>Aseta salasana</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="201"/>
<source>Copy &link</source>
<translation>Kopioi &linkki</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.ui" line="225"/>
<source>Allow editing</source>
<translation>Salli muokkaus</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.cpp" line="88"/>
<source>P&assword protect</source>
<translation>&Suojaa salasanalla</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.cpp" line="196"/>
<source>Password Protected</source>
<translation>Salasanasuojattu</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.cpp" line="289"/>
<source>The file can not be shared because it was shared without sharing permission.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.cpp" line="366"/>
<location filename="../src/gui/sharelinkwidget.cpp" line="413"/>
<source>Public sh&aring requires a password</source>
<translation>&Julkinen jakaminen vaatii salasanan</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.cpp" line="435"/>
<source>Please Set Password</source>
<translation>Aseta salasana</translation>
</message>
<message>
<location filename="../src/gui/sharelinkwidget.cpp" line="500"/>
<location filename="../src/gui/sharelinkwidget.cpp" line="501"/>
<source>&Share link</source>
<translation>&Jaa linkki</translation>
</message>
</context>
<context>
<name>OCC::ShareUserGroupWidget</name>
<message>
<location filename="../src/gui/shareusergroupwidget.ui" line="14"/>
<source>Share NewDocument.odt</source>
<translation>Jaa UusiAsiakirja.odt</translation>
</message>
<message>
<location filename="../src/gui/shareusergroupwidget.ui" line="22"/>
<source>Share with users or groups ...</source>
<translation>Jaa käyttäjien tai ryhmien kanssa…</translation>
</message>
<message>
<location filename="../src/gui/shareusergroupwidget.cpp" line="224"/>
<source>No results for '%1'</source>
<translation>Ei tuloksia haulla '%1'</translation>
</message>
</context>
<context>
<name>OCC::ShareWidget</name>
<message>
<location filename="../src/gui/sharewidget.ui" line="17"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/sharewidget.ui" line="37"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/sharewidget.ui" line="66"/>
<source>can edit</source>
<translation>voi muokata</translation>
</message>
<message>
<location filename="../src/gui/sharewidget.ui" line="73"/>
<source>can share</source>
<translation>voi jakaa</translation>
</message>
<message>
<location filename="../src/gui/sharewidget.ui" line="80"/>
<source>...</source>
<translation>...</translation>
</message>
<message>
<location filename="../src/gui/shareusergroupwidget.cpp" line="302"/>
<source>create</source>
<translation>luo</translation>
</message>
<message>
<location filename="../src/gui/shareusergroupwidget.cpp" line="305"/>
<source>change</source>
<translation>muuta</translation>
</message>
<message>
<location filename="../src/gui/shareusergroupwidget.cpp" line="308"/>
<source>delete</source>
<translation>poista</translation>
</message>
</context>
<context>
<name>OCC::ShibbolethCredentials</name>
<message>
<location filename="../src/gui/creds/shibbolethcredentials.cpp" line="235"/>
<source>Login Error</source>
<translation>Kirjautumisvirhe</translation>
</message>
<message>
<location filename="../src/gui/creds/shibbolethcredentials.cpp" line="235"/>
<source>You must sign in as user %1</source>
<translation>Sinun tulee kirjautua käyttäjänä %1</translation>
</message>
</context>
<context>
<name>OCC::ShibbolethWebView</name>
<message>
<location filename="../src/gui/creds/shibboleth/shibbolethwebview.cpp" line="81"/>
<source>%1 - Authenticate</source>
<translation>%1 - Tunnistaudu</translation>
</message>
<message>
<location filename="../src/gui/creds/shibboleth/shibbolethwebview.cpp" line="91"/>
<source>SSL Chipher Debug View</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/creds/shibboleth/shibbolethwebview.cpp" line="99"/>
<source>Reauthentication required</source>
<translation>Tunnistaudu uudelleen</translation>
</message>
<message>
<location filename="../src/gui/creds/shibboleth/shibbolethwebview.cpp" line="99"/>
<source>Your session has expired. You need to re-login to continue to use the client.</source>
<translation>Istunto on vanhentunut. Kirjaudu uudelleen jatkaaksesi sovelluksen käyttämistä.</translation>
</message>
</context>
<context>
<name>OCC::SocketApi</name>
<message>
<location filename="../src/gui/socketapi.cpp" line="453"/>
<source>Share with %1</source>
<comment>parameter is ownCloud</comment>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::SslButton</name>
<message>
<location filename="../src/gui/sslbutton.cpp" line="102"/>
<source><h3>Certificate Details</h3></source>
<translation><h3>Varmenteen tiedot</h3></translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="105"/>
<source>Common Name (CN):</source>
<translation>Yleinen nimi (CN):</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="106"/>
<source>Subject Alternative Names:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="108"/>
<source>Organization (O):</source>
<translation>Organisaatio (O):</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="109"/>
<source>Organizational Unit (OU):</source>
<translation>Organisaatioyksikkö (OU):</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="110"/>
<source>State/Province:</source>
<translation>Lääni/maakunta/provinssi:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="111"/>
<source>Country:</source>
<translation>Maa:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="112"/>
<source>Serial:</source>
<translation>Sarjanumero:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="115"/>
<source><h3>Issuer</h3></source>
<translation><h3>Myöntäjä</h3></translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="118"/>
<source>Issuer:</source>
<translation>Myöntäjä:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="119"/>
<source>Issued on:</source>
<translation>Myönnetty:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="120"/>
<source>Expires on:</source>
<translation>Vanhenee:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="123"/>
<source><h3>Fingerprints</h3></source>
<translation><h3>Sormenjäljet</h3></translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="127"/>
<source>MD 5:</source>
<translation>MD 5:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="129"/>
<source>SHA-256:</source>
<translation>SHA-256:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="131"/>
<source>SHA-1:</source>
<translation>SHA-1:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="135"/>
<source><p><b>Note:</b> This certificate was manually approved</p></source>
<translation><p><b>Huomio:</b> Tämä varmenne hyväksyttiin käsin</p></translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="155"/>
<source>%1 (self-signed)</source>
<translation>%1 (allekirjoitettu itse)</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="157"/>
<source>%1</source>
<translation>%1</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="191"/>
<source>This connection is encrypted using %1 bit %2.
</source>
<translation>Yhteys on salattu, käytössä %1-bittinen %2.
</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="218"/>
<source>No support for SSL session tickets/identifiers</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="229"/>
<source>Certificate information:</source>
<translation>Varmenteen tiedot:</translation>
</message>
<message>
<location filename="../src/gui/sslbutton.cpp" line="195"/>
<source>This connection is NOT secure as it is not encrypted.
</source>
<translation>Yhteys EI OLE turvallinen, koska sitä ei ole salattu.
</translation>
</message>
</context>
<context>
<name>OCC::SslErrorDialog</name>
<message>
<location filename="../src/gui/sslerrordialog.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.ui" line="25"/>
<source>Trust this certificate anyway</source>
<translation>Luota tähän varmenteeseen silti</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="66"/>
<source>Untrusted Certificate</source>
<translation>Varmenne ei ole luotettu</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="135"/>
<source>Cannot connect securely to <i>%1</i>:</source>
<translation>Yhteyttä kohteeseen <i>%1</i> ei voi muodostaa turvallisesti:</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="171"/>
<source>with Certificate %1</source>
<translation>varmenteella %1</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="179"/>
<location filename="../src/gui/sslerrordialog.cpp" line="180"/>
<location filename="../src/gui/sslerrordialog.cpp" line="181"/>
<source>&lt;not specified&gt;</source>
<translation>&lt;ei määritelty&gt;</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="182"/>
<location filename="../src/gui/sslerrordialog.cpp" line="202"/>
<source>Organization: %1</source>
<translation>Organisaatio: %1</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="183"/>
<location filename="../src/gui/sslerrordialog.cpp" line="203"/>
<source>Unit: %1</source>
<translation>Yksikkö: %1</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="184"/>
<location filename="../src/gui/sslerrordialog.cpp" line="204"/>
<source>Country: %1</source>
<translation>Maa: %1</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="191"/>
<source>Fingerprint (MD5): <tt>%1</tt></source>
<translation>Sormenjälki (MD5): <tt>%1</tt></translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="192"/>
<source>Fingerprint (SHA1): <tt>%1</tt></source>
<translation>Sormenjälki (SHA1): <tt>%1</tt></translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="194"/>
<source>Effective Date: %1</source>
<translation>Voimassa oleva päivämäärä: %1</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="195"/>
<source>Expiration Date: %1</source>
<translation>Vanhenemispäivä: %1</translation>
</message>
<message>
<location filename="../src/gui/sslerrordialog.cpp" line="199"/>
<source>Issuer: %1</source>
<translation>Myöntäjä: %1</translation>
</message>
</context>
<context>
<name>OCC::SyncEngine</name>
<message>
<location filename="../src/libsync/syncengine.cpp" line="118"/>
<source>Success.</source>
<translation>Onnistui.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="125"/>
<source>CSync failed to load the journal file. The journal file is corrupted.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="128"/>
<source><p>The %1 plugin for csync could not be loaded.<br/>Please verify the installation!</p></source>
<translation><p>%1-liitännäistä csyncia varten ei voitu ladata.<br/>Varmista asennuksen toimivuus!</p></translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="131"/>
<source>CSync got an error while processing internal trees.</source>
<translation>Csync-synkronointipalvelussa tapahtui virhe sisäisten puurakenteiden prosessoinnissa.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="134"/>
<source>CSync failed to reserve memory.</source>
<translation>CSync ei onnistunut varaamaan muistia.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="137"/>
<source>CSync fatal parameter error.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="140"/>
<source>CSync processing step update failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="143"/>
<source>CSync processing step reconcile failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="146"/>
<source>CSync could not authenticate at the proxy.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="149"/>
<source>CSync failed to lookup proxy or server.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="152"/>
<source>CSync failed to authenticate at the %1 server.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="155"/>
<source>CSync failed to connect to the network.</source>
<translation>CSync ei onnistunut yhdistämään verkkoon.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="158"/>
<source>A network connection timeout happened.</source>
<translation>Tapahtui verkon aikakatkaisu.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="161"/>
<source>A HTTP transmission error happened.</source>
<translation>Tapahtui HTTP-välitysvirhe.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="185"/>
<source>The mounted folder is temporarily not available on the server</source>
<translation>Liitetty kansio on väliaikaisesti pois käytöstä tällä palvelimella</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="191"/>
<source>An error occurred while opening a folder</source>
<translation>Kansiota avatessa tapahtui virhe</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="194"/>
<source>Error while reading folder.</source>
<translation>Kansiota lukiessa tapahtui virhe.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="440"/>
<source>File/Folder is ignored because it's hidden.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="701"/>
<source>Only %1 are available, need at least %2 to start</source>
<comment>Placeholders are postfixed with file sizes using Utility::octetsToString()</comment>
<translation>Vain %1 on käytettävissä, käynnistymiseen tarvitaan %2</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1139"/>
<source>Not allowed because you don't have permission to add parent folder</source>
<translation>Ei sallittu, koska käyttöoikeutesi eivät riitä ylätason kansion lisäämiseen</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1146"/>
<source>Not allowed because you don't have permission to add files in that folder</source>
<translation>Ei sallittu, koska käyttöoikeutesi eivät riitä tiedostojen lisäämiseen kyseiseen kansioon</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="173"/>
<source>CSync: No space on %1 server available.</source>
<translation>CSync: %1-palvelimella ei ole tilaa vapaana.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="176"/>
<source>CSync unspecified error.</source>
<translation>CSync - määrittämätön virhe.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="179"/>
<source>Aborted by the user</source>
<translation>Keskeytetty käyttäjän toimesta</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="434"/>
<source>Filename contains invalid characters that can not be synced cross platform.</source>
<translation>Tiedoston nimi sisältää virheellisiä merkkejä, joita ei voi synkronoida alustariippumattomasti.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="167"/>
<source>CSync failed to access</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="121"/>
<source>CSync failed to load or create the journal file. Make sure you have read and write permissions in the local sync folder.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="164"/>
<source>CSync failed due to unhandled permission denied.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="170"/>
<source>CSync tried to create a folder that already exists.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="182"/>
<source>The service is temporarily unavailable</source>
<translation>Palvelu ei ole juuri nyt käytettävissä</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="188"/>
<source>Access is forbidden</source>
<translation>Pääsy estetty</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="199"/>
<source>An internal error number %1 occurred.</source>
<translation>Sisäinen virhe, numero %1.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="263"/>
<source>The item is not synced because of previous errors: %1</source>
<translation>Kohdetta ei synkronoitu aiempien virheiden vuoksi: %1</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="428"/>
<source>Symbolic links are not supported in syncing.</source>
<translation>Symboliset linkit eivät ole tuettuja synkronoinnissa.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="431"/>
<source>File is listed on the ignore list.</source>
<translation>Tiedosto on ohituslistalla.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="437"/>
<source>Filename is too long.</source>
<translation>Tiedoston nimi on liian pitkä.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="446"/>
<source>Stat failed.</source>
<translation>Stat epäonnistui.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="473"/>
<source>Filename encoding is not valid</source>
<translation>Tiedostonimen merkistökoodaus ei ole kelvollista</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="644"/>
<source>Invalid characters, please rename "%1"</source>
<translation>Virheellisiä merkkejä, anna uusi nimi kohteelle "%1"</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="735"/>
<source>Unable to initialize a sync journal.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="754"/>
<source>Unable to read the blacklist from the local database</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="791"/>
<source>Unable to read from the sync journal.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="837"/>
<source>Cannot open the sync journal</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="894"/>
<source>File name contains at least one invalid character</source>
<translation>Tiedoston nimi sisältää ainakin yhden virheellisen merkin</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1098"/>
<location filename="../src/libsync/syncengine.cpp" line="1105"/>
<source>Ignored because of the "choose what to sync" blacklist</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1124"/>
<source>Not allowed because you don't have permission to add subfolders to that folder</source>
<translation>Ei sallittu, koska oikeutesi eivät riitä alikansioiden lisäämiseen kyseiseen kansioon</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1166"/>
<source>Not allowed to upload this file because it is read-only on the server, restoring</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1183"/>
<location filename="../src/libsync/syncengine.cpp" line="1203"/>
<source>Not allowed to remove, restoring</source>
<translation>Poistaminen ei ole sallittua, palautetaan</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1216"/>
<source>Local files and share folder removed.</source>
<translation>Paikalliset tiedostot ja jakokansio poistettu.</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1272"/>
<source>Move not allowed, item restored</source>
<translation>Siirtäminen ei ole sallittua, kohde palautettu</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1283"/>
<source>Move not allowed because %1 is read-only</source>
<translation>Siirto ei ole sallittu, koska %1 on "vain luku"-tilassa</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1284"/>
<source>the destination</source>
<translation>kohde</translation>
</message>
<message>
<location filename="../src/libsync/syncengine.cpp" line="1284"/>
<source>the source</source>
<translation>lähde</translation>
</message>
</context>
<context>
<name>OCC::SyncLogDialog</name>
<message>
<location filename="../src/gui/synclogdialog.ui" line="14"/>
<source>Synchronisation Log</source>
<translation>Synkronointiloki</translation>
</message>
</context>
<context>
<name>OCC::Systray</name>
<message>
<location filename="../src/gui/systray.cpp" line="55"/>
<source>%1: %2</source>
<translation>%1: %2</translation>
</message>
</context>
<context>
<name>OCC::Theme</name>
<message>
<location filename="../src/libsync/theme.cpp" line="285"/>
<source><p>Version %1. For more information please visit <a href='%2'>%3</a>.</p></source>
<translation><p>Versio %1. Lisätietoja osoitteessa <a href='%2'>%3</a>.</p></translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="289"/>
<source><p>Copyright ownCloud GmbH</p></source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="290"/>
<source><p>Distributed by %1 and licensed under the GNU General Public License (GPL) Version 2.0.<br/>%2 and the %2 logo are registered trademarks of %1 in the United States, other countries, or both.</p></source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::ValidateChecksumHeader</name>
<message>
<location filename="../src/libsync/checksums.cpp" line="189"/>
<source>The checksum header is malformed.</source>
<translation>Tarkistesumman otsake on muodostettu väärin.</translation>
</message>
<message>
<location filename="../src/libsync/checksums.cpp" line="204"/>
<source>The checksum header contained an unknown checksum type '%1'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/checksums.cpp" line="209"/>
<source>The downloaded file does not match the checksum, it will be resumed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OCC::ownCloudGui</name>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="292"/>
<source>Please sign in</source>
<translation>Kirjaudu sisään</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="317"/>
<source>Folder %1: %2</source>
<translation>Kansio %1: %2</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="322"/>
<source>No sync folders configured.</source>
<translation>Synkronointikansioita ei ole määritetty.</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="332"/>
<source>There are no sync folders configured.</source>
<translation>Synkronointikansioita ei ole määritelty.</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="340"/>
<source>Open in browser</source>
<translation>Avaa selaimessa</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="390"/>
<location filename="../src/gui/owncloudgui.cpp" line="544"/>
<location filename="../src/gui/owncloudgui.cpp" line="611"/>
<source>Log in...</source>
<translation>Kirjaudu sisään...</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="394"/>
<location filename="../src/gui/owncloudgui.cpp" line="536"/>
<location filename="../src/gui/owncloudgui.cpp" line="613"/>
<source>Log out</source>
<translation>Kirjaudu ulos</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="415"/>
<location filename="../src/gui/owncloudgui.cpp" line="456"/>
<source>Recent Changes</source>
<translation>Viimeisimmät muutokset</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="656"/>
<source>Checking for changes in '%1'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="367"/>
<source>Managed Folders:</source>
<translation>Hallitut kansiot:</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="370"/>
<source>Open folder '%1'</source>
<translation>Avaa kansio '%1'</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="342"/>
<source>Open %1 in browser</source>
<translation>Avaa %1 selaimessa</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="598"/>
<source>Unknown status</source>
<translation>Tuntematon tila</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="600"/>
<source>Settings...</source>
<translation>Asetukset...</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="601"/>
<source>Details...</source>
<translation>Tiedot...</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="606"/>
<source>Help</source>
<translation>Ohje</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="608"/>
<source>Quit %1</source>
<translation>Lopeta %1</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="271"/>
<source>Disconnected from %1</source>
<translation>Yhteys katkaistu kohteeseen %1</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="236"/>
<source>Unsupported Server Version</source>
<translation>Palvelimen versio ei ole tuettu</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="237"/>
<source>The server on account %1 runs an old and unsupported version %2. Using this client with unsupported server versions is untested and potentially dangerous. Proceed at your own risk.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="275"/>
<source>Disconnected from accounts:</source>
<translation>Katkaistu yhteys tileihin:</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="277"/>
<source>Account %1: %2</source>
<translation>Tili %1: %2</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="296"/>
<source>Account synchronization is disabled</source>
<translation>Tilin synkronointi on poistettu käytöstä</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="379"/>
<source>Unpause all folders</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="384"/>
<source>Pause all folders</source>
<translation>Keskeytä kaikki kansiot</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="515"/>
<source>Unpause all synchronization</source>
<translation>Palauta kaikki synkronointi</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="517"/>
<source>Unpause synchronization</source>
<translation>Palauta synkronointi</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="525"/>
<source>Pause all synchronization</source>
<translation>Keskeytä kaikki synkronointi</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="527"/>
<source>Pause synchronization</source>
<translation>Keskeytä synkronointi</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="534"/>
<source>Log out of all accounts</source>
<translation>Kirjaudu ulos kaikista tileistä</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="542"/>
<source>Log in to all accounts...</source>
<translation>Kirjaudu kaikille tileille...</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="617"/>
<source>Crash now</source>
<comment>Only shows in debug mode to allow testing the crash handler</comment>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="634"/>
<source>No items synced recently</source>
<translation>Kohteita ei ole synkronoitu äskettäin</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="661"/>
<source>Syncing %1 of %2 (%3 left)</source>
<translation>Synkronoidaan %1/%2 (%3 jäljellä)</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="666"/>
<source>Syncing %1 (%2 left)</source>
<translation>Synkronoidaan %1 (%2 jäljellä)</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="686"/>
<source>%1 (%2, %3)</source>
<translation>%1 (%2, %3)</translation>
</message>
<message>
<location filename="../src/gui/owncloudgui.cpp" line="715"/>
<source>Up to date</source>
<translation>Ajan tasalla</translation>
</message>
</context>
<context>
<name>OCC::ownCloudTheme</name>
<message>
<location filename="../src/libsync/owncloudtheme.cpp" line="47"/>
<source><p>Version %2. For more information visit <a href="%3">%4</a></p><p><small>By Klaas Freitag, Daniel Molkentin, Jan-Christoph Borchardt, Olivier Goffart, Markus Götz and others.</small></p><p>Copyright ownCloud GmbH</p><p>Licensed under the GNU General Public License (GPL) Version 2.0<br/>ownCloud and the ownCloud Logo are registered trademarks of ownCloud, Inc. in the United States, other countries, or both.</p></source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OwncloudAdvancedSetupPage</name>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="20"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="32"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="78"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="131"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="247"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="285"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="312"/>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="335"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="88"/>
<source>Server</source>
<translation>Palvelin</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="200"/>
<source><html><head/><body><p>If this box is checked, existing content in the local folder will be erased to start a clean sync from the server.</p><p>Do not check this if the local content should be uploaded to the servers folder.</p></body></html></source>
<translation><html><head/><body><p>Jos tämä kohta on valittu, paikallisen kansion olemassa oleva sisältö poistetaan ja sisältö synkronoidaan palvelimelta.</p><p>Älä valitse tätä, jos tarkoituksesi on lähettää paikallisen kansion sisältö palvelimelle.</p></body></html></translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="203"/>
<source>Start a &clean sync (Erases the local folder!)</source>
<translation>Aloita &puhdas synkronointi (poistaa paikallisen kansion!)</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="278"/>
<source>Choose what to sync</source>
<translation>Valitse synkronoitavat tiedot</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="147"/>
<source>&Local Folder</source>
<translation>&Paikallinen kansio</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="222"/>
<source>pbSelectLocalFolder</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="190"/>
<source>&Keep local data</source>
<translation>&Säilytä paikallinen data</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="237"/>
<source>S&ync everything from server</source>
<translation>S&ynkronoi kaikki palvelimelta</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudadvancedsetuppage.ui" line="319"/>
<source>Status message</source>
<translation>Tilaviesti</translation>
</message>
</context>
<context>
<name>OwncloudHttpCredsPage</name>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.ui" line="38"/>
<source>&Username</source>
<translation>&Käyttäjätunnus</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.ui" line="48"/>
<source>&Password</source>
<translation>&Salasana</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.ui" line="58"/>
<source>Error Label</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/gui/wizard/owncloudhttpcredspage.ui" line="112"/>
<location filename="../src/gui/wizard/owncloudhttpcredspage.ui" line="125"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
</context>
<context>
<name>OwncloudSetupPage</name>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="14"/>
<location filename="../src/gui/wizard/owncloudsetupnocredspage.ui" line="20"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="20"/>
<source>Server &address:</source>
<translation>Palvelimen &osoite:</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="36"/>
<location filename="../src/gui/owncloudsetuppage.ui" line="129"/>
<location filename="../src/gui/owncloudsetuppage.ui" line="156"/>
<location filename="../src/gui/wizard/owncloudsetupnocredspage.ui" line="32"/>
<location filename="../src/gui/wizard/owncloudsetupnocredspage.ui" line="187"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="47"/>
<source>Use &secure connection</source>
<translation>Käytä salattua &yhteyttä</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="60"/>
<source>CheckBox</source>
<translation>Ruksauslaatikko</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="75"/>
<source>&Username:</source>
<translation>&Käyttäjätunnus:</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="85"/>
<source>Enter the ownCloud username.</source>
<translation>Anna ownCloudin käyttäjätunnus.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="92"/>
<source>&Password:</source>
<translation>&Salasana:</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="102"/>
<source>Enter the ownCloud password.</source>
<translation>Anna ownCloudin salasana.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="117"/>
<source>Do not allow the local storage of the password.</source>
<translation>Älä tallenna salasanaa paikallisesti, vaan kysy joka kerta kun ohjelma käynnistyy.</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="120"/>
<source>&Do not store password on local machine</source>
<translation>&Älä tallenna salasanaa paikalliselle koneelle</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="140"/>
<source>https://</source>
<translation>https://</translation>
</message>
<message>
<location filename="../src/gui/owncloudsetuppage.ui" line="147"/>
<source>Enter the url of the ownCloud you want to connect to (without http or https).</source>
<translation>Anna sen ownCloudin verkko-osoite, johon haluat yhdistää (ilman http- tai https-etuliitettä).</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetupnocredspage.ui" line="83"/>
<source>Server &Address</source>
<translation>Palvelimen &osoite</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetupnocredspage.ui" line="99"/>
<source>https://...</source>
<translation>https://...</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudsetupnocredspage.ui" line="157"/>
<source>Error Label</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OwncloudWizardResultPage</name>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.ui" line="14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.ui" line="20"/>
<source>TextLabel</source>
<translation>TekstiLeima</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.ui" line="163"/>
<source>Your entire account is synced to the local folder </source>
<translation>Koko tilisi on synkronoitu paikalliseen kansioon</translation>
</message>
<message>
<location filename="../src/gui/wizard/owncloudwizardresultpage.ui" line="98"/>
<location filename="../src/gui/wizard/owncloudwizardresultpage.ui" line="120"/>
<source>PushButton</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QApplication</name>
<message>
<location filename="../src/gui/application.cpp" line="599"/>
<source>QT_LAYOUT_DIRECTION</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QObject</name>
<message>
<location filename="../src/libsync/utility.cpp" line="473"/>
<source>in the future</source>
<translation>tulevaisuudessa</translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="469"/>
<source>%n day(s) ago</source>
<translation><numerusform>%n päivä sitten</numerusform><numerusform>%n päivää sitten</numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="477"/>
<source>%n hour(s) ago</source>
<translation><numerusform>%n tunti sitten</numerusform><numerusform>%n tuntia sitten</numerusform></translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="482"/>
<source>now</source>
<translation>nyt</translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="484"/>
<source>Less than a minute ago</source>
<translation>Alle minuutti sitten</translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="487"/>
<source>%n minute(s) ago</source>
<translation><numerusform>%n minuutti sitten</numerusform><numerusform>%n minuuttia sitten</numerusform></translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="490"/>
<source>Some time ago</source>
<translation>Jokin aika sitten</translation>
</message>
</context>
<context>
<name>Utility</name>
<message>
<location filename="../src/libsync/utility.cpp" line="125"/>
<source>%L1 GB</source>
<translation>%L1 Gt</translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="129"/>
<source>%L1 MB</source>
<translation>%L1 Mt</translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="132"/>
<source>%L1 KB</source>
<translation>%L1 kt</translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="135"/>
<source>%L1 B</source>
<translation>%L1 t</translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="319"/>
<source>%n year(s)</source>
<translation><numerusform>%n vuosi</numerusform><numerusform>%n vuotta</numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="320"/>
<source>%n month(s)</source>
<translation><numerusform>%n kuukausi</numerusform><numerusform>%n kuukautta</numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="321"/>
<source>%n day(s)</source>
<translation><numerusform>%n päivä</numerusform><numerusform>%n päivää</numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="322"/>
<source>%n hour(s)</source>
<translation><numerusform>%n tunti</numerusform><numerusform>%n tuntia</numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="323"/>
<source>%n minute(s)</source>
<translation><numerusform>%n minuutti</numerusform><numerusform>%n minuuttia</numerusform></translation>
</message>
<message numerus="yes">
<location filename="../src/libsync/utility.cpp" line="324"/>
<source>%n second(s)</source>
<translation><numerusform>%n sekunti</numerusform><numerusform>%n sekuntia</numerusform></translation>
</message>
<message>
<location filename="../src/libsync/utility.cpp" line="348"/>
<source>%1 %2</source>
<translation>%1 %2</translation>
</message>
</context>
<context>
<name>main.cpp</name>
<message>
<location filename="../src/gui/main.cpp" line="38"/>
<source>System Tray not available</source>
<translation>Ilmoitusaluetta ei ole saatavilla</translation>
</message>
<message>
<location filename="../src/gui/main.cpp" line="39"/>
<source>%1 requires on a working system tray. If you are running XFCE, please follow <a href="http://docs.xfce.org/xfce/xfce4-panel/systray">these instructions</a>. Otherwise, please install a system tray application such as 'trayer' and try again.</source>
<translation>%1 vaatii toimivan ilmoitusalueen. Jos käytät XFCE:tä, seuraa <a href="http://docs.xfce.org/xfce/xfce4-panel/systray">näitä ohjeita</a>. Muussa tapauksessa asenna jokin ilmoitusalueen tarjoava sovellus, kuten "trayer" ja yritä uudelleen.</translation>
</message>
</context>
<context>
<name>ownCloudTheme::about()</name>
<message>
<location filename="../src/libsync/theme.cpp" line="271"/>
<source><p><small>Built from Git revision <a href="%1">%2</a> on %3, %4 using Qt %5, %6</small></p></source>
<translation><p><small>Koostettu Git-revisiosta <a href="%1">%2</a> %3, %4 käyttäen Qt:n versiota %5, %6</small></p></translation>
</message>
</context>
<context>
<name>progress</name>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="32"/>
<source>Downloaded</source>
<translation>Ladattu</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="34"/>
<source>Uploaded</source>
<translation>Lähetetty</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="37"/>
<source>Server version downloaded, copied changed local file into conflict file</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="39"/>
<source>Deleted</source>
<translation>Poistettu</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="42"/>
<source>Moved to %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="44"/>
<source>Ignored</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="46"/>
<source>Filesystem access error</source>
<translation>Tiedostojärjestelmän käyttövirhe</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="48"/>
<source>Error</source>
<translation>Virhe</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="51"/>
<location filename="../src/libsync/progressdispatcher.cpp" line="54"/>
<source>Unknown</source>
<translation>Tuntematon</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="65"/>
<source>downloading</source>
<translation>ladataan</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="67"/>
<source>uploading</source>
<translation>lähetetään</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="69"/>
<source>deleting</source>
<translation>poistetaan</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="72"/>
<source>moving</source>
<translation>siirretään</translation>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="74"/>
<source>ignoring</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../src/libsync/progressdispatcher.cpp" line="76"/>
<location filename="../src/libsync/progressdispatcher.cpp" line="78"/>
<source>error</source>
<translation>virhe</translation>
</message>
</context>
<context>
<name>theme</name>
<message>
<location filename="../src/libsync/theme.cpp" line="58"/>
<source>Status undefined</source>
<translation>Tila on määrittelemätön.</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="61"/>
<source>Waiting to start sync</source>
<translation>Odotetaan synkronoinnin käynnistymistä</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="64"/>
<source>Sync is running</source>
<translation>Synkronointi meneillään</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="67"/>
<source>Sync Success</source>
<translation>Synkronointi valmistui</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="70"/>
<source>Sync Success, some files were ignored.</source>
<translation>Synkronointi onnistui, jotkin tiedostot ohitettiin.</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="73"/>
<source>Sync Error</source>
<translation>Synkronointivirhe</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="76"/>
<source>Setup Error</source>
<translation>Asetusvirhe.</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="79"/>
<source>Preparing to sync</source>
<translation>Valmistaudutaan synkronointiin</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="82"/>
<source>Aborting...</source>
<translation>Keskeytetään...</translation>
</message>
<message>
<location filename="../src/libsync/theme.cpp" line="85"/>
<source>Sync is paused</source>
<translation>Synkronointi on keskeytetty</translation>
</message>
</context>
</TS><|fim▁end|> | <source>Currently there is no storage usage information available.</source> |
<|file_name|>describe_invocations.go<|end_file_name|><|fim▁begin|>package ecs
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// DescribeInvocations invokes the ecs.DescribeInvocations API synchronously.
// It allocates a fresh response object, sends the request through the client's
// transport via DoAction, and returns the populated response along with any
// transport or deserialization error.
func (client *Client) DescribeInvocations(request *DescribeInvocationsRequest) (response *DescribeInvocationsResponse, err error) {
	resp := CreateDescribeInvocationsResponse()
	err = client.DoAction(request, resp)
	response = resp
	return response, err
}
// DescribeInvocationsWithChan invokes the ecs.DescribeInvocations API asynchronously
func (client *Client) DescribeInvocationsWithChan(request *DescribeInvocationsRequest) (<-chan *DescribeInvocationsResponse, <-chan error) {
responseChan := make(chan *DescribeInvocationsResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.DescribeInvocations(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}<|fim▁hole|>}
// DescribeInvocationsWithCallback invokes the ecs.DescribeInvocations API asynchronously.
// The callback is invoked exactly once: with the response on success, or with a
// nil response and an error otherwise. The returned channel receives 1 when the
// async task actually ran, or 0 when the task could not be scheduled, and is
// closed afterwards.
func (client *Client) DescribeInvocationsWithCallback(request *DescribeInvocationsRequest, callback func(response *DescribeInvocationsResponse, err error)) <-chan int {
	result := make(chan int, 1)
	scheduleErr := client.AddAsyncTask(func() {
		defer close(result)
		response, err := client.DescribeInvocations(request)
		callback(response, err)
		result <- 1
	})
	if scheduleErr != nil {
		defer close(result)
		callback(nil, scheduleErr)
		result <- 0
	}
	return result
}
// DescribeInvocationsRequest is the request struct for api DescribeInvocations.
// Fields are serialized into the query string per their `position`/`name` tags;
// zero-valued fields are omitted by the SDK marshaller.
type DescribeInvocationsRequest struct {
	*requests.RpcRequest
	// Filters for selecting which command invocations to describe.
	ResourceOwnerId      requests.Integer `position:"Query" name:"ResourceOwnerId"`
	InvokeStatus         string           `position:"Query" name:"InvokeStatus"`
	IncludeOutput        requests.Boolean `position:"Query" name:"IncludeOutput"`
	CommandId            string           `position:"Query" name:"CommandId"`
	// Pagination controls for the result set.
	PageNumber           requests.Integer `position:"Query" name:"PageNumber"`
	ContentEncoding      string           `position:"Query" name:"ContentEncoding"`
	PageSize             requests.Integer `position:"Query" name:"PageSize"`
	InvokeId             string           `position:"Query" name:"InvokeId"`
	Timed                requests.Boolean `position:"Query" name:"Timed"`
	CommandName          string           `position:"Query" name:"CommandName"`
	// Account/ownership scoping parameters common to RPC-style ECS APIs.
	ResourceOwnerAccount string           `position:"Query" name:"ResourceOwnerAccount"`
	OwnerAccount         string           `position:"Query" name:"OwnerAccount"`
	OwnerId              requests.Integer `position:"Query" name:"OwnerId"`
	CommandType          string           `position:"Query" name:"CommandType"`
	InstanceId           string           `position:"Query" name:"InstanceId"`
}
// DescribeInvocationsResponse is the response struct for api DescribeInvocations.
// It carries one page of invocation records plus the pagination echo fields.
type DescribeInvocationsResponse struct {
	*responses.BaseResponse
	RequestId   string                           `json:"RequestId" xml:"RequestId"`
	// TotalCount is the total number of matching invocations across all pages.
	TotalCount  int64                            `json:"TotalCount" xml:"TotalCount"`
	PageNumber  int64                            `json:"PageNumber" xml:"PageNumber"`
	PageSize    int64                            `json:"PageSize" xml:"PageSize"`
	Invocations InvocationsInDescribeInvocations `json:"Invocations" xml:"Invocations"`
}
// CreateDescribeInvocationsRequest creates a request to invoke the
// DescribeInvocations API, pre-populated with product, version, action
// and endpoint metadata, using HTTP POST.
func CreateDescribeInvocationsRequest() (request *DescribeInvocationsRequest) {
	request = &DescribeInvocationsRequest{RpcRequest: &requests.RpcRequest{}}
	request.InitWithApiInfo("Ecs", "2014-05-26", "DescribeInvocations", "ecs", "openAPI")
	request.Method = requests.POST
	return request
}
// CreateDescribeInvocationsResponse creates a response to parse from DescribeInvocations response
func CreateDescribeInvocationsResponse() (response *DescribeInvocationsResponse) {
response = &DescribeInvocationsResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}<|fim▁end|> | return responseChan, errChan |
<|file_name|>data.py<|end_file_name|><|fim▁begin|>import numpy as np
import pywt
from scipy.misc import imresize
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_L = 10
L = 14
N_BATCH = 50
OBS_SIZE = 30
# ---------------------------- helpers
def vectorize(coords):
    """One-hot encode a grid coordinate as two length-L vectors.

    coords is an (x, y) pair; the result is (x_onehot, y_onehot).
    """
    x_hot = np.zeros([L])
    y_hot = np.zeros([L])
    x_hot[coords[0]] = 1.0
    y_hot[coords[1]] = 1.0
    return x_hot, y_hot
# Describe the nested dimensions of a data object (list of lists or a tensor).
def show_dim(lst1):
    """Return a nested [length, ...] description of lst1's dimensions.

    Leaves are a TensorFlow shape (via get_shape), a numpy shape, or the
    plain Python type of the object, whichever is available first.
    """
    if hasattr(lst1, '__len__') and len(lst1) > 0:
        return [len(lst1), show_dim(lst1[0])]
    try:
        return lst1.get_shape()
    except:
        pass
    try:
        return lst1.shape
    except:
        return type(lst1)
# -------------------------------------- making the datas<|fim▁hole|># assume X is already a 2D matrix
def mk_query(X):
    """Build a binary pixel query over image X.

    X is shifted up by its median; the returned closure maps a coordinate
    O = (ox, oy) to [1.0, 0.0] if the shifted pixel is positive, else
    [0.0, 1.0].
    """
    shifted = X + np.median(X)

    def query(O):
        ox, oy = O
        if shifted[ox][oy] > 0.0:
            return [1.0, 0.0]
        return [0.0, 1.0]

    return query
def sample_coord():
    """Sample a uniformly random (Ox, Oy) coordinate on the L x L grid.

    np.random.randint(0, L) already returns values in [0, L), so the
    original rejection/retry recursion could never trigger; sampling once
    is equivalent and removes the dead recursive branch.
    """
    return np.random.randint(0, L), np.random.randint(0, L)
def gen_O(X):
    """Draw one labelled observation for image X.

    Returns (coord, label) where coord is a random grid coordinate and
    label is the query result of X at that coordinate.
    """
    coord = sample_coord()
    return coord, mk_query(X)(coord)
def get_img_class():
    """Draw one MNIST digit; return its Haar-packed L x L image and one-hot label.

    The 28x28 digit is resized to L x L, rescaled to [0, 1], then the four
    level-1 Haar wavelet sub-bands are packed back into a single L x L array.
    """
    batch, label = mnist.train.next_batch(1)
    img = imresize(np.reshape(batch[0], [28, 28]), (L, L)) / 255.0
    A, (B, C, D) = pywt.dwt2(img, 'haar')
    img = np.reshape(np.array([A, B, C, D]), [L, L])
    return img, label
def gen_data():
    """Assemble one training batch of N_BATCH examples.

    For each example: draw an MNIST image, sample one "new" observation,
    then sample OBS_SIZE conditioning observations. Observation arrays are
    organized observation-major: obs_* has shape (OBS_SIZE, N_BATCH, ...).
    Returns (x, obs_x, obs_y, obs_tfs, new_ob_x, new_ob_y, new_ob_tf, imgs).
    """
    x = []
    obs_x = [[] for _ in range(OBS_SIZE)]
    obs_y = [[] for _ in range(OBS_SIZE)]
    obs_tfs = [[] for _ in range(OBS_SIZE)]
    new_ob_x = []
    new_ob_y = []
    new_ob_tf = []
    imgs = []

    for _ in range(N_BATCH):
        # Hidden variable: one image and its class label.
        img, label = get_img_class()
        imgs.append(img)
        x.append(label[0])

        # One fresh observation for this image.
        new_coord, new_lab = gen_O(img)
        vx, vy = vectorize(new_coord)
        new_ob_x.append(vx)
        new_ob_y.append(vy)
        new_ob_tf.append(new_lab)

        # OBS_SIZE conditioning observations for the same image.
        for ob_idx in range(OBS_SIZE):
            coord, lab = gen_O(img)
            vx, vy = vectorize(coord)
            obs_x[ob_idx].append(vx)
            obs_y[ob_idx].append(vy)
            obs_tfs[ob_idx].append(lab)

    return np.array(x, np.float32), \
           np.array(obs_x, np.float32), \
           np.array(obs_y, np.float32), \
           np.array(obs_tfs, np.float32), \
           np.array(new_ob_x, np.float32), \
           np.array(new_ob_y, np.float32), \
           np.array(new_ob_tf, np.float32), imgs
<|file_name|>stub.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#Copyright 2007,2008,2012 Sebastian Hagen
# This file is part of gonium.
#
# gonium is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# gonium is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
import logging
import struct
import socket
from io import BytesIO
import random
from .base import *
from .. import ip_address
from ..ip_address import ip_address_build
from ..fdm.packet import AsyncPacketSock
from ..fdm.stream import AsyncDataStream
# ----------------------------------------------------------------------------- question / RR sections
class ValueVerifier:
    """Shared range checks for DNS question/RR field values."""
    # NAME is a domain name of 0..255 octets (RFC 1035 size limit).
    NAME_MIN = 0
    NAME_MAX = 255
    # TYPE codes accepted by this implementation: 1..255.
    TYPE_MIN = 1
    TYPE_MAX = 255

    @classmethod
    def name_validate(cls, name):
        """Raise ValueError if len(name) falls outside [NAME_MIN, NAME_MAX]."""
        length = len(name)
        if (length < cls.NAME_MIN) or (length > cls.NAME_MAX):
            raise ValueError('NAME {!a} is invalid'.format(name,))

    @classmethod
    def type_validate(cls, rtype):
        """Raise ValueError if int(rtype) falls outside [TYPE_MIN, TYPE_MAX]."""
        value = int(rtype)
        if (value < cls.TYPE_MIN) or (value > cls.TYPE_MAX):
            raise ValueError('TYPE {!a} is invalid'.format(rtype,))
class DNSQuestion(ValueVerifier, DNSReprBase):
    """One entry of a DNS question section (QNAME / QTYPE / QCLASS)."""
    fields = ('name', 'type', 'rclass')

    def __init__(self, name, rtype, rclass=CLASS_IN):
        self.name_validate(name)
        self.type_validate(rtype)
        self.name = name
        self.type = rtype
        self.rclass = rclass

    @classmethod
    def build_from_binstream(cls, binstream):
        """Parse one question section from a binary stream."""
        name = DomainName.build_from_binstream(binstream)
        trailer = binstream.read(4)
        if (len(trailer) < 4):
            raise ValueError('Insufficient data in binstream')
        (rtype, rclass) = struct.unpack(b'>HH', trailer)
        return cls(name, rtype, rclass)

    def binary_repr(self):
        """Return binary representation of this question section."""
        return self.name.binary_repr() + struct.pack(b'>HH', self.type, self.rclass)

    def __eq__(self, other):
        return self.binary_repr() == other.binary_repr()

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.binary_repr())
class ResourceRecord(ValueVerifier, DNSReprBase):
    """A DNS resource record (NAME / TYPE / CLASS / TTL / RDATA)."""
    fields = ('name', 'type', 'rclass', 'ttl', 'rdata')

    def __init__(self, name, rtype, ttl, rdata, rclass=CLASS_IN):
        self.name_validate(name)
        self.type_validate(rtype)
        self.name = name
        self.type = rtype
        self.rclass = rclass
        self.ttl = ttl
        self.rdata = rdata

    @classmethod
    def build_from_binstream(cls, binstream):
        """Parse one resource record from a binary stream."""
        name = DomainName.build_from_binstream(binstream)
        fixed = binstream.read(10)
        if (len(fixed) < 10):
            raise ValueError('Insufficient data in binstream')
        (rtype, rclass, ttl, rdlength) = struct.unpack(b'>HHLH', fixed)
        # RDATA parsing is type-specific; dispatch on the record type.
        rdata = RDATA.class_get(rtype).build_from_binstream(binstream, rdlength)
        return cls(name, rtype, ttl, rdata, rclass=rclass)

    def binary_repr(self):
        """Return binary representation of this RR."""
        rdata_str = self.rdata.binary_repr()
        fixed = struct.pack(b'>HHLH', self.type, self.rclass, self.ttl, len(rdata_str))
        return self.name.binary_repr() + fixed + rdata_str
# ----------------------------------------------------------------------------- DNS Frames and Headers
class DNSHeader(DNSReprBase):
    """DNS message header (RFC 1035, section 4.1.1).

    Holds the 16-bit id, the flag bits (QR/AA/TC/RD/RA), the opcode and
    response code, and the four section entry counts.
    """
    ID_MIN = QDC_MIN = ANC_MIN = NSC_MIN = ARC_MIN = 0
    ID_MAX = QDC_MAX = ANC_MAX = NSC_MAX = ARC_MAX = 65535
    QR_MIN = AA_MIN = TC_MIN = RD_MIN = RA_MIN = False
    QR_MAX = AA_MAX = TC_MAX = RD_MAX = RA_MAX = True
    OPCODE_MIN = 0
    OPCODE_MAX = 2
    RCODE_MIN = 0
    RCODE_MAX = 5
    fields = ('id', 'response', 'opcode', 'authoritative_answer', 'truncation',
        'recursion_desired', 'recursion_available', 'response_code', 'qdcount',
        'ancount', 'nscount', 'arcount')

    def __init__(self, id, response=False, opcode=0, authoritative_answer=False,
            truncation=False, recursion_desired=True, recursion_available=False,
            response_code=0, qdcount=0, ancount=0, nscount=0, arcount=0):
        self.limit_verify(self.ID_MIN, self.ID_MAX, id)
        self.limit_verify(self.QR_MIN, self.QR_MAX, response)
        self.limit_verify(self.OPCODE_MIN, self.OPCODE_MAX, opcode)
        self.limit_verify(self.AA_MIN, self.AA_MAX, authoritative_answer)
        self.limit_verify(self.TC_MIN, self.TC_MAX, truncation)
        self.limit_verify(self.RD_MIN, self.RD_MAX, recursion_desired)
        self.limit_verify(self.RA_MIN, self.RA_MAX, recursion_available)
        self.limit_verify(self.RCODE_MIN, self.RCODE_MAX, response_code)
        self.limit_verify(self.QDC_MIN, self.QDC_MAX, qdcount)
        self.limit_verify(self.ANC_MIN, self.ANC_MAX, ancount)
        self.limit_verify(self.NSC_MIN, self.NSC_MAX, nscount)
        self.limit_verify(self.ARC_MIN, self.ARC_MAX, arcount)
        self.id = id
        self.response = response
        self.opcode = opcode
        self.authoritative_answer = authoritative_answer
        self.truncation = truncation
        self.recursion_desired = recursion_desired
        self.recursion_available = recursion_available
        self.response_code = response_code
        self.qdcount = qdcount
        self.ancount = ancount
        self.nscount = nscount
        self.arcount = arcount

    @staticmethod
    def limit_verify(limit_min, limit_max, val):
        """Raise ValueError unless limit_min <= val <= limit_max."""
        if not (limit_min <= val <= limit_max):
            raise ValueError('Expected value to lie between {} and {}; got {!a} instead.'.format(limit_min, limit_max, val))

    @classmethod
    def build_from_binstream(cls, binstream):
        """Read and parse a 12-byte header from a binary stream."""
        s = binstream.read(12)
        if (len(s) < 12):
            raise ValueError('Insufficient data in stream')
        return cls.build_from_binstring(s)

    @classmethod
    def build_from_binstring(cls, binstring):
        """Parse a 12-byte header string; reject non-zero Z (reserved) bits."""
        if (len(binstring) != 12):
            raise ValueError('Binstring {!a} has invalid length'.format(binstring,))
        (id, flags_1, flags_2, qdcount, ancount, nscount, arcount) = \
            struct.unpack(b'>HBBHHHH', binstring)
        # flags_1 layout (MSB..LSB): QR | opcode(4) | AA | TC | RD
        qr = bool(flags_1 >> 7)
        opcode = (flags_1 % 128) >> 3
        aa = bool((flags_1 % 8) >> 2)
        tc = bool((flags_1 % 4) >> 1)
        rd = bool(flags_1 % 2)
        # flags_2 layout (MSB..LSB): RA | Z(3) | RCODE(4)
        ra = bool(flags_2 >> 7)
        Z = (flags_2 % 128) >> 4
        rcode = flags_2 % 16
        if (Z != 0):
            raise ValueError('Got non-zero value in Z header field')
        return cls(id, qr, opcode, aa, tc, rd, ra, rcode, qdcount,
            ancount, nscount, arcount)

    def binary_repr(self):
        """Return binary representation of this DNS Header."""
        # Bug fix: AA is bit 2 and TC is bit 1. Previously the AA flag was
        # dropped entirely and TC was shifted into AA's position, so headers
        # did not round-trip through build_from_binstring().
        flags_1 = (
            (self.response << 7) +
            (self.opcode << 3) +
            (self.authoritative_answer << 2) +
            (self.truncation << 1) +
            (self.recursion_desired)
        )
        flags_2 = (self.recursion_available << 7) + self.response_code
        return struct.pack(b'>HBBHHHH', self.id, flags_1, flags_2, self.qdcount,
            self.ancount, self.nscount, self.arcount)
class DNSFrame(DNSReprBase):
    """A complete DNS message: header plus question, answer, authority
    and additional-record sections."""
    fields = ('questions', 'answers', 'ns_records', 'ar', 'header')

    def __init__(self, questions, answers=(), ns_records=(),
            ar=(), header=None, *args, **kwargs):
        self.questions = questions
        self.answers = answers
        self.ns_records = ns_records
        self.ar = ar
        if (header is None):
            # Derive the section counts directly from the sections.
            header = DNSHeader(qdcount=len(questions), ancount=len(answers),
                nscount=len(ns_records), arcount=len(ar), *args, **kwargs)
        self.header = header

    @classmethod
    def build_from_binstream(cls, binstream):
        """Parse a whole DNS message (header plus all sections) from a stream."""
        header = DNSHeader.build_from_binstream(binstream)
        questions = tuple(DNSQuestion.build_from_binstream(binstream)
            for _ in range(header.qdcount))
        answers = tuple(ResourceRecord.build_from_binstream(binstream)
            for _ in range(header.ancount))
        ns_records = tuple(ResourceRecord.build_from_binstream(binstream)
            for _ in range(header.nscount))
        ar = tuple(ResourceRecord.build_from_binstream(binstream)
            for _ in range(header.arcount))
        return cls(header=header, questions=questions, answers=answers,
            ns_records=ns_records, ar=ar)

    def binary_repr(self):
        """Return binary representation of this DNS frame."""
        sections = (tuple(self.questions) + tuple(self.answers) +
            tuple(self.ns_records) + tuple(self.ar))
        return self.header.binary_repr() + b''.join(
            section.binary_repr() for section in sections)
# ----------------------------------------------------------------------------- statekeeping
class DNSQuery:
    """Class representing outstanding local requests

    Callback args: dns_request, response_frame
    response_frame will be None iff the query timeouted."""
    def __init__(self, lookup_manager, result_handler, id, question, timeout):
        # Bug fix: this guard previously had no body. A usable question must
        # be serializable into a frame, so reject objects whose binary_repr
        # attribute is not callable.
        if (not hasattr(question.binary_repr, '__call__')):
            raise ValueError('Invalid question object {!a}: binary_repr attribute is not callable'.format(question,))
        self.id = id
        self.result_handler = result_handler
        self.question = question
        self.la = lookup_manager
        self.la.query_add(self)
        if not (timeout is None):
            # Schedule failure reporting if no response arrives in time.
            self.tt = self.la.event_dispatcher.set_timer(timeout, self.timeout_process, parent=self)
        else:
            # Robustness fix: always define self.tt so clean_up() and
            # potential_response_process() never hit an AttributeError.
            self.tt = None

    def __eq__(self, other):
        return ((self.id == other.id) and (self.question == other.question))

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.id, self.question))

    def timeout_process(self):
        """Process a timeout on this query"""
        self.tt = None
        self.la.query_forget(self)
        try:
            self.failure_report()
        finally:
            self.la = None

    def failure_report(self):
        """Call callback handler with dummy results indicating lookup failure"""
        self.result_handler(self, None)

    def is_response(self, response):
        """Check a response for whether it answers our query. Do not process it further either way."""
        return (tuple(response.questions) == (self.question,))

    def potential_response_process(self, response):
        """Check a response for whether it answers our query, and if so process it.
        Returns whether the response was accepted."""
        if (not self.is_response(response)):
            return False
        self.tt.cancel()
        self.tt = None
        try:
            self.result_handler(self, response)
        finally:
            self.la = None
        return True

    def clean_up(self):
        """Cancel request, if still pending"""
        if not (self.tt is None):
            self.tt.cancel()
            self.tt = None
        if not (self.la is None):
            self.la.query_forget(self)
            self.la = None

    def get_dns_frame(self):
        """Return a DNSFrame carrying this query's single question."""
        return DNSFrame(questions=(self.question,), id=self.id)
class ResolverConfig:
DEFAULT_FN = '/etc/resolv.conf'
PORT = 53
def __init__(self, nameservers):
self.ns = nameservers
@classmethod
def build_from_file(cls, fn=None):
if (fn is None):
fn = cls.DEFAULT_FN
nameservers = []
try:
f = open(fn, 'r')
except IOError:
pass
else:
for line in f:
words = line.split()
if (not words):
continue
if (words[0].startswith('#')):
continue
if (words[0] == 'nameserver'):
if (len(words) > 1):
try:
ip = ip_address_build(words[1])
except ValueError:
continue
nameservers.append(ip)
continue
f.close()
if (not nameservers):
nameservers = [ip_address_build(s) for s in ('127.0.0.1','::1')]
return cls(nameservers)
def get_addr(self):
return (str(self.ns[0]), self.PORT)
def build_lookup_manager(self, ed):
return DNSLookupManager(ed, ns_addr=self.get_addr())
class DNSTCPStream(AsyncDataStream):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.size = 2
def process_input(self, data):
bytes_used = 0
bytes_left = len(data)
msgs = []
while (bytes_left > 2):
(l,) = struct.unpack('>H', data[bytes_used:bytes_used+2])
wb = l + 2
if (wb > bytes_left):
self.size = wb
break
msgs.append(data[bytes_used+2:bytes_used+wb])
bytes_used += wb
bytes_left -= wb
else:
self.size = 2
self.discard_inbuf_data(bytes_used)
if (msgs):
self.process_msgs(msgs)
def send_query(self, query):
frame_data = query.get_dns_frame().binary_repr()
try:
header = struct.pack('>H', (len(frame_data)))
except struct.error as exc:
raise ValueError('Too much data.') from struct.error
self.send_bytes((header, frame_data))
class DNSLookupManager:
logger = logging.getLogger('gonium.dns_resolving.DNSLookupManager')
log = logger.log
def __init__(self, event_dispatcher, ns_addr, addr_family=None):
self.event_dispatcher = event_dispatcher
self.cleaning_up = False
if (not (len(ns_addr) == 2)):
raise ValueError('Argument ns_addr should have two elements; got {!a}'.format(ns_addr,))
ip_addr = ip_address.ip_address_build(ns_addr[0])
if (addr_family is None):
addr_family = ip_addr.AF
sock = socket.socket(addr_family, socket.SOCK_DGRAM)
self.sock_udp = AsyncPacketSock(self.event_dispatcher, sock)
self.sock_udp.process_input = self.data_process
self.sock_udp.process_close = self.close_process
self.sock_tcp = None
self.sock_tcp_connected = False
self._qq_tcp = deque()
# Normalize ns_addr argument
self.ns_addr = (str(ip_addr), int(ns_addr[1]))
self.queries = {}
def _have_tcp_connection(self):
s = self.sock_tcp
return (s and (s.state == s.CS_UP))
def _send_tcp(self, query):
if (self._have_tcp_connection()):
try:
self.sock_tcp.send_query(query)
except ValueError:
self.log(40, '{!a} unable to send query over TCP:'.format(self), exc_info=True)
self.event_dispatcher.set_timer(0, query.timeout_process, parent=self, interval_relative=False)
else:
if (self.sock_tcp is None):
self._make_tcp_sock()
self._qq_tcp.append(query)
return
def _make_tcp_sock(self):
self.sock_tcp = s = DNSTCPStream(run_start=False)
s.process_close = self._process_tcp_close
s.connect_async_sock(self.event_dispatcher, ip_address_build(self.ns_addr[0]), self.ns_addr[1], connect_callback=self._process_tcp_connect)
s.process_msgs = self._process_tcp_msgs
def _process_tcp_connect(self, conn):
for query in self._qq_tcp:
self.sock_tcp.send_query(query)
self._qq_tcp.clear()
def _process_tcp_close(self):
self.sock_tcp = None
def _process_tcp_msgs(self, msgs):
for msg in msgs:
self.data_process(msg, self.ns_addr, tcp=True)
def data_process(self, data, source, tcp=False):
try:
dns_frame = DNSFrame.build_from_binstream(BytesIO(data))
except ValueError:
self.log(30, '{!a} got frame {!a} not parsable as dns data from {!a}. Ignoring. Parsing error was:'.format(self, bytes(data), source), exc_info=True)
return
if (source != self.ns_addr):
self.log(30, '{!a} got spurious udp frame from {!a}; target NS is at {!a}. Ignoring.'.format(self, source, self.ns_addr))
return
if (not (dns_frame.header.id in self.queries)):
self.log(30, '{!a} got spurious (unexpected id) query dns response {!a} from {!a}. Ignoring.'.format(self, dns_frame, source))
return
def log_spurious():
self.log(30, '{!a} got spurious (unexpected question section) query dns response {!a} from {!a}. Ignoring.'.format(self, dns_frame, source))
if (dns_frame.header.truncation):
if (tcp):
self.log(30, '{!a} got truncated dns response {!a} over TCP from {!a}. Ignoring.'.format(self, dns_frame, source))
return
self.log(25, '{!a} got truncated dns response {!a} from {!a}. Retrying over TCP.'.format(self, dns_frame, source))
for query in self.queries[dns_frame.header.id]:
if (query.is_response(dns_frame)):
self._send_tcp(query)
break
else:
log_spurious()
return
for query in self.queries[dns_frame.header.id][:]:
if (query.potential_response_process(dns_frame)):
self.queries[dns_frame.header.id].remove(query)
break
else:
log_spurious()
def query_forget(self, query):
"""Forget outstanding dns query"""
self.queries[query.id].remove(query)
try:
self._qq_tcp.remove(query)
except ValueError:
pass
def id_suggestion_get(self):
"""Return suggestion for a frame id to use"""
while True:
rv = random.randint(0, 2**16-1)
if not (rv in self.queries):
return rv
def query_add(self, query):
"""Register new outstanding dns query and send query frame"""
dns_frame_str = query.get_dns_frame().binary_repr()
if (not (query.id in self.queries)):
self.queries[query.id] = []
query_list = self.queries[query.id]
if (query in query_list):
raise ValueError('query {!a} is already registered with {!a}.'.format(query, self))
query_list.append(query)
self.sock_udp.fl.sendto(dns_frame_str, self.ns_addr)
def close_process(self):
"""Process close of UDP socket"""
#if not (fd == self.sock_udp.fd):
# raise ValueError('{!a} is not responsible for fd {!a}'.fomat(self, fd))
if (not self.cleaning_up):
self.log(30, 'UDP socket of {!a} is unexpectedly being closed.'.format(self))
self.sock_udp = None
# Don't raise an exception here; this is most likely being called as a
# result of another exception, which we wouldn't want to mask.
def clean_up(self):
"""Shutdown instance, if still active"""
self.cleaning_up = True
if not (self.sock_udp is None):
self.sock_udp.clean_up()
self.sock_udp = None
for query_list in self.queries.values():
for query in query_list[:]:
query.failure_report()
query.clean_up()
self.queries.clear()
self._qq_tcp.clear()
self.cleaning_up = False
def build_simple_query(self, *args, **kwargs):
return SimpleDNSQuery(self, *args, **kwargs)
class DNSLookupResult:
def __init__(self, query_name, answers, additional_records):
self.query_name = query_name
self.answers = answers
self.additional_records = additional_records
def get_rr_bytypes(self, rtypes):
return tuple([a.data_get() for a in self.answers if (a.type in rtypes)])
def get_rr_A(self):
return self.get_rr_bytypes((RDATA_A.type,))
def get_rr_AAAA(self):
return self.get_rr_bytypes((RDATA_AAAA.type,))
def get_rr_ip_addresses(self):
return self.get_rr_bytypes((RDATA_A.type, RDATA_AAAA.type))
def get_rr_MX(self):
return self.get_rr_bytypes((RDATA_MX.type,))
def get_rr_TXT(self):
return self.get_rr_bytypes((RDATA_TXT.type,))
def __repr__(self):
return '%s%s' % (self.__class__.__name__, (self.query_name, self.answers,
self.additional_records))
def __nonzero__(self):
return True
class SimpleDNSQuery:
"""DNSQuery wrapper with more comfortable call syntax"""
logger = logging.getLogger('gonium.dns_resolving.SimpleDNSQuery')
log = logger.log
def __init__(self, lookup_manager, result_handler, query_name, qtypes, timeout):
if (isinstance(query_name, str)):
query_name = query_name.encode('ascii')
if (query_name.endswith(b'.') and not query_name.endswith(b'..')):
query_name = query_name[:-1]
query_name = DomainName(query_name)
self.lookup_manager = lookup_manager
self.result_handler = result_handler
self.query_name = query_name
self.qtypes = qtypes
self.results = []
self.queries = []
self.qtype_special = False
for qtype in self.qtypes:
question = DNSQuestion(query_name, qtype)
self.queries.append(DNSQuery(lookup_manager=lookup_manager,
result_handler=self.query_result_handler,
id=lookup_manager.id_suggestion_get(), question=question,
timeout=timeout))
if (qtype in QTYPES_SPECIAL):
self.qtype_special = True
def query_result_handler(self, query, result):
"""Process result for wrapped query"""
self.results.append(result)
if (len(self.results) >= len(self.qtypes)):
self.query_results_process()
def query_results_process(self):
"""Collocate and return query results"""
valid_results = []
valid_ars = []
names_valid = set((self.query_name,))
results = [x for x in self.results if (not (x is None))]
if (len(results) == 0):
self.result_handler(self, None)
return
for result in results:
if (result is None):
continue
for answer in result.answers:
if (not (answer.name in names_valid)):
self.log(30, "{!a} got bogus answer {!a}; didn't expect this name. Ignoring.".format(self, answer))
continue
if (answer.type == RDATA_CNAME.type):
names_valid.add(answer.rdata.domain_name)
elif (not ((answer.type in self.qtypes) or self.qtype_special)):
self.log(30, "{!a} got bogus answer {!a}; didn't expect this type. Ignoring.".format(self, answer))
continue
if not (answer.rdata in valid_results):
valid_results.append(answer.rdata)
for ar in result.ar:
if not (ar in valid_ars):
valid_ars.append(ar)
res = DNSLookupResult(self.query_name, valid_results, valid_ars)
try:
self.result_handler(self, res)
except BaseException as exc:
self.log(40, 'Error on DNS lookup result processing for {!a}:'.format(res), exc_info=True)
self.result_handler = None
self.lookup_manager = None
self.queries = ()
def clean_up(self):
"""Cancel request, if still pending"""
for query in self.queries:
query.clean_up
self.queries = ()
self.result_handler = None
self.lookup_manager = None<|fim▁end|> | raise ValueError('Value {!a} for argument question is invalid'.format(question,)) |
<|file_name|>ng-i18next.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for i18next v0.3.6
// Project: https://github.com/i18next/ng-i18next
// Definitions by: Cyril Schumacher <https://github.com/cyrilschumacher>
// Definitions: https://github.com/borisyankov/DefinitelyTyped
/// <reference path="../i18next/i18next.d.ts" />
declare namespace angular.i18next {<|fim▁hole|> }
}<|fim▁end|> | interface I18nextProvider {
options: I18nextOptions; |
<|file_name|>request.py<|end_file_name|><|fim▁begin|>def get_ip_address(request):
ip_address = request.META.get('HTTP_X_FORWARDED_FOR')
<|fim▁hole|> return ip_address.split(',')[-1]
return request.META.get('REMOTE_ADDR')<|fim▁end|> | if ip_address: |
<|file_name|>index-page-sk_puppeteer_test.ts<|end_file_name|><|fim▁begin|>import { expect } from 'chai';
import {
addEventListenersToPuppeteerPage,
loadCachedTestBed,
takeScreenshot,
TestBed,
} from '../../../puppeteer-tests/util';
import { END_BUSY_EVENT } from '../codesize-scaffold-sk/events';
describe('index-page-sk', () => {
let testBed: TestBed;
before(async () => {
testBed = await loadCachedTestBed();<|fim▁hole|> beforeEach(async () => {
await testBed.page.setViewport({ width: 1024, height: 768 });
const eventPromise = await addEventListenersToPuppeteerPage(testBed.page, [END_BUSY_EVENT]);
const loaded = eventPromise(END_BUSY_EVENT); // Emitted when page is loaded.
await testBed.page.goto(testBed.baseUrl);
await loaded;
});
it('should render the demo page', async () => {
expect(await testBed.page.$$('index-page-sk')).to.have.length(1);
});
describe('screenshots', () => {
it('shows the default view', async () => {
await takeScreenshot(testBed.page, 'codesize', 'index-page-sk');
});
});
});<|fim▁end|> | });
|
<|file_name|>Std-symbols.cpp<|end_file_name|><|fim▁begin|>#include <string>
template std::allocator<char>::allocator();
template std::allocator<char>::~allocator();
template std::basic_string<char, std::char_traits<char>, std::allocator<char>>::basic_string(const char*, const std::allocator<char>&);<|fim▁hole|><|fim▁end|> | template std::basic_string<char, std::char_traits<char>, std::allocator<char>>::~basic_string();
template const char* std::basic_string<char, std::char_traits<char>, std::allocator<char>>::c_str() const noexcept; |
<|file_name|>cppreference.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# formatter.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <[email protected]>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import datetime
import re
import string
import urllib.request
from functools import partial
from cppman.formatter.tableparser import parse_table
from cppman.util import fixupHTML, html2man
def member_table_def(g):
tbl = parse_table('<table>%s</table>' % str(g.group(3)))
# Escape column with '.' as prefix
tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
return '\n.IP "%s"\n%s\n%s\n' % (g.group(1), g.group(2), tbl)
def member_type_function(g):
if g.group(1).find("<a href=") == -1:
return ""
head = re.sub(r'<.*?>', '', g.group(1)).strip()
tail = ''
cppvertag = re.search(
'^(.*?)(\[(?:(?:since|until) )?C\+\+\d+\]\s*(,\s*)?)+$', head)
if cppvertag:
head = cppvertag.group(1).strip()
tail = ' ' + cppvertag.group(2)
if ',' in head:
head = ', '.join([x.strip() + ' (3)' for x in head.split(',')])
else:
head = head.strip() + ' (3)'
full = (head + tail).replace('"', '\\(dq')
""" remove [static] tag as in string::npos[static] """
full = full.replace("[static]", "");
return '\n.IP "%s"\n%s\n' % (full, g.group(2))
NAV_BAR_END = '<div class="t-navbar-sep">.?</div></div>'
# Format replacement RE list
# The '.SE' pseudo macro is described in the function: html2groff
rps = [
# Workaround: remove <p> in t-dcl
(r'<tr class="t-dcl">(.*?)</tr>',
lambda g: re.sub('<p/?>', '', g.group(1)), re.S),
# Header, Name
(r'<h1.*?>(.*?)</h1>',
r'\n.TH "{{name}}" 3 "%s" "cppreference.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n{{name}} {{shortdesc}}\n.SE\n' % datetime.date.today(),
re.S),
# Defined in header
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END + r'.*?'
r'Defined in header <code>(.*?)</code>(.*?)<tr class="t-dcl-sep">',
r'\n.SH "SYNOPSIS"\n#include \1\n.sp\n'
r'.nf\n\2\n.fi\n.SE\n'
r'\n.SH "DESCRIPTION"\n', re.S),
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
r'(.*?)<tr class="t-dcl-sep">',
r'\n.SH "SYNOPSIS"\n.nf\n\1\n.fi\n.SE\n'
r'\n.SH "DESCRIPTION"\n', re.S),
# <unordered_map>
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
r'(.*?)<table class="t-dsc-begin">',
r'\n.SH "DESCRIPTION"\n\1\n', re.S),
# access specifiers
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
r'(.*?)<h3',
r'\n.SH "DESCRIPTION"\n\1\n<h3', re.S),
(r'<td>\s*\([0-9]+\)\s*</td>', r'', 0),
# Section headers
(r'<div class="t-inherited">.*?<h2>.*?Inherited from\s*(.*?)\s*</h2>',
r'\n.SE\n.IEND\n.IBEGIN \1\n', re.S),
# Remove tags
(r'<span class="edit.*?">.*?</span> ?', r'', re.S),
(r'[edit]', r'', re.S),
(r'\[edit\]', r'', re.S),
(r'<div id="siteSub">.*?</div>', r'', 0),
(r'<div id="contentSub">.*?</div>', r'', 0),
(r'<table class="toc" id="toc"[^>]*>.*?</table>', r'', re.S),
(r'<h2[^>]*>.*?</h2>', r'', re.S),
(r'<div class="coliru-btn coliru-btn-run-init">.*?</div>', r'', re.S),
(r'<tr class="t-dsc-hitem">.*?</tr>', r'', re.S),
# C++11/14/17/20
(r'\(((?:since|until) C\+\+\d+)\)', r' [\1]', re.S),
(r'\((C\+\+\d+)\)', r' [\1]', re.S),
# Subsections
(r'<h5[^>]*>\s*(.*)</h5>', r'\n.SS "\1"\n', 0),
# Group t-lines
(r'<span></span>', r'', re.S),
(r'<span class="t-lines">(?:<span>.+?</span>.*)+</span>',
lambda x: re.sub('\s*</span><span>\s*', r', ', x.group(0)), re.S),
# Member type & function second col is group see basic_fstream for example
(r'<tr class="t-dsc">\s*?<td>((?:(?!</td>).)*?)</td>\s*?'
r'<td>((?:(?!</td>).)*?)<table[^>]*>((?:(?!</table>).)*?)</table>'
r'(?:(?!</td>).)*?</td>\s*?</tr>',
member_table_def, re.S),
# Section headers
(r'.*<h3>(.+?)</h3>', r'\n.SE\n.SH "\1"\n', 0),
# Member type & function
(r'<tr class="t-dsc">\n?<td>\s*(.*?)\n?</td>.*?<td>\s*(.*?)</td>.*?</tr>',
member_type_function, re.S),
# Parameters
(r'<tr class="t-par">.*?<td>\s*(.*?)\n?</td>.*?<td>.*?</td>.*?'
r'<td>\s*(.*?)</td>.*?</tr>',
r'\n.IP "\1"\n\2\n', re.S),
# 'ul' tag
(r'<ul>', r'\n.RS 2\n', 0),
(r'</ul>', r'\n.RE\n.sp\n', 0),
# 'li' tag
(r'<li>\s*(.+?)</li>', r'\n.IP \[bu] 3\n\1\n', re.S),
# 'pre' tag
(r'<pre[^>]*>(.+?)</pre\s*>', r'\n.in +2n\n.nf\n\1\n.fi\n.in\n', re.S),
# Footer
(r'<div class="printfooter">',
r'\n.SE\n.IEND\n.SH "REFERENCE"\n'
r'cppreference.com, 2015 - All rights reserved.', re.S),
# C++ version tag
(r'<div title="(C\+\+..)"[^>]*>', r'.sp\n\1\n', 0),
# Output
(r'<p>Output:\n?</p>', r'\n.sp\nOutput:\n', re.S),
# Paragraph
(r'<p>(.*?)</p>', r'\n\1\n.sp\n', re.S),
(r'<div class="t-li1">(.*?)</div>', r'\n\1\n.sp\n', re.S),
(r'<div class="t-li2">(.*?)</div>',
r'\n.RS\n\1\n.RE\n.sp\n', re.S),
# 'br' tag
(r'<br/>', r'\n.br\n', 0),
(r'\n.br\n.br\n', r'\n.sp\n', 0),
# 'dd' 'dt' tag
(r'<dt>(.+?)</dt>\s*<dd>(.+?)</dd>', r'\n.IP "\1"\n\2\n', re.S),
# Bold
(r'<strong>(.+?)</strong>', r'\n.B \1\n', 0),
# Any other tags
(r'<script[^>]*>[^<]*</script>', r'', 0),
(r'<.*?>', r'', re.S),
# Escape
(r'^#', r'\#', 0),
(r' ', ' ', 0),
(r'&#(\d+);', lambda g: chr(int(g.group(1))), 0),
# Misc
(r'<', r'<', 0),
(r'>', r'>', 0),
(r'"', r'"', 0),
(r'&', r'&', 0),
(r' ', r' ', 0),
(r'\\([^\^nE])', r'\\\\\1', 0),
(r'>/">', r'', 0),
(r'/">', r'', 0),
# Remove empty sections
(r'\n.SH (.+?)\n+.SE', r'', 0),
# Remove empty lines
(r'\n\s*\n+', r'\n', 0),
(r'\n\n+', r'\n', 0),
# Preserve \n" in EXAMPLE
(r'\\n', r'\\en', 0),
# Remove leading whitespace
(r'^\s+', r'', re.S),
# Trailing white-spaces
(r'\s+\n', r'\n', re.S),
# Remove extra whitespace and newline in .SH/SS/IP section
(r'.(SH|SS|IP) "\s*(.*?)\s*\n?"', r'.\1 "\2"', 0),
# Remove extra whitespace before .IP bullet
(r'(.IP \\\\\[bu\] 3)\n\s*(.*?)\n', r'\1\n\2\n', 0),
# Remove extra '\n' before C++ version Tag (don't do it in table)
(r'(?<!T{)\n\s*(\[(:?since|until) C\+\+\d+\])', r' \1', re.S)
]
def html2groff(data, name):
"""Convert HTML text from cppreference.com to Groff-formatted text."""
# Remove header and footer
try:
data = data[data.index('<div id="cpp-content-base">'):]
data = data[:data.index('<div class="printfooter">') + 25]
except ValueError:
pass
# Remove non-printable characters
data = ''.join([x for x in data if x in string.printable])
for table in re.findall(
r'<table class="(?:wikitable|dsctable)"[^>]*>.*?</table>',
data, re.S):
tbl = parse_table(table)
# Escape column with '.' as prefix
tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
data = data.replace(table, tbl)
# Pre replace all
for rp in rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
# Remove non-printable characters<|fim▁hole|> data = ''.join([x for x in data if x in string.printable])
# Upper case all section headers
for st in re.findall(r'.SH .*\n', data):
data = data.replace(st, st.upper())
# Add tags to member/inherited member functions
# e.g. insert -> vector::insert
#
# .SE is a pseudo macro I created which means 'SECTION END'
# The reason I use it is because I need a marker to know where section
# ends.
# re.findall find patterns which does not overlap, which means if I do
# this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S)
# re.findall will skip the later .SH tag and thus skip the later section.
# To fix this, '.SE' is used to mark the end of the section so the next
# '.SH' can be find by re.findall
try:
idx = data.index('.IEND')
except ValueError:
idx = None
def add_header_multi(prefix, g):
if ',' in g.group(1):
res = ', '.join(['%s::%s' % (prefix, x.strip())
for x in g.group(1).split(',')])
else:
res = '%s::%s' % (prefix, g.group(1))
return '\n.IP "%s"' % res
if idx:
class_name = name
if class_name.startswith('std::'):
normalized_class_name = class_name[len('std::'):]
else:
normalized_class_name = class_name
class_member_content = data[:idx]
secs = re.findall(r'\.SH "(.+?)"(.+?)\.SE', class_member_content, re.S)
for sec, content in secs:
# Member functions
if (('MEMBER' in sec and
'NON-MEMBER' not in sec and
'INHERITED' not in sec and
'MEMBER TYPES' != sec) or
'CONSTANTS' == sec):
content2 = re.sub(r'\n\.IP "([^:]+?)"',
partial(add_header_multi, class_name),
content)
# Replace (constructor) (destructor)
content2 = re.sub(r'\(constructor\)', r'%s' %
normalized_class_name, content2)
content2 = re.sub(r'\(destructor\)', r'~%s' %
normalized_class_name, content2)
data = data.replace(content, content2)
blocks = re.findall(r'\.IBEGIN\s*(.+?)\s*\n(.+?)\.IEND', data, re.S)
for inherited_class, content in blocks:
content2 = re.sub(r'\.SH "(.+?)"', r'\n.SH "\1 INHERITED FROM %s"'
% inherited_class.upper(), content)
data = data.replace(content, content2)
secs = re.findall(r'\.SH "(.+?)"(.+?)\.SE', content, re.S)
for sec, content in secs:
# Inherited member functions
if 'MEMBER' in sec and \
sec != 'MEMBER TYPES':
content2 = re.sub(r'\n\.IP "(.+)"',
partial(add_header_multi, inherited_class),
content)
data = data.replace(content, content2)
# Remove unneeded pseudo macro
data = re.sub('(?:\n.SE|.IBEGIN.*?\n|\n.IEND)', '', data)
# Replace all macros
desc_re = re.search(r'.SH "DESCRIPTION"\n.*?([^\n\s].*?)\n', data)
shortdesc = ''
# not empty description
if desc_re and not desc_re.group(1).startswith('.SH'):
shortdesc = '- ' + desc_re.group(1)
def dereference(g):
d = dict(name=name, shortdesc=shortdesc)
if g.group(1) in d:
return d[g.group(1)]
data = re.sub('{{(.*?)}}', dereference, data)
return data
def func_test():
"""Test if there is major format changes in cplusplus.com"""
ifs = urllib.request.urlopen(
'http://en.cppreference.com/w/cpp/container/vector')
result = html2groff(fixupHTML(ifs.read()), 'std::vector')
assert '.SH "NAME"' in result
assert '.SH "SYNOPSIS"' in result
assert '.SH "DESCRIPTION"' in result
def test():
"""Simple Text"""
ifs = urllib.request.urlopen(
'http://en.cppreference.com/w/cpp/container/vector')
print(html2groff(fixupHTML(ifs.read()), 'std::vector'), end=' ')
# with open('test.html') as ifs:
# data = fixupHTML(ifs.read())
# print html2groff(data, 'std::vector'),
if __name__ == '__main__':
test()<|fim▁end|> | |
<|file_name|>0007_auto_20140805_2253.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('codecompetitions', '0006_auto_20140805_2234'),
]
operations = [
migrations.AddField(
model_name='problem',
name='expected_output',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='problem',
name='input_data',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='read_from_file',
field=models.CharField(blank=True, null=True, max_length=80),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='time_limit',
field=models.PositiveIntegerField(default=5),
preserve_default=True,
),
]<|fim▁end|> | from __future__ import unicode_literals |
<|file_name|>Domain.java<|end_file_name|><|fim▁begin|>package org.whale.ext.domain;
import java.util.ArrayList;
import java.util.List;
import org.whale.system.annotation.jdbc.Column;
import org.whale.system.annotation.jdbc.Id;
import org.whale.system.annotation.jdbc.Table;
import org.whale.system.annotation.jdbc.Validate;
import org.whale.system.base.BaseEntry;
import org.whale.system.common.util.PropertiesUtil;
/**
* 实体对象
*
* @author wjs
* 2014年9月10日-上午10:12:48
*/
@Table(value="sys_domian", cnName="实体对象")
public class Domain extends BaseEntry{
private static final long serialVersionUID = -23042834921L;
@Id
@Column(cnName="id")
private Long id;
@Validate(required=true)
@Column(cnName="实体名")
private String domainName;
@Validate(required=true)
@Column(cnName="中文名")
private String domainCnName;
@Validate(required=true)
@Column(cnName="数据库", unique=true)
private String domainSqlName;
@Column(cnName="基础包路径")
private String pkgName = "org.whale.system";
//树模型
private Integer treeModel;
private String treeId;
private String treePid;
private String treeName;
//模板类型
private Integer ftlType;
//代码路径
private String codePath;
private String author = PropertiesUtil.getValue("author", "wjs");
//主键
private Attr idAttr;
private List<Attr> attrs;
private List<Attr> listAttrs = new ArrayList<Attr>();
private List<Attr> formAttrs = new ArrayList<Attr>();
private List<Attr> queryAttrs = new ArrayList<Attr>();
<|fim▁hole|> public String getDomainName() {
return domainName;
}
public void setDomainName(String domainName) {
this.domainName = domainName;
}
public String getDomainCnName() {
return domainCnName;
}
public void setDomainCnName(String domainCnName) {
this.domainCnName = domainCnName;
}
public String getDomainSqlName() {
return domainSqlName;
}
public void setDomainSqlName(String domainSqlName) {
this.domainSqlName = domainSqlName;
}
public String getPkgName() {
return pkgName;
}
public void setPkgName(String pkgName) {
this.pkgName = pkgName;
}
public Attr getIdAttr() {
return idAttr;
}
public void setIdAttr(Attr idAttr) {
this.idAttr = idAttr;
}
public List<Attr> getAttrs() {
return attrs;
}
public void setAttrs(List<Attr> attrs) {
this.attrs = attrs;
}
public List<Attr> getListAttrs() {
return listAttrs;
}
public void setListAttrs(List<Attr> listAttrs) {
this.listAttrs = listAttrs;
}
public List<Attr> getFormAttrs() {
return formAttrs;
}
public void setFormAttrs(List<Attr> formAttrs) {
this.formAttrs = formAttrs;
}
public List<Attr> getQueryAttrs() {
return queryAttrs;
}
public void setQueryAttrs(List<Attr> queryAttrs) {
this.queryAttrs = queryAttrs;
}
public void setId(Long id) {
this.id = id;
}
public Integer getFtlType() {
return ftlType;
}
public void setFtlType(Integer ftlType) {
this.ftlType = ftlType;
}
public String getCodePath() {
return codePath;
}
public void setCodePath(String codePath) {
this.codePath = codePath;
}
public Integer getTreeModel() {
return treeModel;
}
public void setTreeModel(Integer treeModel) {
this.treeModel = treeModel;
}
public String getTreeId() {
return treeId;
}
public void setTreeId(String treeId) {
this.treeId = treeId;
}
public String getTreePid() {
return treePid;
}
public void setTreePid(String treePid) {
this.treePid = treePid;
}
public String getTreeName() {
return treeName;
}
public void setTreeName(String treeName) {
this.treeName = treeName;
}
public String getAuthor() {
return author;
}
public void setAuthor(String author) {
this.author = author;
}
}<|fim▁end|> | public Long getId() {
return id;
}
|
<|file_name|>uploadSnapshotDialog.js<|end_file_name|><|fim▁begin|>'use strict';
var form = $('[name="uploadForm"]');
<|fim▁hole|>exports.setDetails = function(url, id) {
form.element(by.model('inputText')).sendKeys(url);
form.element(by.model('snapshotId')).sendKeys(id);
};
exports.submit = function() {
form.element(by.css('[ng-click="upload()"]')).click();
};<|fim▁end|> | exports.getForm = function() {
return form;
};
|
<|file_name|>variance.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This file infers the variance of type and lifetime parameters. The
//! algorithm is taken from Section 4 of the paper "Taming the Wildcards:
//! Combining Definition- and Use-Site Variance" published in PLDI'11 and
//! written by Altidor et al., and hereafter referred to as The Paper.
//!
//! This inference is explicitly designed *not* to consider the uses of
//! types within code. To determine the variance of type parameters
//! defined on type `X`, we only consider the definition of the type `X`
//! and the definitions of any types it references.
//!
//! We only infer variance for type parameters found on *types*: structs,
//! enums, and traits. We do not infer variance for type parameters found
//! on fns or impls. This is because those things are not type definitions
//! and variance doesn't really make sense in that context.
//!
//! It is worth covering what variance means in each case. For structs and
//! enums, I think it is fairly straightforward. The variance of the type
//! or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
//! (resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
//! the variances we compute for type parameters.)
//!
//! ### Variance on traits
//!
//! The meaning of variance for trait parameters is more subtle and worth
//! expanding upon. There are in fact two uses of the variance values we
//! compute.
//!
//! #### Trait variance and object types
//!
//! The first is for object types. Just as with structs and enums, we can
//! decide the subtyping relationship between two object types `&Trait<A>`
//! and `&Trait<B>` based on the relationship of `A` and `B`. Note that
//! for object types we ignore the `Self` type parameter -- it is unknown,
//! and the nature of dynamic dispatch ensures that we will always call a
//! function that is expected the appropriate `Self` type. However, we
//! must be careful with the other type parameters, or else we could end
//! up calling a function that is expecting one type but provided another.
//!
//! To see what I mean, consider a trait like so:
//!
//! trait ConvertTo<A> {
//! fn convertTo(&self) -> A;
//! }
//!
//! Intuitively, If we had one object `O=&ConvertTo<Object>` and another
//! `S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
//! (presuming Java-like "string" and "object" types, my go to examples
//! for subtyping). The actual algorithm would be to compare the
//! (explicit) type parameters pairwise respecting their variance: here,
//! the type parameter A is covariant (it appears only in a return
//! position), and hence we require that `String <: Object`.
//!
//! You'll note though that we did not consider the binding for the
//! (implicit) `Self` type parameter: in fact, it is unknown, so that's
//! good. The reason we can ignore that parameter is precisely because we
//! don't need to know its value until a call occurs, and at that time (as
//! you said) the dynamic nature of virtual dispatch means the code we run
//! will be correct for whatever value `Self` happens to be bound to for
//! the particular object whose method we called. `Self` is thus different
//! from `A`, because the caller requires that `A` be known in order to
//! know the return type of the method `convertTo()`. (As an aside, we
//! have rules preventing methods where `Self` appears outside of the
//! receiver position from being called via an object.)
//!
//! #### Trait variance and vtable resolution
//!
//! But traits aren't only used with objects. They're also used when
//! deciding whether a given impl satisfies a given trait bound. To set the
//! scene here, imagine I had a function:
//!
//! fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
//! ...
//! }
//!
//! Now imagine that I have an implementation of `ConvertTo` for `Object`:
//!
//! impl ConvertTo<int> for Object { ... }
//!
//! And I want to call `convertAll` on an array of strings. Suppose
//! further that for whatever reason I specifically supply the value of
//! `String` for the type parameter `T`:
//!
//! let mut vector = ~["string", ...];
//! convertAll::<int, String>(v);
//!
//! Is this legal? To put another way, can we apply the `impl` for
//! `Object` to the type `String`? The answer is yes, but to see why
//! we have to expand out what will happen:
//!
//! - `convertAll` will create a pointer to one of the entries in the
//! vector, which will have type `&String`
//! - It will then call the impl of `convertTo()` that is intended
//! for use with objects. This has the type:
//!
//! fn(self: &Object) -> int
//!
//! It is ok to provide a value for `self` of type `&String` because
//! `&String <: &Object`.
//!
//! OK, so intuitively we want this to be legal, so let's bring this back
//! to variance and see whether we are computing the correct result. We
//! must first figure out how to phrase the question "is an impl for
//! `Object,int` usable where an impl for `String,int` is expected?"
//!
//! Maybe it's helpful to think of a dictionary-passing implementation of
//! type classes. In that case, `convertAll()` takes an implicit parameter
//! representing the impl. In short, we *have* an impl of type:
//!
//! V_O = ConvertTo<int> for Object
//!
//! and the function prototype expects an impl of type:
//!
//! V_S = ConvertTo<int> for String
//!
//! As with any argument, this is legal if the type of the value given
//! (`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
//! The answer will depend on the variance of the various parameters. In
//! this case, because the `Self` parameter is contravariant and `A` is
//! covariant, it means that:
//!
//! V_O <: V_S iff
//! int <: int
//! String <: Object
//!
//! These conditions are satisfied and so we are happy.
//!
//! ### The algorithm
//!
//! The basic idea is quite straightforward. We iterate over the types
//! defined and, for each use of a type parameter X, accumulate a
//! constraint indicating that the variance of X must be valid for the
//! variance of that use site. We then iteratively refine the variance of
//! X until all constraints are met. There is *always* a sol'n, because at
//! the limit we can declare all type parameters to be invariant and all
//! constraints will be satisfied.
//!
//! As a simple example, consider:
//!
//! enum Option<A> { Some(A), None }
//! enum OptionalFn<B> { Some(|B|), None }
//! enum OptionalMap<C> { Some(|C| -> C), None }
//!
//! Here, we will generate the constraints:
//!
//! 1. V(A) <= +
//! 2. V(B) <= -
//! 3. V(C) <= +
//! 4. V(C) <= -
//!
//! These indicate that (1) the variance of A must be at most covariant;
//! (2) the variance of B must be at most contravariant; and (3, 4) the
//! variance of C must be at most covariant *and* contravariant. All of these
//! results are based on a variance lattice defined as follows:
//!
//! * Top (bivariant)
//! - +
//! o Bottom (invariant)
//!
//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
//! optimal solution. Note that there is always a naive solution which
//! just declares all variables to be invariant.
//!
//! You may be wondering why fixed-point iteration is required. The reason
//! is that the variance of a use site may itself be a function of the
//! variance of other type parameters. In full generality, our constraints
//! take the form:
//!
//! V(X) <= Term
//! Term := + | - | * | o | V(X) | Term x Term
//!
//! Here the notation V(X) indicates the variance of a type/region
//! parameter `X` with respect to its defining class. `Term x Term`
//! represents the "variance transform" as defined in the paper:
//!
//! If the variance of a type variable `X` in type expression `E` is `V2`
//! and the definition-site variance of the [corresponding] type parameter
//! of a class `C` is `V1`, then the variance of `X` in the type expression
//! `C<E>` is `V3 = V1.xform(V2)`.
//!
//! ### Constraints
//!
//! If I have a struct or enum with where clauses:
//!
//! struct Foo<T:Bar> { ... }
//!
//! you might wonder whether the variance of `T` with respect to `Bar`
//! affects the variance `T` with respect to `Foo`. I claim no. The
//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
//! `Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that
//! case, the upcast will be illegal, but not because of a variance
//! failure, but rather because the target type `Foo<Y>` is itself just
//! not well-formed. Basically we get to assume well-formedness of all
//! types involved before considering variance.
//!
//! ### Associated types
//!
//! Any trait with an associated type is invariant with respect to all
//! of its inputs. To see why this makes sense, consider what
//! subtyping for a trait reference means:
//!
//! <T as Trait> <: <U as Trait>
//!
//! means that if I know that `T as Trait`, I also know that `U as
//! Trait`. Moreover, if you think of it as dictionary passing style,
//! it means that a dictionary for `<T as Trait>` is safe to use where
//! a dictionary for `<U as Trait>` is expected.
//!
//! The problem is that when you can project types out from `<T as
//! Trait>`, the relationship to types projected out of `<U as Trait>`
//! is completely unknown unless `T==U` (see #21726 for more details).
//! Making `Trait` invariant ensures that this is true.
//!
//! *Historical note: we used to preserve this invariant another way,
//! by tweaking the subtyping rules and requiring that when a type `T`
//! appeared as part of a projection, that was considered an invariant
//! location, but this version does away with the need for those
//! somewhat "special-case-feeling" rules.*
//!
//! Another related reason is that if we didn't make traits with
//! associated types invariant, then projection is no longer a
//! function with a single result. Consider:
//!
//! ```
//! trait Identity { type Out; fn foo(&self); }
//! impl<T> Identity for T { type Out = T; ... }
//! ```
//!
//! Now if I have `<&'static () as Identity>::Out`, this can be
//! validly derived as `&'a ()` for any `'a`:
//!
//! <&'a () as Identity> <: <&'static () as Identity>
//!     if &'static () <: &'a ()   -- Identity is contravariant in Self
//! if 'static : 'a -- Subtyping rules for relations
//!
//! This change otoh means that `<&'static () as Identity>::Out` is
//! always `&'static ()` (which might then be upcast to `&'a ()`,
//! separately). This was helpful in solving #21750.
use self::VarianceTerm::*;
use self::ParamKind::*;
use arena;
use arena::TypedArena;
use middle::resolve_lifetime as rl;
use middle::subst;
use middle::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace};
use middle::ty::{self, Ty};
use std::fmt;
use std::rc::Rc;
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util;
use syntax::visit;
use syntax::visit::Visitor;
use util::nodemap::NodeMap;
use util::ppaux::Repr;
/// Entry point for variance inference. Runs the three passes in
/// order — collect inferreds, gather constraints, solve to a fixed
/// point — and finally flags the tcx as having computed variances.
pub fn infer_variance(tcx: &ty::ctxt) {
    let crate_ast = tcx.map.krate();
    let mut term_arena = arena::TypedArena::new();
    let terms = determine_parameters_to_be_inferred(tcx, &mut term_arena, crate_ast);
    let constraints = add_constraints_from_crate(terms, crate_ast);
    solve_constraints(constraints);
    tcx.variance_computed.set(true);
}
// Representing terms
//
// Terms are structured as a straightforward tree. Rather than rely on
// GC, we allocate terms out of a bounded arena (the lifetime of this
// arena is the lifetime 'a that is threaded around).
//
// We assign a unique index to each type/region parameter whose variance
// is to be inferred. We refer to such variables as "inferreds". An
// `InferredIndex` is a newtype'd int representing the index of such
// a variable.
/// Arena-allocated reference to a variance term.
type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
#[derive(Copy, Debug)]
/// Newtype'd index of an inferred variable (position in
/// `inferred_infos` and, later, in the `solutions` vector).
struct InferredIndex(uint);
#[derive(Copy)]
/// A node in the constraint-term tree (see module comment).
enum VarianceTerm<'a> {
    /// A fixed, known variance (`+`, `-`, `o`, `*`).
    ConstantTerm(ty::Variance),
    /// The "variance transform" `V1 x V2` from the paper.
    TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>),
    /// The not-yet-known variance of the inferred with this index.
    InferredTerm(InferredIndex),
}
impl<'a> fmt::Debug for VarianceTerm<'a> {
    /// Renders a term tree: constants print their variance, transform
    /// terms print as `(a × b)`, and inferreds print as `[index]`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ConstantTerm(c) => {
                write!(f, "{:?}", c)
            }
            TransformTerm(lhs, rhs) => {
                write!(f, "({:?} \u{00D7} {:?})", lhs, rhs)
            }
            InferredTerm(InferredIndex(i)) => {
                write!(f, "[{}]", i)
            }
        }
    }
}
// The first pass over the crate simply builds up the set of inferreds.
/// Context for the first pass, which builds up the set of inferreds
/// (one per type/region parameter whose variance must be computed).
struct TermsContext<'a, 'tcx: 'a> {
    tcx: &'a ty::ctxt<'tcx>,
    // Arena in which all variance terms are allocated; it outlives
    // the whole inference ('a is threaded everywhere).
    arena: &'a TypedArena<VarianceTerm<'a>>,
    // Cached, shared `ItemVariances` value used for every item that
    // declares no type/region parameters at all.
    empty_variances: Rc<ty::ItemVariances>,
    // For marker types, UnsafeCell, and other lang items where
    // variance is hardcoded, records the item-id and the hardcoded
    // variance.
    lang_items: Vec<(ast::NodeId, Vec<ty::Variance>)>,
    // Maps from the node id of a type/generic parameter to the
    // corresponding inferred index.
    inferred_map: NodeMap<InferredIndex>,
    // Maps from an InferredIndex to the info for that variable.
    inferred_infos: Vec<InferredInfo<'a>> ,
}
#[derive(Copy, Debug, PartialEq)]
/// Which flavor of generic parameter an inferred stands for.
enum ParamKind {
    TypeParam,
    RegionParam,
}
/// Everything recorded about a single inferred variance variable.
struct InferredInfo<'a> {
    // Item on which the parameter is declared.
    item_id: ast::NodeId,
    // Type parameter or region (lifetime) parameter.
    kind: ParamKind,
    // Parameter space (Self/Type/Fn) the parameter occupies.
    space: ParamSpace,
    // Index within that space.
    index: uint,
    // Node id of the parameter's own declaration.
    param_id: ast::NodeId,
    // The symbolic term that stands for this variable in constraints.
    term: VarianceTermPtr<'a>,
    // Initial value to use for this parameter when inferring
    // variance. For most parameters, this is Bivariant. But for lang
    // items and input type parameters on traits, it is different.
    initial_variance: ty::Variance,
}
/// First pass: build a `TermsContext` and walk the crate, creating
/// one inferred per type/region parameter whose variance is to be
/// computed.
fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
                                                 arena: &'a mut TypedArena<VarianceTerm<'a>>,
                                                 krate: &ast::Crate)
                                                 -> TermsContext<'a, 'tcx> {
    // Cache and share one variance struct for all items that declare
    // no type/region parameters.
    let no_params = Rc::new(ty::ItemVariances {
        types: VecPerParamSpace::empty(),
        regions: VecPerParamSpace::empty()
    });
    let mut cx = TermsContext {
        tcx: tcx,
        arena: arena,
        inferred_map: NodeMap(),
        inferred_infos: Vec::new(),
        lang_items: lang_items(tcx),
        empty_variances: no_params,
    };
    visit::walk_crate(&mut cx, krate);
    cx
}
fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
let all = vec![
(tcx.lang_items.phantom_fn(), vec![ty::Contravariant, ty::Covariant]),
(tcx.lang_items.phantom_data(), vec![ty::Covariant]),
(tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
// Deprecated:
(tcx.lang_items.covariant_type(), vec![ty::Covariant]),
(tcx.lang_items.contravariant_type(), vec![ty::Contravariant]),
(tcx.lang_items.invariant_type(), vec![ty::Invariant]),
(tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]),
(tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]),
(tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]),
];
all.into_iter()
.filter(|&(ref d,_)| d.is_some())
.filter(|&(ref d,_)| d.as_ref().unwrap().krate == ast::LOCAL_CRATE)
.map(|(d, v)| (d.unwrap().node, v))
.collect()
}
impl<'a, 'tcx> TermsContext<'a, 'tcx> {
    /// Add "inferreds" for the generic parameters declared on this
    /// item. This has a lot of annoying parameters because we are
    /// trying to drive this from the AST, rather than the
    /// ty::Generics, so that we can get span info -- but this
    /// means we must accommodate syntactic distinctions.
    fn add_inferreds_for_item(&mut self,
                              item_id: ast::NodeId,
                              has_self: bool,
                              generics: &ast::Generics)
    {
        // NB: In the code below for writing the results back into the
        // tcx, we rely on the fact that all inferreds for a particular
        // item are assigned continuous indices.
        let inferreds_on_entry = self.num_inferred();
        // Traits pass `has_self == true`: register the implicit
        // `Self` parameter in SelfSpace, using the item's own id as
        // the parameter id.
        if has_self {
            self.add_inferred(item_id, TypeParam, SelfSpace, 0, item_id);
        }
        for (i, p) in generics.lifetimes.iter().enumerate() {
            let id = p.lifetime.id;
            self.add_inferred(item_id, RegionParam, TypeSpace, i, id);
        }
        for (i, p) in generics.ty_params.iter().enumerate() {
            self.add_inferred(item_id, TypeParam, TypeSpace, i, p.id);
        }
        // If this item has no type or lifetime parameters,
        // then there are no variances to infer, so just
        // insert an empty entry into the variance map.
        // Arguably we could just leave the map empty in this
        // case but it seems cleaner to be able to distinguish
        // "invalid item id" from "item id with no
        // parameters".
        if self.num_inferred() == inferreds_on_entry {
            let newly_added =
                self.tcx.item_variance_map.borrow_mut().insert(
                    ast_util::local_def(item_id),
                    self.empty_variances.clone()).is_none();
            assert!(newly_added);
        }
    }
    /// Registers one parameter as an inferred: assigns the next
    /// `InferredIndex`, allocates its symbolic term in the arena, and
    /// records the `param_id -> index` mapping (which must be fresh).
    fn add_inferred(&mut self,
                    item_id: ast::NodeId,
                    kind: ParamKind,
                    space: ParamSpace,
                    index: uint,
                    param_id: ast::NodeId) {
        let inf_index = InferredIndex(self.inferred_infos.len());
        let term = self.arena.alloc(InferredTerm(inf_index));
        let initial_variance = self.pick_initial_variance(item_id, space, index);
        self.inferred_infos.push(InferredInfo { item_id: item_id,
                                                kind: kind,
                                                space: space,
                                                index: index,
                                                param_id: param_id,
                                                term: term,
                                                initial_variance: initial_variance });
        // Each param_id must be registered at most once.
        let newly_added = self.inferred_map.insert(param_id, inf_index).is_none();
        assert!(newly_added);
        debug!("add_inferred(item_path={}, \
                item_id={}, \
                kind={:?}, \
                space={:?}, \
                index={}, \
                param_id={}, \
                inf_index={:?}, \
                initial_variance={:?})",
                ty::item_path_str(self.tcx, ast_util::local_def(item_id)),
                item_id, kind, space, index, param_id, inf_index,
                initial_variance);
    }
    /// Chooses the starting point for the fixed-point iteration:
    /// Bivariant by default, or the hardcoded variance when the item
    /// is one of the recorded lang items.
    fn pick_initial_variance(&self,
                             item_id: ast::NodeId,
                             space: ParamSpace,
                             index: uint)
                             -> ty::Variance
    {
        match space {
            SelfSpace | FnSpace => {
                ty::Bivariant
            }
            TypeSpace => {
                match self.lang_items.iter().find(|&&(n, _)| n == item_id) {
                    Some(&(_, ref variances)) => variances[index],
                    None => ty::Bivariant
                }
            }
        }
    }
    /// Number of inferreds registered so far.
    fn num_inferred(&self) -> uint {
        self.inferred_infos.len()
    }
}
impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
    fn visit_item(&mut self, item: &ast::Item) {
        debug!("add_inferreds for item {}", item.repr(self.tcx));
        match item.node {
            // Enums and structs: register their generics (no implicit
            // Self). Note: we do NOT walk into the item body here.
            ast::ItemEnum(_, ref generics) |
            ast::ItemStruct(_, ref generics) => {
                self.add_inferreds_for_item(item.id, false, generics);
            }
            // Traits additionally get an implicit `Self` inferred
            // (`has_self == true`), and their contents are walked.
            ast::ItemTrait(_, ref generics, _, _) => {
                self.add_inferreds_for_item(item.id, true, generics);
                visit::walk_item(self, item);
            }
            // Everything else declares no inferreds of its own; just
            // recurse so that nested items are visited.
            ast::ItemExternCrate(_) |
            ast::ItemUse(_) |
            ast::ItemDefaultImpl(..) |
            ast::ItemImpl(..) |
            ast::ItemStatic(..) |
            ast::ItemConst(..) |
            ast::ItemFn(..) |
            ast::ItemMod(..) |
            ast::ItemForeignMod(..) |
            ast::ItemTy(..) |
            ast::ItemMac(..) => {
                visit::walk_item(self, item);
            }
        }
    }
}
// Constraint construction and representation
//
// The second pass over the AST determines the set of constraints.
// We walk the set of items and, for each member, generate new constraints.
/// Context for the second pass: owns the first-pass results plus the
/// accumulated set of constraints.
struct ConstraintContext<'a, 'tcx: 'a> {
    terms_cx: TermsContext<'a, 'tcx>,
    // These are pointers to common `ConstantTerm` instances,
    // preallocated once so constraint construction can share them.
    covariant: VarianceTermPtr<'a>,
    contravariant: VarianceTermPtr<'a>,
    invariant: VarianceTermPtr<'a>,
    bivariant: VarianceTermPtr<'a>,
    constraints: Vec<Constraint<'a>> ,
}
/// Declares that the inferred variable `inferred` appears in a
/// location with variance `variance`.
#[derive(Copy)]
struct Constraint<'a> {
    inferred: InferredIndex,
    variance: &'a VarianceTerm<'a>,
}
/// Second pass: preallocate the four constant terms, then walk the
/// crate accumulating constraints for every item.
fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>,
                                        krate: &ast::Crate)
                                        -> ConstraintContext<'a, 'tcx>
{
    // Copy out the arena reference before `terms_cx` is moved into
    // the new context.
    let arena = terms_cx.arena;
    let mut cx = ConstraintContext {
        covariant: arena.alloc(ConstantTerm(ty::Covariant)),
        contravariant: arena.alloc(ConstantTerm(ty::Contravariant)),
        invariant: arena.alloc(ConstantTerm(ty::Invariant)),
        bivariant: arena.alloc(ConstantTerm(ty::Bivariant)),
        terms_cx: terms_cx,
        constraints: Vec::new(),
    };
    visit::walk_crate(&mut cx, krate);
    cx
}
impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> {
    fn visit_item(&mut self, item: &ast::Item) {
        let did = ast_util::local_def(item.id);
        let tcx = self.terms_cx.tcx;
        debug!("visit_item item={}",
               item.repr(tcx));
        match item.node {
            ast::ItemEnum(ref enum_definition, _) => {
                let scheme = ty::lookup_item_type(tcx, did);
                // Not entirely obvious: constraints on structs/enums do not
                // affect the variance of their type parameters. See discussion
                // in comment at top of module.
                //
                // self.add_constraints_from_generics(&scheme.generics);
                // Hack: If we directly call `ty::enum_variants`, it
                // annoyingly takes it upon itself to run off and
                // evaluate the discriminants eagerly (*grumpy* that's
                // not the typical pattern). This results in double
                // error messages because typeck goes off and does
                // this at a later time. All we really care about is
                // the types of the variant arguments, so we just call
                // `ty::VariantInfo::from_ast_variant()` ourselves
                // here, mainly so as to mask the differences between
                // struct-like enums and so forth.
                for ast_variant in &enum_definition.variants {
                    let variant =
                        ty::VariantInfo::from_ast_variant(tcx,
                                                          &**ast_variant,
                                                          /*discriminant*/ 0);
                    // Each variant argument appears in a covariant
                    // position relative to the enum itself.
                    for arg_ty in &variant.args {
                        self.add_constraints_from_ty(&scheme.generics, *arg_ty, self.covariant);
                    }
                }
            }
            ast::ItemStruct(..) => {
                let scheme = ty::lookup_item_type(tcx, did);
                // Not entirely obvious: constraints on structs/enums do not
                // affect the variance of their type parameters. See discussion
                // in comment at top of module.
                //
                // self.add_constraints_from_generics(&scheme.generics);
                // Each field type appears in a covariant position.
                let struct_fields = ty::lookup_struct_fields(tcx, did);
                for field_info in &struct_fields {
                    assert_eq!(field_info.id.krate, ast::LOCAL_CRATE);
                    let field_ty = ty::node_id_to_type(tcx, field_info.id.node);
                    self.add_constraints_from_ty(&scheme.generics, field_ty, self.covariant);
                }
            }
            ast::ItemTrait(..) => {
                let trait_def = ty::lookup_trait_def(tcx, did);
                // Supertrait bounds constrain the trait's parameters
                // covariantly.
                let predicates = ty::lookup_super_predicates(tcx, did);
                self.add_constraints_from_predicates(&trait_def.generics,
                                                     predicates.predicates.as_slice(),
                                                     self.covariant);
                let trait_items = ty::trait_items(tcx, did);
                for trait_item in &*trait_items {
                    match *trait_item {
                        ty::MethodTraitItem(ref method) => {
                            // Where-clauses on the method itself are
                            // contravariant; the signature is handled
                            // with input/output polarity internally.
                            self.add_constraints_from_predicates(
                                &method.generics,
                                method.predicates.predicates.get_slice(FnSpace),
                                self.contravariant);
                            self.add_constraints_from_sig(
                                &method.generics,
                                &method.fty.sig,
                                self.covariant);
                        }
                        ty::TypeTraitItem(ref data) => {
                            // Any trait with an associated type is
                            // invariant with respect to all of its
                            // inputs. See length discussion in the comment
                            // on this module.
                            let projection_ty = ty::mk_projection(tcx,
                                                                  trait_def.trait_ref.clone(),
                                                                  data.name);
                            self.add_constraints_from_ty(&trait_def.generics,
                                                         projection_ty,
                                                         self.invariant);
                        }
                    }
                }
            }
            // Items with no variance-relevant content of their own.
            ast::ItemExternCrate(_) |
            ast::ItemUse(_) |
            ast::ItemStatic(..) |
            ast::ItemConst(..) |
            ast::ItemFn(..) |
            ast::ItemMod(..) |
            ast::ItemForeignMod(..) |
            ast::ItemTy(..) |
            ast::ItemImpl(..) |
            ast::ItemDefaultImpl(..) |
            ast::ItemMac(..) => {
            }
        }
        visit::walk_item(self, item);
    }
}
/// Is `param_id` a lifetime according to `map`?
fn is_lifetime(map: &ast_map::Map, param_id: ast::NodeId) -> bool {
    if let Some(ast_map::NodeLifetime(..)) = map.find(param_id) {
        true
    } else {
        false
    }
}
impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
    /// Convenience accessor for the type context.
    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
        self.terms_cx.tcx
    }
    /// Looks up the `InferredIndex` assigned to `param_id` in pass
    /// one; ICEs if the parameter was never registered.
    fn inferred_index(&self, param_id: ast::NodeId) -> InferredIndex {
        match self.terms_cx.inferred_map.get(&param_id) {
            Some(&index) => index,
            None => {
                self.tcx().sess.bug(&format!(
                    "no inferred index entry for {}",
                    self.tcx().map.node_to_string(param_id)));
            }
        }
    }
    /// Maps a lifetime use back to the node of its declaration.
    /// Only early-bound regions are expected here.
    fn find_binding_for_lifetime(&self, param_id: ast::NodeId) -> ast::NodeId {
        let tcx = self.terms_cx.tcx;
        assert!(is_lifetime(&tcx.map, param_id));
        match tcx.named_region_map.get(&param_id) {
            Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id))
                => lifetime_decl_id,
            Some(_) => panic!("should not encounter non early-bound cases"),
            // The lookup should only fail when `param_id` is
            // itself a lifetime binding: use it as the decl_id.
            None => param_id,
        }
    }
    /// Is `param_id` a type parameter for which we infer variance?
    fn is_to_be_inferred(&self, param_id: ast::NodeId) -> bool {
        let result = self.terms_cx.inferred_map.contains_key(&param_id);
        // To safe-guard against invalid inferred_map constructions,
        // double-check if variance is inferred at some use of a type
        // parameter (by inspecting parent of its binding declaration
        // to see if it is introduced by a type or by a fn/impl).
        let check_result = |this:&ConstraintContext| -> bool {
            let tcx = this.terms_cx.tcx;
            let decl_id = this.find_binding_for_lifetime(param_id);
            // Currently only called on lifetimes; double-checking that.
            assert!(is_lifetime(&tcx.map, param_id));
            let parent_id = tcx.map.get_parent(decl_id);
            let parent = tcx.map.find(parent_id).unwrap_or_else(
                || panic!("tcx.map missing entry for id: {}", parent_id));
            let is_inferred;
            macro_rules! cannot_happen { () => { {
                panic!("invalid parent: {} for {}",
                       tcx.map.node_to_string(parent_id),
                       tcx.map.node_to_string(param_id));
            } } }
            // Variance is inferred only for parameters bound by type
            // definitions (ty/enum/struct/trait), not by fns or impls.
            match parent {
                ast_map::NodeItem(p) => {
                    match p.node {
                        ast::ItemTy(..) |
                        ast::ItemEnum(..) |
                        ast::ItemStruct(..) |
                        ast::ItemTrait(..) => is_inferred = true,
                        ast::ItemFn(..) => is_inferred = false,
                        _ => cannot_happen!(),
                    }
                }
                ast_map::NodeTraitItem(..) => is_inferred = false,
                ast_map::NodeImplItem(..) => is_inferred = false,
                _ => cannot_happen!(),
            }
            return is_inferred;
        };
        assert_eq!(result, check_result(self));
        return result;
    }
    /// Returns a variance term representing the declared variance of the type/region parameter
    /// with the given id.
    fn declared_variance(&self,
                         param_def_id: ast::DefId,
                         item_def_id: ast::DefId,
                         kind: ParamKind,
                         space: ParamSpace,
                         index: uint)
                         -> VarianceTermPtr<'a> {
        assert_eq!(param_def_id.krate, item_def_id.krate);
        if param_def_id.krate == ast::LOCAL_CRATE {
            // Parameter on an item defined within current crate:
            // variance not yet inferred, so return a symbolic
            // variance.
            let InferredIndex(index) = self.inferred_index(param_def_id.node);
            self.terms_cx.inferred_infos[index].term
        } else {
            // Parameter on an item defined within another crate:
            // variance already inferred, just look it up.
            let variances = ty::item_variances(self.tcx(), item_def_id);
            let variance = match kind {
                TypeParam => *variances.types.get(space, index),
                RegionParam => *variances.regions.get(space, index),
            };
            self.constant_term(variance)
        }
    }
    /// Records the constraint `V(index) <= variance`.
    fn add_constraint(&mut self,
                      InferredIndex(index): InferredIndex,
                      variance: VarianceTermPtr<'a>) {
        debug!("add_constraint(index={}, variance={:?})",
                index, variance);
        self.constraints.push(Constraint { inferred: InferredIndex(index),
                                           variance: variance });
    }
    /// Applies the contravariant transform to `variance`
    /// (i.e. `variance x -`).
    fn contravariant(&mut self,
                     variance: VarianceTermPtr<'a>)
                     -> VarianceTermPtr<'a> {
        self.xform(variance, self.contravariant)
    }
    /// Applies the invariant transform to `variance`
    /// (i.e. `variance x o`).
    fn invariant(&mut self,
                 variance: VarianceTermPtr<'a>)
                 -> VarianceTermPtr<'a> {
        self.xform(variance, self.invariant)
    }
    /// Maps a `ty::Variance` to the corresponding shared constant term.
    fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> {
        match v {
            ty::Covariant => self.covariant,
            ty::Invariant => self.invariant,
            ty::Contravariant => self.contravariant,
            ty::Bivariant => self.bivariant,
        }
    }
    /// Builds the term `v1 x v2`, folding constants eagerly and only
    /// allocating a `TransformTerm` when an inferred is involved.
    fn xform(&mut self,
             v1: VarianceTermPtr<'a>,
             v2: VarianceTermPtr<'a>)
             -> VarianceTermPtr<'a> {
        match (*v1, *v2) {
            (_, ConstantTerm(ty::Covariant)) => {
                // Applying a "covariant" transform is always a no-op
                v1
            }
            (ConstantTerm(c1), ConstantTerm(c2)) => {
                self.constant_term(c1.xform(c2))
            }
            _ => {
                &*self.terms_cx.arena.alloc(TransformTerm(v1, v2))
            }
        }
    }
    /// Adds constraints for a trait reference appearing with ambient
    /// variance `variance`, by delegating to the substs walker.
    fn add_constraints_from_trait_ref(&mut self,
                                      generics: &ty::Generics<'tcx>,
                                      trait_ref: &ty::TraitRef<'tcx>,
                                      variance: VarianceTermPtr<'a>) {
        debug!("add_constraints_from_trait_ref: trait_ref={} variance={:?}",
               trait_ref.repr(self.tcx()),
               variance);
        let trait_def = ty::lookup_trait_def(self.tcx(), trait_ref.def_id);
        self.add_constraints_from_substs(
            generics,
            trait_ref.def_id,
            trait_def.generics.types.as_slice(),
            trait_def.generics.regions.as_slice(),
            trait_ref.substs,
            variance);
    }
    /// Adds constraints appropriate for an instance of `ty` appearing
    /// in a context with the generics defined in `generics` and
    /// ambient variance `variance`
    fn add_constraints_from_ty(&mut self,
                               generics: &ty::Generics<'tcx>,
                               ty: Ty<'tcx>,
                               variance: VarianceTermPtr<'a>) {
        debug!("add_constraints_from_ty(ty={}, variance={:?})",
               ty.repr(self.tcx()),
               variance);
        match ty.sty {
            ty::ty_bool |
            ty::ty_char | ty::ty_int(_) | ty::ty_uint(_) |
            ty::ty_float(_) | ty::ty_str => {
                /* leaf type -- noop */
            }
            ty::ty_closure(..) => {
                self.tcx().sess.bug("Unexpected closure type in variance computation");
            }
            ty::ty_rptr(region, ref mt) => {
                // `&'r T` is contravariant in `'r` (per the transform)
                // and passes `variance` through to `T` (modulo mut).
                let contra = self.contravariant(variance);
                self.add_constraints_from_region(generics, *region, contra);
                self.add_constraints_from_mt(generics, mt, variance);
            }
            ty::ty_uniq(typ) | ty::ty_vec(typ, _) => {
                self.add_constraints_from_ty(generics, typ, variance);
            }
            ty::ty_ptr(ref mt) => {
                self.add_constraints_from_mt(generics, mt, variance);
            }
            ty::ty_tup(ref subtys) => {
                self.add_constraints_from_ty(generics, subty, variance);
            }
            ty::ty_enum(def_id, substs) |
            ty::ty_struct(def_id, substs) => {
                let item_type = ty::lookup_item_type(self.tcx(), def_id);
                // All type parameters on enums and structs should be
                // in the TypeSpace.
                assert!(item_type.generics.types.is_empty_in(subst::SelfSpace));
                assert!(item_type.generics.types.is_empty_in(subst::FnSpace));
                assert!(item_type.generics.regions.is_empty_in(subst::SelfSpace));
                assert!(item_type.generics.regions.is_empty_in(subst::FnSpace));
                self.add_constraints_from_substs(
                    generics,
                    def_id,
                    item_type.generics.types.get_slice(subst::TypeSpace),
                    item_type.generics.regions.get_slice(subst::TypeSpace),
                    substs,
                    variance);
            }
            ty::ty_projection(ref data) => {
                // Projections constrain through the trait reference's
                // declared variances (which are invariant for traits
                // with associated types; see module comment).
                let trait_ref = &data.trait_ref;
                let trait_def = ty::lookup_trait_def(self.tcx(), trait_ref.def_id);
                self.add_constraints_from_substs(
                    generics,
                    trait_ref.def_id,
                    trait_def.generics.types.as_slice(),
                    trait_def.generics.regions.as_slice(),
                    trait_ref.substs,
                    variance);
            }
            ty::ty_trait(ref data) => {
                let poly_trait_ref =
                    data.principal_trait_ref_with_self_ty(self.tcx(),
                                                          self.tcx().types.err);
                // The type `Foo<T+'a>` is contravariant w/r/t `'a`:
                let contra = self.contravariant(variance);
                self.add_constraints_from_region(generics, data.bounds.region_bound, contra);
                // Ignore the SelfSpace, it is erased.
                self.add_constraints_from_trait_ref(generics, &*poly_trait_ref.0, variance);
                // Projection bounds (`Foo<Item=T>`) are invariant in T.
                let projections = data.projection_bounds_with_self_ty(self.tcx(),
                                                                      self.tcx().types.err);
                for projection in &projections {
                    self.add_constraints_from_ty(generics, projection.0.ty, self.invariant);
                }
            }
            ty::ty_param(ref data) => {
                let def_id = generics.types.get(data.space, data.idx as uint).def_id;
                assert_eq!(def_id.krate, ast::LOCAL_CRATE);
                match self.terms_cx.inferred_map.get(&def_id.node) {
                    Some(&index) => {
                        self.add_constraint(index, variance);
                    }
                    None => {
                        // We do not infer variance for type parameters
                        // declared on methods. They will not be present
                        // in the inferred_map.
                    }
                }
            }
            ty::ty_bare_fn(_, &ty::BareFnTy { ref sig, .. }) => {
                self.add_constraints_from_sig(generics, sig, variance);
            }
            ty::ty_err => {
                // we encounter this when walking the trait references for object
                // types, where we use ty_err as the Self type
            }
            ty::ty_infer(..) => {
                self.tcx().sess.bug(
                    &format!("unexpected type encountered in \
                              variance inference: {}",
                             ty.repr(self.tcx())));
            }
        }
    }
    /// Adds constraints appropriate for a nominal type (enum, struct,
    /// object, etc) appearing in a context with ambient variance `variance`
    fn add_constraints_from_substs(&mut self,
                                   generics: &ty::Generics<'tcx>,
                                   def_id: ast::DefId,
                                   type_param_defs: &[ty::TypeParameterDef<'tcx>],
                                   region_param_defs: &[ty::RegionParameterDef],
                                   substs: &subst::Substs<'tcx>,
                                   variance: VarianceTermPtr<'a>) {
        debug!("add_constraints_from_substs(def_id={}, substs={}, variance={:?})",
               def_id.repr(self.tcx()),
               substs.repr(self.tcx()),
               variance);
        // Each actual type argument is constrained with the ambient
        // variance transformed by the parameter's declared variance.
        for p in type_param_defs {
            let variance_decl =
                self.declared_variance(p.def_id, def_id, TypeParam,
                                       p.space, p.index as uint);
            let variance_i = self.xform(variance, variance_decl);
            let substs_ty = *substs.types.get(p.space, p.index as uint);
            debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
                   variance_decl, variance_i);
            self.add_constraints_from_ty(generics, substs_ty, variance_i);
        }
        // Same for region (lifetime) arguments.
        for p in region_param_defs {
            let variance_decl =
                self.declared_variance(p.def_id, def_id,
                                       RegionParam, p.space, p.index as uint);
            let variance_i = self.xform(variance, variance_decl);
            let substs_r = *substs.regions().get(p.space, p.index as uint);
            self.add_constraints_from_region(generics, substs_r, variance_i);
        }
    }
    /// Adds constraints from a list of predicates (trait bounds,
    /// equality, outlives, projections) appearing with ambient
    /// variance `variance`.
    fn add_constraints_from_predicates(&mut self,
                                       generics: &ty::Generics<'tcx>,
                                       predicates: &[ty::Predicate<'tcx>],
                                       variance: VarianceTermPtr<'a>) {
        // NOTE(review): debug string retains a stale function name;
        // left untouched since it is runtime output.
        debug!("add_constraints_from_generics({})",
               generics.repr(self.tcx()));
        for predicate in predicates.iter() {
            match *predicate {
                ty::Predicate::Trait(ty::Binder(ref data)) => {
                    self.add_constraints_from_trait_ref(generics, &*data.trait_ref, variance);
                }
                ty::Predicate::Equate(ty::Binder(ref data)) => {
                    self.add_constraints_from_ty(generics, data.0, variance);
                    self.add_constraints_from_ty(generics, data.1, variance);
                }
                ty::Predicate::TypeOutlives(ty::Binder(ref data)) => {
                    self.add_constraints_from_ty(generics, data.0, variance);
                    let variance_r = self.xform(variance, self.contravariant);
                    self.add_constraints_from_region(generics, data.1, variance_r);
                }
                ty::Predicate::RegionOutlives(ty::Binder(ref data)) => {
                    // `'a : 'b` is still true if 'a gets bigger
                    self.add_constraints_from_region(generics, data.0, variance);
                    // `'a : 'b` is still true if 'b gets smaller
                    let variance_r = self.xform(variance, self.contravariant);
                    self.add_constraints_from_region(generics, data.1, variance_r);
                }
                ty::Predicate::Projection(ty::Binder(ref data)) => {
                    self.add_constraints_from_trait_ref(generics,
                                                        &*data.projection_ty.trait_ref,
                                                        variance);
                    // The projected-to type is invariant.
                    self.add_constraints_from_ty(generics, data.ty, self.invariant);
                }
            }
        }
    }
    /// Adds constraints appropriate for a function with signature
    /// `sig` appearing in a context with ambient variance `variance`
    fn add_constraints_from_sig(&mut self,
                                generics: &ty::Generics<'tcx>,
                                sig: &ty::PolyFnSig<'tcx>,
                                variance: VarianceTermPtr<'a>) {
        // Inputs are contravariant, the output is covariant.
        let contra = self.contravariant(variance);
        for &input in &sig.0.inputs {
            self.add_constraints_from_ty(generics, input, contra);
        }
        if let ty::FnConverging(result_type) = sig.0.output {
            self.add_constraints_from_ty(generics, result_type, variance);
        }
    }
    /// Adds constraints appropriate for a region appearing in a
    /// context with ambient variance `variance`
    fn add_constraints_from_region(&mut self,
                                   _generics: &ty::Generics<'tcx>,
                                   region: ty::Region,
                                   variance: VarianceTermPtr<'a>) {
        match region {
            ty::ReEarlyBound(param_id, _, _, _) => {
                if self.is_to_be_inferred(param_id) {
                    let index = self.inferred_index(param_id);
                    self.add_constraint(index, variance);
                }
            }
            ty::ReStatic => { }
            ty::ReLateBound(..) => {
                // We do not infer variance for region parameters on
                // methods or in fn types.
            }
            ty::ReFree(..) | ty::ReScope(..) | ty::ReInfer(..) |
            ty::ReEmpty => {
                // We don't expect to see anything but 'static or bound
                // regions when visiting member types or method types.
                self.tcx()
                    .sess
                    .bug(&format!("unexpected region encountered in variance \
                                   inference: {}",
                                  region.repr(self.tcx())));
            }
        }
    }
    /// Adds constraints appropriate for a mutability-type pair
    /// appearing in a context with ambient variance `variance`
    fn add_constraints_from_mt(&mut self,
                               generics: &ty::Generics<'tcx>,
                               mt: &ty::mt<'tcx>,
                               variance: VarianceTermPtr<'a>) {
        match mt.mutbl {
            ast::MutMutable => {
                // Mutable references are invariant in their referent.
                let invar = self.invariant(variance);
                self.add_constraints_from_ty(generics, mt.ty, invar);
            }
            ast::MutImmutable => {
                self.add_constraints_from_ty(generics, mt.ty, variance);
            }
        }
    }
}
// Constraint solving
//
// The final phase iterates over the constraints, refining the variance
// for each inferred until a fixed point is reached. This will be the
// optimal solution to the constraints. The final variance for each
// inferred is then written into the `variance_map` in the tcx.
struct SolveContext<'a, 'tcx: 'a> {
terms_cx: TermsContext<'a, 'tcx>,
constraints: Vec<Constraint<'a>> ,
// Maps from an InferredIndex to the inferred value for that variable.
solutions: Vec<ty::Variance> }
fn solve_constraints(constraints_cx: ConstraintContext) {
let ConstraintContext { terms_cx, constraints, .. } = constraints_cx;
let solutions =
terms_cx.inferred_infos.iter()
.map(|ii| ii.initial_variance)
.collect();
let mut solutions_cx = SolveContext {
terms_cx: terms_cx,
constraints: constraints,
solutions: solutions
};
solutions_cx.solve();
solutions_cx.write();
}
impl<'a, 'tcx> SolveContext<'a, 'tcx> {
fn solve(&mut self) {
// Propagate constraints until a fixed point is reached. Note
// that the maximum number of iterations is 2C where C is the
// number of constraints (each variable can change values at most
// twice). Since number of constraints is linear in size of the
// input, so is the inference process.
let mut changed = true;
while changed {
changed = false;
for constraint in &self.constraints {
let Constraint { inferred, variance: term } = *constraint;
let InferredIndex(inferred) = inferred;
let variance = self.evaluate(term);
let old_value = self.solutions[inferred];
let new_value = glb(variance, old_value);
if old_value != new_value {
debug!("Updating inferred {} (node {}) \
from {:?} to {:?} due to {:?}",
inferred,
self.terms_cx
.inferred_infos[inferred]
.param_id,
old_value,
new_value,
term);
self.solutions[inferred] = new_value;
changed = true;
}
}
}
}
fn write(&self) {
// Collect all the variances for a particular item and stick
// them into the variance map. We rely on the fact that we
// generate all the inferreds for a particular item
// consecutively (that is, we collect solutions for an item
// until we see a new item id, and we assume (1) the solutions
// are in the same order as the type parameters were declared
// and (2) all solutions or a given item appear before a new
// item id).
let tcx = self.terms_cx.tcx;
let solutions = &self.solutions;
let inferred_infos = &self.terms_cx.inferred_infos;
let mut index = 0;
let num_inferred = self.terms_cx.num_inferred();
while index < num_inferred {
let item_id = inferred_infos[index].item_id;
let mut types = VecPerParamSpace::empty();
let mut regions = VecPerParamSpace::empty();
while index < num_inferred && inferred_infos[index].item_id == item_id {
let info = &inferred_infos[index];
let variance = solutions[index];
debug!("Index {} Info {} / {:?} / {:?} Variance {:?}",
index, info.index, info.kind, info.space, variance);
match info.kind {
TypeParam => { types.push(info.space, variance); }
RegionParam => { regions.push(info.space, variance); }
}
index += 1;
}
let item_variances = ty::ItemVariances {
types: types,
regions: regions
};
debug!("item_id={} item_variances={}",
item_id,
item_variances.repr(tcx));
let item_def_id = ast_util::local_def(item_id);
// For unit testing: check for a special "rustc_variance"
// attribute and report an error with various results if found.
if ty::has_attr(tcx, item_def_id, "rustc_variance") {
let found = item_variances.repr(tcx);
span_err!(tcx.sess, tcx.map.span(item_id), E0208, "{}", &found[..]);
}
let newly_added = tcx.item_variance_map.borrow_mut()
.insert(item_def_id, Rc::new(item_variances)).is_none();
assert!(newly_added);
}
}
fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance {
match *term {
ConstantTerm(v) => {
v
}
TransformTerm(t1, t2) => {
let v1 = self.evaluate(t1);
let v2 = self.evaluate(t2);
v1.xform(v2)
}
InferredTerm(InferredIndex(index)) => {
self.solutions[index]
}
}
}
}
// Miscellany transformations on variance
trait Xform {
fn xform(self, v: Self) -> Self;
}
impl Xform for ty::Variance {
fn xform(self, v: ty::Variance) -> ty::Variance {
// "Variance transformation", Figure 1 of The Paper
match (self, v) {
// Figure 1, column 1.
(ty::Covariant, ty::Covariant) => ty::Covariant,
(ty::Covariant, ty::Contravariant) => ty::Contravariant,
(ty::Covariant, ty::Invariant) => ty::Invariant,
(ty::Covariant, ty::Bivariant) => ty::Bivariant,
// Figure 1, column 2.
(ty::Contravariant, ty::Covariant) => ty::Contravariant,
(ty::Contravariant, ty::Contravariant) => ty::Covariant,
(ty::Contravariant, ty::Invariant) => ty::Invariant,
(ty::Contravariant, ty::Bivariant) => ty::Bivariant,
// Figure 1, column 3.
(ty::Invariant, _) => ty::Invariant,
// Figure 1, column 4.
(ty::Bivariant, _) => ty::Bivariant,
}
}
}
fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance {
// Greatest lower bound of the variance lattice as
// defined in The Paper:
//
// *
// - +
// o
match (v1, v2) {
(ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant,
(ty::Covariant, ty::Contravariant) => ty::Invariant,
(ty::Contravariant, ty::Covariant) => ty::Invariant,
(ty::Covariant, ty::Covariant) => ty::Covariant,
(ty::Contravariant, ty::Contravariant) => ty::Contravariant,
(x, ty::Bivariant) | (ty::Bivariant, x) => x,
}
}<|fim▁end|> | |
<|file_name|>streaming_wordcount_it_test.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""End-to-end test for the streaming wordcount example."""
from __future__ import absolute_import
import logging
import unittest
import uuid
from builtins import range
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.examples import streaming_wordcount
from apache_beam.io.gcp.tests.pubsub_matcher import PubSubMessageMatcher
from apache_beam.runners.runner import PipelineState
from apache_beam.testing import test_utils
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
INPUT_TOPIC = 'wc_topic_input'
OUTPUT_TOPIC = 'wc_topic_output'
INPUT_SUB = 'wc_subscription_input'
OUTPUT_SUB = 'wc_subscription_output'
DEFAULT_INPUT_NUMBERS = 500
WAIT_UNTIL_FINISH_DURATION = 6 * 60 * 1000 # in milliseconds
class StreamingWordCountIT(unittest.TestCase):
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.project = self.test_pipeline.get_option('project')
self.uuid = str(uuid.uuid4())
# Set up PubSub environment.
from google.cloud import pubsub
self.pub_client = pubsub.PublisherClient()
self.input_topic = self.pub_client.create_topic(
self.pub_client.topic_path(self.project, INPUT_TOPIC + self.uuid))
self.output_topic = self.pub_client.create_topic(
self.pub_client.topic_path(self.project, OUTPUT_TOPIC + self.uuid))
self.sub_client = pubsub.SubscriberClient()
self.input_sub = self.sub_client.create_subscription(
self.sub_client.subscription_path(self.project, INPUT_SUB + self.uuid),
self.input_topic.name)
self.output_sub = self.sub_client.create_subscription(
self.sub_client.subscription_path(self.project, OUTPUT_SUB + self.uuid),
self.output_topic.name,
ack_deadline_seconds=60)
def _inject_numbers(self, topic, num_messages):
"""Inject numbers as test data to PubSub."""
logging.debug('Injecting %d numbers to topic %s', num_messages, topic.name)
for n in range(num_messages):
self.pub_client.publish(self.input_topic.name, str(n).encode('utf-8'))
def tearDown(self):
test_utils.cleanup_subscriptions(self.sub_client,
[self.input_sub, self.output_sub])
test_utils.cleanup_topics(self.pub_client,
[self.input_topic, self.output_topic])<|fim▁hole|> # Build expected dataset.
expected_msg = [('%d: 1' % num).encode('utf-8')
for num in range(DEFAULT_INPUT_NUMBERS)]
# Set extra options to the pipeline for test purpose
state_verifier = PipelineStateMatcher(PipelineState.RUNNING)
pubsub_msg_verifier = PubSubMessageMatcher(self.project,
self.output_sub.name,
expected_msg,
timeout=400)
extra_opts = {'input_subscription': self.input_sub.name,
'output_topic': self.output_topic.name,
'wait_until_finish_duration': WAIT_UNTIL_FINISH_DURATION,
'on_success_matcher': all_of(state_verifier,
pubsub_msg_verifier)}
# Generate input data and inject to PubSub.
self._inject_numbers(self.input_topic, DEFAULT_INPUT_NUMBERS)
# Get pipeline options from command argument: --test-pipeline-options,
# and start pipeline job by calling pipeline main function.
streaming_wordcount.run(
self.test_pipeline.get_full_options_as_args(**extra_opts),
save_main_session=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()<|fim▁end|> |
@attr('IT')
def test_streaming_wordcount_it(self): |
<|file_name|>cast-as-bool.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
<|fim▁hole|> let u = (5 as bool);
//~^ ERROR cannot cast as `bool`
//~^^ HELP compare with zero instead
}<|fim▁end|> | fn main() { |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from opencvBuilder import exists,generate<|fim▁end|> | |
<|file_name|>cloudtasks_pb2_grpc.py<|end_file_name|><|fim▁begin|># Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.tasks_v2beta2.proto import (
cloudtasks_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
queue_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
task_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class CloudTasksStub(object):
"""Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListQueues = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ListQueues",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.FromString,
)
self.GetQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.CreateQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CreateQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.UpdateQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/UpdateQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.DeleteQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/DeleteQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.PurgeQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/PurgeQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.PauseQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/PauseQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.ResumeQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ResumeQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.GetIamPolicy = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.SetIamPolicy = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/SetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/TestIamPermissions",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
self.ListTasks = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ListTasks",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.FromString,
)
self.GetTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.CreateTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CreateTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.DeleteTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/DeleteTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.LeaseTasks = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/LeaseTasks",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.FromString,
)
self.AcknowledgeTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/AcknowledgeTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.RenewLease = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/RenewLease",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.CancelLease = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CancelLease",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.RunTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/RunTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
class CloudTasksServicer(object):
"""Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
def ListQueues(self, request, context):
"""Lists queues.
Queues are returned in lexicographical order.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetQueue(self, request, context):
"""Gets a queue.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateQueue(self, request, context):
"""Creates a queue.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless of whether
it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateQueue(self, request, context):
"""Updates a queue.
This method creates the queue if it does not exist and updates
the queue if it does exist.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless of whether
it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteQueue(self, request, context):
"""Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be created
for 7 days.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PurgeQueue(self, request, context):
"""Purges a queue by deleting all of its tasks.
All tasks created before this method is called are permanently deleted.
Purge operations can take up to one minute to take effect. Tasks
might be dispatched before the purge takes effect. A purge is irreversible.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PauseQueue(self, request, context):
"""Pauses the queue.
If a queue is paused then the system will stop dispatching tasks
until the queue is resumed via
[ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
when the queue is paused. A queue is paused if its
[state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ResumeQueue(self, request, context):
"""Resume a queue.
This method resumes a queue after it has been
[PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
[DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
WARNING: Resuming many high-QPS queues at the same time can
lead to target overloading. If you are resuming high-QPS
queues, follow the 500/50/5 pattern described in
[Managing Cloud Tasks Scaling
Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetIamPolicy(self, request, context):
"""Gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue].
Returns an empty policy if the resource exists and does not have a policy
set.
Authorization requires the following
[Google IAM](https://cloud.google.com/iam) permission on the specified
resource parent:
* `cloudtasks.queues.getIamPolicy`
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetIamPolicy(self, request, context):
"""Sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing<|fim▁hole|> Project-level permissions are required to use the Cloud Console.
Authorization requires the following
[Google IAM](https://cloud.google.com/iam) permission on the specified
resource parent:
* `cloudtasks.queues.setIamPolicy`
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def TestIamPermissions(self, request, context):
"""Returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
If the resource does not exist, this will return an empty set of
permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
Note: This operation is designed to be used for building permission-aware
UIs and command-line tools, not for authorization checking. This operation
may "fail open" without warning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTasks(self, request, context):
"""Lists the tasks in a queue.
By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
due to performance considerations;
[response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
subset of information which is returned.
The tasks may be returned in any order. The ordering may change at any
time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetTask(self, request, context):
"""Gets a task.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateTask(self, request, context):
"""Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask command.
* For [App Engine queues][google.cloud.tasks.v2beta2.AppEngineHttpTarget], the maximum task size is
100KB.
* For [pull queues][google.cloud.tasks.v2beta2.PullTarget], the maximum task size is 1MB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteTask(self, request, context):
"""Deletes a task.
A task can be deleted if it is scheduled or dispatched. A task
cannot be deleted if it has completed successfully or permanently
failed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LeaseTasks(self, request, context):
"""Leases tasks from a pull queue for
[lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration].
This method is invoked by the worker to obtain a lease. The
worker must acknowledge the task via
[AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after they have
performed the work associated with the task.
The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
the worker needs to perform the work associated with the task. To
return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
[response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
[FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
requests are allowed per
queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
is returned when this limit is
exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
is also returned when
[max_tasks_dispatched_per_second][google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second]
is exceeded.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AcknowledgeTask(self, request, context):
"""Acknowledges a pull task.
The worker, that is, the entity that
[leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must call this method
to indicate that the work associated with the task has finished.
The worker must acknowledge a task within the
[lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
will expire and the task will become available to be leased
again. After the task is acknowledged, it will not be returned
by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
[GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
[ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RenewLease(self, request, context):
"""Renew the current lease of a pull task.
The worker can use this method to extend the lease by a new
duration, starting from now. The new task lease will be
returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CancelLease(self, request, context):
"""Cancel a pull task's lease.
The worker can use this method to cancel a task's lease by
setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
make the task available to be leased to the next caller of
[LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RunTask(self, request, context):
"""Forces a task to run now.
When this method is called, Cloud Tasks will dispatch the task, even if
the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
This command is meant to be used for manual debugging. For
example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
task after a fix has been made or to manually force a task to be
dispatched now.
The dispatched task is returned. That is, the task that is returned
contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
before the task is received by its target.
If Cloud Tasks receives a successful response from the task's
target, then the task will be deleted; otherwise the task's
[schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
[NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
task that has already succeeded or permanently failed.
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
[pull task][google.cloud.tasks.v2beta2.PullMessage].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_CloudTasksServicer_to_server(servicer, server):
rpc_method_handlers = {
"ListQueues": grpc.unary_unary_rpc_method_handler(
servicer.ListQueues,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.SerializeToString,
),
"GetQueue": grpc.unary_unary_rpc_method_handler(
servicer.GetQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"CreateQueue": grpc.unary_unary_rpc_method_handler(
servicer.CreateQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"UpdateQueue": grpc.unary_unary_rpc_method_handler(
servicer.UpdateQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"DeleteQueue": grpc.unary_unary_rpc_method_handler(
servicer.DeleteQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"PurgeQueue": grpc.unary_unary_rpc_method_handler(
servicer.PurgeQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"PauseQueue": grpc.unary_unary_rpc_method_handler(
servicer.PauseQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"ResumeQueue": grpc.unary_unary_rpc_method_handler(
servicer.ResumeQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"GetIamPolicy": grpc.unary_unary_rpc_method_handler(
servicer.GetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
"SetIamPolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
"TestIamPermissions": grpc.unary_unary_rpc_method_handler(
servicer.TestIamPermissions,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
),
"ListTasks": grpc.unary_unary_rpc_method_handler(
servicer.ListTasks,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.SerializeToString,
),
"GetTask": grpc.unary_unary_rpc_method_handler(
servicer.GetTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"CreateTask": grpc.unary_unary_rpc_method_handler(
servicer.CreateTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"DeleteTask": grpc.unary_unary_rpc_method_handler(
servicer.DeleteTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"LeaseTasks": grpc.unary_unary_rpc_method_handler(
servicer.LeaseTasks,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.SerializeToString,
),
"AcknowledgeTask": grpc.unary_unary_rpc_method_handler(
servicer.AcknowledgeTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"RenewLease": grpc.unary_unary_rpc_method_handler(
servicer.RenewLease,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"CancelLease": grpc.unary_unary_rpc_method_handler(
servicer.CancelLease,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"RunTask": grpc.unary_unary_rpc_method_handler(
servicer.RunTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.tasks.v2beta2.CloudTasks", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))<|fim▁end|> | policy.
Note: The Cloud Console does not check queue-level IAM permissions yet. |
<|file_name|>vaunix.py<|end_file_name|><|fim▁begin|>from __future__ import division
"""
instek_pst.py
part of the CsPyController package for AQuA experiment control by Martin Lichtman
Handles sending commands to Instek PST power supplies over RS232.
created = 2015.07.09
modified >= 2015.07.09
"""
__author__ = 'Martin Lichtman'
import logging
logger = logging.getLogger(__name__)
from atom.api import Bool, Str, Member, Int
from instrument_property import Prop, IntProp, ListProp, FloatProp
from cs_instruments import Instrument
<|fim▁hole|>from cs_errors import PauseError
from ctypes import *
class Vaunix(Prop):
isInitialized = Bool(False)
ID = Int()
va = Member()
model = Str()
serial = Int()
frequency = Member()
power = Member()
pulsewidth = Member()
pulserep = Member()
pulseenable = Bool()
startfreq = Member()
endfreq = Member()
sweeptime = Member()
sweepmode = Bool()
sweeptype = Bool()
sweepenable = Bool()
sweepdir = Bool()
internalref = Bool()
useexternalmod = Bool()
rfonoff = Bool()
maxPower = Int()
minPower = Int()
minFreq = Int()
maxFreq = Int()
def __init__(self, name, experiment, description=''):
super(Vaunix, self).__init__(name, experiment, description)
self.frequency = FloatProp('Frequency', experiment, 'Frequency (MHz)', '0')
self.power = FloatProp('Power', experiment, 'Power (dBm)', '0')
self.pulsewidth = FloatProp('PulseWidth', experiment, 'Pulse Width (us)', '0')
self.pulserep = FloatProp('PulseRep', experiment, 'Pulse Rep Time (us)', '0')
self.startfreq = FloatProp('StartFreq', experiment, 'Start Frequency (MHz)', '0')
self.endfreq = FloatProp('EndFreq', experiment, 'End Frequency (MHz)', '0')
self.sweeptime = IntProp('SweepTime', experiment, 'Sweep Time (ms)', '0')
self.properties += ['ID', 'model', 'serial', 'frequency','power','pulsewidth','pulserep','pulseenable','startfreq','endfreq','sweeptime',
'sweepmode', 'sweeptype', 'sweepdir', 'sweepenable', 'internalref', 'useexternalmod', 'rfonoff', 'maxPower']
def initialize(self,va):
self.va = va
errcode = self.va.fnLMS_InitDevice(self.ID)
if (errcode !=0):
errcodereset = self.va.fnLMS_CloseDevice(self.ID)
if (errcodereset != 0): #if device fails to initialize, it may be because it was not closed previously. Try closing and reinitializing it.
logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID,errcode))
raise PauseError
errcode = self.va.fnLMS_InitDevice(self.ID)
if (errcode != 0):
logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID,errcode))
raise PauseError
self.maxPower = int(self.va.fnLMS_GetMaxPwr(self.ID)/4)
self.minPower = int(self.va.fnLMS_GetMinPwr(self.ID)/4)
self.minFreq = int(self.va.fnLMS_GetMinFreq(self.ID))
self.maxFreq = int(self.va.fnLMS_GetMaxFreq(self.ID))
return
def freq_unit(self,val):
return int(val*100000)
def power_unit(self,value):
return int((self.maxPower - value)*4)
def power_sanity_check(self,value):
if (value < self.minPower or value > self.maxPower):
logger.error("Vaunix device {} power ({} dBm) outside min/max range: {} dBm, {} dBm.".format(self.ID,value,self.minPower,self.maxPower))
raise PauseError
return
def freq_sanity_check(self,value):
if (value < self.minFreq or value > self.maxFreq):
logger.error("Vaunix device {} frequency ({} x10 Hz) outside min/max range: {} x10 Hz, {} x10 Hz.".format(self.ID,value,self.minFreq,self.maxFreq))
raise PauseError
return
def update(self):
if (self.rfonoff):
self.freq_sanity_check(self.freq_unit(self.frequency.value))
self.va.fnLMS_SetFrequency(self.ID, self.freq_unit(self.frequency.value))
self.power_sanity_check(self.power.value)
self.va.fnLMS_SetPowerLevel(self.ID, self.power_unit(self.power.value))
if (self.sweepenable):
self.freq_sanity_check(self.freq_unit(self.startfreq.value))
self.va.fnLMS_SetStartFrequency(self.ID, self.freq_unit(self.startfreq.value))
self.freq_sanity_check(self.freq_unit(self.endfreq.value))
self.va.fnLMS_SetEndFrequency(self.ID, self.freq_unit(self.endfreq.value))
self.va.fnLMS_SetSweepTime(self.ID, self.sweeptime.value)
self.va.fnLMS_SetSweepDirection(self.ID, self.sweepdir)
self.va.fnLMS_SetSweepMode(self.ID, self.sweepmode) #True: Repeat Sweep, False: Sweep Once
self.va.fnLMS_SetSweepType(self.ID, self.sweeptype) #True: Bidirectional Sweep, False: Unidirectional Sweep
self.va.fnLMS_StartSweep(self.ID, self.sweepenable)
self.va.fnLMS_SetFastPulsedOutput(self.ID, c_float(self.pulsewidth.value*1e-6), c_float(self.pulserep.value*1e-6), self.pulseenable)
self.va.fnLMS_SetUseExternalPulseMod(self.ID, self.useexternalmod)
self.va.fnLMS_SetUseInternalRef(self.ID, self.internalref) #True: internal ref, False: external ref
self.va.fnLMS_SaveSettings(self.ID)
self.va.fnLMS_SetRFOn(self.ID, self.rfonoff)
self.getparams()
return
def getparams(self):
logger.info("Parameters for Vaunix # {}".format(self.ID))
logger.info("Frequency: {} MHz".format(
self.va.fnLMS_GetFrequency(self.ID)/100000))
logger.info("Power Level: {} dBm".format(
self.va.fnLMS_GetPowerLevel(self.ID)/4))
class Vaunixs(Instrument):
version = '2015.11.19'
motors = Member()
isInitialized = Bool(False)
va = Member()
testMode = Bool(False) #Test mode: Set to False for actual use.
def __init__(self, name, experiment, description=''):
super(Vaunixs, self).__init__(name, experiment, description)
self.motors = ListProp('motors', experiment, 'A list of individual Vaunix signal generators', listElementType=Vaunix,
listElementName='Vaunix')
self.properties += ['version', 'motors']
num = self.initialize()
self.motors.length = num
self.motors.refreshGUI()
#Initialize: loads and initializes DLL
def initialize(self):
num = 0
if self.enable:
CDLL_file = "./vaunix/VNX_fmsynth.dll"
self.va = CDLL(CDLL_file)
if (self.testMode):
logger.warning("Warning: Vaunix in test mode. Set testMode=False in vaunix.py to turn off test mode.")
self.va.fnLMS_SetTestMode(self.testMode) #Test mode... this needs to be set False for actual run. Do not remove this command (default setting is True).
self.isInitialized = True
num = self.detect_generators()
return num
def preExperiment(self, hdf5):
if self.enable:
if (not self.isInitialized):
self.initialize()
for i in self.motors:
#initialize serial connection to each power supply
i.initialize(self.va)
self.isInitialized = True
def preIteration(self, iterationresults, hdf5):
"""
Every iteration, send the motors updated positions.
"""
if self.enable:
msg = ''
try:
for i in self.motors:
i.update()
except Exception as e:
logger.error('Problem updating Vaunix:\n{}\n{}\n'.format(msg, e))
self.isInitialized = False
raise PauseError
def postMeasurement(self, measurementresults, iterationresults, hdf5):
return
def postIteration(self, iterationresults, hdf5):
return
def postExperiment(self, hdf5):
return
def finalize(self,hdf5):
return
#detect_generators: Calls DLL function to check for number of generators and their IDs.
def detect_generators(self):
if (not self.isInitialized): #test if DLL is already loaded. If not, load it.
self.initialize()
num=self.va.fnLMS_GetNumDevices() #ask DLL for the number of connected devices
logger.debug("Number of vaunix devices detected: {}".format(num))
while (num>len(self.motors)): #if num connected devices > number in array, add elements.
self.motors.add()
while (num<len(self.motors)): #if <, subtract elements.
self.motors.pop(self.motors.length-1)
self.motors.length -= 1
devinfotype = c_uint*num
devinfo = devinfotype()
self.va.fnLMS_GetDevInfo(addressof(devinfo)) #get device IDs
for mn, i in enumerate(self.motors):
i.ID = int(devinfo[mn]) #copy device IDs to ID variable
modnumtype = c_char*100
modnum = modnumtype()
self.va.fnLMS_GetModelNameA(i.ID,addressof(modnum)) #get device model names
i.model = modnum.value
serial = c_int()
serial = self.va.fnLMS_GetSerialNumber(i.ID) #get device serial numbers
i.serial = serial
return num<|fim▁end|> | |
<|file_name|>builder.go<|end_file_name|><|fim▁begin|>package activekit
func ItemsFromIter(maxIndex uint, next func(index uint) *MenuItem) MenuItems {
var items = make(MenuItems, 0, maxIndex)
for i := uint(0); i < maxIndex; i++ {
var item = next(i)
if item != nil {
items = append(items, item)
}
}
return items<|fim▁hole|><|fim▁end|> | } |
<|file_name|>test-tls-friendly-error-message.js<|end_file_name|><|fim▁begin|>// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
<|fim▁hole|>
var common = require('../common');
var assert = require('assert');
var fs = require('fs');
var tls = require('tls');
var key = fs.readFileSync(common.fixturesDir + '/keys/agent1-key.pem');
var cert = fs.readFileSync(common.fixturesDir + '/keys/agent1-cert.pem');
tls.createServer({ key: key, cert: cert }, function(conn) {
conn.end();
this.close();
}).listen(0, function() {
var options = { port: this.address().port, rejectUnauthorized: true };
tls.connect(options).on('error', common.mustCall(function(err) {
assert.equal(err.code, 'UNABLE_TO_VERIFY_LEAF_SIGNATURE');
assert.equal(err.message, 'unable to verify the first certificate');
this.destroy();
}));
});<|fim▁end|> | if (!process.versions.openssl) {
console.error('Skipping because node compiled without OpenSSL.');
process.exit(0);
} |
<|file_name|>get_action.py<|end_file_name|><|fim▁begin|>e = .1
mean_list = base.List(self.get_theta(key="treatment"), base.Mean, ["control", "treatment"])
if np.random.binomial(1,e) == 1:<|fim▁hole|>else:
self.action["treatment"] = mean_list.max()
self.action["propensity"] = (1-e)<|fim▁end|> | self.action["treatment"] = mean_list.random()
self.action["propensity"] = 0.1*0.5 |
<|file_name|>resized_fuse_test.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and<|fim▁hole|>
import tensorflow as tf
from deeplab2.model.layers import resized_fuse
class ResizedFuseTest(tf.test.TestCase):
def test_resize_and_fuse_features(self):
batch, height, width, channels = 2, 11, 11, 6
smaller_height, smaller_width, smaller_channels = 6, 6, 3
larger_height1, larger_width1 = 21, 21 # Stride 2 conv.
larger_height2, larger_width2 = 22, 22 # Stride 2 conv.
larger_height3, larger_width3 = 23, 23 # Conv and resize.
feature_list = []
feature_list.append(tf.zeros([batch, smaller_height, smaller_width,
smaller_channels]))
feature_list.append(tf.zeros([batch, smaller_height, smaller_width,
channels]))
feature_list.append(tf.zeros([batch, height, width, smaller_channels]))
feature_list.append(tf.zeros([batch, height, width, channels]))
feature_list.append(tf.zeros([batch, larger_height1, larger_width1,
channels]))
feature_list.append(tf.zeros([batch, larger_height1, larger_width1,
smaller_channels]))
feature_list.append(tf.zeros([batch, larger_height2, larger_width2,
smaller_channels]))
feature_list.append(tf.zeros([batch, larger_height3, larger_width3,
smaller_channels]))
layer = resized_fuse.ResizedFuse(name='fuse',
height=height,
width=width,
num_channels=channels)
output = layer(feature_list)
self.assertEqual(output.get_shape().as_list(), [batch, height, width,
channels])
if __name__ == '__main__':
tf.test.main()<|fim▁end|> | # limitations under the License.
"""Tests for resized_fuse.""" |
<|file_name|>rhs-type.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that trans treats the rhs of pth's decl
// as a _|_-typed thing, not a str-typed thing<|fim▁hole|>
#![allow(unreachable_code)]
#![allow(unused_variable)]
struct T { t: StrBuf }
fn main() {
let pth = fail!("bye");
let _rs: T = T {t: pth};
}<|fim▁end|> | // error-pattern:bye |
<|file_name|>ComponentsFactory.java<|end_file_name|><|fim▁begin|>package com.github.nikolaymakhonin.android_app_example.di.factories;
import android.content.Context;
import android.support.annotation.NonNull;
import com.github.nikolaymakhonin.android_app_example.di.components.AppComponent;
import com.github.nikolaymakhonin.android_app_example.di.components.DaggerAppComponent;
import com.github.nikolaymakhonin.android_app_example.di.components.DaggerServiceComponent;
import com.github.nikolaymakhonin.android_app_example.di.components.ServiceComponent;
import com.github.nikolaymakhonin.common_di.modules.service.ServiceModuleBase;
public final class ComponentsFactory {
public static AppComponent buildAppComponent(@NonNull Context appContext) {
<|fim▁hole|> AppComponent appComponent = DaggerAppComponent.builder()
.serviceComponent(serviceComponent)
.build();
return appComponent;
}
public static ServiceComponent buildServiceComponent(@NonNull Context appContext) {
ServiceComponent serviceComponent = DaggerServiceComponent.builder()
.serviceModuleBase(new ServiceModuleBase(appContext))
.build();
return serviceComponent;
}
}<|fim▁end|> | ServiceComponent serviceComponent = buildServiceComponent(appContext);
|
<|file_name|>ContainsOperand.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright 2012 Apigee Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.usergrid.persistence.query.tree;
import org.antlr.runtime.Token;
import org.usergrid.persistence.exceptions.PersistenceException;
/**
* @author tnine
*
*/
public class ContainsOperand extends Operand {
/**
* @param property
* @param literal
*/
public ContainsOperand(Token t) {
super(t);
}
/* (non-Javadoc)
* @see org.usergrid.persistence.query.tree.Operand#visit(org.usergrid.persistence.query.tree.QueryVisitor)
*/
@Override
public void visit(QueryVisitor visitor) throws PersistenceException {
visitor.visit(this);
}
public void setProperty(String name){
setChild(0, new Property(name));
}
<|fim▁hole|> setChild(1, new StringLiteral(value));
}
public Property getProperty(){
return (Property) this.children.get(0);
}
public StringLiteral getString(){
return (StringLiteral) this.children.get(1);
}
}<|fim▁end|> | public void setValue(String value){ |
<|file_name|>Honeybee_EnergyPlus NoMass Opaque Material.py<|end_file_name|><|fim▁begin|>#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <[email protected]>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License<|fim▁hole|># along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to create a custom opaque material that has no mass, which can be plugged into the "Honeybee_EnergyPlus Construction" component.
_
It is important to note that this component creates a material with no mass and, because of this, the accuracy of the component is not as great as a material that has mass. However, this component is very useful if you only have an R-value for a material (or a construction) and you know that the mass is relatively small.
_
If you want to create a material that accounts for mass, you should use the "Honeybee_EnergyPlus Window Material" component.
-
Provided by Honeybee 0.0.66
Args:
_name: A text name for your NoMass Opaque Material.
_roughness_: A text value that indicated the roughness of your material. This can be either "VeryRough", "Rough", "MediumRough", "MediumSmooth", "Smooth", and "VerySmooth". The default is set to "Rough".
_R_Value: A number representing the R-Value of the material in m2-K/W.
_thermAbsp_: An number between 0 and 1 that represents the thermal abstorptance of the material. The default is set to 0.9, which is common for most non-metallic materials.
_solAbsp_: An number between 0 and 1 that represents the abstorptance of solar radiation by the material. The default is set to 0.7, which is common for most non-metallic materials.
_visAbsp_: An number between 0 and 1 that represents the abstorptance of visible light by the material. The default is set to 0.7, which is common for most non-metallic materials.
Returns:
EPMaterial: A no-mass opaque material that can be plugged into the "Honeybee_EnergyPlus Construction" component.
"""
ghenv.Component.Name = "Honeybee_EnergyPlus NoMass Opaque Material"
ghenv.Component.NickName = 'EPNoMassMat'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "06 | Energy | Material | Construction"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import Grasshopper.Kernel as gh
w = gh.GH_RuntimeMessageLevel.Warning
def checkInputs():
#Check to be sure that SHGC and VT are between 0 and 1.
checkData = True
def checkBtwZeroAndOne(variable, default, variableName):
if variable == None: newVariable = default
else:
if variable <= 1 and variable >= 0: newVariable = variable
else:
newVariable = 0
checkData = False
warning = variableName + " must be between 0 and 1."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return newVariable
thermAbs = checkBtwZeroAndOne(_thermAbsp_, None, "_thermAbsp_")
solAbsp = checkBtwZeroAndOne(_solAbsp_, None, "_solAbsp_")
visAbsp = checkBtwZeroAndOne(_visAbsp_, None, "_visAbsp_")
#Check the Roughness value.
if _roughness_ != None: _roughness = _roughness_.upper()
else: _roughness = None
if _roughness == None or _roughness == "VERYROUGH" or _roughness == "ROUGH" or _roughness == "MEDIUMROUGH" or _roughness == "MEDIUMSMOOTH" or _roughness == "SMOOTH" or _roughness == "VERYSMOOTH": pass
else:
checkData = False
warning = "_roughness_ is not valid."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return checkData
def main(name, roughness, R_Value, thermAbsp, solAbsp, visAbsp):
if roughness == None: roughness = "Rough"
if thermAbsp == None: thermAbsp = 0.9
if solAbsp == None: solAbsp = 0.7
if visAbsp == None: visAbsp = 0.7
values = [name.upper(), roughness, R_Value, thermAbsp, solAbsp, visAbsp]
comments = ["Name", "Roughness", "Thermal Resistance {m2-K/W}", "Thermal Absorptance", "Solar Absorptance", "Visible Absorptance"]
materialStr = "Material:NoMass,\n"
for count, (value, comment) in enumerate(zip(values, comments)):
if count!= len(values) - 1:
materialStr += str(value) + ", !" + str(comment) + "\n"
else:
materialStr += str(value) + "; !" + str(comment)
return materialStr
if _name and _R_Value:
checkData = checkInputs()
if checkData == True:
EPMaterial = main(_name, _roughness_, _R_Value, _thermAbsp_, _solAbsp_, _visAbsp_)<|fim▁end|> | |
<|file_name|>glsl_std_450.py<|end_file_name|><|fim▁begin|>"""Instruction descriptions for the "SPIR-V Extended Instructions for GLSL"
version 1.00, revision 2.
"""
INST_FORMAT = {
1 : {
'name' : 'Round',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
2 : {
'name' : 'RoundEven',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
3 : {
'name' : 'Trunc',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
4 : {
'name': 'FAbs',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
5 : {
'name' : 'SAbs',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
6 : {
'name' : 'FSign',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
7 : {
'name' : 'SSign',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
8 : {
'name' : 'Floor',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
9 : {
'name' : 'Ceil',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
10 : {
'name' : 'Fract',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
11 : {
'name' : 'Radians',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
12 : {
'name' : 'Degrees',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
13 : {
'name' : 'Sin',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
14 : {
'name' : 'Cos',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
15 : {
'name' : 'Tan',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
16 : {
'name' : 'Asin',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
17 : {
'name' : 'Acos',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
18 : {
'name' : 'Atan',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
19 : {
'name' : 'Sinh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
20 : {
'name' : 'Cosh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
21 : {
'name' : 'Tanh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
22 : {
'name' : 'Asinh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
23 : {
'name' : 'Acosh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
24 : {
'name' : 'Atanh',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
25 : {
'name' : 'Atan2',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
26 : {
'name' : 'Pow',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
27 : {
'name' : 'Exp',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
28 : {
'name' : 'Log',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
29 : {
'name' : 'Exp2',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
30: {
'name' : 'Log2',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
31 : {
'name' : 'Sqrt',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
32 : {
'name' : 'Inversesqrt',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
33 : {
'name' : 'Determinant',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
34 : {
'name' : 'MatrixInverse',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
35 : {
'name' : 'Modf',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
36 : {
'name' : 'ModfStruct',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
37 : {
'name' : 'FMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
38 : {
'name' : 'UMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},<|fim▁hole|> 'name' : 'SMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
40 : {
'name' : 'FMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
41 : {
'name' : 'UMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
42 : {
'name' : 'SMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
43 : {
'name' : 'FClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
44 : {
'name' : 'UClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
45 : {
'name' : 'SClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
46 : {
'name' : 'FMix',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
48 : {
'name' : 'Step',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
49 : {
'name' : 'Smoothstep',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
50 : {
'name' : 'Fma',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
51 : {
'name' : 'Frexp',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
52 : {
'name' : 'FrexpStruct',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
53 : {
'name' : 'Ldexp',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
54 : {
'name' : 'PackSnorm4x8',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
55 : {
'name' : 'PackUnorm4x8',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
56 : {
'name' : 'PackSnorm2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
57 : {
'name' : 'PackUnrom2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
58 : {
'name' : 'PackHalf2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
59 : {
'name' : 'PackDouble2x32',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
60 : {
'name' : 'PackSnorm2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
61 : {
'name' : 'UnpackUnorm2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
62 : {
'name' : 'UnpackHalf2x16',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
63 : {
'name' : 'UnpackSnorm4x8',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
64 : {
'name' : 'UnpackUnorm4x8',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
65 : {
'name' : 'UnpackDouble2x32',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
66 : {
'name' : 'Length',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
67 : {
'name' : 'Distance',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : True
},
68 : {
'name' : 'Cross',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
69 : {
'name' : 'Normalize',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
70 : {
'name' : 'FaceForward',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
71 : {
'name' : 'Reflect',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
72 : {
'name' : 'Refract',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
73 : {
'name' : 'FindILsb',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
74 : {
'name' : 'FindSMsb',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
75 : {
'name' : 'FindUMsb',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
76 : {
'name' : 'InterpolateAtCentroid',
'operands' : ['Id'],
'has_side_effects' : False,
'is_commutative' : False
},
77 : {
'name' : 'InterpolateAtSample',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
78 : {
'name' : 'InterpolateAtOffset',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
79 : {
'name' : 'NMin',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
80 : {
'name' : 'NMax',
'operands' : ['Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
81 : {
'name' : 'NClamp',
'operands' : ['Id', 'Id', 'Id'],
'has_side_effects' : False,
'is_commutative' : False
},
}<|fim▁end|> | 39 : { |
<|file_name|>api_incursions.go<|end_file_name|><|fim▁begin|>package esidev
import (
"net/http"
"time"
"github.com/gorilla/mux"
)
<|fim▁hole|>var _ = mux.NewRouter
func GetIncursions(w http.ResponseWriter, r *http.Request) {
var (
localV interface{}
err error
datasource string
)
// shut up warnings
localV = localV
err = err
j := `[ {
"constellation_id" : 20000607,
"faction_id" : 500019,
"has_boss" : true,
"infested_solar_systems" : [ 30004148, 30004149, 30004150, 30004151, 30004152, 30004153, 30004154 ],
"influence" : 0.9,
"staging_solar_system_id" : 30004154,
"state" : "mobilizing",
"type" : "Incursion"
} ]`
if err := r.ParseForm(); err != nil {
errorOut(w, r, err)
return
}
if r.Form.Get("datasource") != "" {
localV, err = processParameters(datasource, r.Form.Get("datasource"))
if err != nil {
errorOut(w, r, err)
return
}
datasource = localV.(string)
}
if r.Form.Get("page") != "" {
var (
localPage int32
localIntPage interface{}
)
localIntPage, err := processParameters(localPage, r.Form.Get("page"))
if err != nil {
errorOut(w, r, err)
return
}
localPage = localIntPage.(int32)
if localPage > 1 {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte("[]"))
return
}
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(j))
}<|fim▁end|> | var _ time.Time |
<|file_name|>bootstrap-datetimepicker.min.js<|end_file_name|><|fim▁begin|>/*
//! version : 3.1.3
=========================================================
bootstrap-datetimepicker.js
https://github.com/Eonasdan/bootstrap-datetimepicker
=========================================================
The MIT License (MIT)
Copyright (c) 2014 Jonathan Peterson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
;(function (root, factory) {
'use strict';
if (typeof define === 'function' && define.amd) {
// AMD is used - Register as an anonymous module.
define(['jquery', 'moment'], factory);
} else if (typeof exports === 'object') {
factory(require('jquery'), require('moment'));
}
else {
// Neither AMD or CommonJS used. Use global variables.
if (!jQuery) {
throw new Error('bootstrap-datetimepicker requires jQuery to be loaded first');
}
if (!moment) {
throw new Error('bootstrap-datetimepicker requires moment.js to be loaded first');
}
factory(root.jQuery, moment);
}
}(this, function ($, moment) {
'use strict';
if (typeof moment === 'undefined') {
throw new Error('momentjs is required');
}
var dpgId = 0,
DateTimePicker = function (element, options) {
var defaults = $.fn.datetimepicker.defaults,
icons = {
time: 'glyphicon glyphicon-time',
date: 'glyphicon glyphicon-calendar',
up: 'glyphicon glyphicon-chevron-up',
down: 'glyphicon glyphicon-chevron-down'
},
picker = this,
errored = false,
dDate,
init = function () {
var icon = false, localeData, rInterval;
picker.options = $.extend({}, defaults, options);
picker.options.icons = $.extend({}, icons, picker.options.icons);
picker.element = $(element);
dataToOptions();
if (!(picker.options.pickTime || picker.options.pickDate)) {
throw new Error('Must choose at least one picker');
}
picker.id = dpgId++;
moment.locale(picker.options.language);
picker.date = moment();
picker.unset = false;
picker.isInput = picker.element.is('input');
picker.component = false;
if (picker.element.hasClass('input-group')) {
if (picker.element.find('.datepickerbutton').size() === 0) {//in case there is more then one 'input-group-addon' Issue #48
picker.component = picker.element.find('[class^="input-group-"]');
}
else {
picker.component = picker.element.find('.datepickerbutton');
}
}
picker.format = picker.options.format;
localeData = moment().localeData();
if (!picker.format) {
picker.format = (picker.options.pickDate ? localeData.longDateFormat('L') : '');
if (picker.options.pickDate && picker.options.pickTime) {
picker.format += ' ';
}
picker.format += (picker.options.pickTime ? localeData.longDateFormat('LT') : '');
if (picker.options.useSeconds) {
if (localeData.longDateFormat('LT').indexOf(' A') !== -1) {
picker.format = picker.format.split(' A')[0] + ':ss A';
}
else {
picker.format += ':ss';
}
}
}
picker.use24hours = (picker.format.toLowerCase().indexOf('a') < 0 && picker.format.indexOf('h') < 0);
if (picker.component) {
icon = picker.component.find('span');
}
if (picker.options.pickTime) {
if (icon) {
icon.addClass(picker.options.icons.time);
}
}
if (picker.options.pickDate) {
if (icon) {
icon.removeClass(picker.options.icons.time);
icon.addClass(picker.options.icons.date);
}
}
picker.options.widgetParent =
typeof picker.options.widgetParent === 'string' && picker.options.widgetParent ||
picker.element.parents().filter(function () {
return 'scroll' === $(this).css('overflow-y');
}).get(0) ||
'body';
picker.widget = $(getTemplate()).appendTo(picker.options.widgetParent);
picker.minViewMode = picker.options.minViewMode || 0;
if (typeof picker.minViewMode === 'string') {
switch (picker.minViewMode) {
case 'months':
picker.minViewMode = 1;
break;
case 'years':
picker.minViewMode = 2;
break;
default:
picker.minViewMode = 0;
break;
}
}
picker.viewMode = picker.options.viewMode || 0;
if (typeof picker.viewMode === 'string') {
switch (picker.viewMode) {
case 'months':
picker.viewMode = 1;
break;
case 'years':
picker.viewMode = 2;
break;
default:
picker.viewMode = 0;
break;
}
}
picker.viewMode = Math.max(picker.viewMode, picker.minViewMode);
picker.options.disabledDates = indexGivenDates(picker.options.disabledDates);
picker.options.enabledDates = indexGivenDates(picker.options.enabledDates);
picker.startViewMode = picker.viewMode;
picker.setMinDate(picker.options.minDate);
picker.setMaxDate(picker.options.maxDate);
fillDow();
fillMonths();
fillHours();
fillMinutes();
fillSeconds();
update();
showMode();
if (!getPickerInput().prop('disabled')) {
attachDatePickerEvents();
}
if (picker.options.defaultDate !== '' && getPickerInput().val() === '') {
picker.setValue(picker.options.defaultDate);
}
if (picker.options.minuteStepping !== 1) {
rInterval = picker.options.minuteStepping;
picker.date.minutes((Math.round(picker.date.minutes() / rInterval) * rInterval) % 60).seconds(0);
}
},
getPickerInput = function () {
var input;
if (picker.isInput) {
return picker.element;
}
input = picker.element.find('.datepickerinput');
if (input.size() === 0) {
input = picker.element.find('input');
}
else if (!input.is('input')) {
throw new Error('CSS class "datepickerinput" cannot be applied to non input element');
}
return input;
},
dataToOptions = function () {
var eData;
if (picker.element.is('input')) {
eData = picker.element.data();
}
else {
eData = picker.element.find('input').data();
}
if (eData.dateFormat !== undefined) {
picker.options.format = eData.dateFormat;
}
if (eData.datePickdate !== undefined) {
picker.options.pickDate = eData.datePickdate;
}
if (eData.datePicktime !== undefined) {
picker.options.pickTime = eData.datePicktime;
}
if (eData.dateUseminutes !== undefined) {
picker.options.useMinutes = eData.dateUseminutes;
}
if (eData.dateUseseconds !== undefined) {
picker.options.useSeconds = eData.dateUseseconds;
}
if (eData.dateUsecurrent !== undefined) {
picker.options.useCurrent = eData.dateUsecurrent;
}
if (eData.calendarWeeks !== undefined) {
picker.options.calendarWeeks = eData.calendarWeeks;
}
if (eData.dateMinutestepping !== undefined) {
picker.options.minuteStepping = eData.dateMinutestepping;
}
if (eData.dateMindate !== undefined) {
picker.options.minDate = eData.dateMindate;
}
if (eData.dateMaxdate !== undefined) {
picker.options.maxDate = eData.dateMaxdate;
}
if (eData.dateShowtoday !== undefined) {
picker.options.showToday = eData.dateShowtoday;
}
if (eData.dateCollapse !== undefined) {
picker.options.collapse = eData.dateCollapse;
}
if (eData.dateLanguage !== undefined) {
picker.options.language = eData.dateLanguage;
}
if (eData.dateDefaultdate !== undefined) {
picker.options.defaultDate = eData.dateDefaultdate;
}
if (eData.dateDisableddates !== undefined) {
picker.options.disabledDates = eData.dateDisableddates;
}
if (eData.dateEnableddates !== undefined) {
picker.options.enabledDates = eData.dateEnableddates;
}
if (eData.dateIcons !== undefined) {
picker.options.icons = eData.dateIcons;
}
if (eData.dateUsestrict !== undefined) {
picker.options.useStrict = eData.dateUsestrict;
}
if (eData.dateDirection !== undefined) {
picker.options.direction = eData.dateDirection;
}
if (eData.dateSidebyside !== undefined) {
picker.options.sideBySide = eData.dateSidebyside;
}
if (eData.dateDaysofweekdisabled !== undefined) {
picker.options.daysOfWeekDisabled = eData.dateDaysofweekdisabled;
}
},
place = function () {
var position = 'absolute',
offset = picker.component ? picker.component.offset() : picker.element.offset(),
$window = $(window),
placePosition;
picker.width = picker.component ? picker.component.outerWidth() : picker.element.outerWidth();
offset.top = offset.top + picker.element.outerHeight();
if (picker.options.direction === 'up') {
placePosition = 'top';
} else if (picker.options.direction === 'bottom') {
placePosition = 'bottom';
} else if (picker.options.direction === 'auto') {
if (offset.top + picker.widget.height() > $window.height() + $window.scrollTop() && picker.widget.height() + picker.element.outerHeight() < offset.top) {
placePosition = 'top';
} else {
placePosition = 'bottom';
}
}
if (placePosition === 'top') {
offset.bottom = $window.height() - offset.top + picker.element.outerHeight() + 3;
picker.widget.addClass('top').removeClass('bottom');
} else {
offset.top += 1;
picker.widget.addClass('bottom').removeClass('top');
}
if (picker.options.width !== undefined) {
picker.widget.width(picker.options.width);
}
if (picker.options.orientation === 'left') {
picker.widget.addClass('left-oriented');
offset.left = offset.left - picker.widget.width() + 20;
}
if (isInFixed()) {
position = 'fixed';
offset.top -= $window.scrollTop();
offset.left -= $window.scrollLeft();
}
if ($window.width() < offset.left + picker.widget.outerWidth()) {
offset.right = $window.width() - offset.left - picker.width;
offset.left = 'auto';
picker.widget.addClass('pull-right');
} else {
offset.right = 'auto';
picker.widget.removeClass('pull-right');
}
if (placePosition === 'top') {
picker.widget.css({
position: position,
bottom: offset.bottom,
top: 'auto',
left: offset.left,
right: offset.right
});
} else {
picker.widget.css({
position: position,
top: offset.top,
bottom: 'auto',
left: offset.left,
right: offset.right
});
}
},
notifyChange = function (oldDate, eventType) {
if (moment(picker.date).isSame(moment(oldDate)) && !errored) {
return;
}
errored = false;
picker.element.trigger({
type: 'dp.change',
date: moment(picker.date),
oldDate: moment(oldDate)
});
if (eventType !== 'change') {
picker.element.change();
}
},
notifyError = function (date) {
errored = true;
picker.element.trigger({
type: 'dp.error',
date: moment(date, picker.format, picker.options.useStrict)
});
},
update = function (newDate) {
moment.locale(picker.options.language);
var dateStr = newDate;
if (!dateStr) {
dateStr = getPickerInput().val();
if (dateStr) {
picker.date = moment(dateStr, picker.format, picker.options.useStrict);
}
if (!picker.date) {
picker.date = moment();
}
}
picker.viewDate = moment(picker.date).startOf('month');
fillDate();
fillTime();
},
fillDow = function () {
moment.locale(picker.options.language);
var html = $('<tr>'), weekdaysMin = moment.weekdaysMin(), i;
if (picker.options.calendarWeeks === true) {
html.append('<th class="cw">#</th>');
}
if (moment().localeData()._week.dow === 0) { // starts on Sunday
for (i = 0; i < 7; i++) {
html.append('<th class="dow">' + weekdaysMin[i] + '</th>');
}
} else {
for (i = 1; i < 8; i++) {
<|fim▁hole|> }
}
}
picker.widget.find('.datepicker-days thead').append(html);
},
fillMonths = function () {
moment.locale(picker.options.language);
var html = '', i, monthsShort = moment.monthsShort();
for (i = 0; i < 12; i++) {
html += '<span class="month">' + monthsShort[i] + '</span>';
}
picker.widget.find('.datepicker-months td').append(html);
},
fillDate = function () {
if (!picker.options.pickDate) {
return;
}
moment.locale(picker.options.language);
var year = picker.viewDate.year(),
month = picker.viewDate.month(),
startYear = picker.options.minDate.year(),
startMonth = picker.options.minDate.month(),
endYear = picker.options.maxDate.year(),
endMonth = picker.options.maxDate.month(),
currentDate,
prevMonth, nextMonth, html = [], row, clsName, i, days, yearCont, currentYear, months = moment.months();
picker.widget.find('.datepicker-days').find('.disabled').removeClass('disabled');
picker.widget.find('.datepicker-months').find('.disabled').removeClass('disabled');
picker.widget.find('.datepicker-years').find('.disabled').removeClass('disabled');
picker.widget.find('.datepicker-days th:eq(1)').text(
months[month] + ' ' + year);
prevMonth = moment(picker.viewDate, picker.format, picker.options.useStrict).subtract(1, 'months');
days = prevMonth.daysInMonth();
prevMonth.date(days).startOf('week');
if ((year === startYear && month <= startMonth) || year < startYear) {
picker.widget.find('.datepicker-days th:eq(0)').addClass('disabled');
}
if ((year === endYear && month >= endMonth) || year > endYear) {
picker.widget.find('.datepicker-days th:eq(2)').addClass('disabled');
}
nextMonth = moment(prevMonth).add(42, 'd');
while (prevMonth.isBefore(nextMonth)) {
if (prevMonth.weekday() === moment().startOf('week').weekday()) {
row = $('<tr>');
html.push(row);
if (picker.options.calendarWeeks === true) {
row.append('<td class="cw">' + prevMonth.week() + '</td>');
}
}
clsName = '';
if (prevMonth.year() < year || (prevMonth.year() === year && prevMonth.month() < month)) {
clsName += ' old';
} else if (prevMonth.year() > year || (prevMonth.year() === year && prevMonth.month() > month)) {
clsName += ' new';
}
if (prevMonth.isSame(moment({y: picker.date.year(), M: picker.date.month(), d: picker.date.date()}))) {
clsName += ' active';
}
if (isInDisableDates(prevMonth, 'day') || !isInEnableDates(prevMonth)) {
clsName += ' disabled';
}
if (picker.options.showToday === true) {
if (prevMonth.isSame(moment(), 'day')) {
clsName += ' today';
}
}
if (picker.options.daysOfWeekDisabled) {
for (i = 0; i < picker.options.daysOfWeekDisabled.length; i++) {
if (prevMonth.day() === picker.options.daysOfWeekDisabled[i]) {
clsName += ' disabled';
break;
}
}
}
row.append('<td class="day' + clsName + '">' + prevMonth.date() + '</td>');
currentDate = prevMonth.date();
prevMonth.add(1, 'd');
if (currentDate === prevMonth.date()) {
prevMonth.add(1, 'd');
}
}
picker.widget.find('.datepicker-days tbody').empty().append(html);
currentYear = picker.date.year();
months = picker.widget.find('.datepicker-months').find('th:eq(1)').text(year).end().find('span').removeClass('active');
if (currentYear === year) {
months.eq(picker.date.month()).addClass('active');
}
if (year - 1 < startYear) {
picker.widget.find('.datepicker-months th:eq(0)').addClass('disabled');
}
if (year + 1 > endYear) {
picker.widget.find('.datepicker-months th:eq(2)').addClass('disabled');
}
for (i = 0; i < 12; i++) {
if ((year === startYear && startMonth > i) || (year < startYear)) {
$(months[i]).addClass('disabled');
} else if ((year === endYear && endMonth < i) || (year > endYear)) {
$(months[i]).addClass('disabled');
}
}
html = '';
year = parseInt(year / 10, 10) * 10;
yearCont = picker.widget.find('.datepicker-years').find(
'th:eq(1)').text(year + '-' + (year + 9)).parents('table').find('td');
picker.widget.find('.datepicker-years').find('th').removeClass('disabled');
if (startYear > year) {
picker.widget.find('.datepicker-years').find('th:eq(0)').addClass('disabled');
}
if (endYear < year + 9) {
picker.widget.find('.datepicker-years').find('th:eq(2)').addClass('disabled');
}
year -= 1;
for (i = -1; i < 11; i++) {
html += '<span class="year' + (i === -1 || i === 10 ? ' old' : '') + (currentYear === year ? ' active' : '') + ((year < startYear || year > endYear) ? ' disabled' : '') + '">' + year + '</span>';
year += 1;
}
yearCont.html(html);
},
fillHours = function () {
moment.locale(picker.options.language);
var table = picker.widget.find('.timepicker .timepicker-hours table'), html = '', current, i, j;
table.parent().hide();
if (picker.use24hours) {
current = 0;
for (i = 0; i < 6; i += 1) {
html += '<tr>';
for (j = 0; j < 4; j += 1) {
html += '<td class="hour">' + padLeft(current.toString()) + '</td>';
current++;
}
html += '</tr>';
}
}
else {
current = 1;
for (i = 0; i < 3; i += 1) {
html += '<tr>';
for (j = 0; j < 4; j += 1) {
html += '<td class="hour">' + padLeft(current.toString()) + '</td>';
current++;
}
html += '</tr>';
}
}
table.html(html);
},
fillMinutes = function () {
var table = picker.widget.find('.timepicker .timepicker-minutes table'), html = '', current = 0, i, j, step = picker.options.minuteStepping;
table.parent().hide();
if (step === 1) {
step = 5;
}
for (i = 0; i < Math.ceil(60 / step / 4) ; i++) {
html += '<tr>';
for (j = 0; j < 4; j += 1) {
if (current < 60) {
html += '<td class="minute">' + padLeft(current.toString()) + '</td>';
current += step;
} else {
html += '<td></td>';
}
}
html += '</tr>';
}
table.html(html);
},
fillSeconds = function () {
var table = picker.widget.find('.timepicker .timepicker-seconds table'), html = '', current = 0, i, j;
table.parent().hide();
for (i = 0; i < 3; i++) {
html += '<tr>';
for (j = 0; j < 4; j += 1) {
html += '<td class="second">' + padLeft(current.toString()) + '</td>';
current += 5;
}
html += '</tr>';
}
table.html(html);
},
fillTime = function () {
if (!picker.date) {
return;
}
var timeComponents = picker.widget.find('.timepicker span[data-time-component]'),
hour = picker.date.hours(),
period = picker.date.format('A');
if (!picker.use24hours) {
if (hour === 0) {
hour = 12;
} else if (hour !== 12) {
hour = hour % 12;
}
picker.widget.find('.timepicker [data-action=togglePeriod]').text(period);
}
timeComponents.filter('[data-time-component=hours]').text(padLeft(hour));
timeComponents.filter('[data-time-component=minutes]').text(padLeft(picker.date.minutes()));
timeComponents.filter('[data-time-component=seconds]').text(padLeft(picker.date.second()));
},
click = function (e) {
e.stopPropagation();
e.preventDefault();
picker.unset = false;
var target = $(e.target).closest('span, td, th'), month, year, step, day, oldDate = moment(picker.date);
if (target.length === 1) {
if (!target.is('.disabled')) {
switch (target[0].nodeName.toLowerCase()) {
case 'th':
switch (target[0].className) {
case 'picker-switch':
showMode(1);
break;
case 'prev':
case 'next':
step = dpGlobal.modes[picker.viewMode].navStep;
if (target[0].className === 'prev') {
step = step * -1;
}
picker.viewDate.add(step, dpGlobal.modes[picker.viewMode].navFnc);
fillDate();
break;
}
break;
case 'span':
if (target.is('.month')) {
month = target.parent().find('span').index(target);
picker.viewDate.month(month);
} else {
year = parseInt(target.text(), 10) || 0;
picker.viewDate.year(year);
}
if (picker.viewMode === picker.minViewMode) {
picker.date = moment({
y: picker.viewDate.year(),
M: picker.viewDate.month(),
d: picker.viewDate.date(),
h: picker.date.hours(),
m: picker.date.minutes(),
s: picker.date.seconds()
});
set();
notifyChange(oldDate, e.type);
}
showMode(-1);
fillDate();
break;
case 'td':
if (target.is('.day')) {
day = parseInt(target.text(), 10) || 1;
month = picker.viewDate.month();
year = picker.viewDate.year();
if (target.is('.old')) {
if (month === 0) {
month = 11;
year -= 1;
} else {
month -= 1;
}
} else if (target.is('.new')) {
if (month === 11) {
month = 0;
year += 1;
} else {
month += 1;
}
}
picker.date = moment({
y: year,
M: month,
d: day,
h: picker.date.hours(),
m: picker.date.minutes(),
s: picker.date.seconds()
}
);
picker.viewDate = moment({
y: year, M: month, d: Math.min(28, day)
});
fillDate();
set();
notifyChange(oldDate, e.type);
}
break;
}
}
}
},
actions = {
incrementHours: function () {
checkDate('add', 'hours', 1);
},
incrementMinutes: function () {
checkDate('add', 'minutes', picker.options.minuteStepping);
},
incrementSeconds: function () {
checkDate('add', 'seconds', 1);
},
decrementHours: function () {
checkDate('subtract', 'hours', 1);
},
decrementMinutes: function () {
checkDate('subtract', 'minutes', picker.options.minuteStepping);
},
decrementSeconds: function () {
checkDate('subtract', 'seconds', 1);
},
togglePeriod: function () {
var hour = picker.date.hours();
if (hour >= 12) {
hour -= 12;
} else {
hour += 12;
}
picker.date.hours(hour);
},
showPicker: function () {
picker.widget.find('.timepicker > div:not(.timepicker-picker)').hide();
picker.widget.find('.timepicker .timepicker-picker').show();
},
showHours: function () {
picker.widget.find('.timepicker .timepicker-picker').hide();
picker.widget.find('.timepicker .timepicker-hours').show();
},
showMinutes: function () {
picker.widget.find('.timepicker .timepicker-picker').hide();
picker.widget.find('.timepicker .timepicker-minutes').show();
},
showSeconds: function () {
picker.widget.find('.timepicker .timepicker-picker').hide();
picker.widget.find('.timepicker .timepicker-seconds').show();
},
selectHour: function (e) {
var hour = parseInt($(e.target).text(), 10);
if (!picker.use24hours) {
if (picker.date.hours() >= 12) {
if (hour !== 12) {
hour += 12;
}
} else {
if (hour === 12) {
hour = 0;
}
}
}
picker.date.hours(hour);
actions.showPicker.call(picker);
},
selectMinute: function (e) {
picker.date.minutes(parseInt($(e.target).text(), 10));
actions.showPicker.call(picker);
},
selectSecond: function (e) {
picker.date.seconds(parseInt($(e.target).text(), 10));
actions.showPicker.call(picker);
}
},
doAction = function (e) {
var oldDate = moment(picker.date),
action = $(e.currentTarget).data('action'),
rv = actions[action].apply(picker, arguments);
stopEvent(e);
if (!picker.date) {
picker.date = moment({y: 1970});
}
set();
fillTime();
notifyChange(oldDate, e.type);
return rv;
},
stopEvent = function (e) {
e.stopPropagation();
e.preventDefault();
},
keydown = function (e) {
if (e.keyCode === 27) { // allow escape to hide picker
picker.hide();
}
},
change = function (e) {
moment.locale(picker.options.language);
var input = $(e.target), oldDate = moment(picker.date), newDate = moment(input.val(), picker.format, picker.options.useStrict);
if (newDate.isValid() && !isInDisableDates(newDate) && isInEnableDates(newDate)) {
update();
picker.setValue(newDate);
notifyChange(oldDate, e.type);
set();
}
else {
picker.viewDate = oldDate;
picker.unset = true;
notifyChange(oldDate, e.type);
notifyError(newDate);
}
},
showMode = function (dir) {
if (dir) {
picker.viewMode = Math.max(picker.minViewMode, Math.min(2, picker.viewMode + dir));
}
picker.widget.find('.datepicker > div').hide().filter('.datepicker-' + dpGlobal.modes[picker.viewMode].clsName).show();
},
attachDatePickerEvents = function () {
var $this, $parent, expanded, closed, collapseData;
picker.widget.on('click', '.datepicker *', $.proxy(click, this)); // this handles date picker clicks
picker.widget.on('click', '[data-action]', $.proxy(doAction, this)); // this handles time picker clicks
picker.widget.on('mousedown', $.proxy(stopEvent, this));
picker.element.on('keydown', $.proxy(keydown, this));
if (picker.options.pickDate && picker.options.pickTime) {
picker.widget.on('click.togglePicker', '.accordion-toggle', function (e) {
e.stopPropagation();
$this = $(this);
$parent = $this.closest('ul');
expanded = $parent.find('.in');
closed = $parent.find('.collapse:not(.in)');
if (expanded && expanded.length) {
collapseData = expanded.data('collapse');
if (collapseData && collapseData.transitioning) {
return;
}
expanded.collapse('hide');
closed.collapse('show');
$this.find('span').toggleClass(picker.options.icons.time + ' ' + picker.options.icons.date);
if (picker.component) {
picker.component.find('span').toggleClass(picker.options.icons.time + ' ' + picker.options.icons.date);
}
}
});
}
if (picker.isInput) {
picker.element.on({
'click': $.proxy(picker.show, this),
'focus': $.proxy(picker.show, this),
'change': $.proxy(change, this),
'blur': $.proxy(picker.hide, this)
});
} else {
picker.element.on({
'change': $.proxy(change, this)
}, 'input');
if (picker.component) {
picker.component.on('click', $.proxy(picker.show, this));
picker.component.on('mousedown', $.proxy(stopEvent, this));
} else {
picker.element.on('click', $.proxy(picker.show, this));
}
}
},
attachDatePickerGlobalEvents = function () {
$(window).on(
'resize.datetimepicker' + picker.id, $.proxy(place, this));
if (!picker.isInput) {
$(document).on(
'mousedown.datetimepicker' + picker.id, $.proxy(picker.hide, this));
}
},
detachDatePickerEvents = function () {
picker.widget.off('click', '.datepicker *', picker.click);
picker.widget.off('click', '[data-action]');
picker.widget.off('mousedown', picker.stopEvent);
if (picker.options.pickDate && picker.options.pickTime) {
picker.widget.off('click.togglePicker');
}
if (picker.isInput) {
picker.element.off({
'focus': picker.show,
'change': change,
'click': picker.show,
'blur' : picker.hide
});
} else {
picker.element.off({
'change': change
}, 'input');
if (picker.component) {
picker.component.off('click', picker.show);
picker.component.off('mousedown', picker.stopEvent);
} else {
picker.element.off('click', picker.show);
}
}
},
detachDatePickerGlobalEvents = function () {
$(window).off('resize.datetimepicker' + picker.id);
if (!picker.isInput) {
$(document).off('mousedown.datetimepicker' + picker.id);
}
},
isInFixed = function () {
if (picker.element) {
var parents = picker.element.parents(), inFixed = false, i;
for (i = 0; i < parents.length; i++) {
if ($(parents[i]).css('position') === 'fixed') {
inFixed = true;
break;
}
}
return inFixed;
} else {
return false;
}
},
set = function () {
moment.locale(picker.options.language);
var formatted = '';
if (!picker.unset) {
formatted = moment(picker.date).format(picker.format);
}
getPickerInput().val(formatted);
picker.element.data('date', formatted);
if (!picker.options.pickTime) {
picker.hide();
}
},
checkDate = function (direction, unit, amount) {
moment.locale(picker.options.language);
var newDate;
if (direction === 'add') {
newDate = moment(picker.date);
if (newDate.hours() === 23) {
newDate.add(amount, unit);
}
newDate.add(amount, unit);
}
else {
newDate = moment(picker.date).subtract(amount, unit);
}
if (isInDisableDates(moment(newDate.subtract(amount, unit))) || isInDisableDates(newDate)) {
notifyError(newDate.format(picker.format));
return;
}
if (direction === 'add') {
picker.date.add(amount, unit);
}
else {
picker.date.subtract(amount, unit);
}
picker.unset = false;
},
isInDisableDates = function (date, timeUnit) {
    // True when `date` lies outside [minDate, maxDate] or is listed in
    // the indexed disabledDates map. An optional `timeUnit` widens the
    // bounds to the start/end of that unit before comparing.
    moment.locale(picker.options.language);
    var maxDate = moment(picker.options.maxDate, picker.format, picker.options.useStrict),
        minDate = moment(picker.options.minDate, picker.format, picker.options.useStrict);
    if (timeUnit) {
        maxDate = maxDate.endOf(timeUnit);
        minDate = minDate.startOf(timeUnit);
    }
    if (date.isAfter(maxDate) || date.isBefore(minDate)) {
        return true;
    }
    if (picker.options.disabledDates === false) {
        // No blacklist configured.
        return false;
    }
    // disabledDates is an object keyed by 'YYYY-MM-DD' (see indexGivenDates).
    return picker.options.disabledDates[date.format('YYYY-MM-DD')] === true;
},
isInEnableDates = function (date) {
moment.locale(picker.options.language);
if (picker.options.enabledDates === false) {
return true;
}
return picker.options.enabledDates[date.format('YYYY-MM-DD')] === true;
},
indexGivenDates = function (givenDatesArray) {
// Store given enabledDates and disabledDates as keys.
// This way we can check their existence in O(1) time instead of looping through whole array.
// (for example: picker.options.enabledDates['2014-02-27'] === true)
var givenDatesIndexed = {}, givenDatesCount = 0, i;
for (i = 0; i < givenDatesArray.length; i++) {
if (moment.isMoment(givenDatesArray[i]) || givenDatesArray[i] instanceof Date) {
dDate = moment(givenDatesArray[i]);
} else {
dDate = moment(givenDatesArray[i], picker.format, picker.options.useStrict);
}
if (dDate.isValid()) {
givenDatesIndexed[dDate.format('YYYY-MM-DD')] = true;
givenDatesCount++;
}
}
if (givenDatesCount > 0) {
return givenDatesIndexed;
}
return false;
},
padLeft = function (string) {
string = string.toString();
if (string.length >= 2) {
return string;
}
return '0' + string;
},
getTemplate = function () {
    // Build the widget's inner HTML. The layout depends on which panes
    // are enabled: date+time (stacked with a collapse toggle, or
    // side-by-side), time-only, or date-only.
    var
        // Shared calendar header: prev / title (switch) / next. The title
        // spans one extra column when ISO week numbers are shown.
        headTemplate =
            '<thead>' +
                '<tr>' +
                    '<th class="prev">‹</th><th colspan="' + (picker.options.calendarWeeks ? '6' : '5') + '" class="picker-switch"></th><th class="next">›</th>' +
                '</tr>' +
            '</thead>',
        contTemplate =
            '<tbody><tr><td colspan="' + (picker.options.calendarWeeks ? '8' : '7') + '"></td></tr></tbody>',
        // Three stacked date views; only one is visible per viewMode.
        template = '<div class="datepicker-days">' +
            '<table class="table-condensed">' + headTemplate + '<tbody></tbody></table>' +
            '</div>' +
            '<div class="datepicker-months">' +
            '<table class="table-condensed">' + headTemplate + contTemplate + '</table>' +
            '</div>' +
            '<div class="datepicker-years">' +
            '<table class="table-condensed">' + headTemplate + contTemplate + '</table>' +
            '</div>',
        ret = '';
    if (picker.options.pickDate && picker.options.pickTime) {
        // Combined date+time widget.
        ret = '<div class="bootstrap-datetimepicker-widget' + (picker.options.sideBySide ? ' timepicker-sbs' : '') + (picker.use24hours ? ' usetwentyfour' : '') + ' dropdown-menu" style="z-index:9999 !important;">';
        if (picker.options.sideBySide) {
            ret += '<div class="row">' +
                '<div class="col-sm-6 datepicker">' + template + '</div>' +
                '<div class="col-sm-6 timepicker">' + tpGlobal.getTemplate() + '</div>' +
                '</div>';
        } else {
            // Stacked layout: date pane, a toggle row, then the time pane.
            ret += '<ul class="list-unstyled">' +
                '<li' + (picker.options.collapse ? ' class="collapse in"' : '') + '>' +
                '<div class="datepicker">' + template + '</div>' +
                '</li>' +
                '<li class="picker-switch accordion-toggle"><a class="btn" style="width:100%"><span class="' + picker.options.icons.time + '"></span></a></li>' +
                '<li' + (picker.options.collapse ? ' class="collapse"' : '') + '>' +
                '<div class="timepicker">' + tpGlobal.getTemplate() + '</div>' +
                '</li>' +
                '</ul>';
        }
        ret += '</div>';
        return ret;
    }
    if (picker.options.pickTime) {
        // Time-only widget.
        return (
            '<div class="bootstrap-datetimepicker-widget dropdown-menu">' +
            '<div class="timepicker">' + tpGlobal.getTemplate() + '</div>' +
            '</div>'
        );
    }
    // Date-only widget.
    return (
        '<div class="bootstrap-datetimepicker-widget dropdown-menu">' +
        '<div class="datepicker">' + template + '</div>' +
        '</div>'
    );
},
dpGlobal = {
    // Date-pane navigation modes, indexed by picker.viewMode:
    // 0 = day grid (header steps by 1 month),
    // 1 = month grid (header steps by 1 year),
    // 2 = year grid (header steps by 10 years).
    modes: [
        {
            clsName: 'days',
            navFnc: 'month',
            navStep: 1
        },
        {
            clsName: 'months',
            navFnc: 'year',
            navStep: 1
        },
        {
            clsName: 'years',
            navFnc: 'year',
            navStep: 10
        }
    ]
},
tpGlobal = {
    // Clickable spans in the time pane; data-action maps to handlers
    // bound via the '[data-action]' delegated click listener.
    hourTemplate: '<span data-action="showHours" data-time-component="hours" class="timepicker-hour"></span>',
    minuteTemplate: '<span data-action="showMinutes" data-time-component="minutes" class="timepicker-minute"></span>',
    secondTemplate: '<span data-action="showSeconds" data-time-component="seconds" class="timepicker-second"></span>'
};
tpGlobal.getTemplate = function () {
    // Build the time pane: a spinner table (increment row, value row,
    // decrement row) plus hidden hour/minute/second selection grids.
    // Optional columns depend on useMinutes/useSeconds, and the AM/PM
    // toggle column is omitted in 24-hour mode.
    return (
        '<div class="timepicker-picker">' +
        '<table class="table-condensed">' +
        '<tr>' +
        '<td><a href="#" class="btn" data-action="incrementHours"><span class="' + picker.options.icons.up + '"></span></a></td>' +
        '<td class="separator"></td>' +
        '<td>' + (picker.options.useMinutes ? '<a href="#" class="btn" data-action="incrementMinutes"><span class="' + picker.options.icons.up + '"></span></a>' : '') + '</td>' +
        (picker.options.useSeconds ?
        '<td class="separator"></td><td><a href="#" class="btn" data-action="incrementSeconds"><span class="' + picker.options.icons.up + '"></span></a></td>' : '') +
        (picker.use24hours ? '' : '<td class="separator"></td>') +
        '</tr>' +
        // Middle row: current hour/minute/second values and AM/PM toggle.
        '<tr>' +
        '<td>' + tpGlobal.hourTemplate + '</td> ' +
        '<td class="separator">:</td>' +
        '<td>' + (picker.options.useMinutes ? tpGlobal.minuteTemplate : '<span class="timepicker-minute">00</span>') + '</td> ' +
        (picker.options.useSeconds ?
        '<td class="separator">:</td><td>' + tpGlobal.secondTemplate + '</td>' : '') +
        (picker.use24hours ? '' : '<td class="separator"></td>' +
        '<td><button type="button" class="btn btn-primary" data-action="togglePeriod"></button></td>') +
        '</tr>' +
        // Bottom row: decrement spinners, mirroring the top row.
        '<tr>' +
        '<td><a href="#" class="btn" data-action="decrementHours"><span class="' + picker.options.icons.down + '"></span></a></td>' +
        '<td class="separator"></td>' +
        '<td>' + (picker.options.useMinutes ? '<a href="#" class="btn" data-action="decrementMinutes"><span class="' + picker.options.icons.down + '"></span></a>' : '') + '</td>' +
        (picker.options.useSeconds ?
        '<td class="separator"></td><td><a href="#" class="btn" data-action="decrementSeconds"><span class="' + picker.options.icons.down + '"></span></a></td>' : '') +
        (picker.use24hours ? '' : '<td class="separator"></td>') +
        '</tr>' +
        '</table>' +
        '</div>' +
        // Selection grids, shown when the corresponding value span is clicked.
        '<div class="timepicker-hours" data-action="selectHour">' +
        '<table class="table-condensed"></table>' +
        '</div>' +
        '<div class="timepicker-minutes" data-action="selectMinute">' +
        '<table class="table-condensed"></table>' +
        '</div>' +
        (picker.options.useSeconds ?
        '<div class="timepicker-seconds" data-action="selectSecond"><table class="table-condensed"></table></div>' : '')
    );
};
picker.destroy = function () {
    // Full teardown: unbind element and global handlers, remove the
    // widget's DOM, and drop the cached plugin instance from the element
    // (and from the addon component, when one exists).
    detachDatePickerEvents();
    detachDatePickerGlobalEvents();
    picker.widget.remove();
    picker.element.removeData('DateTimePicker');
    if (picker.component) {
        picker.component.removeData('DateTimePicker');
    }
};
picker.show = function (e) {
    // Open (or toggle) the picker widget. `e` is the triggering DOM
    // event, but show() may also be invoked without one (programmatic
    // calls), so every access to `e` must be guarded.
    if (getPickerInput().prop('disabled')) {
        return;
    }
    if (picker.options.useCurrent) {
        if (getPickerInput().val() === '') {
            if (picker.options.minuteStepping !== 1) {
                // Round "now" to the nearest minute step before seeding the input.
                var mDate = moment(),
                    rInterval = picker.options.minuteStepping;
                mDate.minutes((Math.round(mDate.minutes() / rInterval) * rInterval) % 60).seconds(0);
                picker.setValue(mDate.format(picker.format));
            } else {
                picker.setValue(moment().format(picker.format));
            }
            // FIX: `e` is undefined when show() is called programmatically;
            // the previous unconditional `e.type` access threw a TypeError.
            notifyChange('', e ? e.type : null);
        }
    }
    // if this is a click event on the input field and picker is already open don't hide it
    if (e && e.type === 'click' && picker.isInput && picker.widget.hasClass('picker-open')) {
        return;
    }
    if (picker.widget.hasClass('picker-open')) {
        picker.widget.hide();
        picker.widget.removeClass('picker-open');
    }
    else {
        picker.widget.show();
        picker.widget.addClass('picker-open');
    }
    picker.height = picker.component ? picker.component.outerHeight() : picker.element.outerHeight();
    place();
    picker.element.trigger({
        type: 'dp.show',
        date: moment(picker.date)
    });
    attachDatePickerGlobalEvents();
    if (e) {
        stopEvent(e);
    }
};
picker.disable = function () {
    // Disable the input and detach picker events; no-op when already disabled.
    var field = getPickerInput();
    if (!field.prop('disabled')) {
        field.prop('disabled', true);
        detachDatePickerEvents();
    }
};
picker.enable = function () {
    // Re-enable the input and reattach picker events; no-op when already enabled.
    var field = getPickerInput();
    if (field.prop('disabled')) {
        field.prop('disabled', false);
        attachDatePickerEvents();
    }
};
picker.hide = function () {
    // Close the widget — unless a Bootstrap collapse transition is
    // mid-flight, in which case bail out (hiding during the transition
    // would leave the panes in an inconsistent state).
    var collapse = picker.widget.find('.collapse'), i, collapseData;
    for (i = 0; i < collapse.length; i++) {
        collapseData = collapse.eq(i).data('collapse');
        if (collapseData && collapseData.transitioning) {
            return;
        }
    }
    picker.widget.hide();
    picker.widget.removeClass('picker-open');
    // Reset navigation to the configured start view for the next open.
    picker.viewMode = picker.startViewMode;
    showMode();
    picker.element.trigger({
        type: 'dp.hide',
        date: moment(picker.date)
    });
    detachDatePickerGlobalEvents();
};
picker.setValue = function (newDate) {
    // Set the picker to `newDate` — a moment, a Date, or a string in
    // picker.format. A falsy value clears the field.
    moment.locale(picker.options.language);
    if (!newDate) {
        picker.unset = true;
        set();
        // FIX: previously execution fell through and parsed null/'' with
        // moment, producing an invalid moment and firing a spurious
        // error notification every time the value was cleared.
        return;
    }
    picker.unset = false;
    // Normalize the input to a moment in the configured language.
    if (!moment.isMoment(newDate)) {
        newDate = (newDate instanceof Date) ? moment(newDate) : moment(newDate, picker.format, picker.options.useStrict);
    } else {
        newDate = newDate.locale(picker.options.language);
    }
    if (newDate.isValid()) {
        picker.date = newDate;
        set();
        // Point the calendar view at the month of the new date and repaint.
        picker.viewDate = moment({y: picker.date.year(), M: picker.date.month()});
        fillDate();
        fillTime();
    }
    else {
        notifyError(newDate);
    }
};
picker.getDate = function () {
    // Defensive copy of the current date, or null when nothing is set.
    return picker.unset ? null : moment(picker.date);
};
picker.setDate = function (date) {
    // Public setter: delegates to setValue and fires a change
    // notification comparing against the previous value.
    var previous = moment(picker.date);
    picker.setValue(date ? date : null);
    notifyChange(previous, 'function');
};
picker.setDisabledDates = function (dates) {
    // Index the blacklist for O(1) lookups, then repaint if initialised.
    picker.options.disabledDates = indexGivenDates(dates);
    if (!picker.viewDate) {
        return;
    }
    update();
};
picker.setEnabledDates = function (dates) {
    // Index the whitelist for O(1) lookups, then repaint if initialised.
    picker.options.enabledDates = indexGivenDates(dates);
    if (!picker.viewDate) {
        return;
    }
    update();
};
picker.setMaxDate = function (date) {
    // Update the upper bound; accepts a moment, Date, or formatted string.
    if (date === undefined) {
        return;
    }
    picker.options.maxDate = (moment.isMoment(date) || date instanceof Date) ?
        moment(date) :
        moment(date, picker.format, picker.options.useStrict);
    if (picker.viewDate) {
        update();
    }
};
picker.setMinDate = function (date) {
    // Update the lower bound; accepts a moment, Date, or formatted string.
    if (date === undefined) {
        return;
    }
    picker.options.minDate = (moment.isMoment(date) || date instanceof Date) ?
        moment(date) :
        moment(date, picker.format, picker.options.useStrict);
    if (picker.viewDate) {
        update();
    }
};
init();
};
$.fn.datetimepicker = function (options) {
    // jQuery plugin entry point: lazily attach one DateTimePicker per
    // matched element, preserving chainability.
    return this.each(function () {
        var el = $(this);
        if (!el.data('DateTimePicker')) {
            el.data('DateTimePicker', new DateTimePicker(this, options));
        }
    });
};
$.fn.datetimepicker.defaults = {
    format: false,                     // false = format derived elsewhere (presumably from locale — confirm)
    pickDate: true,                    // show the calendar pane
    pickTime: true,                    // show the clock pane
    useMinutes: true,                  // include the minutes column in the time pane
    useSeconds: false,                 // include the seconds column in the time pane
    useCurrent: true,                  // seed an empty input with "now" on open (see picker.show)
    calendarWeeks: false,              // widen the calendar header/body by one column (see getTemplate)
    minuteStepping: 1,                 // minute increment used when seeding/stepping
    minDate: moment({y: 1900}),        // earliest selectable date
    maxDate: moment().add(100, 'y'),   // latest selectable date
    showToday: true,
    collapse: true,                    // animate the date/time pane toggle
    language: moment.locale(),         // moment locale name
    defaultDate: '',
    disabledDates: false,              // false = no blacklist; indexed via indexGivenDates
    enabledDates: false,               // false = no whitelist; indexed via indexGivenDates
    icons: {},                         // icon CSS classes (e.g. icons.time, icons.up, icons.down)
    useStrict: false,                  // strict moment parsing
    direction: 'auto',
    sideBySide: false,                 // render date and time panes side by side
    daysOfWeekDisabled: [],
    widgetParent: false
};
}));<|fim▁end|> | if (i === 7) {
html.append('<th class="dow">' + weekdaysMin[0] + '</th>');
} else {
html.append('<th class="dow">' + weekdaysMin[i] + '</th>');
|
<|file_name|>test_debugger.py<|end_file_name|><|fim▁begin|>''' Test idlelib.debugger.
Coverage: 19%
'''
from idlelib import debugger
from test.support import requires
requires('gui')
import unittest
from tkinter import Tk
class NameSpaceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.root = Tk()
cls.root.withdraw()<|fim▁hole|> del cls.root
def test_init(self):
debugger.NamespaceViewer(self.root, 'Test')
if __name__ == '__main__':
unittest.main(verbosity=2)<|fim▁end|> |
@classmethod
def tearDownClass(cls):
cls.root.destroy() |
<|file_name|>todo.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import json
from frappe.model.document import Document
from frappe.utils import get_fullname, parse_addr
exclude_from_linked_with = True
class ToDo(Document):
    """Frappe DocType controller for assignment tasks (ToDo records)."""

    DocType = 'ToDo'

    def validate(self):
        """Prepare an assignment/removal comment to be posted on_update.

        On insert, records who assigned whom; on status change, records
        that the assignment was removed/completed. The previous DB value
        is only available here (not in on_update), hence the staging via
        self._assignment.
        """
        self._assignment = None
        if self.is_new():
            if self.assigned_by == self.allocated_to:
                assignment_message = frappe._("{0} self assigned this task: {1}").format(get_fullname(self.assigned_by), self.description)
            else:
                assignment_message = frappe._("{0} assigned {1}: {2}").format(get_fullname(self.assigned_by), get_fullname(self.allocated_to), self.description)
            self._assignment = {
                "text": assignment_message,
                "comment_type": "Assigned"
            }
        else:
            # NOTE the previous value is only available in validate method
            if self.get_db_value("status") != self.status:
                if self.allocated_to == frappe.session.user:
                    removal_message = frappe._("{0} removed their assignment.").format(
                        get_fullname(frappe.session.user))
                else:
                    removal_message = frappe._("Assignment of {0} removed by {1}").format(
                        get_fullname(self.allocated_to), get_fullname(frappe.session.user))
                self._assignment = {
                    "text": removal_message,
                    "comment_type": "Assignment Completed"
                }

    def on_update(self):
        # Post the comment staged in validate() and refresh the _assign
        # field on the referenced document.
        if self._assignment:
            self.add_assign_comment(**self._assignment)
        self.update_in_reference()

    def on_trash(self):
        # Clean up comment links and the reference document's _assign list.
        self.delete_communication_links()
        self.update_in_reference()

    def add_assign_comment(self, text, comment_type):
        """Add an assignment comment on the referenced document, if any."""
        if not (self.reference_type and self.reference_name):
            return
        frappe.get_doc(self.reference_type, self.reference_name).add_comment(comment_type, text)

    def delete_communication_links(self):
        # unlink todo from linked comments
        return frappe.db.delete("Communication Link", {
            "link_doctype": self.doctype,
            "link_name": self.name
        })

    def update_in_reference(self):
        """Rewrite the referenced document's _assign JSON list from all
        non-cancelled ToDos pointing at it.

        Missing-table/column errors are tolerated during install by
        returning early or adding the _assign column on the fly.
        """
        if not (self.reference_type and self.reference_name):
            return
        try:
            assignments = frappe.get_all("ToDo", filters={
                "reference_type": self.reference_type,
                "reference_name": self.reference_name,
                "status": ("!=", "Cancelled")
            }, pluck="allocated_to")
            # Most recent assignment first.
            assignments.reverse()
            frappe.db.set_value(self.reference_type, self.reference_name,
                "_assign", json.dumps(assignments), update_modified=False)
        except Exception as e:
            if frappe.db.is_table_missing(e) and frappe.flags.in_install:
                # no table
                return
            elif frappe.db.is_column_missing(e):
                # Add the missing _assign column, then retry once.
                from frappe.database.schema import add_column
                add_column(self.reference_type, "_assign", "Text")
                self.update_in_reference()
            else:
                raise

    @classmethod
    def get_owners(cls, filters=None):
        """Returns list of owners after applying filters on todo's.
        """
        rows = frappe.get_all(cls.DocType, filters=filters or {}, fields=['allocated_to'])
        return [parse_addr(row.allocated_to)[1] for row in rows if row.allocated_to]
# NOTE: todo is viewable if a user is an owner, or set as assigned_to value, or has any role that is allowed to access ToDo doctype.
def on_doctype_update():
    # Composite index used by update_in_reference's lookups on
    # (reference_type, reference_name).
    frappe.db.add_index("ToDo", ["reference_type", "reference_name"])
def get_permission_query_conditions(user):
    """Return SQL restricting ToDo list queries for `user`.

    Users holding any non-'All' role with ToDo access see everything
    (returns None = no restriction); everyone else only sees ToDos they
    are allocated to or assigned by.
    """
    user = user or frappe.session.user
    roles_with_access = frappe.permissions.get_doctype_roles('ToDo')
    if 'All' in roles_with_access:
        roles_with_access.remove('All')
    if set(roles_with_access) & set(frappe.get_roles(user)):
        return None
    return """(`tabToDo`.allocated_to = {user} or `tabToDo`.assigned_by = {user})""".format(
        user=frappe.db.escape(user))
def has_permission(doc, ptype="read", user=None):
    """True if `user` may access `doc`: either via a non-'All' role with
    ToDo permissions, or as the allocated/assigning participant."""
    user = user or frappe.session.user
    roles_with_access = frappe.permissions.get_doctype_roles('ToDo', ptype)
    if 'All' in roles_with_access:
        roles_with_access.remove('All')
    if set(roles_with_access) & set(frappe.get_roles(user)):
        return True
    return user in (doc.allocated_to, doc.assigned_by)
<|fim▁hole|> 'description': description
}).insert()<|fim▁end|> | @frappe.whitelist()
def new_todo(description):
frappe.get_doc({
'doctype': 'ToDo', |
<|file_name|>boltdb_store_test.go<|end_file_name|><|fim▁begin|>package storage
import (
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"
)
func TestSetBinlogPosition(t *testing.T) {
dir, err := ioutil.TempDir("", "example")
if err != nil {
log.Fatal(err)<|fim▁hole|> store := &BoltDBStore{}
store.Open(file)
store.SetBinlogPosition(&BinlogInformation{File: "binlog001.log", Position: 1234567890})
binlogInfo, err := store.GetBinlogPosition()
if err != nil || binlogInfo.File != "binlog001.log" || binlogInfo.Position != 1234567890 {
t.Error("failed")
}
}<|fim▁end|> | }
defer os.RemoveAll(dir)
file := filepath.Join(dir, "temp.db")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.