file_name | prefix | suffix | middle |
---|---|---|---|
linuxscsi.py | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic linux scsi subsystem and Multipath utilities.
Note, this is not iSCSI.
"""
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _, _LW, _LE
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)")
class LinuxSCSI(executor.Executor):
def __init__(self, root_helper, execute=putils.execute,
*args, **kwargs):
super(LinuxSCSI, self).__init__(root_helper, execute,
*args, **kwargs)
def echo_scsi_command(self, path, content):
"""Used to echo strings to scsi subsystem."""
args = ["-a", path]
kwargs = dict(process_input=content,
run_as_root=True,
root_helper=self._root_helper)
self._execute('tee', *args, **kwargs)
def get_name_from_path(self, path):
|
def remove_scsi_device(self, device):
"""Removes a scsi device based upon /dev/sdX name."""
path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
if os.path.exists(path):
# flush any outstanding IO first
self.flush_device_io(device)
LOG.debug("Remove SCSI device(%s) with %s" % (device, path))
self.echo_scsi_command(path, "1")
def wait_for_volume_removal(self, volume_path):
"""This is used to ensure that volumes are gone."""
def _wait_for_volume_removal(volume_path):
LOG.debug("Waiting for SCSI mount point %s to be removed.",
volume_path)
if os.path.exists(volume_path):
if self.tries >= self.scan_attempts:
msg = _LE("Exceeded the number of attempts to detect "
"volume removal.")
LOG.error(msg)
raise exception.VolumePathNotRemoved(
volume_path=volume_path)
LOG.debug("%(path)s still exists, rescanning. Try number: "
"%(tries)s",
{'path': volume_path, 'tries': self.tries})
self.tries = self.tries + 1
else:
LOG.debug("SCSI mount point %s has been removed.", volume_path)
raise loopingcall.LoopingCallDone()
# Setup a loop here to give the kernel time
# to remove the volume from /dev/disk/by-path/
self.tries = 0
self.scan_attempts = 3
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_volume_removal, volume_path)
timer.start(interval=2).wait()
def get_device_info(self, device):
(out, _err) = self._execute('sg_scan', device, run_as_root=True,
root_helper=self._root_helper)
dev_info = {'device': device, 'host': None,
'channel': None, 'id': None, 'lun': None}
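# Illustrative `sg_scan` output line parsed below (exact format may vary):
#   /dev/sdb: scsi1 channel=0 id=0 lun=1 [em]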
if out:
line = out.strip()
line = line.replace(device + ": ", "")
info = line.split(" ")
for item in info:
if '=' in item:
pair = item.split('=')
dev_info[pair[0]] = pair[1]
elif 'scsi' in item:
dev_info['host'] = item.replace('scsi', '')
return dev_info
def remove_multipath_device(self, multipath_name):
"""This removes LUNs associated with a multipath device
and the multipath device itself.
"""
LOG.debug("remove multipath device %s" % multipath_name)
mpath_dev = self.find_multipath_device(multipath_name)
if mpath_dev:
devices = mpath_dev['devices']
LOG.debug("multipath LUNs to remove %s" % devices)
for device in devices:
self.remove_scsi_device(device['device'])
self.flush_multipath_device(mpath_dev['id'])
def flush_device_io(self, device):
"""This is used to flush any remaining IO in the buffers."""
try:
LOG.debug("Flushing IO for device %s" % device)
self._execute('blockdev', '--flushbufs', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
msg = _("Failed to flush IO buffers prior to removing"
" device: (%(code)s)") % {'code': exc.exit_code}
LOG.warn(msg)
def flush_multipath_device(self, device):
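"""Flush a single unused multipath device map (`multipath -f <device>`)."""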
try:
LOG.debug("Flush multipath device %s" % device)
self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def flush_multipath_devices(self):
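"""Flush all unused multipath device maps (`multipath -F`)."""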
try:
self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def find_multipath_device(self, device):
"""Find a multipath device associated with a LUN device name.
device can be either a /dev/sdX entry or a multipath id.
"""
mdev = None
devices = []
out = None
try:
(out, _err) = self._execute('multipath', '-l', device,
run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
if out:
lines = out.strip()
lines = lines.split("\n")
lines = [line for line in lines
if not re.match(MULTIPATH_ERROR_REGEX, line)]
if lines:
# Use the device name, be it the WWID, mpathN or custom alias
# of a device to build the device path. This should be the
# first item on the first line of output from `multipath -l
# ${path}` or `multipath -l ${wwid}`.
mdev_name = lines[0].split(" ")[0]
mdev = '/dev/mapper/%s' % mdev_name
# Find the WWID for the LUN if we are using mpathN or aliases.
wwid_search = MULTIPATH_WWID_REGEX.search(lines[0])
if wwid_search is not None:
mdev_id = wwid_search.group('wwid')
else:
mdev_id = mdev_name
# Confirm that the device is present.
try:
os.stat(mdev)
except OSError:
LOG.warn(_LW("Couldn't find multipath device %s"), mdev)
return None
LOG.debug("Found multipath device = %(mdev)s"
% {'mdev': mdev})
device_lines = lines[3:]
for dev_line in device_lines:
if dev_line.find("policy") != -1:
continue
dev_line = dev_line.lstrip(' |-`')
dev_info = dev_line.split()
address = dev_info[0].split(":")
dev = {'device': '/dev/%s' % dev_info[1],
'host': address[0], 'channel': address[1],
'id': address[2], 'lun': address[3]
}
devices.append(dev)
if mdev is not None:
info = {"device": mdev,
"id": mdev_id,
"name": mdev_name,
"devices": devices}
return info
return None
| """Translates /dev/disk/by-path/ entry to /dev/sdX."""
name = os.path.realpath(path)
if name.startswith("/dev/"):
return name
else:
return None |
17.bin_triangle.py | def | ():
n = int(input("Enter no. of rows: "))
for i in range(n):
for j in range(i+1):
print(str((i + j + 1) % 2) + " ", end='')
print()
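# For example, entering 4 prints (each row ends with a trailing space):
# 1
# 0 1
# 1 0 1
# 0 1 0 1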
if __name__ == '__main__':
main()
| main |
rxCommands.ts | import { BehaviorSubject, empty, ObservableInput } from 'rxjs';
import { rxUser } from './rxUser';
import { filter, switchMap, map, tap } from 'rxjs/operators';
import { firestore } from './firebase';
import { collectionData } from 'rxfire/firestore';
import { ICommand } from '..';
import { Command } from './db/db';
/**
* @description rxCommands is the behavior subject for getting the layout of all commands
 * @note this pulls directly from Firebase rather than the local database; commands don't change very often, so they don't need to be cached in the local db the way users are
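 * @example
 * // Hypothetical usage sketch (not from the original source):
 * // rxCommands.subscribe(commands => commands.forEach(c => console.log(c.name)));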
*/
export const rxCommands = rxUser.pipe(
filter(x => !!x),
switchMap(
(authUser): ObservableInput<ICommand[]> => {
if (!authUser) {
return empty();
}
const ref = firestore
.collection('users')
.doc(authUser.uid)
.collection('commands');
return collectionData(ref);
}
),
map((commands: ICommand[]) =>
commands.map(
(command): Command => {
let newCommand = new Command(
command.name,
| command.name,
command.permissions || [],
command.reply,
command.cost || 0,
command.enabled
);
if (newCommand.name.includes(' ')) {
newCommand.delete();
newCommand = new Command(
// Use a global regex so that every space is replaced, not only the first.
command.name.replace(/ /g, '-'),
command.name.replace(/ /g, '-'),
command.permissions || [],
command.reply,
command.cost || 0,
command.enabled
);
newCommand.save();
}
return newCommand;
}
)
)
); | |
app.module.ts | import { UserLoginModule } from './userLogin/userlogin.module';
import { Module } from '@nestjs/common';
import { AuthModule } from './auth/auth.module';
import { MongooseModule } from '@nestjs/mongoose';
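// The MongoDB connection string below is assembled from environment variables
// (MONGODB_USER, MONGODB_PASSWORD, MONGODB_DB).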
@Module({
imports: [AuthModule, MongooseModule.forRoot(
`mongodb+srv://${process.env.MONGODB_USER}:${process.env.MONGODB_PASSWORD}@cluster0.uzbvj.mongodb.net/${process.env.MONGODB_DB}?retryWrites=true&w=majority`
), UserLoginModule],
})
export class | {}
| AppModule |
hangman.py | import random
NUMBERS = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
def read_file():
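"""Load the word list from ./archivos/data.txt, one word per line."""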
WORDS = []
with open("./archivos/data.txt", "r", encoding="utf-8") as f:
for line in f:
WORDS.append(line.replace("\n", ""))
return WORDS
def random_word(words):
idx = random.randint(0, len(words) - 1)
return words[idx]
def main():
print("* - * - * - * - * - * - * - * - *- *- *")
print("B I E N V E N I D O A H A N G M A N")
print("* - * - * - * - * - * - * - * - *- *- *")
print("\n")
print("¡Adivina la palabra oculta!")
tries = 0
words = read_file()
current_word = random_word(words)
hidden_word = ['-' for i in current_word]
print(hidden_word)
| current_letter = input("Ingresa una letra: ")
for i in range(len(NUMBERS)):
if current_letter == NUMBERS[i]:
raise ValueError("No ingreses números, solamente letras, por favor")
letter_indexes = []
for idx in range(len(current_word)):
if current_letter == current_word[idx]:
letter_indexes.append(idx)
if len(letter_indexes) == 0:
tries += 1
if tries == 7:
print(hidden_word)
print("")
print("¡Perdiste! La palabra correta era {}".format(current_word))
break
else:
for idx in letter_indexes:
hidden_word[idx] = current_letter
print(hidden_word)
letter_indexes = []
try:
hidden_word.index("-")
except ValueError:
print("¡Ganaste! La palabra era {}".format(current_word))
break
except ValueError as ve:
print(ve)
if __name__ == "__main__":
main() | try:
while True: |
api.module.ts | import { DomainModule } from '@domain/domain.module';
import { Module } from '@nestjs/common';
import CategoriesController from './categories/categories.controller';
import LanguagesController from './languages/languages.controller';
import PaymentMethodsController from './paymentMethod/paymentMethods.controller';
import ScheduleController from './schedule/schedule.controller';
import BusinessController from './business/business.controller';
| @Module({
imports: [DomainModule],
controllers: [
CategoriesController,
LanguagesController,
PaymentMethodsController,
ScheduleController,
BusinessController,
],
})
export class ApiModule {} | |
delon.module.ts | /**
* 进一步对基础模块的导入提炼
* 有关模块注册指导原则请参考:https://ng-alain.com/docs/module
*/
import { NgModule, Optional, SkipSelf, ModuleWithProviders } from '@angular/core';
import { throwIfAlreadyLoaded } from '@core';
import { AlainThemeModule } from '@delon/theme';
import { DelonACLModule } from '@delon/acl';
// #region mock
import { DelonMockModule } from '@delon/mock'; |
// #region reuse-tab
/**
 * If you need [route reuse](https://ng-alain.com/components/reuse-tab), you must:
 * 1. Add `REUSETAB_PROVIDES`
 * 2. In `src/app/layout/default/default.component.html`, change:
 * ```html
 * <section class="alain-default__content">
 * <reuse-tab></reuse-tab>
 * <router-outlet></router-outlet>
 * </section>
 * ```
 */
import { RouteReuseStrategy } from '@angular/router';
import { ReuseTabService, ReuseTabStrategy } from '@delon/abc/reuse-tab';
const REUSETAB_PROVIDES = [
// {
// provide: RouteReuseStrategy,
// useClass: ReuseTabStrategy,
// deps: [ReuseTabService],
// },
];
// #endregion
// #region global config functions
import { PageHeaderConfig } from '@delon/abc';
export function fnPageHeaderConfig(): PageHeaderConfig {
return {
...new PageHeaderConfig(),
homeI18n: 'home',
};
}
import { DelonAuthConfig } from '@delon/auth';
export function fnDelonAuthConfig(): DelonAuthConfig {
return {
...new DelonAuthConfig(),
login_url: '/passport/login',
};
}
// tslint:disable-next-line: no-duplicate-imports
import { STConfig } from '@delon/abc';
export function fnSTConfig(): STConfig {
return {
...new STConfig(),
modal: { size: 'lg' },
};
}
const GLOBAL_CONFIG_PROVIDES = [
// TIPS: @delon/abc has a large number of global configuration options, e.g. setting the default page size of all `st` tables to 20 rows
{ provide: STConfig, useFactory: fnSTConfig },
{ provide: PageHeaderConfig, useFactory: fnPageHeaderConfig },
{ provide: DelonAuthConfig, useFactory: fnDelonAuthConfig },
];
// #endregion
@NgModule({
imports: [AlainThemeModule.forRoot(), DelonACLModule.forRoot(), ...MOCK_MODULES],
})
export class DelonModule {
constructor(@Optional() @SkipSelf() parentModule: DelonModule) {
throwIfAlreadyLoaded(parentModule, 'DelonModule');
}
static forRoot(): ModuleWithProviders {
return {
ngModule: DelonModule,
providers: [...REUSETAB_PROVIDES, ...GLOBAL_CONFIG_PROVIDES],
};
}
} | import * as MOCKDATA from '../../_mock';
import { environment } from '@env/environment';
const MOCK_MODULES = true ? [DelonMockModule.forRoot({ data: MOCKDATA })] : [];
// #endregion |
mesh.rs | //! This module defines a [polygon mesh](https://en.wikipedia.org/wiki/Polygon_mesh).
use crate::prelude::*;
use crate::control::callback::CallbackFn;
use crate::data::dirty;
use crate::data::dirty::traits::*;
use crate::debug::stats::Stats;
use crate::system::gpu::shader::Context;
use num_enum::IntoPrimitive;
use enso_shapely::shared;
// ===============
// === Exports ===
// ===============
/// Common data types.
pub mod types {
pub use super::Mesh;
pub use crate::system::gpu::types::*;
}
pub use types::*;
// --------------------------------------------------
/// Container for all scopes owned by a mesh.
#[derive(Debug)]
pub struct Scopes {
/// Point Scope. A point is simply a point in space. Points are often assigned variables
/// such as 'position' or 'color'.
pub point: AttributeScope,
/// Vertex Scope. A vertex is a reference to a point. Primitives use vertices to reference
/// points. For example, the corners of a polygon, the center of a sphere, or a control vertex
/// of a spline curve. Primitives can share points, while vertices are unique to a primitive.
pub vertex: AttributeScope,
/// Primitive Scope. Primitives refer to a unit of geometry, lower-level than an object but
/// above points. There are several different types of primitives, including polygon faces or
/// Bezier/NURBS surfaces.
pub primitive: AttributeScope,
/// Instance Scope. Instances are virtual copies of the same geometry. They share point,
/// vertex, and primitive variables.
pub instance: AttributeScope,
}
/// A singleton for each scope type.
#[derive(Copy, Clone, Debug, Display, IntoPrimitive, PartialEq)]
#[allow(missing_docs)]
#[repr(u8)]
pub enum ScopeType {
Point,
Vertex,
Primitive,
Instance,
}
impl From<ScopeType> for usize {
fn from(t: ScopeType) -> Self {
Into::<u8>::into(t).into()
}
}
// === Types ===
/// Dirty flag remembering which scopes were mutated.
pub type ScopesDirty = dirty::SharedEnum<u8, ScopeType, Box<dyn Fn()>>;
// === Implementation ===
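/// Expands to a dirty-flag check plus a conditional `update()` call for every listed scope,
/// e.g. `update_scopes!{$self.{point}{Point}}` updates the point scope when its flag is set.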
macro_rules! update_scopes {
($self:ident . {$($name:ident),*} {$($uname:ident),*}) => {$(
if $self.scopes_dirty.check(&ScopeType::$uname) {
$self.scopes.$name.update()
}
)*}
}
// ============
// === Mesh ===
// ============
// === Definition ===
shared! { Mesh
/// A polygon mesh is a collection of vertices, edges and faces that defines the shape of a
/// polyhedral object. Mesh describes the shape of the display element. It consists of several
/// scopes containing sets of variables. See the documentation of `Scopes` to learn more.
///
/// Please note that there are other, higher-level scopes defined by other structures, including:
///
/// - Symbol Scope
/// Object refers to the whole geometry with all of its instances.
///
/// - Global Scope
/// Global scope is shared by all objects and it contains some universal global variables, like
/// the current 'time' counter.
///
/// Each scope can contain named attributes which can be accessed from within materials. If the
/// same name is defined in several scopes, it resolves to the variable defined in the most
/// specific scope. For example, if the variable 'color' is defined in both the 'instance' and
/// 'point' scopes, the 'point' definition shadows the other one.
#[derive(Debug)]
pub struct MeshData {
scopes : Scopes,
scopes_dirty : ScopesDirty,
logger : Logger,
stats : Stats,
}
impl {
/// Creates new mesh with attached dirty callback.
pub fn new<OnMut:CallbackFn>
(logger:Logger, stats:&Stats, on_mut:OnMut) -> Self {
stats.inc_mesh_count();
let stats = stats.clone();
let scopes_logger = Logger::new_sub(&logger,"scopes_dirty");
let scopes_dirty = ScopesDirty::new(scopes_logger,Box::new(on_mut));
let scopes = debug!(logger, "Initializing.", || {
macro_rules! new_scope { ({ $($name:ident),* } { $($uname:ident),* } ) => {$(
let sub_logger = Logger::new_sub(&logger,stringify!($name));
let status_mod = ScopeType::$uname;
let scs_dirty = scopes_dirty.clone_ref();
let callback = move || {scs_dirty.set(status_mod)};
let $name = AttributeScope::new(sub_logger,&stats,callback);
)*}}
new_scope! ({point,vertex,primitive,instance}{Point,Vertex,Primitive,Instance});
Scopes {point,vertex,primitive,instance}
});
Self {scopes,scopes_dirty,logger,stats}
}
/// Point scope accessor.
pub fn point_scope(&self) -> AttributeScope {
self.scopes.point.clone_ref()
}
/// Vertex scope accessor.
pub fn vertex_scope(&self) -> AttributeScope {
self.scopes.vertex.clone_ref()
}
/// Primitive scope accessor.
pub fn primitive_scope(&self) -> AttributeScope {
self.scopes.primitive.clone_ref()
}
/// Instance scope accessor.
pub fn instance_scope(&self) -> AttributeScope {
self.scopes.instance.clone_ref()
}
/// Check dirty flags and update the state accordingly.
pub fn update(&mut self) {
debug!(self.logger, "Updating.", || {
if self.scopes_dirty.check_all() {
update_scopes!{
self.{point,vertex,primitive,instance}{Point,Vertex,Primitive,Instance}
}
self.scopes_dirty.unset_all()
}
})
}
/// Browses all scopes and finds where a variable was defined. Scopes are browsed in a
/// hierarchical order. To learn more about the ordering see the documentation of `Mesh`.
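/// For example, if `color` is defined in both the point and instance scopes, this returns
/// `Some(ScopeType::Point)`, because the point scope is checked first.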
pub fn lookup_variable<S:Str>(&self, name:S) -> Option<ScopeType> {
let name = name.as_ref();
if self.scopes.point . contains(name) { Some(ScopeType::Point) }
else if self.scopes.vertex . contains(name) { Some(ScopeType::Vertex) }
else if self.scopes.primitive . contains(name) { Some(ScopeType::Primitive) }
else if self.scopes.instance . contains(name) { Some(ScopeType::Instance) }
else {None}
}
/// Gets reference to scope based on the scope type.
pub fn scope_by_type(&self, scope_type:ScopeType) -> AttributeScope {
match scope_type {
ScopeType::Point => &self.scopes.point,
ScopeType::Vertex => &self.scopes.vertex,
ScopeType::Primitive => &self.scopes.primitive,
ScopeType::Instance => &self.scopes.instance,
}.clone_ref()
}
/// Set the WebGL context. See the main architecture docs of this library to learn more.
pub(crate) fn set_context(&self, context:Option<&Context>) {
macro_rules! set_scope_context { ($($name:ident),*) => {
$( self.scopes.$name.set_context(context); )*
}}
set_scope_context!(point,vertex,primitive,instance);
}
}}
impl Drop for MeshData {
fn drop(&mut self) |
}
| {
self.stats.dec_mesh_count();
} |
bitcoin_es.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="es" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About ViretCoin</source>
<translation>Acerca de ViretCoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>ViretCoin</b> version</source>
<translation><b>ViretCoin</b> versión</translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The ViretCoin developers</source>
<translation>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The ViretCoin developers</translation>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Este es un software experimental.
Distribuido bajo la licencia MIT/X11, vea el archivo adjunto
COPYING o http://www.opensource.org/licenses/mit-license.php.
Este producto incluye software desarrollado por OpenSSL Project para su uso en
el OpenSSL Toolkit (http://www.openssl.org/) y software criptográfico escrito por
Eric Young ([email protected]) y el software UPnP escrito por Thomas Bernard.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Libreta de Direcciones</translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Haga doble clic para editar una etiqueta o dirección </translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Crear una nueva dirección</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copiar la dirección seleccionada al portapapeles del sistema</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Nueva Dirección</translation>
</message>
<message>
<location line="-46"/>
<source>These are your ViretCoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Estas son las direcciones de ViretCoin para recibir pagos. Es posible que desee dar una diferente a cada remitente para que pueda realizar un seguimiento de quien te está pagando.</translation>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>&Copiar dirección</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Enseñar &QR Code</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a ViretCoin address</source>
<translation>Firmar un mensaje para demostrar que es dueño de su dirección de ViretCoin</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Firmar &Mensaje</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Borrar de la lista la dirección seleccionada</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified ViretCoin address</source>
<translation>Verifique el mensaje para asegurarse que fue firmado por una dirección específica de ViretCoin</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Verifique Mensaje</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Eliminar</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>Copiar &etiqueta</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>&Editar</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation>Exportar Data de Libro de Direcciones</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivos de columnas separadas por coma (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Error exportando</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>No se pudo escribir en el archivo %1</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Diálogo de contraseña</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Introducir contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nueva contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Repita la nueva contraseña</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation>Sirve para desactivar SendMoney cuando la cuenta del Sistema Operativo está comprometida. No ofrece seguridad real.</translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation>Para "Staking" solamente</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Introduzca la nueva contraseña del monedero.<br/>Por favor elija una con <b>10 o más caracteres aleatorios</b>, u <b>ocho o más palabras</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Cifrar el monedero</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Esta operación requiere su contraseña para desbloquear el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Desbloquear monedero</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Esta operación requiere su contraseña para descifrar el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Descifrar el monedero</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Cambiar contraseña</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Introduzca la contraseña anterior del monedero y la nueva. </translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirmar cifrado del monedero</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation>Advertencia: Si encripta su cartera y pierde su frase de contraseña, ¡puede <b>PERDER TODAS SUS MONEDAS</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>¿Seguro que desea cifrar su monedero?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANTE: Cualquier copia de seguridad que haya realizado previamente de su archivo de monedero debe reemplazarse con el nuevo archivo de monedero cifrado. Por razones de seguridad, las copias de seguridad previas del archivo de monedero no cifradas serán inservibles en cuanto comience a usar el nuevo monedero cifrado.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Aviso: ¡La tecla de bloqueo de mayúsculas está activada!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Monedero cifrado</translation>
</message>
<message>
<location line="-58"/>
<source>ViretCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation>ViretCoin cerrará para terminar el proceso de encriptación. Recuerde que la encriptación de su monedero no puede proteger completamente que sus monedas sean robadas por malware infectando su computadora.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Ha fallado el cifrado del monedero</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Ha fallado el cifrado del monedero debido a un error interno. El monedero no ha sido cifrado.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Las contraseñas no coinciden.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Ha fallado el desbloqueo del monedero</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La contraseña introducida para descifrar el monedero es incorrecta.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Ha fallado el descifrado del monedero</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Se ha cambiado correctamente la contraseña del monedero.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+280"/>
<source>Sign &message...</source>
<translation>Firmar &mensaje...</translation>
</message>
<message>
<location line="+242"/>
<source>Synchronizing with network...</source>
<translation>Sincronizando con la red…</translation>
</message>
<message>
<location line="-308"/>
<source>&Overview</source>
<translation>&Vista general</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Mostrar vista general del monedero</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transacciones</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Examinar el historial de transacciones</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation>&Libreta de Direcciones</translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Editar la lista de direcciones y etiquetas almacenadas</translation>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation>&Recibir monedas</translation>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Mostrar la lista de direcciones para recibir pagos</translation>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation>&Enviar monedas</translation>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>&Salir</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Salir de la aplicación</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about ViretCoin</source>
<translation>Mostrar información sobre ViretCoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Acerca de &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Mostrar información acerca de Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opciones...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&Cifrar monedero…</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Guardar copia de seguridad del monedero...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Cambiar la contraseña…</translation>
</message>
<message numerus="yes">
<location line="+250"/>
<source>~%n block(s) remaining</source>
<translation><numerusform>~%n bloque restante</numerusform><numerusform>~%n bloques restantes</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation>Descargado %1 de %2 bloques de historial de transacciones (%3% completado).</translation>
</message>
<message>
<location line="-247"/>
<source>&Export...</source>
<translation>&Exportar...</translation>
</message>
<message>
<location line="-62"/>
<source>Send coins to a ViretCoin address</source>
<translation>Enviar monedas a una dirección de ViretCoin</translation>
</message>
<message>
<location line="+45"/>
<source>Modify configuration options for ViretCoin</source>
<translation>Modificar las opciones de configuración para ViretCoin</translation>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation>Exportar los datos en la ficha actual a un archivo</translation>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation>Cifrar o descifrar el monedero</translation>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>Copia de seguridad del monedero en otra ubicación</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Cambiar la contraseña utilizada para el cifrado del monedero</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>Ventana de &depuración</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Abrir la consola de depuración y diagnóstico</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Verificar mensaje...</translation>
</message>
<message>
<location line="-200"/>
<source>ViretCoin</source>
<translation>ViretCoin</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<location line="+178"/>
<source>&About ViretCoin</source>
<translation>Acerca de ViretCoin</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Mostrar / Ocultar</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation>Desbloquear el monedero</translation>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation>&Bloquear monedero</translation>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation>Bloquear monedero</translation>
</message>
<message>
<location line="+34"/>
<source>&File</source>
<translation>&Archivo</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Configuración</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>A&yuda</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Barra de pestañas</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation>Barra de herramientas de acciones</translation>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>ViretCoin client</source>
<translation>Cliente ViretCoin</translation>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to ViretCoin network</source>
<translation><numerusform>%n conexión activa a la red ViretCoin</numerusform><numerusform>%n conexiones activas a la red ViretCoin</numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation>Descargado %1 bloques de historial de transacciones.</translation>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation>Staking.<br>Su contribución es %1<br>Contribución de la red es %2<br>Tiempo esperado para ganar la recompensa es %3</translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation>No está "Staking" porque el monedero está bloqueado</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation>No está "Staking" porque el monedero está desconectado</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation>No está "Staking" porque el monedero está sincronizando</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation>No está "Staking" ya que no tiene monedas maduras</translation>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation><numerusform>hace %n segundo</numerusform><numerusform>hace %n segundos</numerusform></translation>
</message>
<message>
<location line="-284"/>
<source>&Unlock Wallet...</source>
<translation>&Desbloquear Monedero...</translation>
</message>
<message numerus="yes">
<location line="+288"/>
<source>%n minute(s) ago</source>
<translation><numerusform>hace %n minuto</numerusform><numerusform>hace %n minutos</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation><numerusform>hace %n hora</numerusform><numerusform>hace %n horas</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation><numerusform>hace %n día</numerusform><numerusform>hace %n días</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Actualizado</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Actualizando...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation>El último bloque recibido se generó %1.</translation>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Esta transacción se encuentra sobre el límite de tamaño. Usted todavía puede enviarla a un costo de %1, que va a los nodos que procesan sus transacciones y ayuda a apoyar la red. ¿Quiere pagar la cuota?</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation>Confirme tarifa de transacción</translation>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Transacción enviada</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Transacción entrante</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Fecha: %1
Cantidad: %2
Tipo: %3
Dirección: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation>Manejar URI</translation>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid ViretCoin address or malformed URI parameters.</source>
<translation>URI no se puede analizar! Esto puede ser causado por una dirección de ViretCoin no válida o parámetros de URI malformados.</translation>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>desbloqueado</b></translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>bloqueado</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation>Copia de Seguridad de Monedero</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Data de Monedero (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Copia de Seguridad ha fracasado</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Hubo un error al tratar de salvar los datos de su monedero a la nueva ubicación.</translation>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation><numerusform>%n segundo</numerusform><numerusform>%n segundos</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation><numerusform>%n minuto</numerusform><numerusform>%n minutos</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n día</numerusform><numerusform>%n días</numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation>No está "Staking"</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. ViretCoin can no longer continue safely and will quit.</source>
<translation>Se ha producido un error fatal. ViretCoin ya no puede continuar de forma segura y cerrará.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation>Alerta de red</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation>Control de Moneda</translation>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>Tasa:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Envío pequeño:</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation>no</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>Después de tasas:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>Cambio:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>(des)marcar todos</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>Modo árbol</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>Modo lista</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Cuantía</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>Confirmaciones</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>Prioridad</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Copiar cuantía</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Copiar identificador de transacción</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>Copiar donación</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Copiar después de aplicar donación</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Copiar bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Copiar prioridad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Copiar envío pequeño</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Copiar cambio</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>lo más alto</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>alto</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>medio-alto</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>medio</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>bajo-medio</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>bajo</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>lo más bajo</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation>DUST</translation>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>sí</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation>Esta etiqueta se convierte en rojo, si el tamaño de la transacción es mayor que 10000 bytes.
Esto significa que se requiere una cuota de al menos el %1 por kb.
Puede variar + / - 1 Byte por entrada.</translation>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation>Las transacciones con mayor prioridad son más probables en entrar hacia un bloque.
Esta etiqueta se convierte en rojo, si la prioridad es menor que "medium".
Esto significa que se requiere una cuota de al menos el %1 por kb.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation>Esta etiqueta se convierte en rojo, si cualquier destinatario recibe una cantidad menor que %1.
Esto significa que se requiere una cuota de al menos %2.
Las cantidades inferiores a 0.546 veces la cuota mínima del relé se muestran en forma de DUST.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation>Esta etiqueta se convierte en rojo, si el cambio es menor que %1.
Esto significa que se requiere una cuota de al menos %2.</translation>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>cambio desde %1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(cambio)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Editar Dirección</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etiqueta</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>La etiqueta asociada con esta entrada de la libreta de direcciones</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Dirección</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>La dirección asociada con esta entrada de la libreta de direcciones. Sólo puede modificarse para direcciones de envío.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Nueva dirección de recepción</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nueva dirección de envío</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Editar dirección de recepción</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Editar dirección de envío</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>La dirección introducida "%1" ya está presente en la libreta de direcciones.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid ViretCoin address.</source>
<translation>La dirección introducida "%1" no es una dirección válida de ViretCoin.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>No se pudo desbloquear el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Ha fallado la generación de la nueva clave.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>ViretCoin-Qt</source>
<translation>ViretCoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>Versión</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>opciones de líneas de comandos</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>opciones del interfaz de usuario</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Defina el idioma, por ejemplo "de_DE" (predeterminado: región del sistema)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Iniciar minimizado</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Mostrar pantalla de bienvenida al iniciar (predeterminado: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opciones</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Principal</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation>Tarifa de transacción opcional por kB que ayuda a asegurarse de que sus transacciones se procesan rápidamente. La mayoría de las transacciones son 1 kB. Cuota de 0.01 recomendada.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Comisión de &transacciones</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation>Cantidad reservada no participa en el "Staking" y por lo tanto se puede gastar en cualquier momento.</translation>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation>Reserva</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start ViretCoin after logging in to the system.</source>
<translation>Iniciar ViretCoin automáticamente después de entrar en el sistema.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start ViretCoin on system login</source>
<translation>&Iniciar ViretCoin al inicio del sistema</translation>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation>Separe el bloque y las bases de datos de direcciones al apagar el equipo. Esto significa que se puede mover a otro directorio de datos, pero desacelera el apagado. El monedero siempre está separado.</translation>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation>&Separe la bases de datos al apagar el equipo</translation>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Red</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the ViretCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Abrir automáticamente el puerto de cliente ViretCoin en el router. Esto sólo funciona cuando el router es compatible con UPnP y está habilitado.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Mapear el puerto usando &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the ViretCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Conéctese a la red de ViretCoin a través de un proxy SOCKS (p.ej. cuando se conecta a través de Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Conectarse a través de un proxy SOCKS:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Dirección &IP del proxy:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Dirección IP del proxy (e.g. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Puerto:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Puerto del servidor proxy (ej. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>&Versión SOCKS:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Versión SOCKS del proxy (ej. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Ventana</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Mostrar solo un icono de bandeja tras minimizar la ventana.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimizar a la bandeja en vez de a la barra de tareas</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimizar en lugar de salir de la aplicación al cerrar la ventana. Cuando esta opción está activa, la aplicación solo se puede cerrar seleccionando Salir desde el menú.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimizar al cerrar</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Interfaz</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>I&dioma de la interfaz de usuario:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting ViretCoin.</source>
<translation>El idioma de la interfaz de usuario puede configurarse aquí. Esta configuración se aplicará después de reiniciar ViretCoin.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Mostrar las cantidades en la &unidad:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Elegir la subdivisión predeterminada para mostrar cantidades en la interfaz y cuando se envían monedas.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show ViretCoin addresses in the transaction list or not.</source>
<translation>Mostrar o no las direcciones de ViretCoin en la lista de transacciones.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Mostrar las direcciones en la lista de transacciones</translation>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation>Mostrar o no las funciones de Coin Control.</translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation>Mostrar funciones de Coin &Control (¡solo expertos!)</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&Aceptar</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Cancelar</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Aplicar</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>predeterminado</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation>Advertencia</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting ViretCoin.</source>
<translation>Esta configuración se aplicará después de reiniciar ViretCoin.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>La dirección proxy indicada es inválida.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the ViretCoin network after a connection is established, but this process has not completed yet.</source>
<translation>La información mostrada puede estar desactualizada. Su monedero se sincroniza automáticamente con la red ViretCoin después de establecer una conexión, pero este proceso aún no se ha completado.</translation>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation>Stake:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Sin confirmar:</translation>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation>Disponible:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>Su actual balance disponible</translation>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>No disponible:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Saldo recién minado que aún no está disponible.</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation>Total:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>Su balance actual total</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Movimientos recientes</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Total de transacciones que aún no se han confirmado y que todavía no cuentan para el balance actual</translation>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation>Total de las monedas puestas en "stake" que aún no cuentan para el balance actual</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>desincronizado</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Diálogo de código QR</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Solicitar Pago</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Etiqueta:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Mensaje:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Guardar como...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Error codificando URI en código QR.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>La cantidad introducida es inválida, compruebe por favor.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>La URI resultante es demasiado larga; pruebe a acortar el texto de la etiqueta / mensaje.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Guardar código QR</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>Imágenes PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Nombre del cliente</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation>N/D</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Versión del cliente</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Información</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Versión OpenSSL utilizada</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Hora de inicio</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Red</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Número de conexiones</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>En testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Cadena de bloques</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Número actual de bloques</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Bloques totales estimados</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Hora del último bloque</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Abrir</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Opciones de línea de comandos</translation>
</message>
<message>
<location line="+7"/>
<source>Show the ViretCoin-Qt help message to get a list with possible ViretCoin command-line options.</source>
<translation>Mostrar el mensaje de ayuda de ViretCoin-Qt para obtener una lista con las posibles opciones de línea de comandos para ViretCoin.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Mostrar</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Consola</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Fecha de compilación</translation>
</message>
<message>
<location line="-104"/>
<source>ViretCoin - Debug window</source>
<translation>ViretCoin - Ventana de depuración</translation>
</message>
<message>
<location line="+25"/>
<source>ViretCoin Core</source>
<translation>Núcleo ViretCoin</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Archivo de registro de depuración</translation>
</message>
<message>
<location line="+7"/>
<source>Open the ViretCoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Abrir el archivo de registro de depuración de ViretCoin desde el directorio de datos actual. Esto puede tardar unos segundos para archivos de registro grandes.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Borrar consola</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the ViretCoin RPC console.</source>
<translation>Bienvenido a la consola RPC de ViretCoin.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Use las flechas arriba y abajo para navegar por el historial y <b>Control+L</b> para vaciar la pantalla.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Escriba <b>help</b> para ver un resumen de los comandos disponibles.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Enviar monedas</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation>Características de Coin Control</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation>Entradas...</translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>seleccionado automáticamente</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>¡Fondos insuficientes!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation>0</translation>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Cuantía:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 BC</source>
<translation>0.00 BC</translation>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation>medio</translation>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>Tasa:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Envío pequeño:</translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation>no</translation>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation>Después de tasas:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation>Cambio</translation>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation>dirección de cambio personalizada</translation>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Enviar a múltiples destinatarios de una vez</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Añadir &destinatario</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Eliminar todos los campos de la transacción</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Vaciar &todo</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 BC</source>
<translation>123.456 BC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Confirmar el envío</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Enviar</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a ViretCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation>Introduzca una dirección ViretCoin (p.ej. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copiar cuantía</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>Copiar tasa</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Copiar después de tasas</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Copiar bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Copiar prioridad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Copiar envío pequeño</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Copiar cambio</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> a %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Confirmar el envío de monedas</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>¿Está seguro de que quiere enviar %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> y </translation>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>La dirección de recepción no es válida, compruébela de nuevo.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>La cantidad por pagar tiene que ser mayor de 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>La cantidad sobrepasa su saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>El total sobrepasa su saldo cuando se incluye la tasa de envío de %1.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Se ha encontrado una dirección duplicada. Solo se puede enviar a cada dirección una vez por operación de envío.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation>Error: Fallo al crear la transacción.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Error: La transacción ha sido rechazada. Esto puede ocurrir si algunas de sus monedas en el monedero ya se gastaron, por ejemplo, si se usa una copia del wallet.dat y se gastaron las monedas de la copia pero no se han marcado como gastadas aquí.</translation>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid ViretCoin address</source>
<translation>ADVERTENCIA: Dirección ViretCoin inválida</translation>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation>ADVERTENCIA: dirección de cambio desconocida</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Ca&ntidad:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>&Pagar a:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Etiquete esta dirección para añadirla a su libreta de direcciones</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>&Etiqueta:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation>La dirección a la que se quiere enviar el pago (p.ej. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation>Elija una dirección de la libreta de direcciones</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Eliminar este destinatario</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a ViretCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation>Introduzca una dirección ViretCoin (p.ej. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Firmas - Firmar / verificar un mensaje</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>&Firmar mensaje</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Puede firmar mensajes con sus direcciones para demostrar que las posee. Tenga cuidado de no firmar cualquier cosa vaga, ya que los ataques de phishing pueden tratar de engañarle para suplantar su identidad. Firme solo declaraciones totalmente detalladas con las que usted esté de acuerdo.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation>La dirección con la que firmar el mensaje (p.ej. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation>Elija una dirección de la libreta de direcciones</translation>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Introduzca el mensaje que desea firmar aquí</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copiar la firma actual al portapapeles del sistema</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this ViretCoin address</source>
<translation>Firme el mensaje para demostrar que posee esta dirección ViretCoin</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Vaciar todos los campos de la firma de mensaje</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Vaciar &todo</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&Verificar mensaje</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Introduzca la dirección para la firma, el mensaje (asegurándose de copiar tal cual los saltos de línea, espacios, tabulaciones, etc.) y la firma a continuación para verificar el mensaje. Tenga cuidado de no asumir más información de lo que dice el propio mensaje firmado para evitar fraudes basados en ataques de tipo man-in-the-middle.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation>La dirección con la que se firmó el mensaje (p.ej. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified ViretCoin address</source>
<translation>Verifique el mensaje para asegurarse de que se firmó con la dirección ViretCoin especificada.</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Vaciar todos los campos de la verificación de mensaje</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a ViretCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation>Introduzca una dirección ViretCoin (p.ej. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Haga clic en "Firmar mensaje" para generar la firma</translation>
</message>
<message>
<location line="+3"/>
<source>Enter ViretCoin signature</source>
<translation>Introduzca la firma ViretCoin</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>La dirección introducida es inválida.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Verifique la dirección e inténtelo de nuevo.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>La dirección introducida no corresponde a una clave.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Se ha cancelado el desbloqueo del monedero.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>No se dispone de la clave privada para la dirección introducida.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Ha fallado la firma del mensaje.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Mensaje firmado.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>No se puede decodificar la firma.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Compruebe la firma e inténtelo de nuevo.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>La firma no coincide con el resumen del mensaje.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>La verificación del mensaje ha fallado.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Mensaje verificado.</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation><numerusform>Abierto para %n bloque</numerusform><numerusform>Abierto para %n bloques</numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation>en conflicto</translation>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/fuera de línea</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/no confirmado</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 confirmaciones</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Estado</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, difundida a través de %n nodo</numerusform><numerusform>, difundida a través de %n nodos</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Fuente</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generado</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>De</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Para</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>dirección propia</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>etiqueta</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Crédito</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>disponible en %n bloque más</numerusform><numerusform>disponible en %n bloques más</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>no aceptada</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Débito</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Comisión de transacción</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Cantidad neta</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Mensaje</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Comentario</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Identificador de transacción</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 20 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Las monedas generadas deben madurar 20 bloques antes de poder gastarse. Cuando generó este bloque, se transmitió a la red para ser añadido a la cadena de bloques. Si no logra entrar en la cadena, su estado cambiará a "no aceptado" y no se podrá gastar. Esto puede ocurrir ocasionalmente si otro nodo genera un bloque pocos segundos antes o después del suyo.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Información de depuración</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transacción</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Entradas</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>verdadero</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falso</translation>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, todavía no se ha difundido satisfactoriamente</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>desconocido</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Detalles de transacción</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Esta ventana muestra información detallada sobre la transacción</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmado (%1 confirmaciones)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Abierto para %n bloque más</numerusform><numerusform>Abierto para %n bloques más</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation>Sin conexión</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation>Sin confirmar</translation>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation>Confirmando (%1 de %2 confirmaciones recomendadas)</translation>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation>En conflicto</translation>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>No disponible (%1 confirmaciones, estará disponible después de %2)</translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>¡Este bloque no ha sido recibido por ningún otro nodo y probablemente no será aceptado!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generado pero no aceptado</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Recibidos de</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pago propio</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/d)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Estado de la transacción. Pase el ratón sobre este campo para ver el número de confirmaciones.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Fecha y hora en que se recibió la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tipo de transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Dirección de destino de la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Cantidad retirada o añadida al saldo.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Todo</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Hoy</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Esta semana</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Este mes</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Mes pasado</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Este año</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Rango...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>A usted mismo</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Otra</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Introduzca una dirección o etiqueta para buscar</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Cantidad mínima</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copiar cuantía</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Copiar identificador de transacción</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Editar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Mostrar detalles de la transacción</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation>Exportar datos de transacción</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivo de valores separados por comas (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Error al exportar</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>No se pudo escribir en el archivo %1.</translation>
</message>
<message>
<source>Range:</source>
<translation>Rango:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>a</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation>Enviando...</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>ViretCoin version</source>
<translation>Versión de ViretCoin</translation>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or viretcoind</source>
<translation>Envía un comando a -server o viretcoind</translation>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Muestra comandos</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Recibir ayuda para un comando</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>Opciones:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: viretcoin.conf)</source>
<translation>Especifica un archivo de configuración (por defecto: viretcoin.conf)</translation>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: viretcoind.pid)</source>
<translation>Especifica un archivo pid (por defecto: viretcoind.pid)</translation>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation>Especificar archivo de monedero (dentro del directorio de datos)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Especificar directorio para los datos</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Establecer el tamaño de caché de la base de datos en megabytes (predeterminado: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation>Establecer el tamaño del registro en disco de la base de datos en megabytes (por defecto: 100)</translation>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 29483 or testnet: 39483)</source>
<translation>Escuchar conexiones en <port> (por defecto: 29483 o testnet: 39483)</translation>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Mantener como máximo <n> conexiones a pares (predeterminado: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Conectar a un nodo para obtener direcciones de pares y desconectar</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Especifique su propia dirección pública</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation>Enlazar a la dirección dada. Utilice la notación [host]:puerto para IPv6</translation>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation>Ponga sus monedas en participación "stake" para dar soporte a la red y ganar recompensas (por defecto: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Umbral para la desconexión de pares con mal comportamiento (predeterminado: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Número de segundos en que se evita la reconexión de pares con mal comportamiento (predeterminado: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Ha ocurrido un error al configurar el puerto RPC %u para escucha en IPv4: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation>Separar las bases de datos de bloques y de direcciones. Aumenta el tiempo de apagado (por defecto: 0)</translation>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Error: La transacción ha sido rechazada. Esto puede ocurrir si algunas de sus monedas en el monedero ya se gastaron, por ejemplo, si se usa una copia del wallet.dat y se gastaron las monedas de la copia pero no se han marcado como gastadas aquí.</translation>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation>Error: Esta transacción requiere una comisión de transacción de al menos %s debido a su cantidad, complejidad o al uso de fondos recibidos recientemente</translation>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 29484 or testnet: 39484)</source>
<translation>Escuchar conexiones JSON-RPC en <port> (predeterminado: 29484 o testnet: 39484)</translation>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Aceptar comandos de consola y JSON-RPC</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation>Error: Fallo al crear la transacción.</translation>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation>Error: Monedero bloqueado, no es posible crear una transacción</translation>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation>Importando el archivo de datos de la cadena de bloques.</translation>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation>Importando el archivo de datos de arranque de la cadena de bloques.</translation>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Ejecutar en segundo plano como daemon y aceptar comandos</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Usar la red de pruebas</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Aceptar conexiones desde el exterior (predeterminado: 1 si no -proxy o -connect)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Ha ocurrido un error al configurar el puerto RPC %u para escuchar mediante IPv6. Recurriendo a IPv4: %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation>¡Error al inicializar el entorno de base de datos %s! Para recuperarlo, HAGA UNA COPIA DE SEGURIDAD DE ESE DIRECTORIO y, a continuación, elimine todo su contenido excepto wallet.dat.</translation>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Establecer el tamaño máximo de las transacciones alta-prioridad/baja-comisión en bytes (por defecto: 27000)</translation>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Aviso: ¡-paytxfee tiene un valor muy alto! Esta es la comisión que pagará si envía una transacción.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong ViretCoin will not work properly.</source>
<translation>Advertencia: ¡Verifique que la fecha y hora de su equipo sean correctas! Si su reloj está mal ajustado, ViretCoin no funcionará correctamente.</translation>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Aviso: ¡Error al leer wallet.dat! Todas las claves se han leído correctamente, pero podrían faltar o ser incorrectos los datos de transacciones o las entradas de la libreta de direcciones.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Aviso: ¡Recuperados datos de wallet.dat corrupto! El wallet.dat original se ha guardado como wallet.{timestamp}.bak en %s; si hubiera errores en su saldo o transacciones, deberá restaurar una copia de seguridad.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Intentar recuperar las claves privadas de un wallet.dat corrupto</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>Opciones de creación de bloques:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>Conectar sólo a los nodos (o nodo) especificados</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Descubrir dirección IP propia (predeterminado: 1 al escuchar sin -externalip)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Ha fallado la escucha en todos los puertos. Use -listen=0 si desea esto.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation>Encontrar pares usando la búsqueda de DNS (por defecto: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation>Política de puntos de control de sincronización (por defecto: estricta)</translation>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation>Dirección -tor inválida: '%s'</translation>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation>Cantidad no válida para -reservebalance=<amount></translation>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Búfer de recepción máximo por conexión, <n>*1000 bytes (predeterminado: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Búfer de envío máximo por conexión, <n>*1000 bytes (predeterminado: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Conectarse solo a nodos de la red <net> (IPv4, IPv6 o Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Generar información de depuración adicional. Implica todas las demás opciones -debug*</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Salida extra de información de depuración de red</translation>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation>Prefijar salida de depuración con marca de tiempo</translation>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>Opciones SSL: (ver la Bitcoin Wiki para instrucciones de configuración SSL)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Selecciona la versión de socks proxy a usar (4-5, por defecto: 5)</translation>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Enviar información de trazas/depuración a la consola en lugar de al archivo debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Enviar información de trazas/depuración al depurador</translation>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Establecer el tamaño máximo de bloque en bytes (por defecto: 250000)</translation>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Establecer tamaño mínimo de bloque en bytes (predeterminado: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Reducir el archivo debug.log al iniciar el cliente (predeterminado: 1 sin -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Especificar el tiempo máximo de conexión en milisegundos (predeterminado: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation>No es posible firmar el punto de control, ¿clave de punto de control incorrecta?
</translation>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Usar UPnP para asignar el puerto de escucha (predeterminado: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Usar UPnP para asignar el puerto de escucha (predeterminado: 1 al escuchar)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Usar proxy para alcanzar los servicios ocultos de Tor (por defecto: el mismo que -proxy)</translation>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>Nombre de usuario para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation>Verificando la integridad de la base de datos...</translation>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation>ADVERTENCIA: se detectó una violación de punto de control sincronizado, ¡pero se omitió!</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation>Advertencia: Espacio en disco bajo!</translation>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Advertencia: esta versión está obsoleta, ¡es necesario actualizar!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrupto. Ha fallado la recuperación.</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>Contraseña para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=viretcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "ViretCoin Alert" [email protected]
</source>
<translation>%s, debe establecer un rpcpassword en el fichero de configuración:
%s
Se recomienda utilizar la siguiente contraseña aleatoria:
rpcuser=viretcoinrpc
rpcpassword=%s
(no necesita recordar este password)
El nombre de usuario y contraseña no debe ser el mismo.
Si no existe el archivo, créelo con permisos de sólo lectura para el dueño.
También se recomienda establecer alertnotify para ser notificado de los problemas;
por ejemplo: alertnotify=echo %%s | mail -s "ViretCoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation>Encontrar pares usando IRC (por defecto: 0)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation>Sincronizar el tiempo con otros nodos. Desactivar si el tiempo en su sistema es preciso, por ejemplo si usa sincronización con NTP (por defecto: 1)</translation>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation>Al crear transacciones, ignorar las entradas con valor inferior a esta (por defecto: 0.01)</translation>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Permitir conexiones JSON-RPC desde la dirección IP especificada
</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Enviar comandos al nodo situado en <ip> (predeterminado: 127.0.0.1)
</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Ejecutar un comando cuando cambia el mejor bloque (%s en cmd se sustituye por el hash de bloque)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Ejecutar comando cuando una transacción del monedero cambia (%s en cmd se remplazará por TxID)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation>Requerir confirmaciones para cambio (por defecto: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation>Exigir a los scripts de transacción que usen los operadores PUSH canónicos (por defecto: 1)</translation>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Ejecutar comando cuando una alerta relevante sea recibida (%s en la linea de comandos es reemplazado por un mensaje)</translation>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Actualizar el monedero al último formato</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Ajustar el número de claves en reserva <n> (predeterminado: 100)
</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Volver a examinar la cadena de bloques en busca de transacciones del monedero perdidas</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation>Cuántos bloques comprobar al inicio (por defecto: 2500, 0 = todos)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation>Cómo de minuciosa es la verificación del bloque (0-6, por defecto: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation>Importar bloques desde el archivo externo blk000?.dat</translation>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Usar OpenSSL (https) para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Certificado del servidor (predeterminado: server.cert)
</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Clave privada del servidor (predeterminado: server.pem)
</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Cifrados aceptables (por defecto: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation>Error: Monedero desbloqueado sólo para hacer "stake", no es posible crear una transacción.</translation>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation>ADVERTENCIA: Punto de control no válido encontrado! Las transacciones que se muestran pueden no ser correctas! Puede que tenga que actualizar o notificar a los desarrolladores.</translation>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>Este mensaje de ayuda
</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation>El monedero %s reside fuera del directorio de datos %s.</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. ViretCoin is probably already running.</source>
<translation>No se puede obtener un bloqueo en el directorio de datos %s. ViretCoin probablemente ya esté en funcionamiento.</translation>
</message>
<message>
<location line="-98"/>
<source>ViretCoin</source>
<translation>ViretCoin</translation>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>No es posible conectar con %s en este sistema (bind ha dado el error %d, %s)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation>Conectar a través de un proxy socks</translation>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Permitir búsquedas DNS para -addnode, -seednode y -connect</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>Cargando direcciones...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation>Error al cargar blkindex.dat</translation>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Error al cargar wallet.dat: el monedero está dañado</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of ViretCoin</source>
<translation>Error cargando wallet.dat: El monedero requiere una nueva versión de ViretCoin</translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart ViretCoin to complete</source>
<translation>El monedero necesita ser reescrito: reinicie ViretCoin para completar</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Error al cargar wallet.dat</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Dirección -proxy inválida: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>La red especificada en -onlynet '%s' es desconocida</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Solicitada versión de proxy -socks desconocida: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>No se puede resolver la dirección de -bind: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>No se puede resolver la dirección de -externalip: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Cantidad inválida para -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation>Error: no se pudo iniciar el nodo</translation>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation>Enviando...</translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Cuantía no válida</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Fondos insuficientes</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>Cargando el índice de bloques...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Añadir un nodo al que conectarse y tratar de mantener la conexión abierta</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. ViretCoin is probably already running.</source>
<translation>No se puede enlazar a %s en este equipo. ViretCoin probablemente ya esté en funcionamiento.</translation>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Comisión por KB a añadir a las transacciones que envía</translation>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation>Cantidad no válida para -mininput=<amount>: '%s'</translation>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Cargando monedero...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>No se puede rebajar el monedero</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation>No se puede inicializar el keypool</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>No se puede escribir la dirección predeterminada</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Reexplorando...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Carga completada</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>Para utilizar la opción %s</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Tiene que establecer rpcpassword=<contraseña> en el fichero de configuración:
%s
Si el archivo no existe, créelo con permiso de lectura solamente del propietario.</translation>
</message>
</context>
</TS> | <translation>No se puede escribir en el archivo %1.</translation>
</message>
<message> |
txargs_marshall.go | // Copyright 2020 Celo Org
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package airgap
import (
"encoding/json"
"github.com/ethereum/go-ethereum/common"
)
type txArgsRawData struct {
From common.Address `json:"from"`
Value *string `json:"value,omitempty"`
To *common.Address `json:"to,omitempty"`
Method *string `json:"method,omitempty"`
Args []interface{} `json:"args,omitempty"`
}
type callParamsRawData struct {
txArgsRawData
BlockNumber *string `json:"block_number,omitempty"`
}
func (data *txArgsRawData) transform(args *TxArgs) error {
var err error
args.From = data.From
args.Value, err = stringToBigInt(data.Value)
if err != nil |
args.To = data.To
args.Method, err = stringToMethod(data.Method)
if err != nil {
return err
}
args.Args = data.Args
return nil
}
func (args *TxArgs) transform() *txArgsRawData {
var data txArgsRawData
data.From = args.From
data.Value = bigIntToString(args.Value)
data.To = args.To
if args.Method != nil {
var str = args.Method.String()
data.Method = &str
}
data.Args = args.Args
return &data
}
func (args TxArgs) MarshalJSON() ([]byte, error) {
return json.Marshal(args.transform())
}
func (params CallParams) MarshalJSON() ([]byte, error) {
return json.Marshal(callParamsRawData{
txArgsRawData: *params.TxArgs.transform(),
BlockNumber: bigIntToString(params.BlockNumber),
})
}
func (args *TxArgs) UnmarshalJSON(b []byte) error {
var err error
var data txArgsRawData
if err = json.Unmarshal(b, &data); err != nil {
return err
}
return data.transform(args)
}
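// Round-trip sketch (illustrative only, not part of the package API; assumes a
// caller with "encoding/json" and "math/big" imported, and that TxArgs.Value is
// a *big.Int carried as a decimal string by stringToBigInt/bigIntToString):
//
//	args := TxArgs{From: common.HexToAddress("0x01"), Value: big.NewInt(1)}
//	b, _ := json.Marshal(args)      // MarshalJSON above renders Value as a string
//	var decoded TxArgs
//	_ = json.Unmarshal(b, &decoded) // UnmarshalJSON above parses it back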
func (params *CallParams) UnmarshalJSON(b []byte) error {
var err error
var data callParamsRawData
if err = json.Unmarshal(b, &data); err != nil {
return err
}
err = data.txArgsRawData.transform(¶ms.TxArgs)
if err != nil {
return err
}
params.BlockNumber, err = stringToBigInt(data.BlockNumber)
return err
}
| {
return err
} |
attribRecord.js | 'use strict';
const layerRecord = require('./layerRecord.js')();
const attribFC = require('./attribFC.js')();
/**
* @class AttribRecord
*/
class | extends layerRecord.LayerRecord {
// this class has functions common to layers that have attributes
get clickTolerance () { return this._tolerance; }
/**
* Create a layer record with the appropriate geoApi layer type. Layer config
* should be fully merged with all layer options defined (i.e. this constructor
* will not apply any defaults).
* @param {Object} layerClass the ESRI api object for the layer
* @param {Object} esriRequest the ESRI api object for making web requests with proxy support
* @param {Object} apiRef object pointing to the geoApi. allows us to call other geoApi functions.
* @param {Object} config layer config values
* @param {Object} esriLayer an optional pre-constructed layer
* @param {Function} epsgLookup an optional lookup function for EPSG codes (see geoService for signature)
*/
constructor (layerClass, esriRequest, apiRef, config, esriLayer, epsgLookup) {
super(layerClass, apiRef, config, esriLayer, epsgLookup);
this._esriRequest = esriRequest;
this._tolerance = this.config.tolerance;
}
/**
* Get the best user-friendly name of a field. Uses alias if alias is defined, else uses the system attribute name.
*
* @param {String} attribName the attribute name we want a nice name for
* @return {Promise} resolves to the best available user friendly attribute name
*/
aliasedFieldName (attribName) {
return this._featClasses[this._defaultFC].aliasedFieldName(attribName);
}
/**
* Retrieves attributes from a layer for a specified feature index
* @return {Promise} promise resolving with formatted attributes to be consumed by the datagrid and esri feature identify
*/
getFormattedAttributes () {
return this._featClasses[this._defaultFC].getFormattedAttributes();
}
/**
* Test if an attribute field has a date data type.
*
* @param {String} attribName the attribute name to check if it's a date field
* @return {Promise} resolves with a boolean indicating if attrib name is a date field.
*/
checkDateType (attribName) {
return this._featClasses[this._defaultFC].checkDateType(attribName);
}
/**
* Returns attribute data for this layer.
*
* @function getAttribs
* @returns {Promise} resolves with a layer attribute data object
*/
getAttribs () {
return this._featClasses[this._defaultFC].getAttribs();
}
/**
* Returns layer-specific data for this Record
*
* @function getLayerData
* @returns {Promise} resolves with a layer data object
*/
getLayerData () {
return this._featClasses[this._defaultFC].getLayerData();
}
/**
* Extract the feature name from a feature as best we can.
*
* @function getFeatureName
* @param {String} objId the object id of the attribute
* @param {Object} attribs the dictionary of attributes for the feature.
* @returns {String} the name of the feature
*/
getFeatureName (objId, attribs) {
return this._featClasses[this._defaultFC].getFeatureName(objId, attribs);
}
/**
* Fetches a graphic for the given object id.
* Will attempt local copy (unless overridden), will hit the server if not available.
*
* @function fetchGraphic
* @param {Integer} objId ID of object being searched for
     * @param {Object} opts object containing option parameters
* - map map wrapper object of current map. only required if requesting geometry
* - geom boolean. indicates if return value should have geometry included. default to false
* - attribs boolean. indicates if return value should have attributes included. default to false
* @returns {Promise} resolves with a bundle of information. .graphic is the graphic; .layerFC for convenience
*/
fetchGraphic (objId, opts) {
return this._featClasses[this._defaultFC].fetchGraphic(objId, opts);
}
/**
     * Will attempt to zoom the map view so that a graphic is prominent.
*
* @function zoomToGraphic
     * @param {Integer} objId Object ID of graphic being searched for
* @param {Object} map wrapper object for the map we want to zoom
* @param {Object} offsetFraction an object with decimal properties `x` and `y` indicating percentage of offsetting on each axis
* @return {Promise} resolves after the map is done moving
*/
zoomToGraphic (objId, map, offsetFraction) {
return this._featClasses[this._defaultFC].zoomToGraphic(objId, map, offsetFraction);
}
/**
* Get feature count of a feature layer.
*
* @function getFeatureCount
* @param {String} url server url of the feature layer. empty string for file based layers
     * @return {Promise} resolves with an integer indicating the feature count. -1 if an error occurred.
*/
getFeatureCount (url) {
if (url) {
            // wrapping server call in a function, as we regularly encounter silliness
            // where we need to execute the count request twice.
            // having a function (with finalTry flag) lets us handle the double-request
const esriServerCount = (layerUrl, finalTry = false) => {
// extract info for this service
const defService = this._esriRequest({
url: `${layerUrl}/query`,
content: {
f: 'json',
where: '1=1',
returnCountOnly: true,
returnGeometry: false
},
callbackParamName: 'callback',
handleAs: 'json',
});
return new Promise(resolve => {
defService.then(serviceResult => {
if (serviceResult && (typeof serviceResult.error === 'undefined') &&
(typeof serviceResult.count !== 'undefined')) {
// we got a row count
resolve(serviceResult.count);
} else if (!finalTry) {
// do a second attempt
resolve(esriServerCount(layerUrl, true));
} else {
// tells the app it failed
resolve(-1);
}
}, error => {
// failed to load service info.
// TODO any tricks to avoid duplicating the error case in both blocks?
if (!finalTry) {
// do a second attempt
resolve(esriServerCount(layerUrl, true));
} else {
// tells the app it failed
console.warn(error);
resolve(-1);
}
});
});
};
return esriServerCount(url);
} else {
// file based layer. count local features
return Promise.resolve(this._layer.graphics.length);
}
}
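    // Usage sketch (hypothetical caller and URL, not part of this class):
    //   record.getFeatureCount('https://host/arcgis/rest/services/Foo/MapServer/0')
    //       .then(count => console.log(count === -1 ? 'count failed' : `${count} features`));
    // Note the promise always resolves; -1 signals failure instead of a rejection.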
/**
* Transforms esri key-value attribute object into key value array with format suitable
* for consumption by the details pane.
*
* @param {Object} attribs attribute key-value mapping, potentially with aliases as keys
* @param {Array} fields optional. fields definition array for layer. no aliasing done if not provided
* @return {Array} attribute data transformed into a list, with potential field aliasing applied
*/
attributesToDetails (attribs, fields) {
// TODO make this extensible / modifiable / configurable to allow different details looks for different data
// simple array of text mapping for demonstration purposes. fancy grid formatting later?
// ignore any functions hanging around on the attribute.
return Object.keys(attribs)
.filter(key => typeof attribs[key] !== 'function')
.map(key => {
const fieldType = fields ? fields.find(f => f.name === key) : null;
return {
key: attribFC.AttribFC.aliasedFieldNameDirect(key, fields), // need synchronous variant of alias lookup
value: attribs[key],
type: fieldType ? fieldType.type : fieldType
};
});
}
}
module.exports = () => ({
AttribRecord
});
| AttribRecord |
setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for poets.
This file was generated with PyScaffold 1.3, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import os
import sys
import inspect
from distutils.cmd import Command
import versioneer
import setuptools
from setuptools.command.test import test as TestCommand
from setuptools import setup
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# Change these settings according to your needs
MAIN_PACKAGE = "poets"
DESCRIPTION = "Python Open Earth Observation Tools"
LICENSE = "BSD 3 Clause"
URL = "http://rs.geo.tuwien.ac.at/poets"
AUTHOR = "Thomas Mistelbauer"
EMAIL = "[email protected]"
COVERAGE_XML = False
COVERAGE_HTML = False
JUNIT_XML = False
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Programming Language :: Python']
# Add here console scripts like ['hello_world = poets.module:function']
CONSOLE_SCRIPTS = []
# Versioneer configuration
versioneer.VCS = 'git'
versioneer.versionfile_source = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.versionfile_build = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.tag_prefix = 'v' # tags are like v1.2.0
versioneer.parentdir_prefix = MAIN_PACKAGE + '-'
class PyTest(TestCommand):
user_options = [("cov=", None, "Run coverage"),
("cov-xml=", None, "Generate junit xml report"),
("cov-html=", None, "Generate junit html report"),
("junitxml=", None, "Generate xml of test results")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.cov = None
self.cov_xml = False
self.cov_html = False
self.junitxml = None
def finalize_options(self):
TestCommand.finalize_options(self)
if self.cov is not None:
self.cov = ["--cov", self.cov, "--cov-report", "term-missing"]
if self.cov_xml:
self.cov.extend(["--cov-report", "xml"])
if self.cov_html:
self.cov.extend(["--cov-report", "html"])
if self.junitxml is not None:
self.junitxml = ["--junitxml", self.junitxml]
def run_tests(self):
try:
import pytest
        except ImportError:
raise RuntimeError("py.test is not installed, "
"run: pip install pytest")
params = {"args": self.test_args}
if self.cov:
params["args"] += self.cov
if self.junitxml:
params["args"] += self.junitxml
errno = pytest.main(**params)
sys.exit(errno)
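# Invocation sketch (illustrative; the flags map to the user_options above, and
# exact boolean handling follows distutils option parsing):
#   python setup.py test --cov=poets --junitxml=junit.xml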
def sphinx_builder():
try:
from sphinx.setup_command import BuildDoc
except ImportError:
class NoSphinx(Command):
user_options = []
def initialize_options(self):
raise RuntimeError("Sphinx documentation is not installed, "
"run: pip install sphinx")
return NoSphinx
class BuildSphinxDocs(BuildDoc):
def run(self):
if self.builder == "doctest":
|
else:
BuildDoc.run(self)
return BuildSphinxDocs
class ObjKeeper(type):
instances = {}
def __init__(cls, name, bases, dct):
cls.instances[cls] = []
def __call__(cls, *args, **kwargs):
cls.instances[cls].append(super(ObjKeeper, cls).__call__(*args,
**kwargs))
return cls.instances[cls][-1]
def capture_objs(cls):
from six import add_metaclass
module = inspect.getmodule(cls)
name = cls.__name__
keeper_class = add_metaclass(ObjKeeper)(cls)
setattr(module, name, keeper_class)
cls = getattr(module, name)
return keeper_class.instances[cls]
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
return [req for req in content.splitlines() if req != '']
def read(fname):
return open(os.path.join(__location__, fname)).read()
def setup_package():
# Assemble additional setup commands
cmdclass = versioneer.get_cmdclass()
cmdclass['docs'] = sphinx_builder()
cmdclass['doctest'] = sphinx_builder()
cmdclass['test'] = PyTest
# Some helper variables
version = versioneer.get_version()
docs_path = os.path.join(__location__, "docs")
docs_build_path = os.path.join(docs_path, "_build")
install_reqs = get_install_requirements("requirements.txt")
command_options = {
'docs': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path)},
'doctest': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path),
'builder': ('setup.py', 'doctest')},
'test': {'test_suite': ('setup.py', 'tests'),
'cov': ('setup.py', 'poets')}}
if JUNIT_XML:
command_options['test']['junitxml'] = ('setup.py', 'junit.xml')
if COVERAGE_XML:
command_options['test']['cov_xml'] = ('setup.py', True)
if COVERAGE_HTML:
command_options['test']['cov_html'] = ('setup.py', True)
setup(name=MAIN_PACKAGE,
version=version,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
package_data={'poets':
[os.path.join('shape', 'ancillary', '*.dbf'),
os.path.join('shape', 'ancillary', '*.README'),
os.path.join('shape', 'ancillary', '*.shp'),
os.path.join('shape', 'ancillary', '*.shx'),
os.path.join('web', 'static', 'js', '*.js*'),
os.path.join('web', 'static', 'ol2.13.1', '*.js*'),
os.path.join('web', 'static', 'ol2.13.1', 'theme',
'default', '*.css*'),
os.path.join('web', 'static', 'ol2.13.1', 'theme',
'default', 'img', '*.*'),
os.path.join('web', 'static', 'ol2.13.1', 'img',
'*.*'),
os.path.join('web', 'static', 'ol2.13.1', '*.*'),
os.path.join('web', 'static', 'slider', 'js',
'*.js*'),
os.path.join('web', 'static', 'slider', 'css',
'*.css*'),
os.path.join('web', 'static', 'slider', 'less',
'*.less*'),
os.path.join('web', 'templates', '*.html*'),
os.path.join('web', 'static', '*.ico'),
os.path.join('web', 'static', '*.png')]
},
install_requires=install_reqs,
setup_requires=['six'],
cmdclass=cmdclass,
tests_require=['pytest-cov', 'pytest'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS})
if __name__ == "__main__":
setup_package()
| import sphinx.ext.doctest as doctest
# Capture the DocTestBuilder class in order to return the total
# number of failures when exiting
ref = capture_objs(doctest.DocTestBuilder)
BuildDoc.run(self)
errno = ref[-1].total_failures
sys.exit(errno) |
verify.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use aptos_config::config::{RocksdbConfig, NO_OP_STORAGE_PRUNER_CONFIG};
use aptos_global_constants::{
CONSENSUS_KEY, FULLNODE_NETWORK_KEY, OPERATOR_ACCOUNT, OPERATOR_KEY, OWNER_ACCOUNT, OWNER_KEY,
SAFETY_DATA, VALIDATOR_NETWORK_KEY, WAYPOINT,
};
use aptos_management::{
config::ConfigPath, error::Error, secure_backend::ValidatorBackend,
storage::StorageWrapper as Storage,
};
use aptos_temppath::TempPath;
use aptos_types::{
account_address::AccountAddress, account_config, account_state::AccountState,
network_address::NetworkAddress, on_chain_config::ValidatorSet,
validator_config::ValidatorConfig, waypoint::Waypoint,
};
use aptos_vm::AptosVM;
use aptosdb::AptosDB;
use executor::db_bootstrapper;
use std::{
convert::TryFrom,
fmt::Write,
fs::File,
io::Read,
path::{Path, PathBuf},
str::FromStr,
sync::Arc,
};
use storage_interface::{DbReader, DbReaderWriter};
use structopt::StructOpt;
/// Prints the public information within a store
#[derive(Debug, StructOpt)]
pub struct Verify {
#[structopt(flatten)]
config: ConfigPath,
#[structopt(flatten)]
backend: ValidatorBackend,
/// If specified, compares the internal state to that of a
    /// provided genesis. Note that a waypoint might diverge from
/// the provided genesis after execution has begun.
#[structopt(long, verbatim_doc_comment)]
genesis_path: Option<PathBuf>,
}
impl Verify {
pub fn execute(self) -> Result<String, Error> {
let config = self
.config
.load()?
.override_validator_backend(&self.backend.validator_backend)?;
let validator_storage = config.validator_backend();
verify_genesis(validator_storage, self.genesis_path.as_deref())
}
}
pub fn verify_genesis(
validator_storage: Storage,
genesis_path: Option<&Path>,
) -> Result<String, Error> {
let mut buffer = String::new();
writeln!(buffer, "Data stored in SecureStorage:").unwrap();
write_break(&mut buffer);
writeln!(buffer, "Keys").unwrap();
write_break(&mut buffer);
write_ed25519_key(&validator_storage, &mut buffer, CONSENSUS_KEY);
write_x25519_key(&validator_storage, &mut buffer, FULLNODE_NETWORK_KEY);
write_ed25519_key(&validator_storage, &mut buffer, OWNER_KEY); | write_ed25519_key(&validator_storage, &mut buffer, VALIDATOR_NETWORK_KEY);
write_break(&mut buffer);
writeln!(buffer, "Data").unwrap();
write_break(&mut buffer);
write_string(&validator_storage, &mut buffer, OPERATOR_ACCOUNT);
write_string(&validator_storage, &mut buffer, OWNER_ACCOUNT);
write_safety_data(&validator_storage, &mut buffer, SAFETY_DATA);
write_waypoint(&validator_storage, &mut buffer, WAYPOINT);
write_break(&mut buffer);
if let Some(genesis_path) = genesis_path {
compare_genesis(validator_storage, &mut buffer, genesis_path)?;
}
Ok(buffer)
}
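// Invocation sketch (hypothetical; assumes `validator_storage` is a configured
// Storage wrapper and `genesis.blob` is a BCS-encoded genesis transaction):
//
//     let report = verify_genesis(validator_storage, Some(Path::new("genesis.blob")))?;
//     println!("{}", report);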
fn write_assert(buffer: &mut String, name: &str, value: bool) {
let value = if value { "match" } else { "MISMATCH" };
writeln!(buffer, "{} - {}", name, value).unwrap();
}
fn write_break(buffer: &mut String) {
writeln!(
buffer,
"====================================================================================",
)
.unwrap();
}
fn write_ed25519_key(storage: &Storage, buffer: &mut String, key: &'static str) {
let value = storage
.ed25519_public_from_private(key)
.map(|v| v.to_string())
.unwrap_or_else(|e| e.to_string());
writeln!(buffer, "{} - {}", key, value).unwrap();
}
fn write_x25519_key(storage: &Storage, buffer: &mut String, key: &'static str) {
let value = storage
.x25519_public_from_private(key)
.map(|v| v.to_string())
.unwrap_or_else(|e| e.to_string());
writeln!(buffer, "{} - {}", key, value).unwrap();
}
fn write_string(storage: &Storage, buffer: &mut String, key: &'static str) {
let value = storage.string(key).unwrap_or_else(|e| e.to_string());
writeln!(buffer, "{} - {}", key, value).unwrap();
}
fn write_safety_data(storage: &Storage, buffer: &mut String, key: &'static str) {
let value = storage
.value::<consensus_types::safety_data::SafetyData>(key)
.map(|v| v.to_string())
.unwrap_or_else(|e| e.to_string());
writeln!(buffer, "{} - {}", key, value).unwrap();
}
fn write_waypoint(storage: &Storage, buffer: &mut String, key: &'static str) {
let value = storage
.string(key)
.map(|value| {
if value.is_empty() {
"empty".into()
} else {
Waypoint::from_str(&value)
.map(|c| c.to_string())
.unwrap_or_else(|_| "Invalid waypoint".into())
}
})
.unwrap_or_else(|e| e.to_string());
writeln!(buffer, "{} - {}", key, value).unwrap();
}
fn compare_genesis(
storage: Storage,
buffer: &mut String,
genesis_path: &Path,
) -> Result<(), Error> {
// Compute genesis and waypoint and compare to given waypoint
let db_path = TempPath::new();
let (db_rw, expected_waypoint) = compute_genesis(genesis_path, db_path.path())?;
let actual_waypoint = storage.waypoint(WAYPOINT)?;
write_assert(buffer, WAYPOINT, actual_waypoint == expected_waypoint);
// Fetch on-chain validator config and compare on-chain keys to local keys
let validator_account = storage.account_address(OWNER_ACCOUNT)?;
let validator_config = validator_config(validator_account, db_rw.reader.clone())?;
let actual_consensus_key = storage.ed25519_public_from_private(CONSENSUS_KEY)?;
let expected_consensus_key = &validator_config.consensus_public_key;
write_assert(
buffer,
CONSENSUS_KEY,
&actual_consensus_key == expected_consensus_key,
);
let actual_validator_key = storage.x25519_public_from_private(VALIDATOR_NETWORK_KEY)?;
let actual_fullnode_key = storage.x25519_public_from_private(FULLNODE_NETWORK_KEY)?;
let network_addrs: Vec<NetworkAddress> = validator_config
.validator_network_addresses()
.unwrap_or_default();
let expected_validator_key = network_addrs
.get(0)
.and_then(|addr: &NetworkAddress| addr.find_noise_proto());
write_assert(
buffer,
VALIDATOR_NETWORK_KEY,
Some(actual_validator_key) == expected_validator_key,
);
let expected_fullnode_key = validator_config.fullnode_network_addresses().ok().and_then(
|addrs: Vec<NetworkAddress>| addrs.get(0).and_then(|addr| addr.find_noise_proto()),
);
write_assert(
buffer,
FULLNODE_NETWORK_KEY,
Some(actual_fullnode_key) == expected_fullnode_key,
);
Ok(())
}
/// Compute the ledger given a genesis writeset transaction and return access to that ledger and
/// the waypoint for that state.
fn compute_genesis(
genesis_path: &Path,
db_path: &Path,
) -> Result<(DbReaderWriter, Waypoint), Error> {
let aptosdb = AptosDB::open(
db_path,
false,
NO_OP_STORAGE_PRUNER_CONFIG,
RocksdbConfig::default(),
true, /* account_count_migration */
)
.map_err(|e| Error::UnexpectedError(e.to_string()))?;
let db_rw = DbReaderWriter::new(aptosdb);
let mut file = File::open(genesis_path)
.map_err(|e| Error::UnexpectedError(format!("Unable to open genesis file: {}", e)))?;
let mut buffer = vec![];
file.read_to_end(&mut buffer)
.map_err(|e| Error::UnexpectedError(format!("Unable to read genesis: {}", e)))?;
let genesis = bcs::from_bytes(&buffer)
.map_err(|e| Error::UnexpectedError(format!("Unable to parse genesis: {}", e)))?;
let waypoint = db_bootstrapper::generate_waypoint::<AptosVM>(&db_rw, &genesis)
.map_err(|e| Error::UnexpectedError(e.to_string()))?;
db_bootstrapper::maybe_bootstrap::<AptosVM>(&db_rw, &genesis, waypoint)
.map_err(|e| Error::UnexpectedError(format!("Unable to commit genesis: {}", e)))?;
Ok((db_rw, waypoint))
}
/// Read from the ledger the validator config from the validator set for the specified account
fn validator_config(
validator_account: AccountAddress,
reader: Arc<dyn DbReader>,
) -> Result<ValidatorConfig, Error> {
let blob = reader
.get_latest_account_state(account_config::validator_set_address())
.map_err(|e| {
Error::UnexpectedError(format!("ValidatorSet Account issue {}", e.to_string()))
})?
.ok_or_else(|| Error::UnexpectedError("ValidatorSet Account not found".into()))?;
let account_state = AccountState::try_from(&blob)
.map_err(|e| Error::UnexpectedError(format!("Failed to parse blob: {}", e)))?;
let validator_set: ValidatorSet = account_state
.get_validator_set()
.map_err(|e| Error::UnexpectedError(format!("ValidatorSet issue {}", e.to_string())))?
.ok_or_else(|| Error::UnexpectedError("ValidatorSet does not exist".into()))?;
let info = validator_set
.payload()
.iter()
.find(|vi| vi.account_address() == &validator_account)
.ok_or_else(|| {
Error::UnexpectedError(format!(
"Unable to find Validator account {:?}",
&validator_account
))
})?;
Ok(info.config().clone())
} | write_ed25519_key(&validator_storage, &mut buffer, OPERATOR_KEY); |
handlers_test.go | // Copyright 2016 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"errors"
"net/http"
"net/http/httptest"
"testing"
"golang.org/x/net/context"
"go.chromium.org/luci/server/auth/signing"
"go.chromium.org/luci/server/caching"
"go.chromium.org/luci/server/router"
. "github.com/smartystreets/goconvey/convey"
. "go.chromium.org/luci/common/testing/assertions"
)
func withSigner(s signing.Signer) router.MiddlewareChain {
return router.NewMiddlewareChain(
func(c *router.Context, next router.Handler) {
c.Context = ModifyConfig(c.Context, func(cfg Config) Config {
cfg.Signer = s
return cfg
})
next(c)
},
)
}
func TestCertificatesHandler(t *testing.T) {
t.Parallel()
call := func(s signing.Signer) (*signing.PublicCertificates, error) {
r := router.New()
InstallHandlers(r, withSigner(s))
ts := httptest.NewServer(r)
		// Note: there are two contexts. One for the outer /certificates call
		// (this one), and another for the /certificates request handler (it is set up
		// in the middleware chain above).
ctx := caching.WithEmptyProcessCache(context.Background())
ctx = ModifyConfig(ctx, func(cfg Config) Config {
cfg.AnonymousTransport = func(context.Context) http.RoundTripper {
return http.DefaultTransport
}
return cfg
})
return signing.FetchCertificates(ctx, ts.URL+"/auth/api/v1/server/certificates")
}
Convey("Works", t, func() {
certs, err := call(&phonySigner{})
So(err, ShouldBeNil)
So(len(certs.Certificates), ShouldEqual, 1)
})
Convey("No signer", t, func() {
_, err := call(nil)
So(err, ShouldErrLike, "HTTP code (404)")
})
Convey("Error getting certs", t, func() {
_, err := call(&phonySigner{errors.New("fail")})
So(err, ShouldErrLike, "HTTP code (500)")
})
}
func TestServiceInfoHandler(t *testing.T) {
t.Parallel()
Convey("Works", t, func() {
r := router.New()
signer := &phonySigner{}
InstallHandlers(r, withSigner(signer)) | w := httptest.NewRecorder()
req, _ := http.NewRequest("GET", "/auth/api/v1/server/info", nil)
r.ServeHTTP(w, req)
So(w.Code, ShouldEqual, 200)
So(w.Body.String(), ShouldResemble,
`{"app_id":"phony-app","app_runtime":"go",`+
`"app_runtime_version":"go1.5.1",`+
`"app_version":"1234-abcdef","service_account_name":`+
`"[email protected]"}`+"\n")
signer.err = errors.New("fail")
w = httptest.NewRecorder()
req, _ = http.NewRequest("GET", "/auth/api/v1/server/info", nil)
r.ServeHTTP(w, req)
So(w.Code, ShouldEqual, 500)
So(w.Body.String(), ShouldResemble, "{\"error\":\"Can't grab service info - fail\"}\n")
})
}
///
type phonySigner struct {
err error
}
func (s *phonySigner) SignBytes(c context.Context, blob []byte) (string, []byte, error) {
if s.err != nil {
return "", nil, s.err
}
return "phonyKey", []byte("signature"), nil
}
func (s *phonySigner) Certificates(c context.Context) (*signing.PublicCertificates, error) {
if s.err != nil {
return nil, s.err
}
return &signing.PublicCertificates{
Certificates: []signing.Certificate{
{
KeyName: "phonyKey",
X509CertificatePEM: "phonyPEM",
},
},
}, nil
}
func (s *phonySigner) ServiceInfo(c context.Context) (*signing.ServiceInfo, error) {
if s.err != nil {
return nil, s.err
}
return &signing.ServiceInfo{
AppID: "phony-app",
AppRuntime: "go",
AppRuntimeVersion: "go1.5.1",
AppVersion: "1234-abcdef",
ServiceAccountName: "[email protected]",
}, nil
} | |
objects_delete_user_test.go | package pubnub
import (
"fmt"
"testing"
h "github.com/pubnub/go/tests/helpers"
"github.com/stretchr/testify/assert"
)
func AssertDeleteUser(t *testing.T, checkQueryParam, testContext bool) {
assert := assert.New(t)
pn := NewPubNub(NewDemoConfig())
queryParam := map[string]string{
"q1": "v1",
"q2": "v2",
}
if !checkQueryParam {
queryParam = nil
}
o := newDeleteUserBuilder(pn)
if testContext {
o = newDeleteUserBuilderWithContext(pn, backgroundContext)
}
o.ID("id0")
o.QueryParam(queryParam)
path, err := o.opts.buildPath()
assert.Nil(err)
h.AssertPathsEqual(t,
fmt.Sprintf("/v1/objects/%s/users/%s", pn.Config.SubscribeKey, "id0"),
path, []int{})
body, err := o.opts.buildBody()
assert.Nil(err)
assert.Empty(body)
if checkQueryParam {
u, _ := o.opts.buildQuery()
assert.Equal("v1", u.Get("q1"))
assert.Equal("v2", u.Get("q2"))
}
}
func TestDeleteUser(t *testing.T) {
AssertDeleteUser(t, true, false)
}
func TestDeleteUserContext(t *testing.T) {
AssertDeleteUser(t, true, true)
}
func TestDeleteUserResponseValueError(t *testing.T) {
assert := assert.New(t)
pn := NewPubNub(NewDemoConfig())
opts := &deleteUserOpts{
pubnub: pn,
}
jsonBytes := []byte(`s`)
_, _, err := newPNDeleteUserResponse(jsonBytes, opts, StatusResponse{})
assert.Equal("pubnub/parsing: Error unmarshalling response: {s}", err.Error())
}
func TestDeleteUserResponseValuePass(t *testing.T) {
assert := assert.New(t)
pn := NewPubNub(NewDemoConfig())
opts := &deleteUserOpts{
pubnub: pn,
}
jsonBytes := []byte(`{"status":200,"data":null}`)
r, _, err := newPNDeleteUserResponse(jsonBytes, opts, StatusResponse{})
assert.Equal(nil, r.Data)
| assert.Nil(err)
} |
|
cloudstorage.go | // Package cloud provides utilities to implement a petrific storage using a cloud-based object-storage (S3/Openstack Swift style)
package cloud
import (
"bytes"
"code.laria.me/petrific/config"
"code.laria.me/petrific/logging"
"code.laria.me/petrific/objects"
"code.laria.me/petrific/storage"
"errors"
"fmt"
"math/rand"
"strings"
"time"
)
type CloudStorage interface {
Get(key string) ([]byte, error)
Has(key string) (bool, error)
Put(key string, content []byte) error
Delete(key string) error
List(prefix string) ([]string, error)
Close() error
}
var (
NotFoundErr = errors.New("Object not found") // Cloud object could not be found
)
type CloudBasedObjectStorage struct {
CS CloudStorage
Prefix string
index storage.Index
}
func (cbos CloudBasedObjectStorage) objidToKey(id objects.ObjectId) string {
return cbos.Prefix + "obj/" + id.String()
}
func (cbos CloudBasedObjectStorage) readIndex(name string) (storage.Index, error) {
index := storage.NewIndex()
b, err := cbos.CS.Get(name)
if err != nil {
return index, err
}
err = index.Load(bytes.NewReader(b))
return index, err
}
func (cbos *CloudBasedObjectStorage) Init() error {
cbos.index = storage.NewIndex()
// Load and combine all indexes, keep only the one with the "largest" name (see also Close())
index_names, err := cbos.CS.List(cbos.Prefix + "index/")
if err != nil {
return err
}
max_index := ""
for _, index_name := range index_names {
index, err := cbos.readIndex(index_name)
if err != nil {
return err
}
cbos.index.Combine(index)
}
for _, index_name := range index_names {
if index_name != max_index {
if err := cbos.CS.Delete(index_name); err != nil {
return err
}
}
}
return nil
}
func (cbos CloudBasedObjectStorage) Get(id objects.ObjectId) ([]byte, error) {
return cbos.CS.Get(cbos.objidToKey(id))
}
func (cbos CloudBasedObjectStorage) Has(id objects.ObjectId) (bool, error) {
return cbos.CS.Has(cbos.objidToKey(id))
}
func (cbos CloudBasedObjectStorage) Set(id objects.ObjectId, typ objects.ObjectType, b []byte) error {
if err := cbos.CS.Put(cbos.objidToKey(id), b); err != nil {
return err
}
// can be used to repopulate the index
if err := cbos.CS.Put(cbos.Prefix+"typeof/"+id.String(), []byte(typ)); err != nil {
return err
}
cbos.index.Set(id, typ)
return nil
}
func (cbos CloudBasedObjectStorage) List(typ objects.ObjectType) ([]objects.ObjectId, error) {
return cbos.index.List(typ), nil
}
func (cbos CloudBasedObjectStorage) retryGet(key string, tries int, retryDelay time.Duration, log *logging.Log) (p []byte, err error) {
for i := 0; i < tries; i++ {
p, err = cbos.CS.Get(key)
if err == nil {
return
}
log.Info().Printf("Failed getting %s (err=%s), retrying...", key, err)
time.Sleep(retryDelay)
}
return
}
func (cbos CloudBasedObjectStorage) restoreIndex(log *logging.Log) error {
prefix := cbos.Prefix + "typeof/"
typeof_objs, err := cbos.CS.List(prefix)
if err != nil {
return err
}
for _, key := range typeof_objs {
log.Debug().Printf("processing %s", key)
id, err := objects.ParseObjectId(key[len(prefix):])
if err != nil {
log.Error().Printf("Skip %s, can't parse id: %s", key, err)
continue
}
		// At least OVH's Swift object storage apparently doesn't like being sent
// many small requests in a short amount of time. This retries getting
// an object, if the cloud storage returned an error.
p, err := cbos.retryGet(key, 3, 10*time.Second, log)
if err != nil {
return err
}
ot := objects.ObjectType(strings.TrimSpace(string(p)))
if !ot.IsKnown() {
log.Error().Printf("Skip %s, unknown object type %s", key, ot)
continue
}
cbos.index.Set(id, ot)
}
return nil
}
func (cbos CloudBasedObjectStorage) Subcmds() map[string]storage.StorageSubcmd {
return map[string]storage.StorageSubcmd{
"restore-index": func(args []string, log *logging.Log, conf config.Config) int {
if err := cbos.restoreIndex(log); err != nil {
log.Error().Print(err)
return 1
}
return 0
},
}
}
func (cbos CloudBasedObjectStorage) Close() (outerr error) {
defer func() {
err := cbos.CS.Close()
if outerr == nil {
outerr = err
}
}()
	// We need to address the problem of parallel index creation here.
// We handle this by adding a random hex number to the index name.
// When loading the index, all "index/*" objects will be read and combined
// and all but the one with the largest number will be deleted.
buf := new(bytes.Buffer)
if outerr = cbos.index.Save(buf); outerr != nil {
return outerr
}
index_name := fmt.Sprintf("%sindex/%016x", cbos.Prefix, rand.Int63())
return cbos.CS.Put(index_name, buf.Bytes())
}
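// Lifecycle sketch (hypothetical caller; `cs` is any CloudStorage implementation,
// and `id`, `someType`, `payload` are placeholders, not names from this package):
//
//	cbos := &CloudBasedObjectStorage{CS: cs, Prefix: "backup/"}
//	_ = cbos.Init()                     // merges every "backup/index/*" object
//	_ = cbos.Set(id, someType, payload) // also records "backup/typeof/<id>"
//	_ = cbos.Close()                    // persists "backup/index/<random hex>"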
type cloudObjectStorageCreator func(conf config.Config, name string) (CloudStorage, error)
func cloudStorageCreator(cloudCreator cloudObjectStorageCreator) storage.CreateStorageFromConfig | {
return func(conf config.Config, name string) (storage.Storage, error) {
var cbos CloudBasedObjectStorage
var storageconf struct {
Prefix string `toml:"prefix,omitempty"`
}
if err := conf.GetStorageConfData(name, &storageconf); err != nil {
return nil, err
}
cbos.Prefix = storageconf.Prefix
var err error
if cbos.CS, err = cloudCreator(conf, name); err != nil {
return nil, err
}
err = cbos.Init()
return cbos, err
}
} |
|
browser-context.ts | import { GAME_TYPES } from "../ab-protocol/src/lib";
import { AuthData } from "../app-context/auth-data";
import { EventQueueProcessor } from "../app-context/eventqueue-processor";
import { HandlerCollections } from "../app-context/handler-collections";
import { IContext } from "../app-context/icontext";
import { ILogger } from "../app-context/ilogger";
import { IWebSocketFactory } from "../app-context/iwebsocket-factory";
import { Settings } from "../app-context/settings";
import { State } from "../app-context/state";
import { TimerManager } from "../app-context/timer-manager";
import { Connection } from "../connectivity/connection";
import { EventQueue } from "../events/event-queue";
import { BrowserVisibilityHandler } from "../handlers/browser-visibility-handler";
import { FlagCookieHandler } from "../handlers/flag-cookie-handler";
import { IMessageHandler } from "../handlers/imessage-handler";
import { ChatRenderHandler } from "../handlers/render/chat-render-handler";
import { CtfGameOverRenderHandler } from "../handlers/render/ctf-game-over-render-handler";
import { EachSecondRenderHandler } from "../handlers/render/each-second-render-handler";
import { ExplosionVisualizationHandler } from "../handlers/render/explosion-visualization-handler";
import { GameRenderHandler } from "../handlers/render/game-render-handler";
import { GoliFartVisualizationHandler } from "../handlers/render/golifart-visualization-handler";
import { KillVisualizationHandler } from "../handlers/render/kill-visualization-handler";
import { MissileChemtrailHandler } from "../handlers/render/missile-chemtrail-handler";
import { ServerAnnouncementRenderHandler } from "../handlers/render/server-announcement-render-handler";
import { ShakeAndShowMessageOnKillHandler } from "../handlers/render/shake-and-show-message-on-kill-handler";
import { ShakeOnHitHandler } from "../handlers/render/shake-on-hit-handler";
import { BrowserInitialization } from "./browser-initialization";
import { BrowserLogger } from "./browser-logger";
import { BrowserWebSocketFactory } from "./browser-websocket-factory";
import { Renderer } from "./renderers/renderer";
import { AircraftSelection } from "./user-input/aircraft-selection";
import { ApplyUpgrades } from "./user-input/apply-upgrades";
import { ChatInput } from "./user-input/chat-input";
import { KeyboardAndMouseInput } from "./user-input/keyboard-and-mouse-input";
import { PlayerDropDownMenu } from "./user-input/player-dropdownmenu";
import { StayActiveHandler } from "../handlers/stay-active-handler";
import { DropFlag } from "./user-input/drop-flag";
import { AutoFire } from "./user-input/auto-fire";
import { AutoFireHandler } from "../handlers/auto-fire-handler";
import { Spectate } from "./user-input/spectate";
import { SwitchSides } from "./user-input/switch-sides";
import { BotState } from "../botting/bot-state";
import { BotHeartbeatHandler } from "../handlers/bot/bot-heartbeat-handler";
import { AutoPilotToFlag } from "./user-input/autopilot-to-flag";
import { AutoBoostHandler } from "../handlers/auto-boost-handler";
export class | implements IContext {
public gameType: GAME_TYPES;
public settings: Settings;
public logger: ILogger;
public eventQueue: EventQueue;
public tm: TimerManager;
public processor: EventQueueProcessor;
public state: State;
public handlers: IMessageHandler[];
public webSocketFactory: IWebSocketFactory;
public connection: Connection;
public isActive: boolean;
public auth: AuthData;
// browser-only:
public isBrowserVisible: boolean;
public renderer = new Renderer(this);
public botstate = new BotState(this);
private chatInput = new ChatInput(this);
private aircraftSelection = new AircraftSelection(this);
private upgradeSelection = new ApplyUpgrades(this);
private dropFlag = new DropFlag(this);
private autofire = new AutoFire(this);
private spectate = new Spectate(this);
private autoPilotToFlag = new AutoPilotToFlag(this);
private switchSides = new SwitchSides(this); // adds event handler to button
private playerDropdownMenu = new PlayerDropDownMenu(this, this.chatInput, this.renderer);
private browserInitialization = new BrowserInitialization(this);
private browserVisibilityHandler = new BrowserVisibilityHandler(this);
private keyboardInput = new KeyboardAndMouseInput(
this,
this.chatInput,
this.upgradeSelection,
this.dropFlag,
this.autofire,
this.spectate,
this.aircraftSelection,
this.playerDropdownMenu);
constructor() {
this.browserInitialization.detectVisibilityChange();
this.browserInitialization.throttleZoom();
this.logger = new BrowserLogger();
this.webSocketFactory = new BrowserWebSocketFactory();
this.settings = new Settings();
this.eventQueue = new EventQueue();
this.tm = new TimerManager();
this.processor = new EventQueueProcessor(this);
this.state = new State();
this.connection = new Connection(this);
this.handlers = [
...HandlerCollections.getDefaultHandlers(this),
new ExplosionVisualizationHandler(this),
new KillVisualizationHandler(this),
new GoliFartVisualizationHandler(this),
new GameRenderHandler(this),
new ChatRenderHandler(this),
new EachSecondRenderHandler(this),
new ServerAnnouncementRenderHandler(this),
new CtfGameOverRenderHandler(this),
new ShakeAndShowMessageOnKillHandler(this),
new ShakeOnHitHandler(this),
new FlagCookieHandler(this),
new MissileChemtrailHandler(this),
// client tools
new StayActiveHandler(this),
new AutoFireHandler(this),
new AutoBoostHandler(this),
// bot
new BotHeartbeatHandler(this)
];
}
public async start(): Promise<any> {
this.logger.info("Initializing app");
this.processor.startProcessingEventQueue();
await this.connection.init();
this.isActive = true;
this.logger.info("Initialization finished");
}
public setBrowserVisibility(isVisible: boolean) {
this.isBrowserVisible = isVisible;
if (!isVisible) {
this.browserVisibilityHandler.clearKeys();
}
}
}
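// Bootstrapping sketch (hypothetical entry point; assumes a DOM environment with
// the elements the input and render classes expect):
//   const ctx = new BrowserContext();
//   ctx.start().catch(err => ctx.logger.info(`startup failed: ${err}`));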
| BrowserContext |
process_dialogs.py | import string
import types
from module_info import *
from module_triggers import *
from module_dialogs import *
from process_common import *
from process_operations import *
speaker_pos = 0
ipt_token_pos = 1
sentence_conditions_pos = 2
text_pos = 3
opt_token_pos = 4
sentence_consequences_pos = 5
sentence_voice_over_pos = 6
#-------------------------------------------------------
def save_dialog_states(dialog_states):
file = open(export_dir + "dialog_states.txt","w")
for dialog_state in dialog_states:
file.write("%s\n"%dialog_state)
file.close()
#def compile_variables(cookies_list):
# for trigger in triggers:
# for consequence in trigger[trigger_consequences_pos]:
# compile_statement(consequence,cookies_list)
# for sentence in sentences:
# for consequence in sentence[sentence_consequences_pos]:
# compile_statement(consequence,cookies_list)
# for trigger in triggers:
# for condition in trigger[trigger_conditions_pos]:
# compile_statement(condition,cookies_list)
# for sentence in sentences:
# for condition in sentence[sentence_conditions_pos]:
# compile_statement(condition,cookies_list)
# return cookies_list
def save_triggers(variable_list,variable_uses,triggers,tag_uses,quick_strings):
file = open(export_dir + "triggers.txt","w")
file.write("triggersfile version 1\n")
file.write("%d\n"%len(triggers))
for i in xrange(len(triggers)):
trigger = triggers[i]
file.write("%f %f %f "%(trigger[trigger_check_pos],trigger[trigger_delay_pos],trigger[trigger_rearm_pos]))
save_statement_block(file,0,1,trigger[trigger_conditions_pos] , variable_list, variable_uses,tag_uses,quick_strings)
save_statement_block(file,0,1,trigger[trigger_consequences_pos], variable_list, variable_uses,tag_uses,quick_strings)
# for condition in trigger[trigger_conditions_pos]:
# save_operation(file,condition,variable_list)
# file.write(" %d "%(len(trigger[trigger_consequences_pos])))
# for consequence in trigger[trigger_consequences_pos]:
# save_operation(file,consequence,variable_list)
file.write("\n")
file.close()
#=================================================================
def compile_sentence_tokens(sentences):
|
def create_auto_id(sentence,auto_ids):
text = convert_to_identifier(sentence[text_pos])
done = 0
i = 20
lt = len(text)
if (i > lt):
i = lt
auto_id = "dlga_" + text[0:i]
done = 0
if auto_ids.has_key(auto_id) and (auto_ids[auto_id] == text):
done = 1
while (i <= lt) and not done:
auto_id = "dlga_" + text[0:i]
if auto_ids.has_key(auto_id):
if auto_ids[auto_id] == text:
done = 1
else:
i += 1
else:
done = 1
auto_ids[auto_id] = text
if not done:
number = 1
new_auto_id = auto_id + str(number)
while auto_ids.has_key(new_auto_id):
number += 1
new_auto_id = auto_id + str(number)
auto_id = new_auto_id
auto_ids[auto_id] = text
return auto_id
def create_auto_id2(sentence,auto_ids):
text = sentence[text_pos]
token_ipt = convert_to_identifier(sentence[ipt_token_pos])
token_opt = convert_to_identifier(sentence[opt_token_pos])
done = 0
auto_id = "dlga_" + token_ipt + ":" + token_opt
done = 0
if not auto_ids.has_key(auto_id):
done = 1
else:
if auto_ids.has_key(auto_id) and (auto_ids[auto_id] == text):
done = 1
if not done:
number = 1
new_auto_id = auto_id + "." + str(number)
while auto_ids.has_key(new_auto_id):
number += 1
new_auto_id = auto_id + "." + str(number)
auto_id = new_auto_id
auto_ids[auto_id] = text
return auto_id
def save_sentences(variable_list,variable_uses,sentences,tag_uses,quick_strings,input_states,output_states):
file = open(export_dir + "conversation.txt","w")
file.write("dialogsfile version 2\n")
file.write("%d\n"%len(sentences))
# Create an empty dictionary
auto_ids = {}
for i in xrange(len(sentences)):
sentence = sentences[i]
try:
dialog_id = create_auto_id2(sentence,auto_ids)
file.write("%s %d %d "%(dialog_id,sentence[speaker_pos],input_states[i]))
save_statement_block(file, 0, 1, sentence[sentence_conditions_pos], variable_list,variable_uses,tag_uses,quick_strings)
file.write("%s "%(string.replace(sentence[text_pos]," ","_")))
if (len(sentence[text_pos]) == 0):
file.write("NO_TEXT ")
file.write(" %d "%(output_states[i]))
save_statement_block(file, 0, 1, sentence[sentence_consequences_pos], variable_list,variable_uses,tag_uses,quick_strings)
if (len(sentence) > sentence_voice_over_pos):
file.write("%s "%sentence[sentence_voice_over_pos])
else:
file.write("NO_VOICEOVER ")
file.write("\n")
except:
print "Error in dialog line:"
print sentence
file.close()
# The registered cookies list keeps the order of cookies fixed across changes.
# To remove cookies that are no longer used, edit cookies_registery.py and remove all of its entries.
print "exporting triggers..."
variable_uses = []
variables = load_variables(export_dir,variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
#compile_variables(variables)
save_triggers(variables,variable_uses,triggers,tag_uses,quick_strings)
print "exporting dialogs..."
(input_states,output_states) = compile_sentence_tokens(dialogs)
save_sentences(variables,variable_uses,dialogs,tag_uses,quick_strings,input_states,output_states)
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir, tag_uses)
save_quick_strings(export_dir,quick_strings)
#print "finished."
| input_tokens = []
output_tokens = []
dialog_states = ["start","party_encounter","prisoner_liberated","enemy_defeated","party_relieved","event_triggered","close_window","trade","exchange_members", "trade_prisoners","buy_mercenaries","view_char","training","member_chat","prisoner_chat"]
dialog_state_usages = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
for sentence in sentences:
output_token_id = -1
output_token = sentence[opt_token_pos]
found = 0
for i_t in xrange(len(dialog_states)):
if output_token == dialog_states[i_t]:
output_token_id = i_t
found = 1
break
if not found:
dialog_states.append(output_token)
dialog_state_usages.append(0)
output_token_id = len(dialog_states) - 1
output_tokens.append(output_token_id)
for sentence in sentences:
input_token_id = -1
input_token = sentence[ipt_token_pos]
found = 0
for i_t in xrange(len(dialog_states)):
if input_token == dialog_states[i_t]:
input_token_id = i_t
dialog_state_usages[i_t] = dialog_state_usages[i_t] + 1
found = 1
break
if not found:
print sentence[ipt_token_pos]
print sentence[text_pos]
print sentence[opt_token_pos]
print "**********************************************************************************"
print "ERROR: INPUT TOKEN NOT FOUND:" + input_token
print "**********************************************************************************"
print "**********************************************************************************"
input_tokens.append(input_token_id)
save_dialog_states(dialog_states)
for i_t in xrange(len(dialog_states)):
if dialog_state_usages[i_t] == 0:
print "ERROR: Output token not found: " + dialog_states[i_t]
return (input_tokens, output_tokens) |
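Note on the auto-ID scheme above: create_auto_id2 derives an identifier from the input/output tokens and appends a numeric suffix only when the same identifier already maps to different text. A minimal Python 3 sketch of that idea (the helper name and sample tokens are illustrative, not part of the module system):

def unique_dialog_id(text, ipt_token, opt_token, seen):
    # Derive a stable id from the tokens; suffix with .N on collisions.
    auto_id = "dlga_%s:%s" % (ipt_token, opt_token)
    if seen.get(auto_id, text) == text:  # unused, or same text: reuse it
        seen[auto_id] = text
        return auto_id
    number = 1
    while auto_id + "." + str(number) in seen:
        number += 1
    auto_id = auto_id + "." + str(number)
    seen[auto_id] = text
    return auto_id

seen = {}
print(unique_dialog_id("Hello.", "start", "close_window", seen))    # dlga_start:close_window
print(unique_dialog_id("Goodbye.", "start", "close_window", seen))  # dlga_start:close_window.1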
JSONView.js | /*
* Copyright (C) 2011 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @implements {UI.Searchable}
*/
SourceFrame.JSONView = class extends UI.VBox {
/**
* @param {!SourceFrame.ParsedJSON} parsedJSON
*/
constructor(parsedJSON) {
super();
this._initialized = false;
this.registerRequiredCSS('source_frame/jsonView.css');
this._parsedJSON = parsedJSON;
this.element.classList.add('json-view');
/** @type {?UI.SearchableView} */
this._searchableView;
/** @type {!ObjectUI.ObjectPropertiesSection} */
this._treeOutline;
/** @type {number} */
this._currentSearchFocusIndex = 0;
/** @type {!Array.<!UI.TreeElement>} */
this._currentSearchTreeElements = [];
/** @type {?RegExp} */
this._searchRegex = null;
}
/**
* @param {string} content
* @return {!Promise<?UI.SearchableView>}
*/
static async createView(content) {
// We support non-strict JSON parsing by parsing an AST tree, which is why we offload it to a worker.
const parsedJSON = await SourceFrame.JSONView._parseJSON(content);
if (!parsedJSON || typeof parsedJSON.data !== 'object') {
return null;
}
const jsonView = new SourceFrame.JSONView(parsedJSON);
const searchableView = new UI.SearchableView(jsonView);
searchableView.setPlaceholder(Common.UIString('Find'));
jsonView._searchableView = searchableView;
jsonView.show(searchableView.element);
return searchableView;
}
/**
* @param {?Object} obj
* @return {!UI.SearchableView}
*/
static createViewSync(obj) {
const jsonView = new SourceFrame.JSONView(new SourceFrame.ParsedJSON(obj, '', ''));
const searchableView = new UI.SearchableView(jsonView);
searchableView.setPlaceholder(Common.UIString('Find'));
jsonView._searchableView = searchableView;
jsonView.show(searchableView.element);
jsonView.element.setAttribute('tabIndex', 0);
return searchableView;
}
/**
* @param {?string} text
* @return {!Promise<?SourceFrame.ParsedJSON>}
*/
static _parseJSON(text) {
let returnObj = null;
if (text) {
returnObj = SourceFrame.JSONView._extractJSON(/** @type {string} */ (text));
}
if (!returnObj) {
return Promise.resolve(/** @type {?SourceFrame.ParsedJSON} */ (null));
}
return Formatter.formatterWorkerPool().parseJSONRelaxed(returnObj.data).then(handleReturnedJSON);
/**
* @param {*} data
* @return {?SourceFrame.ParsedJSON}
*/
function handleReturnedJSON(data) {
if (!data) {
return null;
}
returnObj.data = data;
return returnObj;
}
}
/**
* @param {string} text
* @return {?SourceFrame.ParsedJSON}
*/
static _extractJSON(text) {
// Do not treat HTML as JSON.
if (text.startsWith('<')) {
return null;
}
let inner = SourceFrame.JSONView._findBrackets(text, '{', '}');
const inner2 = SourceFrame.JSONView._findBrackets(text, '[', ']');
inner = inner2.length > inner.length ? inner2 : inner;
// Return on blank payloads or on payloads significantly smaller than original text.
if (inner.length === -1 || text.length - inner.length > 80) {
return null;
}
const prefix = text.substring(0, inner.start);
const suffix = text.substring(inner.end + 1);
text = text.substring(inner.start, inner.end + 1);
// Only process valid JSONP.
if (suffix.trim().length && !(suffix.trim().startsWith(')') && prefix.trim().endsWith('('))) {
return null;
}
return new SourceFrame.ParsedJSON(text, prefix, suffix);
}
/**
* @param {string} text
* @param {string} open
* @param {string} close
* @return {{start: number, end: number, length: number}}
*/
static _findBrackets(text, open, close) {
const start = text.indexOf(open);
const end = text.lastIndexOf(close);
let length = end - start - 1;
if (start === -1 || end === -1 || end < start) {
length = -1;
}
return {start: start, end: end, length: length};
}
/**
* @override
*/
wasShown() {
this._initialize();
}
_initialize() {
if (this._initialized) {
return;
}
this._initialized = true;
const obj = SDK.RemoteObject.fromLocalObject(this._parsedJSON.data);
const title = this._parsedJSON.prefix + obj.description + this._parsedJSON.suffix;
this._treeOutline = new ObjectUI.ObjectPropertiesSection(
obj, title, undefined, undefined, undefined, undefined, true /* showOverflow */);
this._treeOutline.enableContextMenu();
this._treeOutline.setEditable(false);
this._treeOutline.expand();
this.element.appendChild(this._treeOutline.element);
this._treeOutline.firstChild().select(true /* omitFocus */, false /* selectedByUser */);
}
/**
* @param {number} index
*/
_jumpToMatch(index) {
if (!this._searchRegex) {
return;
}
const previousFocusElement = this._currentSearchTreeElements[this._currentSearchFocusIndex];
if (previousFocusElement) {
previousFocusElement.setSearchRegex(this._searchRegex);
}
const newFocusElement = this._currentSearchTreeElements[index];
if (newFocusElement) {
this._updateSearchIndex(index);
newFocusElement.setSearchRegex(this._searchRegex, UI.highlightedCurrentSearchResultClassName);
newFocusElement.reveal();
} else {
this._updateSearchIndex(0);
}
}
/**
* @param {number} count
*/
_updateSearchCount(count) {
if (!this._searchableView) {
return;
}
this._searchableView.updateSearchMatchesCount(count);
}
/**
* @param {number} index
*/
_updateSearchIndex(index) {
this._currentSearchFocusIndex = index;
if (!this._searchableView) {
return;
}
this._searchableView.updateCurrentMatchIndex(index);
}
/**
* @override
*/
searchCanceled() {
this._searchRegex = null;
this._currentSearchTreeElements = [];
for (let element = this._treeOutline.rootElement(); element; element = element.traverseNextTreeElement(false)) {
if (!(element instanceof ObjectUI.ObjectPropertyTreeElement)) {
continue;
}
element.revertHighlightChanges();
}
this._updateSearchCount(0);
this._updateSearchIndex(0);
}
/**
* @override
* @param {!UI.SearchableView.SearchConfig} searchConfig
* @param {boolean} shouldJump
* @param {boolean=} jumpBackwards
*/
performSearch(searchConfig, shouldJump, jumpBackwards) {
let newIndex = this._currentSearchFocusIndex;
const previousSearchFocusElement = this._currentSearchTreeElements[newIndex];
this.searchCanceled();
this._searchRegex = searchConfig.toSearchRegex(true);
for (let element = this._treeOutline.rootElement(); element; element = element.traverseNextTreeElement(false)) {
if (!(element instanceof ObjectUI.ObjectPropertyTreeElement)) {
continue;
}
const hasMatch = element.setSearchRegex(this._searchRegex);
if (hasMatch) {
this._currentSearchTreeElements.push(element);
}
if (previousSearchFocusElement === element) {
const currentIndex = this._currentSearchTreeElements.length - 1;
if (hasMatch || jumpBackwards) {
newIndex = currentIndex;
} else {
newIndex = currentIndex + 1;
}
}
}
this._updateSearchCount(this._currentSearchTreeElements.length);
if (!this._currentSearchTreeElements.length) {
this._updateSearchIndex(0);
return;
}
newIndex = mod(newIndex, this._currentSearchTreeElements.length);
this._jumpToMatch(newIndex);
}
/**
* @override
*/
jumpToNextSearchResult() {
if (!this._currentSearchTreeElements.length) {
return;
}
const newIndex = mod(this._currentSearchFocusIndex + 1, this._currentSearchTreeElements.length);
this._jumpToMatch(newIndex);
}
/**
* @override
*/
jumpToPreviousSearchResult() {
if (!this._currentSearchTreeElements.length) {
return;
}
const newIndex = mod(this._currentSearchFocusIndex - 1, this._currentSearchTreeElements.length);
this._jumpToMatch(newIndex);
}
/**
* @override
* @return {boolean}
*/
supportsCaseSensitiveSearch() {
return true;
}
/**
* @override
* @return {boolean}
*/
supportsRegexSearch() { |
/**
* @unrestricted
*/
SourceFrame.ParsedJSON = class {
/**
* @param {*} data
* @param {string} prefix
* @param {string} suffix
*/
constructor(data, prefix, suffix) {
this.data = data;
this.prefix = prefix;
this.suffix = suffix;
}
}; | return true;
}
}; |
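Aside: _extractJSON and _findBrackets above implement a small heuristic — take the widest {...} or [...] span, reject payloads much smaller than the input, and accept a leftover prefix/suffix only if they look like a JSONP wrapper such as callback(...). A rough Python equivalent of the same heuristic, for illustration only (the function name is mine):

def extract_json(text):
    # Return (prefix, payload, suffix) or None, mirroring the heuristic above.
    if text.startswith('<'):  # do not treat HTML as JSON
        return None
    spans = []
    for open_ch, close_ch in (('{', '}'), ('[', ']')):
        start, end = text.find(open_ch), text.rfind(close_ch)
        if start != -1 and end > start:
            spans.append((start, end))
    if not spans:
        return None
    start, end = max(spans, key=lambda s: s[1] - s[0])  # widest span wins
    if len(text) - (end - start - 1) > 80:  # payload much smaller than input
        return None
    prefix, suffix = text[:start], text[end + 1:]
    payload = text[start:end + 1]
    # Only accept a JSONP-style wrapper around the payload.
    if suffix.strip() and not (suffix.strip().startswith(')') and prefix.strip().endswith('(')):
        return None
    return prefix, payload, suffix

print(extract_json('cb({"a": 1});'))  # ('cb(', '{"a": 1}', ');')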
GroundSystem.py | #
# GSC-18128-1, "Core Flight Executive Version 6.7"
#
# Copyright (c) 2006-2019 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cFS Ground System Version 2.0.0
#
#!/usr/bin/env python3
#
import shlex
import subprocess
import sys
from pathlib import Path
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from RoutingService import RoutingService
from Ui_MainWindow import Ui_MainWindow
from _version import __version__ as _version
from _version import _version_string
__version__ = _version
ROOTDIR = Path(sys.argv[0]).resolve().parent
#
# CFS Ground System: Setup and manage the main window
#
class GroundSystem(QMainWindow, Ui_MainWindow):
HDR_VER_1_OFFSET = 0
HDR_VER_2_OFFSET = 4
#
# Init the class
#
def __init__(self):
super().__init__()
self.setupUi(self)
self.RoutingService = None
self.alert = QMessageBox()
self.pushButtonStartTlm.clicked.connect(self.startTlmSystem)
self.pushButtonStartCmd.clicked.connect(self.startCmdSystem)
self.cbTlmHeaderVer.currentIndexChanged.connect(self.setTlmOffset)
self.cbCmdHeaderVer.currentIndexChanged.connect(self.setCmdOffsets)
for sb in (self.sbTlmOffset, self.sbCmdOffsetPri, self.sbCmdOffsetSec):
sb.valueChanged.connect(self.saveOffsets)
# Init lists
self.ipAddressesList = ['All']
self.spacecraftNames = ['All']
def closeEvent(self, evnt):
if self.RoutingService:
self.RoutingService.stop()
print("Stopped routing service")
super().closeEvent(evnt)
# Read the selected spacecraft from combo box on GUI
def getSelectedSpacecraftAddress(self):
return self.comboBoxIpAddresses.currentText().strip()
# Returns the name of the selected spacecraft
def | (self):
return self.spacecraftNames[self.ipAddressesList.index(
self.getSelectedSpacecraftAddress())].strip()
#
# Display popup with error
#
def DisplayErrorMessage(self, message):
print(message)
self.alert.setText(message)
self.alert.setIcon(QMessageBox.Warning)
self.alert.exec_()
# Start the telemetry system for the selected spacecraft
def startTlmSystem(self):
# Setup the subscription (to let the telemetry
# system know the messages it will be receiving)
subscription = '--sub=GroundSystem'
selectedSpacecraft = self.getSelectedSpacecraftName()
if selectedSpacecraft != 'All':
subscription += f'.{selectedSpacecraft}.TelemetryPackets'
# Open Telemetry System
system_call = f'python3 {ROOTDIR}/Subsystems/tlmGUI/TelemetrySystem.py {subscription}'
args = shlex.split(system_call)
subprocess.Popen(args)
# Start command system
@staticmethod
def startCmdSystem():
subprocess.Popen(
['python3', f'{ROOTDIR}/Subsystems/cmdGui/CommandSystem.py'])
# Start FDL-FUL gui system
def startFDLSystem(self):
selectedSpacecraft = self.getSelectedSpacecraftName()
if selectedSpacecraft == 'All':
self.DisplayErrorMessage(
'Cannot open FDL manager.\nNo spacecraft selected.')
else:
subscription = f'--sub=GroundSystem.{selectedSpacecraft}'
subprocess.Popen([
'python3', f'{ROOTDIR}/Subsystems/fdlGui/FdlSystem.py',
subscription
])
def setTlmOffset(self):
selectedVer = self.cbTlmHeaderVer.currentText().strip()
if selectedVer == "Custom":
self.sbTlmOffset.setEnabled(True)
else:
self.sbTlmOffset.setEnabled(False)
if selectedVer == "1":
self.sbTlmOffset.setValue(self.HDR_VER_1_OFFSET)
elif selectedVer == "2":
self.sbTlmOffset.setValue(self.HDR_VER_2_OFFSET)
def setCmdOffsets(self):
selectedVer = self.cbCmdHeaderVer.currentText().strip()
if selectedVer == "Custom":
self.sbCmdOffsetPri.setEnabled(True)
self.sbCmdOffsetSec.setEnabled(True)
else:
self.sbCmdOffsetPri.setEnabled(False)
self.sbCmdOffsetSec.setEnabled(False)
if selectedVer == "1":
self.sbCmdOffsetPri.setValue(self.HDR_VER_1_OFFSET)
elif selectedVer == "2":
self.sbCmdOffsetPri.setValue(self.HDR_VER_2_OFFSET)
self.sbCmdOffsetSec.setValue(self.HDR_VER_1_OFFSET)
def saveOffsets(self):
offsets = bytes((self.sbTlmOffset.value(), self.sbCmdOffsetPri.value(),
self.sbCmdOffsetSec.value()))
with open("/tmp/OffsetData", "wb") as f:
f.write(offsets)
# Update the combo box list in gui
def updateIpList(self, ip, name):
self.ipAddressesList.append(ip)
self.spacecraftNames.append(name)
self.comboBoxIpAddresses.addItem(ip)
# Start the routing service (see RoutingService.py)
def initRoutingService(self):
self.RoutingService = RoutingService()
self.RoutingService.signalUpdateIpList.connect(self.updateIpList)
self.RoutingService.start()
#
# Main
#
if __name__ == "__main__":
# Report Version Number upon startup
print(_version_string)
# Init app
app = QApplication(sys.argv)
# Init main window
MainWindow = GroundSystem()
# Show and put window on front
MainWindow.show()
MainWindow.raise_()
# Start the Routing Service
MainWindow.initRoutingService()
MainWindow.saveOffsets()
# Execute the app
sys.exit(app.exec_())
| getSelectedSpacecraftName |
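saveOffsets above persists the three header offsets as three raw bytes in /tmp/OffsetData, so a consumer only has to read them back in the same order. A hypothetical reader, sketched here for clarity; it is not part of the cFS ground system itself:

def read_offsets(path="/tmp/OffsetData"):
    # Read (tlm_offset, cmd_offset_pri, cmd_offset_sec) as written by saveOffsets().
    with open(path, "rb") as f:
        data = f.read(3)
    if len(data) != 3:
        raise ValueError("expected 3 offset bytes, got %d" % len(data))
    tlm_offset, cmd_offset_pri, cmd_offset_sec = data  # bytes unpack to ints
    return tlm_offset, cmd_offset_pri, cmd_offset_sec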
listProductDetails.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200601preview
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func ListProductDetails(ctx *pulumi.Context, args *ListProductDetailsArgs, opts ...pulumi.InvokeOption) (*ListProductDetailsResult, error) |
type ListProductDetailsArgs struct {
ProductName string `pulumi:"productName"`
RegistrationName string `pulumi:"registrationName"`
ResourceGroup string `pulumi:"resourceGroup"`
}
// Extended description about the product required for installing it into Azure Stack.
type ListProductDetailsResult struct {
ComputeRole string `pulumi:"computeRole"`
DataDiskImages []DataDiskImageResponse `pulumi:"dataDiskImages"`
GalleryPackageBlobSasUri string `pulumi:"galleryPackageBlobSasUri"`
IsSystemExtension bool `pulumi:"isSystemExtension"`
OsDiskImage OsDiskImageResponse `pulumi:"osDiskImage"`
ProductKind string `pulumi:"productKind"`
SupportMultipleExtensions bool `pulumi:"supportMultipleExtensions"`
Uri string `pulumi:"uri"`
Version string `pulumi:"version"`
VmOsType string `pulumi:"vmOsType"`
VmScaleSetEnabled bool `pulumi:"vmScaleSetEnabled"`
}
| {
var rv ListProductDetailsResult
err := ctx.Invoke("azure-native:azurestack/v20200601preview:listProductDetails", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
} |
camera.rs | use crate::math::matrix4::Matrix4;
pub struct PerspectiveCamera {
aspect: f32,
far: f32,
fovy: f32,
near: f32,
projection_matrix: [f32; 16],
projection_matrix_inverse: [f32; 16],
}
impl PerspectiveCamera {
pub fn new(fovy: f32, aspect: f32, near: f32, far: f32) -> Self {
let mut camera = PerspectiveCamera {
aspect: aspect,
far: far,
fovy: fovy,
near: near,
projection_matrix: Matrix4::create(),
projection_matrix_inverse: Matrix4::create(),
};
camera.update_projection_matrix();
camera
}
pub fn set_aspect(&mut self, aspect: f32) -> &mut Self {
self.aspect = aspect;
self.update_projection_matrix();
self
}
pub fn update_projection_matrix(&mut self) {
Matrix4::make_perspective(
&mut self.projection_matrix, | self.far,
);
Matrix4::invert(
Matrix4::copy(&mut self.projection_matrix_inverse, &self.projection_matrix)
);
}
pub fn borrow_projection_matrix(&self) -> &[f32; 16] {
&self.projection_matrix
}
pub fn borrow_projection_matrix_inverse(&self) -> &[f32; 16] {
&self.projection_matrix_inverse
}
} | self.fovy,
self.aspect,
self.near, |
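Matrix4::make_perspective is not shown in this file; the sketch below is the standard OpenGL-style, column-major perspective matrix built from fovy/aspect/near/far, which may differ in convention from this crate's actual implementation:

import math

def make_perspective(fovy, aspect, near, far):
    # Column-major 4x4 perspective projection, flat like the [f32; 16] above
    # (OpenGL clip-space convention, fovy in radians).
    f = 1.0 / math.tan(fovy / 2.0)
    nf = 1.0 / (near - far)
    return [
        f / aspect, 0.0, 0.0,                    0.0,
        0.0,        f,   0.0,                    0.0,
        0.0,        0.0, (far + near) * nf,     -1.0,
        0.0,        0.0, 2.0 * far * near * nf,  0.0,
    ]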
__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| __version__ = '0.8.0'
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.tasks # noqa | __all__ = ['pdb']
|
config.py | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.config.base import BaseConfigurationData
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.logger.config import LoggerStorageConfiguration
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
from programy.storage.stores.nosql.redis.config import RedisStorageConfiguration
from programy.storage.factory import StorageFactory
from programy.utils.substitutions.substitues import Substitutions
class StorageConfiguration(BaseConfigurationData):
def __init__(self):
BaseConfigurationData.__init__(self, name="storage")
self._entity_store = {}
self._store_configs = {}
@property
def entity_store(self):
return self._entity_store
@property
def storage_configurations(self):
return self._store_configs
def check_for_license_keys(self, license_keys):
BaseConfigurationData.check_for_license_keys(self, license_keys)
def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
storage = configuration_file.get_section(self._section_name, configuration)
if storage is not None:
entities = configuration_file.get_section("entities", storage)
entity_types = configuration_file.get_child_section_keys("entities", storage)
for entity in entity_types:
entity_config = configuration_file.get_section(entity, entities)
self._entity_store[entity] = entity_config
stores = configuration_file.get_section("stores", storage)
store_names = configuration_file.get_child_section_keys("stores", storage)
for store in store_names:
store_config = configuration_file.get_section(store, stores)
keys = configuration_file.get_keys(store_config)
if 'type' not in keys:
YLogger.error(None, "'type' section missing from client config stores element [%s], ignoring config", store)
continue
if 'config' not in keys:
YLogger.error(None, "'config' section missing from client config stores element [%s], ignoring config", store)
continue
type = configuration_file.get_option(store_config, 'type', subs=subs)
if type == 'sql':
config = SQLStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
elif type == 'mongo':
config = MongoStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
elif type == 'redis':
config = RedisStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
elif type == 'file':
config = FileStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
elif type == 'logger':
config = LoggerStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
self._store_configs[store] = config
else:
YLogger.warning(self, "'storage' section missing from client config, using to defaults")
self._entity_store = {} | self.add_default_stores(self._store_configs)
def create_storage_config(self):
config = {}
config['entities'] = {}
self.add_default_entities(config['entities'])
config['stores'] = {}
self.add_default_stores(config['stores'])
def to_yaml(self, data, defaults=True):
data['entities'] = {}
data['stores'] = {}
if defaults is True:
self.add_default_entities(data['entities'])
self.add_default_stores(data['stores'])
else:
data['entities'] = {}
for key, value in self._entity_store.items():
data['entities'][key] = value
for name, value in self._store_configs.items():
data['stores'][name] = {}
value.to_yaml(data['stores'][name], defaults)
@staticmethod
def add_default_stores(amap):
sql = SQLStorageConfiguration()
amap['sqlite'] = {'type': 'sql',
'config': sql.create_sqlstorage_config()}
mongo = MongoStorageConfiguration()
amap['mongo'] = {'type': 'mongo',
'config': mongo.create_mongostorage_config()}
redis = RedisStorageConfiguration()
amap['redis'] = {'type': 'redis',
'config': redis.create_redisstorage_config()}
file = FileStorageConfiguration()
amap['file'] = {'type': 'file',
'config': file.create_filestorage_config()}
logger = LoggerStorageConfiguration()
amap['logger'] = {'type': 'logger',
'config': logger.create_loggerstorage_config()}
@staticmethod
def add_default_entities(amap):
amap[StorageFactory.USERS] = 'sqlite'
amap[StorageFactory.LINKED_ACCOUNTS] = 'sqlite'
amap[StorageFactory.LINKS] = 'sqlite'
amap[StorageFactory.CATEGORIES] = 'file'
amap[StorageFactory.ERRORS] = 'file'
amap[StorageFactory.DUPLICATES] = 'file'
amap[StorageFactory.LEARNF] = 'file'
amap[StorageFactory.CONVERSATIONS] = 'file'
amap[StorageFactory.MAPS] = 'file'
amap[StorageFactory.SETS] = 'file'
amap[StorageFactory.RDF] = 'file'
amap[StorageFactory.DENORMAL] = 'file'
amap[StorageFactory.NORMAL] = 'file'
amap[StorageFactory.GENDER] = 'file'
amap[StorageFactory.PERSON] = 'file'
amap[StorageFactory.PERSON2] = 'file'
amap[StorageFactory.REGEX_TEMPLATES] = 'file'
amap[StorageFactory.PROPERTIES] = 'file'
amap[StorageFactory.DEFAULTS] = 'file'
amap[StorageFactory.VARIABLES] = 'file'
amap[StorageFactory.TWITTER] = 'file'
amap[StorageFactory.SPELLING_CORPUS] = 'file'
amap[StorageFactory.LICENSE_KEYS] = 'file'
amap[StorageFactory.PATTERN_NODES] = 'file'
amap[StorageFactory.TEMPLATE_NODES] = 'file'
amap[StorageFactory.BINARIES] = 'file'
amap[StorageFactory.BRAINTREE] = 'file'
amap[StorageFactory.PREPROCESSORS] = 'file'
amap[StorageFactory.POSTPROCESSORS] = 'file'
amap[StorageFactory.USERGROUPS] = 'file' | self.add_default_entities(self._entity_store)
self._store_configs = {} |
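For reference, load_config_section above expects a storage section holding an entities map (entity name -> store name) and a stores map where each store carries a type and a config. A sketch of that shape as a Python dict (store names match add_default_stores, but the keys inside each config are placeholders; real keys depend on the store's configuration class):

storage_section = {
    "entities": {
        "users": "sqlite",      # entity name -> store name
        "categories": "file",
    },
    "stores": {
        "sqlite": {             # each store needs 'type' and 'config'
            "type": "sql",
            "config": {},       # placeholder; see SQLStorageConfiguration
        },
        "file": {
            "type": "file",
            "config": {},       # placeholder; see FileStorageConfiguration
        },
    },
}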
constant.py | import os
CELERY_BROKER_URL_DOCKER = "amqp://admin:mypass@rabbit:5672/"
CELERY_BROKER_URL_LOCAL = "amqp://localhost/"
CM_REGISTER_Q = "rpc_queue_CM_register" # Do no change this value
CM_NAME = "CM - Heat sources potential" | RPC_CM_ALIVE = "rpc_queue_CM_ALIVE" # Do no change this value
RPC_Q = "rpc_queue_CM_compute" # Do no change this value
CM_ID = 11 # CM_ID is defined by the enegy research center of Martigny (CREM)
PORT_LOCAL = int("500" + str(CM_ID))
PORT_DOCKER = 80
# TODO ********************setup this URL depending on which version you are running***************************
CELERY_BROKER_URL = CELERY_BROKER_URL_DOCKER
PORT = PORT_DOCKER
# TODO ********************setup this URL depending on which version you are running***************************
TRANFER_PROTOCOLE = "http://"
INPUTS_CALCULATION_MODULE = [
{
"input_name": "Maximum distance to consider the heat source within the urban areas",
"input_type": "input",
"input_parameter_name": "within_dist",
"input_value": "150",
"input_priority": 0,
"input_unit": "m",
"input_min": 50,
"input_max": 2000,
"cm_id": CM_ID, # Do no change this value
},
{
"input_name": "Maximum distance to consider the heat source near the urban areas, all the areas above this threshold will be classified as far from the urban areas",
"input_type": "input",
"input_parameter_name": "near_dist",
"input_value": "1000",
"input_priority": 0,
"input_unit": "m",
"input_min": 200,
"input_max": 10000,
"cm_id": CM_ID, # Do no change this value
},
]
WIKIURL = os.environ.get("WIKIURL", "https://wiki.hotmaps.eu/en/")
SIGNATURE = {
"category": "Supply",
"authorized_scale": ["NUTS 3", "NUTS 2", "NUTS 0", "LAU 2"],
"cm_name": CM_NAME,
"layers_needed": [
# "urban areas" or "corine land cover",
],
"type_layer_needed": [],
"vectors_needed": [],
# vector layers should be added here
"type_vectors_needed": [
{"type": "wwtp_capacity", "description": "Heatsource capacity"},
{"type": "wwtp_power", "description": "Heatsource power"},
],
"cm_url": "Do not add something",
"cm_description": "This computation module calculates the potential of waste water treatment plants that can be utilized in the selected area",
"cm_id": CM_ID,
"wiki_url": WIKIURL + "CM-Heat-source-potential",
"inputs_calculation_module": INPUTS_CALCULATION_MODULE,
} | |
update_test.go | package todos
import (
"context"
"testing"
"github.com/Fs02/go-todo-backend/scores/scorestest"
"github.com/go-rel/rel"
"github.com/go-rel/reltest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
func TestUpdate(t *testing.T) {
var (
ctx = context.TODO()
repository = reltest.New()
scores = &scorestest.Service{}
service = New(repository, scores)
todo = Todo{ID: 1, Title: "Sleep"}
changes = rel.NewChangeset(&todo)
)
todo.Title = "Wake up"
repository.ExpectUpdate(changes).ForType("todos.Todo")
assert.Nil(t, service.Update(ctx, &todo, changes))
assert.NotEmpty(t, todo.ID)
repository.AssertExpectations(t)
scores.AssertExpectations(t)
}
func TestUpdate_completed(t *testing.T) {
var (
ctx = context.TODO()
repository = reltest.New()
scores = &scorestest.Service{}
service = New(repository, scores)
todo = Todo{ID: 1, Title: "Sleep"}
changes = rel.NewChangeset(&todo)
)
todo.Completed = true
repository.ExpectTransaction(func(repository *reltest.Repository) {
scores.On("Earn", mock.Anything, "todo completed", 1).Return(nil)
repository.ExpectUpdate(changes).ForType("todos.Todo")
})
assert.Nil(t, service.Update(ctx, &todo, changes))
assert.NotEmpty(t, todo.ID)
repository.AssertExpectations(t)
scores.AssertExpectations(t)
}
func | (t *testing.T) {
var (
ctx = context.TODO()
repository = reltest.New()
scores = &scorestest.Service{}
service = New(repository, scores)
todo = Todo{ID: 1, Title: "Sleep", Completed: true}
changes = rel.NewChangeset(&todo)
)
todo.Completed = false
repository.ExpectTransaction(func(repository *reltest.Repository) {
scores.On("Earn", mock.Anything, "todo uncompleted", -2).Return(nil)
repository.ExpectUpdate(changes).ForType("todos.Todo")
})
assert.Nil(t, service.Update(ctx, &todo, changes))
assert.NotEmpty(t, todo.ID)
repository.AssertExpectations(t)
scores.AssertExpectations(t)
}
func TestUpdate_validateError(t *testing.T) {
var (
ctx = context.TODO()
repository = reltest.New()
scores = &scorestest.Service{}
service = New(repository, scores)
todo = Todo{ID: 1, Title: "Sleep"}
changes = rel.NewChangeset(&todo)
)
todo.Title = ""
assert.Equal(t, ErrTodoTitleBlank, service.Update(ctx, &todo, changes))
repository.AssertExpectations(t)
scores.AssertExpectations(t)
}
| TestUpdate_uncompleted |
merge.go | package dict
// Merge
func Merge(list ...map[string]interface{}) map[string]interface{} {
if list == nil {
| := make(map[string]interface{})
for _, m := range list {
if m != nil {
for k, v := range m {
r[k] = v
}
}
}
return r
}
| return nil
}
r |
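Merge gives later maps precedence on key conflicts, skips nil entries, and returns nil only for a nil variadic list. The same semantics in a short Python sketch, with None standing in for nil:

def merge(*maps):
    # Later maps win on key conflicts; falsy (None/empty) entries are skipped.
    if not maps:
        return None
    result = {}
    for m in maps:
        if m:
            result.update(m)
    return result

print(merge({"a": 1}, {"a": 2, "b": 3}))  # {'a': 2, 'b': 3}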
schedule_test.go | package schedulewatcher
import (
"encoding/json"
"testing"
"time"
"github.com/choria-io/go-choria/aagent/model"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func Test(t *testing.T) |
var _ = Describe("ScheduleWatcher", func() {
var (
mockctl *gomock.Controller
mockMachine *model.MockMachine
watch *Watcher
now time.Time
)
BeforeEach(func() {
mockctl = gomock.NewController(GinkgoT())
mockMachine = model.NewMockMachine(mockctl)
now = time.Unix(1606924953, 0)
mockMachine.EXPECT().Name().Return("schedule").AnyTimes()
mockMachine.EXPECT().Identity().Return("ginkgo").AnyTimes()
mockMachine.EXPECT().InstanceID().Return("1234567890").AnyTimes()
mockMachine.EXPECT().Version().Return("1.0.0").AnyTimes()
mockMachine.EXPECT().TimeStampSeconds().Return(now.Unix()).AnyTimes()
wi, err := New(mockMachine, "ginkgo", []string{"always"}, "fail", "success", "2m", time.Second, map[string]interface{}{
"schedules": []string{"1 * * * *"},
})
Expect(err).ToNot(HaveOccurred())
watch = wi.(*Watcher)
watch.properties = nil
watch.items = []*scheduleItem{}
watch.state = On
})
AfterEach(func() {
mockctl.Finish()
})
Describe("setProperties", func() {
It("Should parse valid properties", func() {
err := watch.setProperties(map[string]interface{}{
"duration": "1h",
"schedules": []string{"* * * * *", "1 * * * *"},
})
Expect(err).ToNot(HaveOccurred())
Expect(watch.properties.Duration).To(Equal(time.Hour))
Expect(watch.properties.Schedules).To(HaveLen(2))
Expect(watch.items).To(HaveLen(2))
Expect(watch.items[0].spec).To(Equal("* * * * *"))
Expect(watch.items[1].spec).To(Equal("1 * * * *"))
})
It("Should handle errors", func() {
err := watch.setProperties(map[string]interface{}{})
Expect(err).To(MatchError("no schedules defined"))
watch.properties = nil
err = watch.setProperties(map[string]interface{}{
"schedules": []string{"* * * * *", "1 * * * *"},
})
Expect(err).ToNot(HaveOccurred())
Expect(watch.properties.Duration).To(Equal(time.Minute))
Expect(watch.items).To(HaveLen(2))
Expect(watch.properties.Schedules).To(HaveLen(2))
})
It("Should handle startup splays", func() {
err := watch.setProperties(map[string]interface{}{
"start_splay": "1m",
"duration": "1m",
"schedules": []string{"* * * * *", "1 * * * *"},
})
Expect(err).To(MatchError("start splay 1m0s is bigger than half the duration 1m0s"))
err = watch.setProperties(map[string]interface{}{
"start_splay": "10s",
"duration": "1m",
"schedules": []string{"* * * * *", "1 * * * *"},
})
Expect(err).ToNot(HaveOccurred())
})
})
Describe("CurrentState", func() {
It("Should be a valid state", func() {
cs := watch.CurrentState()
csj, err := cs.(*StateNotification).JSON()
Expect(err).ToNot(HaveOccurred())
event := map[string]interface{}{}
err = json.Unmarshal(csj, &event)
Expect(err).ToNot(HaveOccurred())
delete(event, "id")
Expect(event).To(Equal(map[string]interface{}{
"time": "2020-12-02T16:02:33Z",
"type": "io.choria.machine.watcher.schedule.v1.state",
"subject": "ginkgo",
"specversion": "1.0",
"source": "io.choria.machine",
"datacontenttype": "application/json",
"data": map[string]interface{}{
"id": "1234567890",
"identity": "ginkgo",
"machine": "schedule",
"name": "ginkgo",
"protocol": "io.choria.machine.watcher.schedule.v1.state",
"type": "schedule",
"version": "1.0.0",
"timestamp": float64(now.Unix()),
"state": "on",
},
}))
})
})
})
| {
RegisterFailHandler(Fail)
RunSpecs(t, "AAgent/Watchers/ScheduleWatcher")
} |
impossible.rs | // Copyright 2017 Serde Developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module contains the `Impossible` serializer and its implementations.
use lib::*;
use ser::{self, Serialize, SerializeSeq, SerializeTuple, SerializeTupleStruct,
SerializeTupleVariant, SerializeMap, SerializeStruct, SerializeStructVariant};
/// Helper type for implementing a `Serializer` that does not support
/// serializing one of the compound types.
///
/// This type cannot be instantiated, but implements every one of the traits
/// corresponding to the [`Serializer`] compound types: [`SerializeSeq`],
/// [`SerializeTuple`], [`SerializeTupleStruct`], [`SerializeTupleVariant`],
/// [`SerializeMap`], [`SerializeStruct`], and [`SerializeStructVariant`].
///
/// ```rust
/// # #[macro_use]
/// # extern crate serde;
/// #
/// # use serde::ser::{Serializer, Impossible};
/// # use serde::private::ser::Error;
/// #
/// # struct MySerializer;
/// #
/// impl Serializer for MySerializer {
/// type Ok = ();
/// type Error = Error;
///
/// type SerializeSeq = Impossible<(), Error>;
/// /* other associated types */
///
/// /// This data format does not support serializing sequences.
/// fn serialize_seq(self,
/// len: Option<usize>)
/// -> Result<Self::SerializeSeq, Error> {
/// // Given Impossible cannot be instantiated, the only
/// // thing we can do here is to return an error.
/// # stringify! {
/// Err(...)
/// # };
/// # unimplemented!()
/// }
///
/// /* other Serializer methods */
/// # __serialize_unimplemented! {
/// # bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str bytes none some
/// # unit unit_struct unit_variant newtype_struct newtype_variant
/// # tuple tuple_struct tuple_variant map struct struct_variant
/// # }
/// }
/// #
/// # fn main() {}
/// ```
///
/// [`Serializer`]: trait.Serializer.html
/// [`SerializeSeq`]: trait.SerializeSeq.html
/// [`SerializeTuple`]: trait.SerializeTuple.html
/// [`SerializeTupleStruct`]: trait.SerializeTupleStruct.html
/// [`SerializeTupleVariant`]: trait.SerializeTupleVariant.html
/// [`SerializeMap`]: trait.SerializeMap.html
/// [`SerializeStruct`]: trait.SerializeStruct.html
/// [`SerializeStructVariant`]: trait.SerializeStructVariant.html
pub struct Impossible<Ok, Error> {
void: Void,
ok: PhantomData<Ok>,
error: PhantomData<Error>,
}
enum Void {}
impl<Ok, Error> SerializeSeq for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = value;
match self.void {}
}
fn end(self) -> Result<Ok, Error> {
match self.void {}
}
}
impl<Ok, Error> SerializeTuple for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = value;
match self.void {}
}
fn end(self) -> Result<Ok, Error> {
match self.void {}
}
}
impl<Ok, Error> SerializeTupleStruct for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = value;
match self.void {}
}
fn end(self) -> Result<Ok, Error> {
match self.void {}
}
}
impl<Ok, Error> SerializeTupleVariant for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = value;
match self.void {}
}
fn end(self) -> Result<Ok, Error> {
match self.void {}
}
}
impl<Ok, Error> SerializeMap for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = key;
match self.void {}
}
fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = value;
match self.void {}
}
fn end(self) -> Result<Ok, Error> {
match self.void {}
}
}
impl<Ok, Error> SerializeStruct for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = key;
let _ = value;
match self.void {}
}
fn end(self) -> Result<Ok, Error> {
match self.void {}
}
}
impl<Ok, Error> SerializeStructVariant for Impossible<Ok, Error>
where
Error: ser::Error,
{
type Ok = Ok;
type Error = Error;
fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Error>
where
T: Serialize,
{
let _ = key;
let _ = value;
match self.void {}
}
| }
} | fn end(self) -> Result<Ok, Error> {
match self.void {} |
webpack.config.js | var path=require('path')
// To configure plugins, add a plugins node to the exported object
var htmlWebpackPlugin=require('html-webpack-plugin')
// Export the configuration object
module.exports={
// Entry file
entry : path.join(__dirname,'./src/main.js'),
// Output file
output : {
path : path.join(__dirname,'./dist'), // Path of the output file
filename : 'bundle.js' // Name of the output file
},
// The plugins node
plugins : [
new htmlWebpackPlugin({
template : path.join(__dirname,'./src/index.html'), // Path to the template
filename : 'index.html' // File name of the page generated in memory
})
],
// Configure the third-party loader modules
module : {
// Matching rules for third-party modules
rules : [
// Loader for CSS files
{test : /\.(css|scss)$/,use : ['style-loader','css-loader','sass-loader']}, | {test : /\.(jpg|png|jpeg|gif|bmp)$/,use : 'url-loader?limit=349950&name=[hash:8]-[name].[ext]'},
// Loader for font icon files
{test : /\.(ttf|eot|svg|woff|woff2)$/,use : 'url-loader'},
// Transpile ES6 syntax into JavaScript the browser can understand
{test : /\.js$/,use : 'babel-loader',exclude : /node_modules/},
// Loader for .vue files
{test : /\.vue$/,use : 'vue-loader'}
]
}
} | // Loader for image files; the limit here caps the image size (in bytes)
generate_go_ethereum_fixture.py | import contextlib
import json
import os
import pprint
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from cytoolz import (
merge,
valmap,
)
from eth_utils.curried import (
apply_formatter_if,
is_bytes,
is_checksum_address,
is_dict,
is_same_address,
remove_0x_prefix,
to_hex,
to_text,
to_wei,
)
from webu import Webu
from webu.utils.module_testing.emitter_contract import (
EMITTER_ABI,
EMITTER_BYTECODE,
EMITTER_ENUM,
)
from webu.utils.module_testing.math_contract import (
MATH_ABI,
MATH_BYTECODE,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'webupy-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"extraData": "0x7765623370792d746573742d636861696e",
"gasLimit": "0x47d5cc",
"difficulty": "0x01",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {
remove_0x_prefix(COINBASE): {
'balance': str(to_wei(1000000000, 'huc')),
},
remove_0x_prefix(RAW_TXN_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
remove_0x_prefix(UNLOCKABLE_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
},
"config": {
"chainId": 131277322940537, # the string 'webupy' as an integer
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0
},
}
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_open_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
sock.close()
return str(port)
def get_ghuc_binary():
from ghuc.install import (
get_executable_path,
install_ghuc,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
ghuc_version = os.environ['GETH_VERSION']
_ghuc_binary = get_executable_path(ghuc_version)
if not os.path.exists(_ghuc_binary):
install_ghuc(ghuc_version)
assert os.path.exists(_ghuc_binary)
return _ghuc_binary
else:
return 'ghuc'
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def | (proc):
try:
yield proc
finally:
kill_proc_gracefully(proc)
@contextlib.contextmanager
def get_ghuc_process(ghuc_binary,
datadir,
genesis_file_path,
ghuc_ipc_path,
ghuc_port):
init_datadir_command = (
ghuc_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_ghuc_command = (
ghuc_binary,
'--datadir', datadir,
'--ipcpath', ghuc_ipc_path,
'--ethash.dagsondisk', '1',
'--gcmode', 'archive',
'--nodiscover',
'--port', ghuc_port,
'--coinbase', COINBASE[2:],
)
popen_proc = subprocess.Popen(
run_ghuc_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
with popen_proc as proc:
with graceful_kill_on_exit(proc) as graceful_proc:
yield graceful_proc
output, errors = proc.communicate()
print(
"Ghuc Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def write_config_json(config, datadir):
bytes_to_hex = apply_formatter_if(is_bytes, to_hex)
config_json_dict = valmap(bytes_to_hex, config)
config_path = os.path.join(datadir, 'config.json')
with open(config_path, 'w') as config_file:
config_file.write(json.dumps(config_json_dict))
config_file.write('\n')
def generate_go_happyuc_fixture(destination_dir):
with contextlib.ExitStack() as stack:
datadir = stack.enter_context(tempdir())
keystore_dir = os.path.join(datadir, 'keystore')
ensure_path_exists(keystore_dir)
keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME)
with open(keyfile_path, 'w') as keyfile:
keyfile.write(KEYFILE_DATA)
genesis_file_path = os.path.join(datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(GENESIS_DATA))
ghuc_ipc_path_dir = stack.enter_context(tempdir())
ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')
ghuc_port = get_open_port()
ghuc_binary = get_ghuc_binary()
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
chain_data = setup_chain_state(webu)
# close ghuc by exiting context
# must be closed before copying data dir
verify_chain_state(webu, chain_data)
# verify that chain state is still valid after closing
# and re-opening ghuc
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
verify_chain_state(webu, chain_data)
static_data = {
'raw_txn_account': RAW_TXN_ACCOUNT,
'keyfile_pw': KEYFILE_PW,
}
config = merge(chain_data, static_data)
pprint.pprint(config)
write_config_json(config, datadir)
shutil.copytree(datadir, destination_dir)
def verify_chain_state(webu, chain_data):
receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash'])
latest = webu.eth.getBlock('latest')
assert receipt.blockNumber <= latest.number
def mine_transaction_hash(webu, txn_hash):
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
receipt = webu.eth.getTransactionReceipt(txn_hash)
if receipt is not None:
webu.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError("Math contract deploy transaction not mined during wait period")
def mine_block(webu):
origin_block_number = webu.eth.blockNumber
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
block_number = webu.eth.blockNumber
if block_number > origin_block_number:
webu.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def deploy_contract(webu, name, factory):
webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(webu, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
def setup_chain_state(webu):
coinbase = webu.eth.coinbase
assert is_same_address(coinbase, COINBASE)
#
# Math Contract
#
math_contract_factory = webu.eth.contract(
abi=MATH_ABI,
bytecode=MATH_BYTECODE,
)
math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory)
assert is_dict(math_deploy_receipt)
#
# Emitter Contract
#
emitter_contract_factory = webu.eth.contract(
abi=EMITTER_ABI,
bytecode=EMITTER_BYTECODE,
)
emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory)
emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress'])
txn_hash_with_log = emitter_contract.transact({
'from': webu.eth.coinbase,
}).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321)
print('TXN_HASH_WITH_LOG:', txn_hash_with_log)
txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log)
block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash'])
print('BLOCK_HASH_WITH_LOG:', block_with_log['hash'])
#
# Empty Block
#
empty_block_number = mine_block(webu)
print('MINED_EMPTY_BLOCK')
empty_block = webu.eth.getBlock(empty_block_number)
assert is_dict(empty_block)
assert not empty_block['transactions']
print('EMPTY_BLOCK_HASH:', empty_block['hash'])
#
# Block with Transaction
#
webu.personal.unlockAccount(coinbase, KEYFILE_PW)
webu.miner.start(1)
mined_txn_hash = webu.eth.sendTransaction({
'from': coinbase,
'to': coinbase,
'value': 1,
'gas': 21000,
'gas_price': webu.eth.gasPrice,
})
mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash)
print('MINED_TXN_HASH:', mined_txn_hash)
block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash'])
print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash'])
ghuc_fixture = {
'math_deploy_txn_hash': math_deploy_receipt['transactionHash'],
'math_address': math_deploy_receipt['contractAddress'],
'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'],
'emitter_address': emitter_deploy_receipt['contractAddress'],
'txn_hash_with_log': txn_hash_with_log,
'block_hash_with_log': block_with_log['hash'],
'empty_block_hash': empty_block['hash'],
'mined_txn_hash': mined_txn_hash,
'block_with_txn_hash': block_with_txn['hash'],
}
return ghuc_fixture
if __name__ == '__main__':
fixture_dir = sys.argv[1]
generate_go_happyuc_fixture(fixture_dir)
| graceful_kill_on_exit |
package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import re
import time
from os.path import basename
from subprocess import PIPE, Popen
from sys import platform, stdout
from llnl.util import tty
from spack import *
is_windows = platform == 'win32'
if not is_windows:
from fcntl import F_GETFL, F_SETFL, fcntl
from os import O_NONBLOCK
re_optline = re.compile(r'\s+[0-9]+\..*\((serial|smpar|dmpar|dm\+sm)\)\s+')
re_paroptname = re.compile(r'\((serial|smpar|dmpar|dm\+sm)\)')
re_paroptnum = re.compile(r'\s+([0-9]+)\.\s+\(')
re_nestline = re.compile(r'\(([0-9]+=[^)0-9]+)+\)')
re_nestoptnum = re.compile(r'([0-9]+)=')
re_nestoptname = re.compile(r'=([^,)]+)')
def setNonBlocking(fd):
"""
Set the given file descriptor to non-blocking.
Non-blocking pipes are not supported on Windows.
"""
flags = fcntl(fd, F_GETFL) | O_NONBLOCK
fcntl(fd, F_SETFL, flags)
def collect_platform_options(stdoutpipe):
# Attempt to parse to collect options
optiondict = {}
for line in stdoutpipe.splitlines():
if re_optline.match(line):
numbers = re_paroptnum.findall(line)
entries = re_paroptname.findall(line)
paropts = dict(zip(entries, numbers))
platline = re_optline.sub("", line).strip()
optiondict[platline] = paropts
return optiondict
def collect_nesting_options(stdoutpipe):
nestoptline = re_nestline.search(stdoutpipe)[0]
nestoptnum = re_nestoptnum.findall(nestoptline)
nestoptname = re_nestoptname.findall(nestoptline)
nestoptname = [x.replace(" ", "_") for x in nestoptname]
return dict(zip(nestoptname, nestoptnum))
class Wrf(Package):
"""The Weather Research and Forecasting (WRF) Model
is a next-generation mesoscale numerical weather prediction system designed
for both atmospheric research and operational forecasting applications.
"""
homepage = "https://www.mmm.ucar.edu/weather-research-and-forecasting-model"
url = "https://github.com/wrf-model/WRF/archive/v4.2.tar.gz"
maintainers = ["MichaelLaufer", "ptooley"]
version("4.3.3", sha256='1b98b8673513f95716c7fc54e950dfebdb582516e22758cd94bc442bccfc0b86')
version("4.3.2", sha256='2c682da0cd0fd13f57d5125eef331f9871ec6a43d860d13b0c94a07fa64348ec')
version("4.3.1", sha256='6c9a69d05ee17d2c80b3699da173cfe6fdf65487db7587c8cc96bfa9ceafce87')
version("4.2", sha256="c39a1464fd5c439134bbd39be632f7ce1afd9a82ad726737e37228c6a3d74706")
version("4.0", sha256="9718f26ee48e6c348d8e28b8bc5e8ff20eafee151334b3959a11b7320999cf65")
version("3.9.1.1", sha256="a04f5c425bedd262413ec88192a0f0896572cc38549de85ca120863c43df047a", url="https://github.com/wrf-model/WRF/archive/V3.9.1.1.tar.gz")
resource(name='elec',
url='https://master.dl.sourceforge.net/project/wrfelec/WRFV3911_elec.beta_release.01.tgz',
sha256='eaaece04711a2883f39349f0857468b42af1a6f8d0985759ce5dfde4058316b4',
when='@3.9.1.1+elec',
destination='.'
)
variant(
"build_type",
default="dmpar",
values=("serial", "smpar", "dmpar", "dm+sm"),
)
variant(
"nesting",
default="basic",
values=("no_nesting", "basic", "preset_moves", "vortex_following"),
)
variant(
"compile_type",
default="em_real",
values=(
"em_real",
"em_quarter_ss",
"em_b_wave",
"em_les",
"em_heldsuarez",
"em_tropical_cyclone",
"em_hill2d_x",
"em_squall2d_x",
"em_squall2d_y",
"em_grav2d_x",
"em_seabreeze2d_x",
"em_scm_xy",
),
)
variant(
"pnetcdf",
default=True,
description="Parallel IO support through Pnetcdf library",
)
variant(
"elec",
default=False,
        description="Compile support for the storm electrification package "
        + "for the WRF-ARW"
)
conflicts("@4.0:", when="+elec",
msg="WRF_ELEC is only supported in V3.9.1.1")
patch("patches/3.9/netcdf_backport.patch", when="@3.9.1.1")
patch("patches/3.9/tirpc_detect.patch", when="@3.9.1.1")
patch("patches/3.9/add_aarch64.patch", when="@3.9.1.1")
patch("patches/3.9/force_flags.patch", when="@3.9.1.1 %gcc@10:")
patch("patches/3.9/configure_aocc_2.3.patch", when="@3.9.1.1 %aocc@:2.4.0")
patch("patches/3.9/configure_aocc_3.0.patch", when="@3.9.1.1 %[email protected]")
patch("patches/3.9/configure_aocc_3.1.patch", when="@3.9.1.1 %[email protected]")
patch("patches/3.9/fujitsu.patch", when="@3.9.1.1 %fj")
patch("patches/3.9/add_elec_support.patch", when="@3.9.1.1+elec")
patch("patches/3.9/add_elec_changes.patch", when="@3.9.1.1+elec")
# These patches deal with netcdf & netcdf-fortran being two diff things
# Patches are based on:
# https://github.com/easybuilders/easybuild-easyconfigs/blob/master/easybuild/easyconfigs/w/WRF/WRF-3.5_netCDF-Fortran_separate_path.patch
patch("patches/4.0/arch.Config.pl.patch", when="@4.0")
patch("patches/4.0/arch.configure.defaults.patch", when="@4.0")
patch("patches/4.0/arch.conf_tokens.patch", when="@4.0")
patch("patches/4.0/arch.postamble.patch", when="@4.0")
patch("patches/4.0/configure.patch", when="@4.0")
patch("patches/4.0/external.io_netcdf.makefile.patch", when="@4.0")
patch("patches/4.0/Makefile.patch", when="@4.0")
patch("patches/4.0/tirpc_detect.patch", when="@4.0")
patch("patches/4.0/add_aarch64.patch", when="@4.0")
patch("patches/4.2/arch.Config.pl.patch", when="@4.2:")
patch("patches/4.2/arch.configure.defaults.patch", when="@4.2")
patch("patches/4.2/arch.conf_tokens.patch", when="@4.2:")
patch("patches/4.2/arch.postamble.patch", when="@4.2")
patch("patches/4.2/configure.patch", when="@4.2:")
patch("patches/4.2/external.io_netcdf.makefile.patch", when="@4.2:")
patch("patches/4.2/var.gen_be.Makefile.patch", when="@4.2:")
patch("patches/4.2/Makefile.patch", when="@4.2")
patch("patches/4.2/tirpc_detect.patch", when="@4.2")
patch("patches/4.2/add_aarch64.patch", when="@4.2:")
patch("patches/4.2/configure_aocc_2.3.patch", when="@4.2 %aocc@:2.4.0")
patch("patches/4.2/configure_aocc_3.0.patch", when="@4.2: %[email protected]:3.2.0")
patch("patches/4.2/hdf5_fix.patch", when="@4.2: %aocc")
patch("patches/4.2/derf_fix.patch", when="@4.2 %aocc")
# Various syntax fixes found by FPT tool
patch("https://github.com/wrf-model/WRF/commit/6502d5d9c15f5f9a652dec244cc12434af737c3c.patch?full_index=1",
sha256="c5162c23a132b377132924f8f1545313861c6cee5a627e9ebbdcf7b7b9d5726f", when="@4.2 %fj")
patch("patches/4.2/configure_fujitsu.patch", when="@4 %fj")
patch("patches/4.3/Makefile.patch", when="@4.3:")
patch("patches/4.3/arch.postamble.patch", when="@4.3:")
patch("patches/4.3/fujitsu.patch", when="@4.3: %fj")
# Syntax errors in physics routines
patch("https://github.com/wrf-model/WRF/commit/7c6fd575b7a8fe5715b07b38db160e606c302956.patch?full_index=1",
sha256="1ce97f4fd09e440bdf00f67711b1c50439ac27595ea6796efbfb32e0b9a1f3e4", when="@4.3.1")
patch("https://github.com/wrf-model/WRF/commit/238a7d219b7c8e285db28fe4f0c96ebe5068d91c.patch?full_index=1",
sha256="27c7268f6c84b884d21e4afad0bab8554b06961cf4d6bfd7d0f5a457dcfdffb1", when="@4.3.1")
depends_on("pkgconfig", type=("build"))
depends_on("libtirpc")
depends_on("mpi")
# According to:
# http://www2.mmm.ucar.edu/wrf/users/docs/user_guide_v4/v4.0/users_guide_chap2.html#_Required_Compilers_and_1
# Section: "Required/Optional Libraries to Download"
depends_on("parallel-netcdf", when="+pnetcdf")
depends_on("netcdf-c")
depends_on("netcdf-fortran")
depends_on("jasper")
depends_on("libpng")
depends_on("zlib")
depends_on("perl")
depends_on("jemalloc", when="%aocc")
# not sure if +fortran is required, but seems like a good idea
depends_on("hdf5+fortran+hl+mpi")
    # build scripts use csh
depends_on("tcsh", type=("build"))
    # `time` is not installed on all systems because bash provides it as a
    # builtin; this fixes that for the csh install scripts
depends_on("time", type=("build"))
depends_on("m4", type="build")
depends_on("libtool", type="build")
depends_on("boxmg4wrf", type="build", when="+elec")
depends_on("tar", type="build", when="+elec")
phases = ["configure", "build", "install"]
def setup_run_environment(self, env):
env.set("WRF_HOME", self.prefix)
env.append_path("PATH", self.prefix.main)
env.append_path("PATH", self.prefix.tools)
def setup_build_environment(self, env):
env.set("NETCDF", self.spec["netcdf-c"].prefix)
if "+pnetcdf" in self.spec:
env.set("PNETCDF", self.spec["parallel-netcdf"].prefix)
# This gets used via the applied patch files
env.set("NETCDFF", self.spec["netcdf-fortran"].prefix)
env.set("PHDF5", self.spec["hdf5"].prefix)
env.set("JASPERINC", self.spec["jasper"].prefix.include)
env.set("JASPERLIB", self.spec["jasper"].prefix.lib)
if self.spec.satisfies("%gcc@10:"):
args = "-w -O2 -fallow-argument-mismatch -fallow-invalid-boz"
env.set("FCFLAGS", args)
env.set("FFLAGS", args)
if self.spec.satisfies("%aocc"):
env.set("WRFIO_NCD_LARGE_FILE_SUPPORT", 1)
env.set("HDF5", self.spec["hdf5"].prefix)
env.prepend_path('PATH', ancestor(self.compiler.cc))
if self.spec.satisfies("+elec"):
env.set("WRF_ELEC", 1)
env.set("BOXMGLIBDIR", self.spec["boxmg4wrf"].prefix)
def patch(self):
        # Let's not assume csh is installed in bin
files = glob.glob("*.csh")
filter_file("^#!/bin/csh -f", "#!/usr/bin/env csh", *files)
filter_file("^#!/bin/csh", "#!/usr/bin/env csh", *files)
def answer_configure_question(self, outputbuf):
# Platform options question:
if "Please select from among the following" in outputbuf:
options = collect_platform_options(outputbuf)
comp_pair = "%s/%s" % (
basename(self.compiler.fc).split("-")[0],
basename(self.compiler.cc).split("-")[0],
)
compiler_matches = dict(
(x, y) for x, y in options.items() if comp_pair in x.lower()
)
if len(compiler_matches) > 1:
tty.warn("Found multiple potential build options")
try:
compiler_key = min(compiler_matches.keys(), key=len)
tty.warn("Selected build option %s." % compiler_key)
return (
"%s\n"
% compiler_matches[compiler_key][
self.spec.variants["build_type"].value
]
)
except KeyError:
                raise InstallError(
"build_type %s unsupported for %s compilers"
% (self.spec.variants["build_type"].value, comp_pair)
)
if "Compile for nesting?" in outputbuf:
options = collect_nesting_options(outputbuf)
try:
return "%s\n" % options[self.spec.variants["nesting"].value]
except KeyError:
InstallError("Failed to parse correct nesting option")
def do_configure_fixup(self):
# Fix mpi compiler wrapper aliases
# In version 4.2 the file to be patched is called
# configure.defaults, while in earlier versions
# it's configure_new.defaults
if self.spec.satisfies("@3.9.1.1"):
config = FileFilter(join_path('arch', 'configure_new.defaults'))
else:
config = FileFilter(join_path('arch', 'configure.defaults'))
if self.spec.satisfies("@3.9.1.1 %gcc"):
config.filter(r'^DM_FC.*mpif90 -f90=\$\(SFC\)',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc))
config.filter(r'^DM_CC.*mpicc -cc=\$\(SCC\)',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc))
if self.spec.satisfies("%aocc"):
config.filter(
'^DM_FC.*mpif90 -DMPI2SUPPORT',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc + ' -DMPI2_SUPPORT')
)
config.filter(
                '^DM_CC.*mpicc -DMPI2SUPPORT',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc) + ' -DMPI2_SUPPORT'
)
if self.spec.satisfies("@4.2: %intel"):
config.filter('^DM_FC.*mpif90',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc))
config.filter('^DM_CC.*mpicc',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc))
@run_before('configure')
    def untar(self):
        # The elec tarball is only staged for +elec builds, so skip otherwise
        if not self.spec.satisfies('+elec'):
            return
        tar = which('tar')
        tar('-xvf', 'WRFV3911_elec/elec.tgz')
def configure(self, spec, prefix):
# Remove broken default options...
|
@run_after("configure")
def patch_for_libmvec(self):
if self.spec.satisfies("@3.9.1.1 %aocc"):
fp = self.package_dir + "/patches/3.9/aocc_lmvec.patch"
which('patch')('-s', '-p1', '-i', '{0}'.format(fp), '-d', '.')
def run_compile_script(self):
csh_bin = self.spec["tcsh"].prefix.bin.csh
csh = Executable(csh_bin)
if self.spec.satisfies("+elec"):
num_jobs = str(1)
else:
            # WRF itself caps the number of compile jobs at 20; use at most 10 here
            num_jobs = str(min(int(make_jobs), 10))
# Now run the compile script and track the output to check for
        # failure/success. We need to do this because upstream uses `make -i -k`
        # and the custom compile script will always return zero regardless of
        # success or failure.
result_buf = csh(
"./compile",
"-j",
num_jobs,
self.spec.variants["compile_type"].value,
output=str,
error=str
)
print(result_buf)
if "Executables successfully built" in result_buf:
return True
return False
def build(self, spec, prefix):
result = self.run_compile_script()
if not result:
tty.warn(
"Compilation failed first time (WRF idiosyncrasies?) "
"- trying again..."
)
result = self.run_compile_script()
if not result:
raise InstallError(
"Compile failed. Check the output log for details."
)
def install(self, spec, prefix):
# Save all install files as many are needed for WPS and WRF runs
install_tree(".", prefix)
| self.do_configure_fixup()
if self.spec.compiler.name not in ["intel", "gcc", "aocc", "fj"]:
raise InstallError(
"Compiler %s not currently supported for WRF build."
% self.spec.compiler.name
)
p = Popen("./configure", stdin=PIPE, stdout=PIPE, stderr=PIPE)
if not is_windows:
setNonBlocking(p.stdout)
setNonBlocking(p.stderr)
# Because of WRFs custom configure scripts that require interactive
# input we need to parse and respond to questions. The details can
# vary somewhat with the exact version, so try to detect and fail
# gracefully on unexpected questions.
stallcounter = 0
outputbuf = ""
while True:
line = p.stderr.readline().decode()
if not line:
line = p.stdout.readline().decode()
if not line:
if p.poll() is not None:
returncode = p.returncode
break
if stallcounter > 300:
raise InstallError(
"Output stalled for 30s, presumably an "
"undetected question."
)
time.sleep(0.1) # Try to do a bit of rate limiting
stallcounter += 1
continue
stdout.write(line)
stallcounter = 0
outputbuf += line
if (
"Enter selection" in outputbuf
or "Compile for nesting" in outputbuf
):
answer = self.answer_configure_question(outputbuf)
p.stdin.write(answer.encode())
p.stdin.flush()
outputbuf = ""
if returncode != 0:
raise InstallError("Configure failed - unknown error") |
test_lsgan.py | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import unittest
import keras2onnx
import onnx
import numpy as np
from keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/lsgan/lsgan.py
class LSGAN():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
# Build the generator
self.generator = self.build_generator()
        # The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
        # The discriminator takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator)
# Trains generator to fool discriminator
self.combined = Model(z, valid)
def build_generator(self):
model = Sequential()
model.add(Dense(256, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.img_shape), activation='tanh'))
model.add(Reshape(self.img_shape))
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_discriminator(self):
model = Sequential()
model.add(Flatten(input_shape=self.img_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
# (!!!) No softmax
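        # LSGAN regresses a raw validity score and trains with a least-squares
        # (MSE) loss, so no sigmoid/softmax activation is applied here.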
model.add(Dense(1))
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
class TestLSGAN(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self): | os.remove(fl)
def test_LSGAN(self):
keras_model = LSGAN().combined
x = np.random.rand(5, 100).astype(np.float32)
expected = keras_model.predict(x)
onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
if __name__ == "__main__":
unittest.main() | for fl in self.model_files: |
benchmark_config_spec.py | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes that verify and transform benchmark configuration input.
See perfkitbenchmarker/configs/__init__.py for more information about
configuration files.
"""
import copy
import os
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import providers
from perfkitbenchmarker import static_virtual_machine
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec
_DEFAULT_DISK_COUNT = 1
_DEFAULT_VM_COUNT = 1
class _FlagsDecoder(option_decoders.TypeVerifier):
"""Processes the flags override dictionary of a benchmark config object."""
def __init__(self, **kwargs):
super(_FlagsDecoder, self).__init__(default=None, none_ok=True,
valid_types=(dict,), **kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Processes the flags override dictionary of a benchmark config object.
Args:
value: None or dict mapping flag name string to flag override value.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Command-line flag values.
Returns:
dict mapping flag name string to Flag object. The flag values to use
when running the benchmark.
"""
config_flags = super(_FlagsDecoder, self).Decode(value, component_full_name,
flag_values)
merged_flag_values = copy.deepcopy(flag_values)
if config_flags:
for key, value in config_flags.iteritems():
if key not in merged_flag_values:
raise errors.Config.UnrecognizedOption(
'Unrecognized option {0}.{1}. Each option within {0} must '
'correspond to a valid command-line flag.'.format(
self._GetOptionFullName(component_full_name), key))
if not merged_flag_values[key].present:
try:
merged_flag_values[key].Parse(value)
except flags.IllegalFlagValue as e:
raise errors.Config.InvalidValue(
'Invalid {0}.{1} value: "{2}" (of type "{3}").{4}{5}'.format(
self._GetOptionFullName(component_full_name), key, value,
value.__class__.__name__, os.linesep, e))
return merged_flag_values.FlagDict()
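# Illustrative example (hypothetical config values): a benchmark config with
#     flags: {'num_vms': 3}
# produces a flag dict in which 'num_vms' is 3, unless the flag was already
# set on the command line, in which case the command-line value wins.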
class _PerCloudConfigSpec(spec.BaseSpec):
"""Contains one config dict attribute per cloud provider.
The name of each attribute is the name of the cloud provider.
"""
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword arguments
to construct in order to decode the named option.
"""
result = super(_PerCloudConfigSpec, cls)._GetOptionDecoderConstructions()
for cloud in providers.VALID_CLOUDS:
result[cloud] = option_decoders.TypeVerifier, {'default': None,
'valid_types': (dict,)}
return result
class _PerCloudConfigDecoder(option_decoders.TypeVerifier):
"""Decodes the disk_spec or vm_spec option of a VM group config object."""
def __init__(self, **kwargs):
super(_PerCloudConfigDecoder, self).__init__(valid_types=(dict,), **kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Decodes the disk_spec or vm_spec option of a VM group config object.
Args:
value: None or dict mapping cloud provider name string to a dict.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
_PerCloudConfigSpec decoded from the input dict.
"""
input_dict = super(_PerCloudConfigDecoder, self).Decode(
value, component_full_name, flag_values)
return None if input_dict is None else _PerCloudConfigSpec(
self._GetOptionFullName(component_full_name), flag_values=flag_values,
**input_dict)
class _StaticVmDecoder(option_decoders.TypeVerifier):
"""Decodes an item of the static_vms list of a VM group config object."""
def __init__(self, **kwargs):
super(_StaticVmDecoder, self).__init__(valid_types=(dict,), **kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Decodes an item of the static_vms list of a VM group config object.
Args:
value: dict mapping static VM config option name string to corresponding
option value.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
StaticVmSpec decoded from the input dict.
Raises:
errors.Config.InvalidValue upon invalid input value.
"""
input_dict = super(_StaticVmDecoder, self).Decode(
value, component_full_name, flag_values)
return static_virtual_machine.StaticVmSpec(
self._GetOptionFullName(component_full_name), **input_dict)
class _StaticVmListDecoder(option_decoders.ListDecoder):
"""Decodes the static_vms list of a VM group config object."""
def __init__(self, **kwargs):
super(_StaticVmListDecoder, self).__init__(
default=list, item_decoder=_StaticVmDecoder(), **kwargs)
class _VmGroupSpec(spec.BaseSpec):
"""Configurable options of a VM group.
Attributes:
cloud: string. Cloud provider of the VMs in this group.
disk_count: int. Number of data disks to attach to each VM in this group.
disk_spec: BaseDiskSpec. Configuration for all data disks to be attached to
VMs in this group.
os_type: string. OS type of the VMs in this group.
static_vms: None or list of StaticVmSpecs. Configuration for all static VMs
in this group.
vm_count: int. Number of VMs in this group, including static VMs and
provisioned VMs.
vm_spec: BaseVmSpec. Configuration for provisioned VMs in this group.
"""
def __init__(self, component_full_name, flag_values=None, **kwargs):
super(_VmGroupSpec, self).__init__(component_full_name,
flag_values=flag_values, **kwargs)
providers.LoadProvider(self.cloud.lower())
if self.disk_spec:
disk_config = getattr(self.disk_spec, self.cloud, None)
if disk_config is None:
raise errors.Config.MissingOption(
'{0}.cloud is "{1}", but {0}.disk_spec does not contain a '
'configuration for "{1}".'.format(component_full_name, self.cloud))
disk_spec_class = disk.GetDiskSpecClass(self.cloud)
self.disk_spec = disk_spec_class(
'{0}.disk_spec.{1}'.format(component_full_name, self.cloud),
flag_values=flag_values, **disk_config)
vm_config = getattr(self.vm_spec, self.cloud, None)
if vm_config is None:
raise errors.Config.MissingOption(
'{0}.cloud is "{1}", but {0}.vm_spec does not contain a '
'configuration for "{1}".'.format(component_full_name, self.cloud))
vm_spec_class = virtual_machine.GetVmSpecClass(self.cloud)
self.vm_spec = vm_spec_class(
'{0}.vm_spec.{1}'.format(component_full_name, self.cloud),
flag_values=flag_values, **vm_config)
@classmethod
def _GetOptionDecoderConstructions(cls):
|
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(_VmGroupSpec, cls)._ApplyFlags(config_values, flag_values)
if flag_values['cloud'].present or 'cloud' not in config_values:
config_values['cloud'] = flag_values.cloud
if flag_values['os_type'].present or 'os_type' not in config_values:
config_values['os_type'] = flag_values.os_type
if 'vm_count' in config_values and config_values['vm_count'] is None:
config_values['vm_count'] = flag_values.num_vms
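# Illustrative sketch of the YAML a _VmGroupSpec is decoded from (cloud,
# os_type and machine type values below are placeholders):
#
#     vm_groups:
#       default:
#         cloud: GCP
#         os_type: debian
#         vm_count: 2
#         vm_spec:
#           GCP:
#             machine_type: n1-standard-2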
class _VmGroupsDecoder(option_decoders.TypeVerifier):
"""Validates the vm_groups dictionary of a benchmark config object."""
def __init__(self, **kwargs):
super(_VmGroupsDecoder, self).__init__(valid_types=(dict,), **kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Verifies vm_groups dictionary of a benchmark config object.
Args:
value: dict mapping VM group name string to the corresponding VM group
config dict.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
dict mapping VM group name string to _VmGroupSpec.
Raises:
errors.Config.InvalidValue upon invalid input value.
"""
vm_group_configs = super(_VmGroupsDecoder, self).Decode(
value, component_full_name, flag_values)
result = {}
for vm_group_name, vm_group_config in vm_group_configs.iteritems():
result[vm_group_name] = _VmGroupSpec(
'{0}.{1}'.format(self._GetOptionFullName(component_full_name),
vm_group_name),
flag_values=flag_values, **vm_group_config)
return result
class BenchmarkConfigSpec(spec.BaseSpec):
"""Configurable options of a benchmark run.
Attributes:
description: None or string. Description of the benchmark to run.
flags: flags.FlagValues. Values to use for each flag while executing the
benchmark.
vm_groups: dict mapping VM group name string to _VmGroupSpec. Configurable
options for each VM group used by the benchmark.
"""
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Can be overridden by derived classes to add options or impose additional
requirements on existing options.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword arguments
to construct in order to decode the named option.
"""
result = super(BenchmarkConfigSpec, cls)._GetOptionDecoderConstructions()
result.update({
'description': (option_decoders.StringDecoder, {'default': None}),
'flags': (_FlagsDecoder, {}),
'vm_groups': (_VmGroupsDecoder, {})})
return result
def _DecodeAndInit(self, component_full_name, config, decoders, flag_values):
"""Initializes spec attributes from provided config option values.
Args:
component_full_name: string. Fully qualified name of the configurable
component containing the config options.
config: dict mapping option name string to option value.
      decoders: OrderedDict mapping option name string to ConfigOptionDecoder.
      flag_values: flags.FlagValues. Runtime flags that may override provided
          config option values. These flags have already been applied to the
          current config, but they may be passed to the decoders for propagation
          to deeper spec constructors.
"""
# Decode benchmark-specific flags first and use them while decoding the
# rest of the BenchmarkConfigSpec's options.
decoders = decoders.copy()
self.flags = decoders.pop('flags').Decode(config.pop('flags', None),
component_full_name, flag_values)
with flag_util.FlagDictSubstitution(flag_values, lambda: self.flags):
super(BenchmarkConfigSpec, self)._DecodeAndInit(
component_full_name, config, decoders, flag_values)
| """Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword arguments
to construct in order to decode the named option.
"""
result = super(_VmGroupSpec, cls)._GetOptionDecoderConstructions()
result.update({
'cloud': (option_decoders.EnumDecoder, {
'valid_values': providers.VALID_CLOUDS}),
'disk_count': (option_decoders.IntDecoder, {
'default': _DEFAULT_DISK_COUNT, 'min': 0}),
'disk_spec': (_PerCloudConfigDecoder, {'default': None,
'none_ok': True}),
'os_type': (option_decoders.EnumDecoder, {
'valid_values': os_types.ALL}),
'static_vms': (_StaticVmListDecoder, {}),
'vm_count': (option_decoders.IntDecoder, {
'default': _DEFAULT_VM_COUNT, 'min': 0}),
'vm_spec': (_PerCloudConfigDecoder, {})})
return result |
accessors_offchain.go | package rawdb
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/shard"
staking "github.com/harmony-one/harmony/staking/types"
)
// ReadShardState retrieves shard state of a specific epoch.
func ReadShardState(
db DatabaseReader, epoch *big.Int,
) (*shard.State, error) {
data, err := db.Get(shardStateKey(epoch))
if err != nil {
return nil, ctxerror.New(MsgNoShardStateFromDB,
"epoch", epoch,
).WithCause(err)
}
ss, err2 := shard.DecodeWrapper(data)
if err2 != nil {
return nil, ctxerror.New("cannot decode sharding state",
"epoch", epoch,
).WithCause(err2)
}
return ss, nil
}
// WriteShardStateBytes stores sharding state into database.
func WriteShardStateBytes(db DatabaseWriter, epoch *big.Int, data []byte) (err error) {
if err = db.Put(shardStateKey(epoch), data); err != nil {
return ctxerror.New("cannot write sharding state",
"epoch", epoch,
).WithCause(err)
}
utils.Logger().Info().Str("epoch", epoch.String()).Int("size", len(data)).Msg("wrote sharding state")
return nil
}
// ReadLastCommits retrieves the commit signatures on the current block of the blockchain.
func ReadLastCommits(db DatabaseReader) ([]byte, error) {
var data []byte
data, err := db.Get(lastCommitsKey)
if err != nil {
return nil, ctxerror.New("cannot read last commits from rawdb").WithCause(err)
}
return data, nil
}
// WriteLastCommits stores the commit signatures collected on the newly confirmed block into database.
func WriteLastCommits(
db DatabaseWriter, data []byte,
) (err error) {
if err = db.Put(lastCommitsKey, data); err != nil {
return ctxerror.New("cannot write last commits").WithCause(err)
}
utils.Logger().Info().
Int("size", len(data)).
Msg("wrote last commits")
return nil
}
// ReadCrossLinkShardBlock retrieves the blockHash given shardID and blockNum
func ReadCrossLinkShardBlock(db DatabaseReader, shardID uint32, blockNum uint64) ([]byte, error) {
return db.Get(crosslinkKey(shardID, blockNum))
}
// WriteCrossLinkShardBlock stores the blockHash given shardID and blockNum
func WriteCrossLinkShardBlock(db DatabaseWriter, shardID uint32, blockNum uint64, data []byte) error {
return db.Put(crosslinkKey(shardID, blockNum), data)
}
// DeleteCrossLinkShardBlock deletes the blockHash given shardID and blockNum
func DeleteCrossLinkShardBlock(db DatabaseDeleter, shardID uint32, blockNum uint64) error {
return db.Delete(crosslinkKey(shardID, blockNum))
}
// ReadShardLastCrossLink read the last cross link of a shard
func ReadShardLastCrossLink(db DatabaseReader, shardID uint32) ([]byte, error) {
return db.Get(shardLastCrosslinkKey(shardID))
}
// WriteShardLastCrossLink stores the last cross link of a shard
func WriteShardLastCrossLink(db DatabaseWriter, shardID uint32, data []byte) error {
return db.Put(shardLastCrosslinkKey(shardID), data)
}
// ReadPendingCrossLinks retrieves last pending crosslinks.
func ReadPendingCrossLinks(db DatabaseReader) ([]byte, error) {
return db.Get(pendingCrosslinkKey)
}
// WritePendingCrossLinks stores last pending crosslinks into database.
func WritePendingCrossLinks(db DatabaseWriter, bytes []byte) error {
return db.Put(pendingCrosslinkKey, bytes)
}
// DeletePendingCrossLinks deletes the last pending crosslinks from the database.
func DeletePendingCrossLinks(db DatabaseDeleter) error {
return db.Delete(pendingCrosslinkKey)
}
// ReadPendingSlashingCandidates retrieves last pending slashing candidates
func ReadPendingSlashingCandidates(db DatabaseReader) ([]byte, error) {
return db.Get(pendingSlashingKey)
}
// WritePendingSlashingCandidates stores last pending slashing candidates into database.
func WritePendingSlashingCandidates(db DatabaseWriter, bytes []byte) error {
return db.Put(pendingSlashingKey, bytes)
}
// DeletePendingSlashingCandidates deletes the last pending slashing candidates from the database.
func DeletePendingSlashingCandidates(db DatabaseDeleter) error {
return db.Delete(pendingSlashingKey)
}
// ReadCXReceipts retrieves all the cross-shard transaction receipts for the given destination shardID, block number and blockHash
func ReadCXReceipts(db DatabaseReader, shardID uint32, number uint64, hash common.Hash) (types.CXReceipts, error) {
data, err := db.Get(cxReceiptKey(shardID, number, hash))
if err != nil || len(data) == 0 {
utils.Logger().Info().Err(err).Uint64("number", number).Int("dataLen", len(data)).Msg("ReadCXReceipts")
return nil, err
}
cxReceipts := types.CXReceipts{}
if err := rlp.DecodeBytes(data, &cxReceipts); err != nil {
return nil, err
}
return cxReceipts, nil
}
// WriteCXReceipts stores all the transaction receipts given destination shardID, blockNumber and blockHash
func WriteCXReceipts(db DatabaseWriter, shardID uint32, number uint64, hash common.Hash, receipts types.CXReceipts) error {
	bytes, err := rlp.EncodeToBytes(receipts)
	if err != nil {
		utils.Logger().Error().Msg("[WriteCXReceipts] Failed to encode cross shard tx receipts")
		return err
	}
	// Store the receipt slice
	if err := db.Put(cxReceiptKey(shardID, number, hash), bytes); err != nil {
		utils.Logger().Error().Msg("[WriteCXReceipts] Failed to store cxreceipts")
		return err
	}
	return nil
}
// ReadCXReceiptsProofSpent checks whether a CXReceiptsProof is unspent
func ReadCXReceiptsProofSpent(db DatabaseReader, shardID uint32, number uint64) (byte, error) {
data, err := db.Get(cxReceiptSpentKey(shardID, number))
if err != nil || len(data) == 0 {
return NAByte, ctxerror.New("[ReadCXReceiptsProofSpent] Cannot find the key", "shardID", shardID, "number", number).WithCause(err)
}
return data[0], nil
}
// WriteCXReceiptsProofSpent write CXReceiptsProof as spent into database
func WriteCXReceiptsProofSpent(dbw DatabaseWriter, cxp *types.CXReceiptsProof) error {
shardID := cxp.MerkleProof.ShardID
blockNum := cxp.MerkleProof.BlockNum.Uint64()
return dbw.Put(cxReceiptSpentKey(shardID, blockNum), []byte{SpentByte})
}
// DeleteCXReceiptsProofSpent removes the spent indicator for the given shardID and block number
func DeleteCXReceiptsProofSpent(db DatabaseDeleter, shardID uint32, number uint64) {
if err := db.Delete(cxReceiptSpentKey(shardID, number)); err != nil {
utils.Logger().Error().Msg("Failed to delete receipts unspent indicator")
}
}
// ReadValidatorSnapshot retrieves validator's snapshot by its address
func ReadValidatorSnapshot(
db DatabaseReader, addr common.Address, epoch *big.Int,
) (*staking.ValidatorWrapper, error) {
data, err := db.Get(validatorSnapshotKey(addr, epoch))
if err != nil || len(data) == 0 {
utils.Logger().Info().Err(err).Msg("ReadValidatorSnapshot")
return nil, err
}
v := staking.ValidatorWrapper{}
if err := rlp.DecodeBytes(data, &v); err != nil {
utils.Logger().Error().Err(err).
Str("address", addr.Hex()).
Msg("Unable to decode validator snapshot from database")
return nil, err
}
return &v, nil
}
// WriteValidatorSnapshot stores validator's snapshot by its address
func WriteValidatorSnapshot(batch DatabaseWriter, v *staking.ValidatorWrapper, epoch *big.Int) error {
bytes, err := rlp.EncodeToBytes(v)
if err != nil {
utils.Logger().Error().Msg("[WriteValidatorSnapshot] Failed to encode")
return err
}
if err := batch.Put(validatorSnapshotKey(v.Address, epoch), bytes); err != nil {
utils.Logger().Error().Msg("[WriteValidatorSnapshot] Failed to store to database")
return err
}
return err
}
// DeleteValidatorSnapshot removes the validator's snapshot by its address
func DeleteValidatorSnapshot(db DatabaseDeleter, addr common.Address, epoch *big.Int) {
if err := db.Delete(validatorSnapshotKey(addr, epoch)); err != nil {
utils.Logger().Error().Msg("Failed to delete snapshot of a validator")
}
}
// ReadValidatorStats retrieves validator's stats by its address
func ReadValidatorStats(
db DatabaseReader, addr common.Address,
) (*staking.ValidatorStats, error) {
data, err := db.Get(validatorStatsKey(addr))
if err != nil || len(data) == 0 {
utils.Logger().Info().Err(err).Msg("ReadValidatorStats")
return nil, err
}
stats := staking.ValidatorStats{}
if err := rlp.DecodeBytes(data, &stats); err != nil {
utils.Logger().Error().Err(err).
Str("address", addr.Hex()).
Msg("Unable to decode validator stats from database")
return nil, err
}
return &stats, nil
}
// WriteValidatorStats stores validator's stats by its address
func WriteValidatorStats(
batch DatabaseWriter, addr common.Address, stats *staking.ValidatorStats,
) error {
bytes, err := rlp.EncodeToBytes(stats)
if err != nil {
utils.Logger().Error().Msg("[WriteValidatorStats] Failed to encode")
return err
}
if err := batch.Put(validatorStatsKey(addr), bytes); err != nil {
utils.Logger().Error().Msg("[WriteValidatorStats] Failed to store to database")
return err
}
return err
}
// ReadValidatorList retrieves the list of staking validator addresses
// Return only active validators if activeOnly==true, otherwise, return all validators
func | (db DatabaseReader, activeOnly bool) ([]common.Address, error) {
key := validatorListKey
if activeOnly {
key = activeValidatorListKey
}
data, err := db.Get(key)
if err != nil || len(data) == 0 {
return []common.Address{}, nil
}
addrs := []common.Address{}
if err := rlp.DecodeBytes(data, &addrs); err != nil {
utils.Logger().Error().Err(err).Msg("Unable to Decode validator List from database")
return nil, err
}
return addrs, nil
}
// WriteValidatorList stores the list of staking validator addresses
// Writes only for active validators if activeOnly==true, otherwise, writes for all validators
func WriteValidatorList(db DatabaseWriter, addrs []common.Address, activeOnly bool) error {
key := validatorListKey
if activeOnly {
key = activeValidatorListKey
}
	bytes, err := rlp.EncodeToBytes(addrs)
	if err != nil {
		utils.Logger().Error().Msg("[WriteValidatorList] Failed to encode")
		return err
	}
	if err := db.Put(key, bytes); err != nil {
		utils.Logger().Error().Msg("[WriteValidatorList] Failed to store to database")
		return err
	}
	return nil
}
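// Illustrative round-trip of the two helpers above (sketch only; `db` and
// `addr` are assumed to exist in the caller):
//   _ = WriteValidatorList(db, []common.Address{addr}, false)
//   addrs, _ := ReadValidatorList(db, false)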
// ReadDelegationsByDelegator retrieves the list of validators delegated by a delegator
func ReadDelegationsByDelegator(db DatabaseReader, delegator common.Address) ([]staking.DelegationIndex, error) {
data, err := db.Get(delegatorValidatorListKey(delegator))
if err != nil || len(data) == 0 {
return []staking.DelegationIndex{}, nil
}
addrs := []staking.DelegationIndex{}
if err := rlp.DecodeBytes(data, &addrs); err != nil {
utils.Logger().Error().Err(err).Msg("Unable to Decode delegations from database")
return nil, err
}
return addrs, nil
}
// WriteDelegationsByDelegator stores the list of validators delegated by a delegator
func WriteDelegationsByDelegator(db DatabaseWriter, delegator common.Address, indices []staking.DelegationIndex) error {
	bytes, err := rlp.EncodeToBytes(indices)
	if err != nil {
		utils.Logger().Error().Msg("[writeDelegationsByDelegator] Failed to encode")
		return err
	}
	if err := db.Put(delegatorValidatorListKey(delegator), bytes); err != nil {
		utils.Logger().Error().Msg("[writeDelegationsByDelegator] Failed to store to database")
		return err
	}
	return nil
}
// ReadBlockRewardAccumulator ..
func ReadBlockRewardAccumulator(db DatabaseReader, number uint64) (*big.Int, error) {
data, err := db.Get(blockRewardAccumKey(number))
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(data), nil
}
// WriteBlockRewardAccumulator ..
func WriteBlockRewardAccumulator(db DatabaseWriter, newAccum *big.Int, number uint64) error {
return db.Put(blockRewardAccumKey(number), newAccum.Bytes())
}
//// Resharding ////
// ReadEpochBlockNumber retrieves the epoch block number for the given epoch,
// or nil if the given epoch is not found in the database.
func ReadEpochBlockNumber(db DatabaseReader, epoch *big.Int) (*big.Int, error) {
data, err := db.Get(epochBlockNumberKey(epoch))
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(data), nil
}
// WriteEpochBlockNumber stores the given epoch-number-to-epoch-block-number in the database.
func WriteEpochBlockNumber(db DatabaseWriter, epoch, blockNum *big.Int) error {
return db.Put(epochBlockNumberKey(epoch), blockNum.Bytes())
}
// ReadEpochVrfBlockNums retrieves the VRF block numbers for the given epoch
func ReadEpochVrfBlockNums(db DatabaseReader, epoch *big.Int) ([]byte, error) {
return db.Get(epochVrfBlockNumbersKey(epoch))
}
// WriteEpochVrfBlockNums stores the VRF block numbers for the given epoch
func WriteEpochVrfBlockNums(db DatabaseWriter, epoch *big.Int, data []byte) error {
return db.Put(epochVrfBlockNumbersKey(epoch), data)
}
// ReadEpochVdfBlockNum retrieves the VDF block number for the given epoch
func ReadEpochVdfBlockNum(db DatabaseReader, epoch *big.Int) ([]byte, error) {
return db.Get(epochVdfBlockNumberKey(epoch))
}
// WriteEpochVdfBlockNum stores the VDF block number for the given epoch
func WriteEpochVdfBlockNum(db DatabaseWriter, epoch *big.Int, data []byte) error {
return db.Put(epochVdfBlockNumberKey(epoch), data)
}
//// Resharding ////
| ReadValidatorList |
main.rs | // Silence some warnings so they don't distract from the exercise.
#![allow(unused_variables)]
fn main() {
let width = 4;
let height = 7;
let depth = 10;
// 1. Try running this code with `cargo run` and take a look at the error.
// | let area = area_of(width, height);
println!("Area is {}", area);
// 2. The area that was calculated is not correct! Go fix the area_of() function below, then run
// the code again and make sure it worked (you should get an area of 28).
// 3. Uncomment the line below. It doesn't work yet because the `volume` function doesn't exist.
// Create the `volume` function! It should:
// - Take three arguments of type i32
// - Multiply the three arguments together
// - Return the result (which should be 280 when you run the program).
//
// If you get stuck, remember that this is *very* similar to what `area_of` does.
//
println!("Volume is {}", volume(width, height, depth));
}
fn area_of(x: i32, y: i32) -> i32 {
// 2a. Fix this function to correctly compute the area of a rectangle given
// dimensions x and y by multiplying x and y and returning the result.
//
x * y
// Challenge: It isn't idiomatic (the normal way a Rust programmer would do things) to use
// `return` on the last line of a function. Change the last line to be a
// "tail expression" that returns a value without using `return`.
// Hint: `cargo clippy` will warn you about this exact thing.
}
fn volume(x: i32, y: i32, z: i32) -> i32 {
x * y * z
} | // See if you can fix the error. It is right around here, somewhere. If you succeed, then
// doing `cargo run` should succeed and print something out. |
close.py | import numpy as np
from scipy.spatial.distance import euclidean
from typing import Union
import pandas
class CLOSE(object):
def __init__(self, data: pandas.DataFrame, measure: Union[str, callable] = 'mse', minPts: int = None, output: bool = False,
jaccard: bool = False, weighting: bool = False, exploitation_term: bool = False):
"""
Params:
data (pandas.DataFrame) - pandas dataframe with columns order 'object_id', 'time', 'cluster_id' containing cluster belongings,
features ..
            Note: outliers should have negative labels/cluster_ids; these should differ between timestamps
Optional:
measure (str or callable) - for used quality measure, possible measures:
'sse', 'mse', 'mae', 'max', 'dbi', 'exploit'
minPts (int) - used minPts for density-based quality measure
output (boolean) - whether intermediate results should be printed
jaccard (boolean) - whether the jaccard index should be used for proportion
weighting (boolean) - whether the weighting function should be used for subsequence_score
exploitation_term (boolean) - whether the exploitation term should be included in CLOSE calculation
"""
self._data = data
self._column_names = data.columns.values
self._object_column_name = self._column_names[0]
self._time_column_name = self._column_names[1]
self._cluster_column_name = self._column_names[2]
self._jaccard = jaccard
self._weighting = weighting
self._exp_term = exploitation_term
self._minPts = minPts
self._output = output
self.pos_measures = {### Measures for Clusters
'sse': self.calc_sse, # NOTE: sse is not between 0 and 1
'mse': self.calc_mse, # NOTE: mse is only between 0 and 1, if data is normalized
'mae': self.calc_mae, # NOTE: mae is only between 0 and 1, if data is normalized
'max': self.calc_max_dist,
'dbi': self.calc_min_pts,
'None': self.return_zero,
### Measures for Time Clusterings
'exploit': self.calc_exploit_at_t}
if measure in self.pos_measures:
self.measure = self.pos_measures[measure]
elif callable(measure):
self.measure = measure
else:
self.measure = self.pos_measures['mse']
def rate_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:
"""
Optional:
start_time (int) - time that should be considered as beginning
end_time (int) - time which should be rated up to
return_measures (boolean) - whether additional information such as average stability
and quality should be returned
Returns:
CLOSE score (float): rating of clustering regarding all clusters
(dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information
if 'return_measures' is True
"""
cluster_ratings = self.rate_clusters(start_time, end_time)
gr_clusters = self._data.groupby(self._cluster_column_name)
score = 0
avg_quality = 0
avg_stab = 0
for cluster in cluster_ratings:
cluster_objects = gr_clusters.get_group(cluster)[self._object_column_name].unique()
cluster_time = gr_clusters.get_group(cluster)[self._time_column_name].iloc[0]
feature_list = self.get_feature_list(cluster_objects, cluster_time)
measure = self.measure(feature_list)
avg_quality += measure
avg_stab += cluster_ratings[cluster]
score += (cluster_ratings[cluster] * (1 - measure))
num_clusters = len(cluster_ratings)
num_timestamps = self.get_num_timestamps(start_time, end_time)
if num_clusters <= 0:
if self._output:
print('Clustering has no Clusters!!')
return 0
avg_quality /= num_clusters
if self._output:
print('Average Quality: ', str(avg_quality))
avg_stab /= num_clusters
if self._output:
print('Average Stability: ', str(avg_stab))
if self._exp_term:
exp_term = self.calc_exploit()
factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term
else:
factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters)**2)
if not return_measures:
return score * factor
else:
return {'stability_evaluation': score * factor,
'stability': avg_stab,
'quality': avg_quality,
'pre-factor': (1 - (num_timestamps / num_clusters) ** 2)}
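    # Minimal usage sketch (toy data; all values are illustrative). The column
    # order must be 'object_id', 'time', 'cluster_id', then the features:
    #
    #   df = pandas.DataFrame({
    #       'object_id':  [1, 2, 1, 2],
    #       'time':       [0, 0, 1, 1],
    #       'cluster_id': [10, 10, 20, 20],
    #       'f1':         [0.1, 0.2, 0.15, 0.25],
    #   })
    #   CLOSE(df, measure='mse').rate_clustering()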
def rate_time_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:
"""
Optional:
            start_time (int) - time that should be considered as beginning
            end_time (int) - time which should be rated up to
return_measures (boolean) - whether additional information such as average stability and quality should be returned
Returns:
CLOSE score (float) - rating of clustering regarding all time clusterings
(dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information
if 'return_measures' is True
"""
cluster_ratings = self.rate_clusters(start_time, end_time)
num_timestamps, timestamps = self.get_num_timestamps(start_time, end_time, return_timestamps=True)
score = 0
if return_measures:
quality = 0
stability = 0
for time in timestamps:
if not return_measures:
score += self.calc_t_clustering_rating(cluster_ratings, time)
else:
cur_scores = self.calc_t_clustering_rating(cluster_ratings, time, return_measures=True)
score += cur_scores['score']
quality += cur_scores['quality']
stability += cur_scores['stability']
if return_measures:
quality /= num_timestamps
stability /= num_timestamps
num_clusters = len(cluster_ratings)
if num_clusters <= 0:
if self._output:
print('Over-Time Clustering has no Clusters!!')
return 0
if self._exp_term:
exp_term = self.calc_exploit()
factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term
else:
factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2)
if not return_measures:
return score * factor
else:
return {'stability_evaluation': score * factor,
'stability': stability,
'quality': quality,
'pre-factor': factor}
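    # The pre-factor above implements the CLOSE penalty term
    #     (1 / |T|) * (1 - (|T| / |C|) ** 2)
    # with |T| the number of timestamps and |C| the number of clusters,
    # optionally scaled by the exploitation term.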
def calc_t_clustering_rating(self, cluster_ratings: dict, time: int, return_measures: bool = False) -> Union[float, dict]:
"""
Params:
cluster_ratings (dict) - {<object_id>: <rating>} with ratings of objects
time (int) - time that should be considered
Optional:
return_measures (boolean) - whether additional information such as average stability and quality should be returned
Output:
CLOSE score (float) - rating of clustering at considered time
(dict): with key 'score', 'stability', 'quality' with additional information if 'return_measures' is True
"""
avg_stab = 0
clusters_at_time = self._data[self._data[self._time_column_name] == time][self._cluster_column_name].unique()
clusters_at_time = np.delete(clusters_at_time, np.where(clusters_at_time < 0))
for cluster in clusters_at_time:
try:
avg_stab += cluster_ratings[cluster]
except:
continue
num_clusters = len(clusters_at_time)
if num_clusters <= 0:
if self._output:
print('Time Clustering at Time ', str(time), ' has no Clusters!!')
return 0
avg_stab /= num_clusters
if self._output:
print('Average Stability at Time ', str(time), ' : ', str(avg_stab))
quality = self.measure(time)
if self._output:
print('Quality of Clustering at Time ' , str(time), ' : ', str(quality))
t_clustering_score = avg_stab * quality
if not return_measures:
return t_clustering_score
else:
return {
'score': t_clustering_score,
'stability': avg_stab,
'quality': quality
}
def rate_clusters(self, start_time: int = None, end_time: int = None, id: Union[int, str, list] = None) -> dict:
"""
Optional:
start_time (int) - time that should be considered as beginning
end_time (int) - time which should be rated up to
id (int, str, list or None) - representing the cluster_ids that should be rated. If id is None,
all objects are rated
Returns:
ratings (dict) - {<cluster_id>: <rating>} with ratings of clusters
"""
ids_to_rate = self.get_ids_to_rate(id, self._cluster_column_name, start_time, end_time)
ids = ids_to_rate[:]
# don't rate outliers
for i in ids_to_rate:
if int(i) < 0:
ids.remove(i)
ratings = self.calc_cluster_rating(ids, start_time)
return ratings
def calc_cluster_rating(self, ids_to_rate: Union[list, np.ndarray], start_time: int = None) -> dict:
"""
Params:
ids_to_rate (array-like) - list of clusters that should be rated
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings - dict {<cluster_id>: <rating>} with ratings of clusters
"""
if start_time is None:
start_time = np.min(self._data[self._time_column_name].unique())
ratings = {}
cluster_compositions = self.obtain_cluster_compositions()
gr_clusters = self._data.groupby(self._cluster_column_name)
# iterate over all cluster ids
for id in ids_to_rate:
time = gr_clusters.get_group(id)[self._time_column_name].iloc[0]
# rate the clusters of all timestamps except of the first one
if time != start_time:
num_merged_clusters = len(cluster_compositions[id])
obj_list = gr_clusters.get_group(id)[self._object_column_name].unique().tolist()
obj_ratings = self.calc_object_rating(cluster_compositions, obj_list, time)
score = 0
for obj in obj_ratings:
score += obj_ratings[obj]
try:
score /= len(obj_ratings)
except ZeroDivisionError:
if self._output:
print('Cluster ', str(id), ' has no non-outlier members.')
else:
continue
clusters = list(cluster_compositions[id].keys())
num_timestamps = len(self._data.loc[self._data[self._cluster_column_name].isin(clusters)]
[self._time_column_name].unique())
try:
div = num_merged_clusters / num_timestamps
score /= div
except ZeroDivisionError:
if self._output:
print("<<ZeroDivisionError - Cluster Score>> Cluster ID: ", str(id), " Merged Clusters: ", str(num_merged_clusters),
" Num Timestamps: ", str(num_timestamps))
else:
continue
ratings[id] = score
# clusters of the first timestamp have a stability of 1.0
else:
ratings[id] = 1.0
return ratings
def rate_object(self, id: Union[int, str, list] = None, start_time: int = None, end_time: int = None) -> dict:
"""
Optional:
id (int, str, list or None) - representing the data points that should be rated. If id is None,
all objects are rated
start_time (int) - time that should be considered as beginning
end_time (int) - representing the timestamp which should be rated up to
Returns:
ratings (dict) - {<object_id>: <rating>} with ratings of objects
"""
ids_to_rate = self.get_ids_to_rate(id, self._object_column_name)
if end_time is None:
end_time = np.max(self._data[self._time_column_name].unique())
cluster_compositions = self.obtain_cluster_compositions()
ratings = self.calc_object_rating(cluster_compositions, ids_to_rate, end_time, start_time)
return ratings
def calc_object_rating(self, cluster_composition: dict, ids_to_rate: Union[list, np.ndarray], end_time: int, start_time: int = None) -> dict:
|
def calc_exploit(self) -> float:
"""
Returns:
exploitation_term (float) - exploitation term for whole clustering
"""
num_objects = len(self._data[self._object_column_name].unique())
num_no_outliers = len(self._data[self._data[self._cluster_column_name] >= 0][self._object_column_name].unique())
return num_no_outliers / num_objects
######## HELPER FUNCTIONS ########
def get_feature_list(self, objects: Union[list, np.ndarray], time: int) -> np.ndarray:
"""
Params:
objects (array-like) - list of objects_ids that belong to considered cluster
time (int) - time of cluster that is considered
Output:
feature_list (list) - list of lists containing the features of objects in the considered cluster
"""
feature_list = []
for obj in objects:
features = self._data[
(self._data[self._object_column_name] == obj) & (self._data[self._time_column_name] == time)]
try:
features = \
features.drop([self._object_column_name, self._cluster_column_name, self._time_column_name],
axis=1).iloc[0].tolist()
except IndexError:
print(">>INDEXERROR - FEATURE LIST<< ID: ", str(obj), ", Time: ", str(time))
continue
if len(features) <= 0:
print("No features found for object ", str(obj))
continue
feature_list.append(features)
return np.array(feature_list)
def get_num_timestamps(self, start_time: int, end_time: int, return_timestamps: bool = False) -> int:
"""
Params:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Optional:
            return_timestamps (boolean) - whether the list of considered timestamps should also be returned
        Returns:
            num_timestamps (int) - number of timestamps between start_time and end_time
            timestamp_list (list) - the considered timestamps, only if 'return_timestamps' is True
"""
timestamp_list = self._data[self._time_column_name].unique()
if start_time is not None:
timestamp_list = [i for i in timestamp_list if i >= start_time]
if end_time is not None:
timestamp_list = [i for i in timestamp_list if i <= end_time]
num_timestamps = len(timestamp_list)
if not return_timestamps:
return num_timestamps
else:
return num_timestamps, timestamp_list
def get_ids_to_rate(self, id: Union[int, str, list], id_name: str, start_time: int = None, end_time: int = None) -> list:
"""
Params:
id (int, str, list or None) - representing the data points that should be rated. If id is None, all objects are rated
id_name (str) - either self._cluster_column_name or self._object_column_name, which ids to extract
Optional:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Returns:
ids_to_rate (list) - list of ids that should be rated
"""
if id is None:
data = self._data.copy()
if start_time is not None:
data = data[data[self._time_column_name] >= start_time]
if end_time is not None:
data = data[data[self._time_column_name] <= end_time]
ids_to_rate = data[id_name].unique().tolist()
elif isinstance(id, int) or isinstance(id, str):
ids_to_rate = [id]
elif isinstance(id, list):
ids_to_rate = id[:]
else:
raise Exception('id has to be int, str, list or None')
return ids_to_rate
def obtain_cluster_compositions(self) -> dict:
"""
Returns:
cluster_compositions (dict) - dict of dicts {<cluster_id>: {<cluster_id>: <proportion>}} with cluster compositions
Example:
{5: {1: 1.0, 2: 0.1, 4: 0.5}} describes that
100% of cluster 1, 10% of cluster 2 and 50% of cluster 4 belong to cluster 5
"""
cluster_compositions = {}
g_clusters = self._data.groupby([self._time_column_name, self._cluster_column_name])
if not self._jaccard:
cluster_members = self._data.groupby(self._cluster_column_name).count()
# iterate over all clusters - 'group' contains the time and cluster_id
# and 'objects' is the corresponding dataframe
for group, objects in g_clusters:
# Ignore outliers
if int(group[1]) < 0:
continue
objects = objects[self._object_column_name].values.tolist()
# temporal intersection
# select considered clusters with later timestamps than the current one to check which clusters the
# current one merged into and count, how many objects of the current cluster are in the considered clusters
# example of a series from the dataframe: [cluster_id, count] with [2, 10]
# meaning: 10 objects of the current cluster merged into the cluster with the id 2
temp_intersection = (self._data.loc[(self._data[self._object_column_name].isin(objects)) &
(self._data[self._time_column_name] > group[0])]).groupby(self._cluster_column_name).count()
# iterate over all clusters which the current cluster has merged into
# 'cluster' contains the cluster_id
# and 'con_objects' is the corresponding number of objects of the temporal intersection
for cluster, num_objects in temp_intersection.iterrows():
# Ignore outliers
if int(cluster) < 0:
continue
# for all considered clusters save the proportion of the current cluster that merged into the considered
# one
# example: {3: {2: 0.3}, 4: {2: 0.1}}
# meaning: 30% of (current) cluster 2 merged into (considered) cluster 3 and 10% into (considered) cluster 4
if cluster not in cluster_compositions:
cluster_compositions[cluster] = {}
if self._jaccard:
# cardinality of the union of both considered clusters
card_union = len(self._data.loc[(self._data[self._cluster_column_name] == cluster) |
(self._data[self._cluster_column_name] == group[1])]
[self._object_column_name].unique())
# jaccard distance
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(card_union), 3)
else:
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(cluster_members.loc[group[1]].values[1]), 3)
if group[1] not in cluster_compositions:
cluster_compositions[group[1]] = {}
return cluster_compositions
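    # hedged toy example of the proportions computed above:
    #   t=0: objects {1, 2} in cluster 0, object {3} in cluster 1
    #   t=1: objects {1, 2, 3} in cluster 5
    #   -> cluster_compositions == {5: {0: 1.0, 1: 1.0}, 0: {}, 1: {}}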
######## QUALITY MEASURES ########
@staticmethod
def calc_sse(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
sse (float) - sum of squared errors to centroid of cluster
"""
centroid = np.average(feature_list, axis=0)
sse = np.sum(np.power(feature_list - centroid[None, :], 2))
return sse
def calc_mse(self, feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
mse (float) - mean squared error of cluster
"""
sse = self.calc_sse(feature_list)
return sse / len(feature_list)
@staticmethod
def calc_mae(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
            mae (float) - mean absolute error to the centroid of the cluster
"""
centroid = np.average(feature_list, axis=0)
mae = np.average(np.abs(feature_list - centroid[None, :]))
return mae
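    # worked example for the error measures above (2-D features):
    #   feature_list = [[0, 0], [2, 0]] -> centroid = [1, 0]
    #   sse = 1 + 1 = 2.0, mse = 2.0 / 2 = 1.0, mae = mean([1, 0, 1, 0]) = 0.5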
@staticmethod
def calc_max_dist(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
            max_dist (float) - approximated maximal distance of a cluster member to the centroid of the cluster
"""
max_dist = 0
for i in range(len(feature_list) - 1):
for j in range(i + 1, len(feature_list)):
cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))
if cur_dist > max_dist:
max_dist = cur_dist
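        # the maximal pairwise distance is scaled by 1/sqrt(2) to approximate
        # the (unknown) maximal member-to-centroid distance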
max_dist /= 2 ** (1 / 2)
return max_dist
def calc_min_pts(self, feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
avg_dist (float) - average distance of cluster members to their minPts neighbor
"""
avg_dist = 0
for i in range(len(feature_list)):
            dist_list = [10] * self._minPts  # sentinel distances; assumes pairwise distances stay below 10
for j in range(len(feature_list)):
if i == j:
continue
cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))
                for k in range(len(dist_list)):
                    if cur_dist < dist_list[k]:
                        dist_list.insert(k, cur_dist)
                        dist_list.pop(self._minPts)
                        break  # keep only the minPts smallest distances
avg_dist += dist_list[self._minPts - 1]
avg_dist /= len(feature_list)
return avg_dist
@staticmethod
def return_zero():
"""
Function is used if no quality measure should be used in CLOSE
This is the case when only the exploitation term is considered
Returns:
0
"""
return 0
def calc_exploit_at_t(self, time: int) -> float:
"""
Params:
time (int) - time to be considered
Returns:
rating (float) - exploitation rating of time clustering
"""
num_objects_at_t = len(self._data[self._data[self._time_column_name] == time][self._object_column_name].unique())
num_no_outliers = len(self._data[(self._data[self._time_column_name] == time) &
(self._data[self._cluster_column_name] >= 0)][self._object_column_name].unique())
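        # e.g. 10 objects at time t, 8 of which are assigned to a real cluster (id >= 0)
        # -> exploitation rating = 8 / 10 = 0.8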
return num_no_outliers / num_objects_at_t
| """
Params:
cluster_composition (dict) - {<cluster_id>: {<contained_cluster_id>: <proportion>}} containing the proportions of
clusters (contained_cluster_id) that belong to cluster (cluster_id)
ids_to_rate (array-like) - list of data points that should be rated
            end_time (int) - last timestamp to be considered for the rating
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings - dict {<object_id>: <rating>} with ratings of objects
"""
ratings = {}
gr_clusters = self._data.groupby(self._object_column_name)
# iterate over object ids
for id in ids_to_rate:
cur_group = gr_clusters.get_group(id)
cur_group = cur_group[cur_group[self._time_column_name] <= end_time]
if start_time is not None:
cur_group = cur_group[cur_group[self._time_column_name] >= start_time]
try:
# id of the cluster of the last considered timestamp
last_cluster = cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name].iloc[
0]
except IndexError:
print(">>INDEXERROR - LAST CLUSTER<< ID: ", str(id), ", Start Time: ", str(start_time), ", End Time: ",
str(end_time))
continue
# if object is an outlier for the considered timestamp, it is skipped
if int(last_cluster) < 0:
continue
cluster_ids = cur_group[self._cluster_column_name].unique()
object_ratings = []
num_clusters = 0
has_outlier = False
for cluster in cluster_ids:
if cluster == last_cluster:
continue
                # add, for every earlier cluster the object was in, the proportion that merged into the last cluster
else:
# outliers get worst rating of 0.0
if int(cluster) < 0:
object_ratings.append(0.0)
has_outlier = True
else:
object_ratings.append(cluster_composition[last_cluster][cluster])
num_clusters += 1
if not has_outlier and len(object_ratings) == 0:
# print(str(id) + " has no data before t=" + str(end_time))
continue
if self._weighting:
try:
weighting_denominator = 0
for i in range(1, num_clusters + 1):
weighting_denominator += i
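                    # e.g. num_clusters = 3 -> weighting_denominator = 1 + 2 + 3 = 6,
                    # so the ratings are weighted 1/6, 2/6, 3/6 (later clusters count more)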
if num_clusters > 0:
object_rating = 0
for i in range(num_clusters):
object_rating += object_ratings[i] * ((i + 1) / weighting_denominator)
else:
continue
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
else:
try:
object_rating = np.sum(object_ratings)
object_rating /= num_clusters
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
ratings[id] = round(object_rating, 3)
return ratings |
response.py | from datetime import datetime
from http.client import responses as STATUS_MESSAGES
from http.cookies import SimpleCookie
import mimetypes
import os
from .case_insensitive_dict import CaseInsensitiveDict
class Response(BaseException):
def __init__(self):
self.status = 404
self.headers = CaseInsensitiveDict()
self.cookies = []
self._body = b''
self._file = None
def __str__(self):
return self.to_str()
def __repr__(self):
return self.to_str()
def to_str(self):
|
@property
def body(self):
return self._body
@body.setter
def body(self, value):
if type(value) is not bytes:
value = str(value).encode('utf-8')
self._body = value
def file(self, path, type=None, download=False, name=None):
self._file = path
if type is None:
type, _ = mimetypes.guess_type(path)
self.headers['Content-Type'] = type or 'application/octet-stream'
self.headers['Content-Disposition'] = 'attachment' if download else 'inline'
self.headers['Content-Disposition'] += '; filename="{}"'.format(name or os.path.basename(path))
self.headers['Content-Length'] = str(os.stat(path).st_size)
def set_cookie(self, key, value, expires=None, domain=None, path=None, secure=False, http_only=True, same_site=True):
cookie = SimpleCookie({key: value}).get(key).OutputString()
if expires:
cookie += '; Expires=' + expires.strftime('%a, %d %b %Y %T') + ' GMT'
if domain:
cookie += '; Domain=' + domain
if path:
cookie += '; Path=' + path
if secure:
cookie += '; Secure'
if http_only:
cookie += '; HttpOnly'
if same_site:
cookie += '; SameSite=Strict'
self.cookies.append(cookie)
def unset_cookie(self, key, domain=None, path=None):
cookie = key + '=; Expires=' + datetime(1970, 1, 1).strftime('%a, %d %b %Y %T') + ' GMT'
if domain:
cookie += '; Domain=' + domain
if path:
cookie += '; Path=' + path
self.cookies.append(cookie)
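    # hedged usage sketch (defaults as defined above):
    #   r = Response()
    #   r.set_cookie('sid', 'abc', secure=True)
    #   r.cookies[-1]  # -> 'sid=abc; Secure; HttpOnly; SameSite=Strict'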
    def wsgi(self, start_response):
        start_response(self._wsgi_status(), self._wsgi_headers())
if self._file:
return self._wsgi_file()
return self._wsgi_body()
def _wsgi_status(self):
return str(self.status) + ' ' + STATUS_MESSAGES.get(self.status, '')
def _wsgi_headers(self):
headers = list(self.headers.items())
for cookie in self.cookies:
headers.append(('Set-Cookie', cookie))
return headers
def _wsgi_body(self):
return (self.body,)
def _wsgi_file(self):
with open(self._file, 'rb') as f:
mbyte = 1024 ** 2
while True:
chunk = f.read(mbyte)
if not chunk:
break
yield chunk
| cls = type(self).__name__
headers = len(self.headers)
bytes = len(self._body)
return '<{}:{}h:{}b>'.format(cls, headers, bytes) |
relay.py | from __future__ import absolute_import
import six
from django.db import models
from django.utils import timezone
from sentry.db.models import Model
from django.utils.functional import cached_property
import semaphore
class Relay(Model):
__core__ = True
relay_id = models.CharField(max_length=64, unique=True)
public_key = models.CharField(max_length=200)
first_seen = models.DateTimeField(default=timezone.now)
last_seen = models.DateTimeField(default=timezone.now)
is_internal = models.BooleanField(default=False)
class Meta:
app_label = "sentry"
db_table = "sentry_relay"
@cached_property | def public_key_object(self):
return semaphore.PublicKey.parse(self.public_key)
def has_org_access(self, org):
# Internal relays always have access
if self.is_internal:
return True
# Use the normalized form of the public key for the check
return six.text_type(self.public_key_object) in org.get_option("sentry:trusted-relays", []) | |
commands.py | from enum import Enum
from schematics.exceptions import DataError
from schematics.models import Model
from schematics.types import StringType, DateTimeType, DecimalType
class ResultStatus(str, Enum):
OK = 'ok'
PENDING = 'pending'
ERROR = 'error'
class | (object):
def __init__(self, status: ResultStatus, **kwargs):
self._kwargs = kwargs
self.status = status
def __repr__(self):
return '<{}>({}) {}'.format(type(self).__name__, self.status, self._kwargs)
class Command(Model):
"""
    Command is an immutable data structure holding a command's input data
"""
def is_valid(self):
try:
self.validate()
except DataError:
return False
return True
def validation_errors(self):
try:
self.validate()
return None
except Exception as e:
return e
def __repr__(self):
return '<{}>({})'.format(type(self).__name__, self.__dict__['_data'])
class AddItemCommand(Command):
seller_id = StringType(required=True)
title = StringType(required=True)
description = StringType()
starting_price = DecimalType()
end_date = DateTimeType()
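# hedged usage sketch (field names as declared above):
#   AddItemCommand({'seller_id': 's1', 'title': 'Lamp'}).is_valid()   # -> True
#   AddItemCommand({'title': 'Lamp'}).is_valid()  # -> False (seller_id is required)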
| CommandResult |
leaf.rs | use crate::two_radix::node::DecimationNode;
#[derive(Debug, PartialEq, Clone)]
pub struct DecimationLeaf
{
pub lhs: Vec<DecimationNode>,
pub rhs: Vec<DecimationNode>,
pub stage: usize,
}
impl IntoIterator for DecimationLeaf
{
type Item = DecimationNode;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter
{
self.lhs.into_iter()
.chain(self.rhs.into_iter())
.collect::<Vec<_>>()
.into_iter()
}
}
impl DecimationLeaf
{
pub fn new(lhs: Vec<DecimationNode>, rhs: Vec<DecimationNode>, stage: usize) -> Self { Self{ lhs, rhs, stage } }
pub fn new_empty() -> Self { Self { lhs: Vec::new(), rhs: Vec::new(), stage: 1 } }
    // takes two leaves and builds them into a single parent leaf.
    // concatenation assigns the new stage to the nodes and re-indexes them correctly.
pub fn generate_parent(self, other: Self) -> Self
{
assert_eq!(self.stage, other.stage);
assert_eq!(self.lhs.len(), other.lhs.len());
assert_eq!(self.rhs.len(), other.rhs.len());
assert_eq!(self.lhs.len(), self.rhs.len());
let mut parent = Self::new_empty();
parent.stage = self.stage+1;
for (i, (mut lhs, mut rhs)) in self.into_iter()
.zip( other.into_iter() )
.enumerate()
{
lhs.index=i;
rhs.index=i;
lhs.stage+=1;
rhs.stage+=1;
lhs.twiddle=false;
rhs.twiddle=true;
parent.lhs.push(lhs);
parent.rhs.push(rhs);
}
parent
}
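    // e.g. combining two stage-1 leaves yields a stage-2 leaf whose lhs holds the
    // first leaf's nodes (twiddle = false) and whose rhs holds the second leaf's
    // nodes (twiddle = true), both re-indexed from 0.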
    // applies the butterfly operation to the paired decimation nodes.
pub fn transform(self) -> Self
{
let mut leaf = Self::new_empty();
for (lhs, rhs) in self.lhs
.into_iter()
.zip( self.rhs.into_iter() )
{
let (lhs_node, rhs_node)=lhs.map_butterflies(rhs);
leaf.lhs.push(lhs_node);
leaf.rhs.push(rhs_node);
}
leaf
}
}
#[cfg(test)]
mod tests
{
use super::*;
use crate::two_radix::node::DecimationNode;
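    // NOTE: `Complex` below is not imported in this module; it presumably comes
    // from the crate's complex-number type (e.g. `num_complex::Complex`).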
#[test]
fn test_concatenation()
{
let a0 = DecimationNode::new(Complex::from(0.0), 1, 0, false);
let a4 = DecimationNode::new(Complex::from(4.0), 1, 1, true);
let a2 = DecimationNode::new(Complex::from(2.0), 1, 0, false);
let a6 = DecimationNode::new(Complex::from(6.0), 1, 1, true);
let a1 = DecimationNode::new(Complex::from(1.0), 1, 0, false);
let a5 = DecimationNode::new(Complex::from(5.0), 1, 1, true);
let a3 = DecimationNode::new(Complex::from(3.0), 1, 0, false);
let a7 = DecimationNode::new(Complex::from(7.0), 1, 1, true);
let concat = DecimationLeaf::generate_parent(
DecimationLeaf::new(vec![a0, a4], vec![a2, a6], 1),
DecimationLeaf::new(vec![a1, a5], vec![a3, a7], 1)
);
let exp = DecimationLeaf::new(vec![a0, a4, a2, a6], vec![a1, a5, a3, a7], 2);
let conc_lhs = concat.lhs
.into_iter()
.map(|node| node.element)
.collect::<Vec<_>>();
let conc_rhs = concat.rhs
.into_iter()
.map(|node| node.element)
.collect::<Vec<_>>(); | .into_iter()
.map(|node| node.element)
.collect::<Vec<_>>();
let exp_rhs = exp.rhs
.into_iter()
.map(|node| node.element)
.collect::<Vec<_>>();
assert_eq!(exp_lhs, conc_lhs);
assert_eq!(exp_rhs, conc_rhs);
}
} | let exp_lhs = exp.lhs |
aggregates.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Declaration of built-in (aggregate) functions.
//! This module contains built-in aggregates' enumeration and metadata.
//!
//! Generally, an aggregate has:
//! * a signature
//! * a return type, that is a function of the incoming argument's types
//! * the computation, that must accept each valid signature
//!
//! * Signature: see `Signature`
//! * Return type: a function `(arg_types) -> return_type`. E.g. for min, ([f32]) -> f32, ([f64]) -> f64.
use super::{
functions::{Signature, Volatility},
Accumulator, AggregateExpr, PhysicalExpr,
};
use crate::error::{DataFusionError, Result};
use crate::physical_plan::coercion_rule::aggregate_rule::{coerce_exprs, coerce_types};
use crate::physical_plan::expressions;
use arrow::datatypes::{DataType, Field, Schema, TimeUnit};
use expressions::{
avg_return_type, correlation_return_type, covariance_return_type, stddev_return_type,
sum_return_type, variance_return_type,
};
use std::{fmt, str::FromStr, sync::Arc};
/// the implementation of an aggregate function
pub type AccumulatorFunctionImplementation =
Arc<dyn Fn() -> Result<Box<dyn Accumulator>> + Send + Sync>;
/// Computes the types in which an aggregator serializes
/// its state, given its return datatype.
pub type StateTypeFunction =
Arc<dyn Fn(&DataType) -> Result<Arc<Vec<DataType>>> + Send + Sync>;
/// Enum of all built-in aggregate functions
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Hash)]
pub enum AggregateFunction {
/// count
Count,
/// sum
Sum,
/// min
Min,
/// max
Max,
/// avg
Avg,
    /// approx_distinct
ApproxDistinct,
/// array_agg
ArrayAgg,
/// Variance (Sample)
Variance,
/// Variance (Population)
VariancePop,
/// Standard Deviation (Sample)
Stddev,
/// Standard Deviation (Population)
StddevPop,
/// Covariance (Sample)
Covariance,
/// Covariance (Population)
CovariancePop,
/// Correlation
Correlation,
}
impl fmt::Display for AggregateFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // uppercase of the debug representation.
write!(f, "{}", format!("{:?}", self).to_uppercase())
}
}
impl FromStr for AggregateFunction {
type Err = DataFusionError;
fn from_str(name: &str) -> Result<AggregateFunction> {
Ok(match name {
"min" => AggregateFunction::Min,
"max" => AggregateFunction::Max,
"count" => AggregateFunction::Count,
"avg" => AggregateFunction::Avg,
"sum" => AggregateFunction::Sum,
"approx_distinct" => AggregateFunction::ApproxDistinct,
"array_agg" => AggregateFunction::ArrayAgg,
"var" => AggregateFunction::Variance,
"var_samp" => AggregateFunction::Variance,
"var_pop" => AggregateFunction::VariancePop,
"stddev" => AggregateFunction::Stddev,
"stddev_samp" => AggregateFunction::Stddev,
"stddev_pop" => AggregateFunction::StddevPop,
"covar" => AggregateFunction::Covariance,
"covar_samp" => AggregateFunction::Covariance,
"covar_pop" => AggregateFunction::CovariancePop,
"corr" => AggregateFunction::Correlation,
_ => {
return Err(DataFusionError::Plan(format!(
"There is no built-in function named {}",
name
)));
}
})
}
}
/// Returns the datatype of the aggregate function.
/// This is used to get the returned data type for aggregate expr.
pub fn return_type(
fun: &AggregateFunction,
input_expr_types: &[DataType],
) -> Result<DataType> {
// Note that this function *must* return the same type that the respective physical expression returns
// or the execution panics.
let coerced_data_types = coerce_types(fun, input_expr_types, &signature(fun))?;
match fun {
        // TODO: to be compatible with PostgreSQL, the returned data type should be INT64.
AggregateFunction::Count | AggregateFunction::ApproxDistinct => {
Ok(DataType::UInt64)
}
AggregateFunction::Max | AggregateFunction::Min => {
            // For the min and max aggregate functions, the return type is the same as the input type.
            // The coerced_data_types are the same as the input types.
Ok(coerced_data_types[0].clone())
}
AggregateFunction::Sum => sum_return_type(&coerced_data_types[0]),
AggregateFunction::Variance => variance_return_type(&coerced_data_types[0]),
AggregateFunction::VariancePop => variance_return_type(&coerced_data_types[0]),
AggregateFunction::Covariance => covariance_return_type(&coerced_data_types[0]),
AggregateFunction::CovariancePop => {
covariance_return_type(&coerced_data_types[0])
}
AggregateFunction::Correlation => correlation_return_type(&coerced_data_types[0]),
AggregateFunction::Stddev => stddev_return_type(&coerced_data_types[0]),
AggregateFunction::StddevPop => stddev_return_type(&coerced_data_types[0]),
AggregateFunction::Avg => avg_return_type(&coerced_data_types[0]),
AggregateFunction::ArrayAgg => Ok(DataType::List(Box::new(Field::new(
"item",
coerced_data_types[0].clone(),
true,
)))),
}
}
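// Hedged usage sketch (mirrors the unit tests at the bottom of this file):
//   return_type(&AggregateFunction::Avg, &[DataType::Int32]) -> Ok(DataType::Float64)
//   return_type(&AggregateFunction::Sum, &[DataType::UInt8]) -> Ok(DataType::UInt64)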
/// Create a physical aggregation expression.
/// This function errors when `input_phy_exprs` can't be coerced to a valid argument type of the aggregate function.
pub fn create_aggregate_expr(
fun: &AggregateFunction,
distinct: bool,
input_phy_exprs: &[Arc<dyn PhysicalExpr>],
input_schema: &Schema,
name: impl Into<String>,
) -> Result<Arc<dyn AggregateExpr>> {
let name = name.into();
// get the coerced phy exprs if some expr need to be wrapped with the try cast.
let coerced_phy_exprs =
coerce_exprs(fun, input_phy_exprs, input_schema, &signature(fun))?;
if coerced_phy_exprs.is_empty() {
return Err(DataFusionError::Plan(format!(
"Invalid or wrong number of arguments passed to aggregate: '{}'",
name,
)));
}
let coerced_exprs_types = coerced_phy_exprs
.iter()
.map(|e| e.data_type(input_schema))
.collect::<Result<Vec<_>>>()?;
// get the result data type for this aggregate function
let input_phy_types = input_phy_exprs
.iter()
.map(|e| e.data_type(input_schema))
.collect::<Result<Vec<_>>>()?;
let return_type = return_type(fun, &input_phy_types)?;
Ok(match (fun, distinct) {
(AggregateFunction::Count, false) => Arc::new(expressions::Count::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Count, true) => Arc::new(expressions::DistinctCount::new(
coerced_exprs_types,
coerced_phy_exprs,
name,
return_type,
)),
(AggregateFunction::Sum, false) => Arc::new(expressions::Sum::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Sum, true) => {
return Err(DataFusionError::NotImplemented(
"SUM(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::ApproxDistinct, _) => {
Arc::new(expressions::ApproxDistinct::new(
coerced_phy_exprs[0].clone(),
name,
coerced_exprs_types[0].clone(),
))
}
(AggregateFunction::ArrayAgg, false) => Arc::new(expressions::ArrayAgg::new(
coerced_phy_exprs[0].clone(),
name,
coerced_exprs_types[0].clone(),
)),
(AggregateFunction::ArrayAgg, true) => {
Arc::new(expressions::DistinctArrayAgg::new(
coerced_phy_exprs[0].clone(),
name,
coerced_exprs_types[0].clone(),
))
}
(AggregateFunction::Min, _) => Arc::new(expressions::Min::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Max, _) => Arc::new(expressions::Max::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Avg, false) => Arc::new(expressions::Avg::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Avg, true) => {
return Err(DataFusionError::NotImplemented(
"AVG(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::Variance, false) => Arc::new(expressions::Variance::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Variance, true) => {
return Err(DataFusionError::NotImplemented(
"VAR(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::VariancePop, false) => {
Arc::new(expressions::VariancePop::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
))
}
(AggregateFunction::VariancePop, true) => {
return Err(DataFusionError::NotImplemented(
"VAR_POP(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::Covariance, false) => Arc::new(expressions::Covariance::new(
coerced_phy_exprs[0].clone(),
coerced_phy_exprs[1].clone(),
name,
return_type,
)),
(AggregateFunction::Covariance, true) => {
return Err(DataFusionError::NotImplemented(
"COVAR(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::CovariancePop, false) => {
Arc::new(expressions::CovariancePop::new(
coerced_phy_exprs[0].clone(),
coerced_phy_exprs[1].clone(),
name,
return_type,
))
}
(AggregateFunction::CovariancePop, true) => {
return Err(DataFusionError::NotImplemented(
"COVAR_POP(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::Stddev, false) => Arc::new(expressions::Stddev::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::Stddev, true) => {
return Err(DataFusionError::NotImplemented(
"STDDEV(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::StddevPop, false) => Arc::new(expressions::StddevPop::new(
coerced_phy_exprs[0].clone(),
name,
return_type,
)),
(AggregateFunction::StddevPop, true) => {
return Err(DataFusionError::NotImplemented(
"STDDEV_POP(DISTINCT) aggregations are not available".to_string(),
));
}
(AggregateFunction::Correlation, false) => {
Arc::new(expressions::Correlation::new(
coerced_phy_exprs[0].clone(),
coerced_phy_exprs[1].clone(),
name,
return_type,
))
}
(AggregateFunction::Correlation, true) => {
return Err(DataFusionError::NotImplemented(
"CORR(DISTINCT) aggregations are not available".to_string(),
));
}
})
}
static STRINGS: &[DataType] = &[DataType::Utf8, DataType::LargeUtf8];
static NUMERICS: &[DataType] = &[
DataType::Int8,
DataType::Int16,
DataType::Int32,
DataType::Int64,
DataType::UInt8,
DataType::UInt16,
DataType::UInt32,
DataType::UInt64,
DataType::Float32,
DataType::Float64,
];
static TIMESTAMPS: &[DataType] = &[
DataType::Timestamp(TimeUnit::Second, None),
DataType::Timestamp(TimeUnit::Millisecond, None),
DataType::Timestamp(TimeUnit::Microsecond, None),
DataType::Timestamp(TimeUnit::Nanosecond, None),
];
static DATES: &[DataType] = &[DataType::Date32, DataType::Date64];
/// the signatures supported by the function `fun`.
pub fn signature(fun: &AggregateFunction) -> Signature {
// note: the physical expression must accept the type returned by this function or the execution panics.
match fun {
AggregateFunction::Count
| AggregateFunction::ApproxDistinct
| AggregateFunction::ArrayAgg => Signature::any(1, Volatility::Immutable),
AggregateFunction::Min | AggregateFunction::Max => {
let valid = STRINGS
.iter()
.chain(NUMERICS.iter())
.chain(TIMESTAMPS.iter())
.chain(DATES.iter())
.cloned()
.collect::<Vec<_>>();
Signature::uniform(1, valid, Volatility::Immutable)
}
AggregateFunction::Avg
| AggregateFunction::Sum
| AggregateFunction::Variance
| AggregateFunction::VariancePop
| AggregateFunction::Stddev
| AggregateFunction::StddevPop => {
Signature::uniform(1, NUMERICS.to_vec(), Volatility::Immutable)
}
AggregateFunction::Covariance | AggregateFunction::CovariancePop => {
Signature::uniform(2, NUMERICS.to_vec(), Volatility::Immutable)
}
AggregateFunction::Correlation => {
Signature::uniform(2, NUMERICS.to_vec(), Volatility::Immutable)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Result;
use crate::physical_plan::expressions::{
        ApproxDistinct, ArrayAgg, Avg, Correlation, Count, Covariance, CovariancePop,
        DistinctArrayAgg, DistinctCount, Max, Min, Stddev, StddevPop, Sum, Variance,
        VariancePop,
};
#[test]
fn test_count_arragg_approx_expr() -> Result<()> {
let funcs = vec![
AggregateFunction::Count,
AggregateFunction::ArrayAgg,
AggregateFunction::ApproxDistinct,
];
let data_types = vec![
DataType::UInt32,
DataType::Int32,
DataType::Float32,
DataType::Float64,
DataType::Decimal(10, 2),
DataType::Utf8,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
match fun {
AggregateFunction::Count => {
assert!(result_agg_phy_exprs.as_any().is::<Count>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::UInt64, true),
result_agg_phy_exprs.field().unwrap()
);
}
AggregateFunction::ApproxDistinct => {
assert!(result_agg_phy_exprs.as_any().is::<ApproxDistinct>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::UInt64, false),
result_agg_phy_exprs.field().unwrap()
);
}
AggregateFunction::ArrayAgg => {
assert!(result_agg_phy_exprs.as_any().is::<ArrayAgg>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new(
"c1",
DataType::List(Box::new(Field::new(
"item",
data_type.clone(),
true
))),
false
),
result_agg_phy_exprs.field().unwrap()
);
}
_ => {}
};
let result_distinct = create_aggregate_expr(
&fun,
true,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
match fun {
AggregateFunction::Count => {
assert!(result_distinct.as_any().is::<DistinctCount>());
assert_eq!("c1", result_distinct.name());
assert_eq!(
Field::new("c1", DataType::UInt64, true),
result_distinct.field().unwrap()
);
}
AggregateFunction::ApproxDistinct => {
assert!(result_distinct.as_any().is::<ApproxDistinct>());
assert_eq!("c1", result_distinct.name());
assert_eq!(
Field::new("c1", DataType::UInt64, false),
result_distinct.field().unwrap()
);
}
AggregateFunction::ArrayAgg => {
assert!(result_distinct.as_any().is::<DistinctArrayAgg>());
assert_eq!("c1", result_distinct.name());
assert_eq!(
Field::new(
"c1",
DataType::List(Box::new(Field::new(
"item",
data_type.clone(),
true
))),
false
),
                            result_distinct.field().unwrap()
);
}
_ => {}
};
}
}
Ok(())
}
#[test]
fn test_min_max_expr() -> Result<()> {
let funcs = vec![AggregateFunction::Min, AggregateFunction::Max];
let data_types = vec![
DataType::UInt32,
DataType::Int32,
DataType::Float32,
DataType::Float64,
DataType::Decimal(10, 2),
DataType::Utf8,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
match fun {
AggregateFunction::Min => {
assert!(result_agg_phy_exprs.as_any().is::<Min>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", data_type.clone(), true),
result_agg_phy_exprs.field().unwrap()
);
}
AggregateFunction::Max => {
assert!(result_agg_phy_exprs.as_any().is::<Max>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", data_type.clone(), true),
result_agg_phy_exprs.field().unwrap()
);
}
_ => {}
};
}
}
Ok(())
}
#[test]
fn test_sum_avg_expr() -> Result<()> {
let funcs = vec![AggregateFunction::Sum, AggregateFunction::Avg];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
match fun {
AggregateFunction::Sum => {
assert!(result_agg_phy_exprs.as_any().is::<Sum>());
assert_eq!("c1", result_agg_phy_exprs.name());
let expect_type = match data_type {
DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64 => DataType::UInt64,
DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::Int64 => DataType::Int64,
DataType::Float32 | DataType::Float64 => DataType::Float64,
_ => data_type.clone(),
};
assert_eq!(
Field::new("c1", expect_type.clone(), true),
result_agg_phy_exprs.field().unwrap()
);
}
AggregateFunction::Avg => {
assert!(result_agg_phy_exprs.as_any().is::<Avg>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
);
}
_ => {}
};
}
}
Ok(())
}
#[test]
fn test_variance_expr() -> Result<()> {
let funcs = vec![AggregateFunction::Variance];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
if fun == AggregateFunction::Variance {
assert!(result_agg_phy_exprs.as_any().is::<Variance>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn test_var_pop_expr() -> Result<()> {
let funcs = vec![AggregateFunction::VariancePop];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
                if fun == AggregateFunction::VariancePop {
                    assert!(result_agg_phy_exprs.as_any().is::<VariancePop>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn | () -> Result<()> {
let funcs = vec![AggregateFunction::Stddev];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
                if fun == AggregateFunction::Stddev {
assert!(result_agg_phy_exprs.as_any().is::<Stddev>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn test_stddev_pop_expr() -> Result<()> {
let funcs = vec![AggregateFunction::StddevPop];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema =
Schema::new(vec![Field::new("c1", data_type.clone(), true)]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![Arc::new(
expressions::Column::new_with_schema("c1", &input_schema).unwrap(),
)];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..1],
&input_schema,
"c1",
)?;
                if fun == AggregateFunction::StddevPop {
                    assert!(result_agg_phy_exprs.as_any().is::<StddevPop>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn test_covar_expr() -> Result<()> {
let funcs = vec![AggregateFunction::Covariance];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema = Schema::new(vec![
Field::new("c1", data_type.clone(), true),
Field::new("c2", data_type.clone(), true),
]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![
Arc::new(
expressions::Column::new_with_schema("c1", &input_schema)
.unwrap(),
),
Arc::new(
expressions::Column::new_with_schema("c2", &input_schema)
.unwrap(),
),
];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..2],
&input_schema,
"c1",
)?;
if fun == AggregateFunction::Covariance {
assert!(result_agg_phy_exprs.as_any().is::<Covariance>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn test_covar_pop_expr() -> Result<()> {
let funcs = vec![AggregateFunction::CovariancePop];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema = Schema::new(vec![
Field::new("c1", data_type.clone(), true),
Field::new("c2", data_type.clone(), true),
]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![
Arc::new(
expressions::Column::new_with_schema("c1", &input_schema)
.unwrap(),
),
Arc::new(
expressions::Column::new_with_schema("c2", &input_schema)
.unwrap(),
),
];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..2],
&input_schema,
"c1",
)?;
                if fun == AggregateFunction::CovariancePop {
                    assert!(result_agg_phy_exprs.as_any().is::<CovariancePop>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn test_corr_expr() -> Result<()> {
let funcs = vec![AggregateFunction::Correlation];
let data_types = vec![
DataType::UInt32,
DataType::UInt64,
DataType::Int32,
DataType::Int64,
DataType::Float32,
DataType::Float64,
];
for fun in funcs {
for data_type in &data_types {
let input_schema = Schema::new(vec![
Field::new("c1", data_type.clone(), true),
Field::new("c2", data_type.clone(), true),
]);
let input_phy_exprs: Vec<Arc<dyn PhysicalExpr>> = vec![
Arc::new(
expressions::Column::new_with_schema("c1", &input_schema)
.unwrap(),
),
Arc::new(
expressions::Column::new_with_schema("c2", &input_schema)
.unwrap(),
),
];
let result_agg_phy_exprs = create_aggregate_expr(
&fun,
false,
&input_phy_exprs[0..2],
&input_schema,
"c1",
)?;
                if fun == AggregateFunction::Correlation {
assert!(result_agg_phy_exprs.as_any().is::<Correlation>());
assert_eq!("c1", result_agg_phy_exprs.name());
assert_eq!(
Field::new("c1", DataType::Float64, true),
result_agg_phy_exprs.field().unwrap()
)
}
}
}
Ok(())
}
#[test]
fn test_min_max() -> Result<()> {
let observed = return_type(&AggregateFunction::Min, &[DataType::Utf8])?;
assert_eq!(DataType::Utf8, observed);
let observed = return_type(&AggregateFunction::Max, &[DataType::Int32])?;
assert_eq!(DataType::Int32, observed);
// test decimal for min
let observed = return_type(&AggregateFunction::Min, &[DataType::Decimal(10, 6)])?;
assert_eq!(DataType::Decimal(10, 6), observed);
// test decimal for max
let observed =
return_type(&AggregateFunction::Max, &[DataType::Decimal(28, 13)])?;
assert_eq!(DataType::Decimal(28, 13), observed);
Ok(())
}
#[test]
fn test_sum_return_type() -> Result<()> {
let observed = return_type(&AggregateFunction::Sum, &[DataType::Int32])?;
assert_eq!(DataType::Int64, observed);
let observed = return_type(&AggregateFunction::Sum, &[DataType::UInt8])?;
assert_eq!(DataType::UInt64, observed);
let observed = return_type(&AggregateFunction::Sum, &[DataType::Float32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Sum, &[DataType::Float64])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Sum, &[DataType::Decimal(10, 5)])?;
assert_eq!(DataType::Decimal(20, 5), observed);
let observed = return_type(&AggregateFunction::Sum, &[DataType::Decimal(35, 5)])?;
assert_eq!(DataType::Decimal(38, 5), observed);
Ok(())
}
#[test]
fn test_sum_no_utf8() {
let observed = return_type(&AggregateFunction::Sum, &[DataType::Utf8]);
assert!(observed.is_err());
}
#[test]
fn test_sum_upcasts() -> Result<()> {
let observed = return_type(&AggregateFunction::Sum, &[DataType::UInt32])?;
assert_eq!(DataType::UInt64, observed);
Ok(())
}
#[test]
fn test_count_return_type() -> Result<()> {
let observed = return_type(&AggregateFunction::Count, &[DataType::Utf8])?;
assert_eq!(DataType::UInt64, observed);
let observed = return_type(&AggregateFunction::Count, &[DataType::Int8])?;
assert_eq!(DataType::UInt64, observed);
let observed =
return_type(&AggregateFunction::Count, &[DataType::Decimal(28, 13)])?;
assert_eq!(DataType::UInt64, observed);
Ok(())
}
#[test]
fn test_avg_return_type() -> Result<()> {
let observed = return_type(&AggregateFunction::Avg, &[DataType::Float32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Avg, &[DataType::Float64])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Avg, &[DataType::Int32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Avg, &[DataType::Decimal(10, 6)])?;
assert_eq!(DataType::Decimal(14, 10), observed);
let observed = return_type(&AggregateFunction::Avg, &[DataType::Decimal(36, 6)])?;
assert_eq!(DataType::Decimal(38, 10), observed);
Ok(())
}
#[test]
fn test_avg_no_utf8() {
let observed = return_type(&AggregateFunction::Avg, &[DataType::Utf8]);
assert!(observed.is_err());
}
#[test]
fn test_variance_return_type() -> Result<()> {
let observed = return_type(&AggregateFunction::Variance, &[DataType::Float32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Variance, &[DataType::Float64])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Variance, &[DataType::Int32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Variance, &[DataType::UInt32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Variance, &[DataType::Int64])?;
assert_eq!(DataType::Float64, observed);
Ok(())
}
#[test]
fn test_variance_no_utf8() {
let observed = return_type(&AggregateFunction::Variance, &[DataType::Utf8]);
assert!(observed.is_err());
}
#[test]
fn test_stddev_return_type() -> Result<()> {
let observed = return_type(&AggregateFunction::Stddev, &[DataType::Float32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Stddev, &[DataType::Float64])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Stddev, &[DataType::Int32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Stddev, &[DataType::UInt32])?;
assert_eq!(DataType::Float64, observed);
let observed = return_type(&AggregateFunction::Stddev, &[DataType::Int64])?;
assert_eq!(DataType::Float64, observed);
Ok(())
}
#[test]
fn test_stddev_no_utf8() {
let observed = return_type(&AggregateFunction::Stddev, &[DataType::Utf8]);
assert!(observed.is_err());
}
}
| test_stddev_expr |
index.ts | import {addFeature} from "./features/addFeature";
import {groupRecoveryFeature} from "./features/groupRecovery/groupRecoveryFeature"; | import {Parser} from "./Parser";
import {groupRecoveryBaseFeature} from "./features/groupRecovery/groupRecoveryBaseFeature";
import {ICST} from "./_types/CST/ICST";
import {isError} from "./parser/isError";
import {unitOrVarBaseFeature} from "./features/variables/unitOrVarBaseFeature";
import {subtractFeature} from "./features/subtractFeature";
import {divideFeature} from "./features/divideFeature";
import {implicitMultiplyFeature} from "./features/implicitMultiplyFeature";
import {EvaluationContext} from "./parser/AST/EvaluationContext";
import {unitConfigContextIdentifier} from "./features/variables/unitConfigContextIdentifier";
import {varBaseFeature} from "./features/variables/varBaseFeature";
import {unitBaseFeature} from "./features/variables/unitBaseFeature";
import {conversionFeature} from "./features/conversionFeature";
import {unarySubtractFeature} from "./features/unarySubtractFeature";
import {unaryAddFeature} from "./features/unaryAddFeature";
import {moduloFeature} from "./features/moduloFeature";
import {powerFeature} from "./features/powerFeature";
import {factorialFeature} from "./features/factorialFunction";
import {number} from "./features/util/number/number";
import {formattedNumberBaseFeature} from "./features/number/formattedNumberBaseFeature";
import {binaryNumberBaseFeature} from "./features/number/binaryNumberBaseFeature";
import {hexadecimalNumberBaseFeature} from "./features/number/hexadecimalNumberBaseFeature";
import {octalNumberBaseFeature} from "./features/number/octalNumberBaseFeature";
import {formatNumber} from "./features/util/number/formatNumber";
// Things that remain to be done:
// TODO: Add more units and dimensions, E.g. american units, hz, angles and temperatures
// TODO: Add more functions, E.g. floor, ceil, max, min, sin, cos, log, root
// TODO: Add date type
// TODO: Add list type
// TODO: Make a high level wrapper such that it's easy to use for the default case
// TODO: Make the index export all relevant things
const parser = new Parser({
features: [
groupRecoveryFeature,
addFeature,
subtractFeature,
multiplyFeature,
divideFeature,
implicitMultiplyFeature,
conversionFeature,
unarySubtractFeature,
unaryAddFeature,
moduloFeature,
powerFeature,
factorialFeature,
],
baseFeatures: [
formattedNumberBaseFeature,
functionBaseFeature,
groupRecoveryBaseFeature,
unitOrVarBaseFeature,
varBaseFeature,
unitBaseFeature,
numberBaseFeature,
binaryNumberBaseFeature,
hexadecimalNumberBaseFeature,
octalNumberBaseFeature,
],
});
// const input = "(41 as hexadecimal)meter second kg^-1";
// const input = "10.4";
const input = "(1011 as binary) m/s + F1 as hexadecimal) km/h in base5";
const result = parser.parse(input);
if ("errors" in result) {
console.log(...result.errors.map(({multilineMessage}) => multilineMessage));
} else {
const tree = result.ast.tree;
if (tree.type == "function") {
const arg = tree.args[0];
if (arg.type == "multiply") console.log(arg.left);
}
if (tree.type == "multiply") {
const f1 = tree.left;
if (f1.type == "number") {
}
}
if (result.containsCorrection) {
console.log(`"${input}" could also have been:`);
console.time("alternatives");
for (let altResult of result.getCorrectionAlternatives()) {
debugger;
console.log(altResult.cst + "");
}
console.timeEnd("alternatives");
}
console.time("eval");
const context = new EvaluationContext().augment(unitConfigContextIdentifier, {
customUnits: true,
});
const evalResult = parser.evaluate(input, context);
console.timeEnd("eval");
if (isError(evalResult)) {
evalResult.errors.forEach(error => console.log(error.multilineMessage));
} else if (evalResult.isA(number)) {
const {value, approxEquals} = formatNumber(evalResult, context);
console.log(input + " " + (approxEquals ? "~=" : "="));
console.log(value);
}
}
// const result2 = parse("$4+max(4*2)+5").parse();
// console.log(result2);
function toString(tree: ICST, debug: boolean = false): string {
if ("text" in tree) return tree.text;
const children = tree.children.map(child => toString(child, debug)).join("");
return debug ? `[${children}]` : children;
} | import {functionBaseFeature} from "./features/functions/functionBaseFeature";
import {multiplyFeature} from "./features/multiplyFeature";
import {numberBaseFeature} from "./features/number/numberBaseFeature"; |
types.py | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import copy
import datetime
import time
__docformat__ = "epytext"
class Attr(object):
"""
Encapsulates information about an attribute in the JSON encoding of the
object. It identifies properties of the attribute such as whether it's
read-only, its type, etc.
"""
DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
def __init__(self, atype=None, rw=True, is_api_list=False):
self._atype = atype
self._is_api_list = is_api_list
self.rw = rw
def to_json(self, value, preserve_ro):
"""
Returns the JSON encoding of the given attribute value.
    If the value has a 'to_json_dict' method, that method is called. Otherwise,
the following values are returned for each input type:
- datetime.datetime: string with the API representation of a date.
- dictionary: if 'atype' is ApiConfig, a list of ApiConfig objects.
- python list: python list (or ApiList) with JSON encoding of items
- the raw value otherwise
"""
if hasattr(value, 'to_json_dict'):
return value.to_json_dict(preserve_ro)
elif isinstance(value, dict) and self._atype == ApiConfig:
return config_to_api_list(value)
elif isinstance(value, datetime.datetime):
return value.strftime(self.DATE_FMT)
elif isinstance(value, list) or isinstance(value, tuple):
if self._is_api_list:
return ApiList(value).to_json_dict()
else:
return [ self.to_json(x, preserve_ro) for x in value ]
else:
return value
def from_json(self, resource_root, data):
"""
Parses the given JSON value into an appropriate python object.
This means:
- a datetime.datetime if 'atype' is datetime.datetime
- a converted config dictionary or config list if 'atype' is ApiConfig
- if the attr is an API list, an ApiList with instances of 'atype'
- an instance of 'atype' if it has a 'from_json_dict' method
- a python list with decoded versions of the member objects if the input
is a python list.
- the raw value otherwise
"""
if data is None:
return None
if self._atype == datetime.datetime:
return datetime.datetime.strptime(data, self.DATE_FMT)
elif self._atype == ApiConfig:
# ApiConfig is special. We want a python dictionary for summary views,
# but an ApiList for full views. Try to detect each case from the JSON
# data.
if not data['items']:
return { }
first = data['items'][0]
return json_to_config(data, len(first) == 2)
elif self._is_api_list:
return ApiList.from_json_dict(data, resource_root, self._atype)
elif isinstance(data, list):
return [ self.from_json(resource_root, x) for x in data ]
elif hasattr(self._atype, 'from_json_dict'):
return self._atype.from_json_dict(data, resource_root)
else:
return data
class ROAttr(Attr):
"""
Subclass that just defines the attribute as read-only.
"""
def __init__(self, atype=None, is_api_list=False):
Attr.__init__(self, atype=atype, rw=False, is_api_list=is_api_list)
def check_api_version(resource_root, min_version):
"""
Checks if the resource_root's API version it at least the given minimum
version.
"""
if resource_root.version < min_version:
raise Exception("API version %s is required but %s is in use."
% (min_version, resource_root.version))
def call(method, path, ret_type,
ret_is_list=False, data=None, params=None, api_version=1):
"""
Generic function for calling a resource method and automatically dealing with
serialization of parameters and deserialization of return values.
@param method: method to call (must be bound to a resource;
e.g., "resource_root.get").
@param path: the full path of the API method to call.
@param ret_type: return type of the call.
@param ret_is_list: whether the return type is an ApiList.
@param data: Optional data to send as payload to the call.
@param params: Optional query parameters for the call.
@param api_version: minimum API version for the call.
"""
check_api_version(method.im_self, api_version)
if data is not None:
data = json.dumps(Attr(is_api_list=True).to_json(data, False))
ret = method(path, data=data, params=params)
else:
ret = method(path, params=params)
if ret_type is None:
return
elif ret_is_list:
return ApiList.from_json_dict(ret, method.im_self, ret_type)
elif isinstance(ret, list):
return [ ret_type.from_json_dict(x, method.im_self) for x in ret ]
else:
return ret_type.from_json_dict(ret, method.im_self)
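# Hedged usage sketch (the path is hypothetical; ApiHostRef is defined below):
#   host_ref = call(resource_root.get, '/some/api/path', ApiHostRef)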
class BaseApiObject(object):
"""
The BaseApiObject helps with (de)serialization from/to JSON.
The derived class has two ways of defining custom attributes:
- Overwriting the '_ATTRIBUTES' field with the attribute dictionary
- Override the _get_attributes() method, in case static initialization of
the above field is not possible.
It's recommended that the _get_attributes() implementation do caching to
avoid computing the dictionary on every invocation.
The derived class's constructor must call the base class's init() static
method. All constructor arguments (aside from self and resource_root) must
  be keyword arguments with default values (typically None), or
from_json_dict() will not work.
"""
_ATTRIBUTES = { }
_WHITELIST = ( '_resource_root', '_attributes' )
@classmethod
def _get_attributes(cls):
"""
Returns a map of property names to attr instances (or None for default
attribute behavior) describing the properties of the object.
By default, this method will return the class's _ATTRIBUTES field.
Classes can override this method to do custom initialization of the
attributes when needed.
"""
return cls._ATTRIBUTES
@staticmethod
def init(obj, resource_root, attrs=None):
"""
    Wrapper around the real constructor to avoid issues with the 'self'
argument. Call like this, from a subclass's constructor:
- BaseApiObject.init(self, locals())
"""
# This works around http://bugs.python.org/issue2646
# We use unicode strings as keys in kwargs.
str_attrs = { }
if attrs:
for k, v in attrs.iteritems():
if k not in ('self', 'resource_root'):
str_attrs[k] = v
BaseApiObject.__init__(obj, resource_root, **str_attrs)
def __init__(self, resource_root, **attrs):
"""
Initializes internal state and sets all known writable properties of the
object to None. Then initializes the properties given in the provided
attributes dictionary.
@param resource_root: API resource object.
@param attrs: optional dictionary of attributes to set. This should only
contain r/w attributes.
"""
self._resource_root = resource_root
for name, attr in self._get_attributes().iteritems():
object.__setattr__(self, name, None)
if attrs:
self._set_attrs(attrs, from_json=False)
def _set_attrs(self, attrs, allow_ro=False, from_json=True):
"""
Sets all the attributes in the dictionary. Optionally, allows setting
read-only attributes (e.g. when deserializing from JSON) and skipping
JSON deserialization of values.
"""
for k, v in attrs.iteritems():
attr = self._check_attr(k, allow_ro)
if attr and from_json:
v = attr.from_json(self._get_resource_root(), v)
object.__setattr__(self, k, v)
def __setattr__(self, name, val):
if name not in BaseApiObject._WHITELIST:
self._check_attr(name, False)
object.__setattr__(self, name, val)
def _check_attr(self, name, allow_ro):
if name not in self._get_attributes():
raise AttributeError('Invalid property %s for class %s.' %
(name, self.__class__.__name__))
attr = self._get_attributes()[name]
if not allow_ro and attr and not attr.rw:
raise AttributeError('Attribute %s of class %s is read only.' %
(name, self.__class__.__name__))
return attr
def _get_resource_root(self):
return self._resource_root
def _update(self, api_obj):
"""Copy state from api_obj to this object."""
if not isinstance(self, api_obj.__class__):
raise ValueError(
"Class %s does not derive from %s; cannot update attributes." %
(self.__class__, api_obj.__class__))
for name in self._get_attributes().keys():
try:
val = getattr(api_obj, name)
setattr(self, name, val)
except AttributeError, ignored:
pass
def to_json_dict(self, preserve_ro=False):
dic = { }
for name, attr in self._get_attributes().iteritems():
if not preserve_ro and attr and not attr.rw:
continue
try:
value = getattr(self, name)
if value is not None:
if attr:
dic[name] = attr.to_json(value, preserve_ro)
else:
dic[name] = value
except AttributeError:
pass
return dic
def __str__(self):
"""
Default implementation of __str__. Uses the type name and the first
attribute retrieved from the attribute map to create the string.
"""
name = self._get_attributes().keys()[0]
value = getattr(self, name, None)
return "<%s>: %s = %s" % (self.__class__.__name__, name, value)
@classmethod
def from_json_dict(cls, dic, resource_root):
obj = cls(resource_root)
obj._set_attrs(dic, allow_ro=True)
return obj
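# Hedged sketch of the subclassing pattern described above (hypothetical class):
#   class ApiThing(BaseApiObject):
#     _ATTRIBUTES = { 'name' : None, 'created' : ROAttr(datetime.datetime) }
#     def __init__(self, resource_root, name=None):
#       BaseApiObject.init(self, resource_root, locals())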
class BaseApiResource(BaseApiObject):
"""
A specialization of BaseApiObject that provides some utility methods for
resources. This class allows easier serialization / deserialization of
parameters and return values.
"""
def _api_version(self):
"""
Returns the minimum API version for this resource. Defaults to 1.
"""
return 1
def _path(self):
"""
Returns the path to the resource.
e.g., for a service 'foo' in cluster 'bar', this should return
'/clusters/bar/services/foo'.
"""
raise NotImplementedError
def _require_min_api_version(self, version):
"""
Raise an exception if the version of the api is less than the given version.
@param version: The minimum required version.
"""
actual_version = self._get_resource_root().version
version = max(version, self._api_version())
if actual_version < version:
raise Exception("API version %s is required but %s is in use."
% (version, actual_version))
def _cmd(self, command, data=None, params=None, api_version=1):
"""
Invokes a command on the resource. Commands are expected to be under the
"commands/" sub-resource.
"""
return self._post("commands/" + command, ApiCommand,
data=data, params=params, api_version=api_version)
def _get_config(self, rel_path, view, api_version=1):
"""
Retrieves an ApiConfig list from the given relative path.
"""
self._require_min_api_version(api_version)
params = view and dict(view=view) or None
resp = self._get_resource_root().get(self._path() + '/' + rel_path,
params=params)
return json_to_config(resp, view == 'full')
def _update_config(self, rel_path, config, api_version=1):
self._require_min_api_version(api_version)
resp = self._get_resource_root().put(self._path() + '/' + rel_path,
data=config_to_json(config))
return json_to_config(resp, False)
def _delete(self, rel_path, ret_type, ret_is_list=False, params=None,
api_version=1):
return self._call('delete', rel_path, ret_type, ret_is_list, None, params,
api_version)
def _get(self, rel_path, ret_type, ret_is_list=False, params=None,
api_version=1):
return self._call('get', rel_path, ret_type, ret_is_list, None, params,
api_version)
def _post(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
api_version=1):
return self._call('post', rel_path, ret_type, ret_is_list, data, params,
api_version)
def _put(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
api_version=1):
return self._call('put', rel_path, ret_type, ret_is_list, data, params,
api_version)
def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None,
params=None, api_version=1):
path = self._path()
if rel_path:
path += '/' + rel_path
return call(getattr(self._get_resource_root(), method),
path,
ret_type,
ret_is_list,
data,
params,
api_version)
class ApiList(BaseApiObject):
"""A list of some api object"""
LIST_KEY = "items"
def __init__(self, objects, resource_root=None, **attrs):
BaseApiObject.__init__(self, resource_root, **attrs)
# Bypass checks in BaseApiObject.__setattr__
object.__setattr__(self, 'objects', objects)
def __str__(self):
    return "<ApiList>(%d): [%s]" % (
      len(self.objects),
      ", ".join([str(item) for item in self.objects]))
def to_json_dict(self, preserve_ro=False):
ret = BaseApiObject.to_json_dict(self, preserve_ro)
attr = Attr()
ret[ApiList.LIST_KEY] = [ attr.to_json(x, preserve_ro) for x in self.objects ]
return ret
def __len__(self):
return self.objects.__len__()
def __iter__(self):
return self.objects.__iter__()
def __getitem__(self, i):
return self.objects.__getitem__(i)
  def __getslice__(self, i, j):
return self.objects.__getslice__(i, j)
@classmethod
def from_json_dict(cls, dic, resource_root, member_cls=None):
if not member_cls:
member_cls = cls._MEMBER_CLASS
attr = Attr(atype=member_cls)
items = []
if ApiList.LIST_KEY in dic:
items = [ attr.from_json(resource_root, x) for x in dic[ApiList.LIST_KEY] ]
ret = cls(items)
# If the class declares custom attributes, populate them based on the input
# dict. The check avoids extra overhead for the common case, where we just
# have a plain list. _set_attrs() also does not understand the "items"
# attribute, so it can't be in the input data.
if cls._ATTRIBUTES:
if ApiList.LIST_KEY in dic:
dic = copy.copy(dic)
del dic[ApiList.LIST_KEY]
ret._set_attrs(dic, allow_ro=True)
return ret
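# Wire-format sketch: an ApiList serializes to a JSON object keyed by
# LIST_KEY, e.g. {"items": [{...}, {...}]}, plus any extra attributes a
# subclass declares (see ApiBulkCommandList below, which adds "errors").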
class ApiHostRef(BaseApiObject):
_ATTRIBUTES = {
'hostId' : None,
}
def __init__(self, resource_root, hostId=None):
BaseApiObject.init(self, resource_root, locals())
def __str__(self):
return "<ApiHostRef>: %s" % (self.hostId)
class ApiServiceRef(BaseApiObject):
_ATTRIBUTES = {
'clusterName' : None,
'serviceName' : None,
'peerName' : None,
}
def __init__(self, resource_root, serviceName=None, clusterName=None,
peerName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiClusterRef(BaseApiObject):
_ATTRIBUTES = {
'clusterName' : None,
}
def __init__(self, resource_root, clusterName = None):
BaseApiObject.init(self, resource_root, locals())
class ApiRoleRef(BaseApiObject):
_ATTRIBUTES = {
'clusterName' : None,
'serviceName' : None,
'roleName' : None,
}
def __init__(self, resource_root, serviceName=None, roleName=None,
clusterName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiRoleConfigGroupRef(BaseApiObject):
_ATTRIBUTES = {
'roleConfigGroupName' : None,
}
def __init__(self, resource_root, roleConfigGroupName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiCommand(BaseApiObject):
SYNCHRONOUS_COMMAND_ID = -1
@classmethod
def _get_attributes(cls):
if not cls.__dict__.has_key('_ATTRIBUTES'):
cls._ATTRIBUTES = {
'id' : ROAttr(),
'name' : ROAttr(),
'startTime' : ROAttr(datetime.datetime),
'endTime' : ROAttr(datetime.datetime),
'active' : ROAttr(),
'success' : ROAttr(),
'resultMessage' : ROAttr(),
'clusterRef' : ROAttr(ApiClusterRef),
'serviceRef' : ROAttr(ApiServiceRef),
'roleRef' : ROAttr(ApiRoleRef),
'hostRef' : ROAttr(ApiHostRef),
'children' : ROAttr(ApiCommand, is_api_list=True),
'parent' : ROAttr(ApiCommand),
'resultDataUrl' : ROAttr(),
'canRetry' : ROAttr(),
}
return cls._ATTRIBUTES
def __str__(self):
return "<ApiCommand>: '%s' (id: %s; active: %s; success: %s)" % (
self.name, self.id, self.active, self.success)
def _path(self):
return '/commands/%d' % self.id
def fetch(self):
"""
Retrieve updated data about the command from the server.
@return: A new ApiCommand object.
"""
if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
return self
resp = self._get_resource_root().get(self._path())
return ApiCommand.from_json_dict(resp, self._get_resource_root())
def wait(self, timeout=None):
"""
Wait for command to finish.
@param timeout: (Optional) Max amount of time (in seconds) to wait. Wait
forever by default.
@return: The final ApiCommand object, containing the last known state.
The command may still be running in case of timeout.
"""
if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
return self
SLEEP_SEC = 5
if timeout is None:
deadline = None
else:
deadline = time.time() + timeout
while True:
cmd = self.fetch()
if not cmd.active:
return cmd
if deadline is not None:
now = time.time()
if deadline < now:
return cmd
else:
time.sleep(min(SLEEP_SEC, deadline - now))
else:
time.sleep(SLEEP_SEC)
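  # Typical polling pattern (sketch; service.restart() stands in for any
  # hypothetical command-returning call):
  #
  #   cmd = service.restart().wait(timeout=300)
  #   if cmd.active:
  #     pass                                  # timed out, still running
  #   elif not cmd.success:
  #     raise RuntimeError(cmd.resultMessage)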
def abort(self):
"""
Abort a running command.
@return: A new ApiCommand object with the updated information.
"""
if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
return self
path = self._path() + '/abort'
resp = self._get_resource_root().post(path)
return ApiCommand.from_json_dict(resp, self._get_resource_root())
def retry(self):
"""
Retry a failed or aborted command.
Note: The retry will only work for ClusterUpgrade command for now.
@return: A new ApiCommand object with the updated information.
"""
path = self._path() + '/retry'
resp = self._get_resource_root().post(path)
return ApiCommand.from_json_dict(resp, self._get_resource_root())
class ApiBulkCommandList(ApiList):
_ATTRIBUTES = {
'errors' : ROAttr(),
}
_MEMBER_CLASS = ApiCommand
class ApiCommandMetadata(BaseApiObject):
_ATTRIBUTES = {
'name' : ROAttr(),
'argSchema' : ROAttr(),
}
def __init__(self, resource_root):
BaseApiObject.init(self, resource_root)
def __str__(self):
return "<ApiCommandMetadata>: %s (%s)" % (self.name, self.argSchema)
#
# Metrics.
#
class ApiMetricData(BaseApiObject):
"""Metric reading data."""
_ATTRIBUTES = {
'timestamp' : ROAttr(datetime.datetime),
'value' : ROAttr(),
}
def __init__(self, resource_root):
BaseApiObject.init(self, resource_root)
class ApiMetric(BaseApiObject):
"""Metric information."""
_ATTRIBUTES = {
'name' : ROAttr(),
'context' : ROAttr(),
'unit' : ROAttr(),
'data' : ROAttr(ApiMetricData),
'displayName' : ROAttr(),
'description' : ROAttr(),
}
def __init__(self, resource_root):
BaseApiObject.init(self, resource_root)
#
# Activities.
#
class ApiActivity(BaseApiObject):
_ATTRIBUTES = {
'name' : ROAttr(),
'type' : ROAttr(),
'parent' : ROAttr(),
'startTime' : ROAttr(),
'finishTime' : ROAttr(),
'id' : ROAttr(),
'status' : ROAttr(),
'user' : ROAttr(),
'group' : ROAttr(),
'inputDir' : ROAttr(),
'outputDir' : ROAttr(),
'mapper' : ROAttr(),
'combiner' : ROAttr(),
'reducer' : ROAttr(),
'queueName' : ROAttr(),
'schedulerPriority' : ROAttr(),
}
def __init__(self, resource_root):
BaseApiObject.init(self, resource_root)
def __str__(self):
return "<ApiActivity>: %s (%s)" % (self.name, self.status)
#
# Replication
#
class ApiCmPeer(BaseApiObject):
_ATTRIBUTES = {
'name' : None,
'url' : None,
'username' : None,
'password' : None,
'type' : None,
'clouderaManagerCreatedUser' : None,
}
def __str__(self):
return "<ApiPeer>: %s (%s)" % (self.name, self.url)
class ApiLicensedFeatureUsage(BaseApiObject):
_ATTRIBUTES = {
'totals' : ROAttr(),
'clusters' : ROAttr(),
}
class ApiHdfsReplicationArguments(BaseApiObject):
_ATTRIBUTES = {
'sourceService' : Attr(ApiServiceRef),
'sourcePath' : None,
'destinationPath' : None,
'mapreduceServiceName' : None,
'userName' : None,
'numMaps' : None,
'dryRun' : None,
'bandwidthPerMap' : None,
'logPath' : None,
'schedulerPoolName' : None,
'abortOnError' : None,
'preservePermissions' : None,
'preserveBlockSize' : None,
'preserveReplicationCount' : None,
'removeMissingFiles' : None,
'skipChecksumChecks' : None,
'skipTrash' : None,
'replicationStrategy' : None,
'preserveXAttrs' : None,
'exclusionFilters' : None,
}
class ApiHdfsReplicationResult(BaseApiObject):
_ATTRIBUTES = {
'progress' : ROAttr(),
'counters' : ROAttr(),
'numBytesDryRun' : ROAttr(),
'numFilesDryRun' : ROAttr(),
'numFilesExpected' : ROAttr(),
'numBytesExpected' : ROAttr(),
'numFilesCopied' : ROAttr(),
'numBytesCopied' : ROAttr(),
'numFilesSkipped' : ROAttr(),
'numBytesSkipped' : ROAttr(),
'numFilesDeleted' : ROAttr(),
'numFilesCopyFailed' : ROAttr(),
'numBytesCopyFailed' : ROAttr(),
'setupError' : ROAttr(),
'jobId' : ROAttr(),
'jobDetailsUri' : ROAttr(),
'dryRun' : ROAttr(),
'snapshottedDirs' : ROAttr(),
'failedFiles' : ROAttr(),
'runAsUser' : ROAttr(),
}
class ApiHiveTable(BaseApiObject):
_ATTRIBUTES = {
'database' : None,
'tableName' : None,
}
def __str__(self):
return "<ApiHiveTable>: %s, %s" % (self.database, self.tableName)
class ApiImpalaUDF(BaseApiObject):
_ATTRIBUTES = {
'database' : ROAttr(),
'signature' : ROAttr(),
}
def __str__(self):
return "<ApiImpalaUDF>: %s, %s" % (self.database, self.signature)
class ApiHiveReplicationArguments(BaseApiObject):
_ATTRIBUTES = {
'sourceService' : Attr(ApiServiceRef),
'tableFilters' : Attr(ApiHiveTable),
'exportDir' : None,
'force' : None,
'replicateData' : None,
'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
'dryRun' : None,
'replicateImpalaMetadata' : None,
}
class ApiHiveReplicationResult(BaseApiObject):
_ATTRIBUTES = {
'tableCount' : ROAttr(),
'tables' : ROAttr(ApiHiveTable),
'impalaUDFCount' : ROAttr(),
'impalaUDFs' : ROAttr(ApiImpalaUDF),
'errorCount' : ROAttr(),
'errors' : ROAttr(),
'dataReplicationResult' : ROAttr(ApiHdfsReplicationResult),
'dryRun' : ROAttr(),
'runAsUser' : ROAttr(),
'phase' : ROAttr(),
}
class ApiReplicationCommand(ApiCommand):
@classmethod
def _get_attributes(cls):
if not cls.__dict__.has_key('_ATTRIBUTES'):
attrs = {
'hdfsResult' : ROAttr(ApiHdfsReplicationResult),
'hiveResult' : ROAttr(ApiHiveReplicationResult),
}
attrs.update(ApiCommand._get_attributes())
cls._ATTRIBUTES = attrs
return cls._ATTRIBUTES
class ApiReplicationSchedule(BaseApiObject):
_ATTRIBUTES = {
'startTime' : Attr(datetime.datetime),
'endTime' : Attr(datetime.datetime),
'interval' : None,
'intervalUnit' : None,
'paused' : None,
'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
'hiveArguments' : Attr(ApiHiveReplicationArguments),
'alertOnStart' : None,
'alertOnSuccess' : None,
'alertOnFail' : None,
'alertOnAbort' : None,
'id' : ROAttr(),
'nextRun' : ROAttr(datetime.datetime),
'history' : ROAttr(ApiReplicationCommand),
'active' : None
}
class ApiHBaseSnapshotPolicyArguments(BaseApiObject):
_ATTRIBUTES = {
'tableRegExps' : None,
'storage' : None,
}
class ApiHdfsSnapshotPolicyArguments(BaseApiObject):
_ATTRIBUTES = {
'pathPatterns' : None,
}
class ApiHBaseSnapshot(BaseApiObject):
_ATTRIBUTES = {
'snapshotName' : None,
'tableName' : None,
'creationTime' : ROAttr(datetime.datetime),
'storage' : None,
}
class ApiHBaseSnapshotError(BaseApiObject):
_ATTRIBUTES = {
'tableName' : ROAttr(),
'snapshotName' : ROAttr(),
'error' : ROAttr(),
'storage' : ROAttr(),
}
class ApiHdfsSnapshot(BaseApiObject):
_ATTRIBUTES = {
'path' : None,
'snapshotName' : None,
'snapshotPath' : None,
'creationTime' : ROAttr(datetime.datetime),
}
class ApiHdfsSnapshotError(BaseApiObject):
_ATTRIBUTES = {
'path' : ROAttr(),
'snapshotName' : ROAttr(),
'snapshotPath' : ROAttr(),
'error' : ROAttr(),
}
class ApiHBaseSnapshotResult(BaseApiObject):
_ATTRIBUTES = {
'processedTableCount' : ROAttr(),
'processedTables' : ROAttr(),
'unprocessedTableCount' : ROAttr(),
'unprocessedTables' : ROAttr(),
'createdSnapshotCount' : ROAttr(),
'createdSnapshots' : ROAttr(ApiHBaseSnapshot),
'deletedSnapshotCount' : ROAttr(),
'deletedSnapshots' : ROAttr(ApiHBaseSnapshot),
'creationErrorCount' : ROAttr(),
'creationErrors' : ROAttr(ApiHBaseSnapshotError),
'deletionErrorCount' : ROAttr(),
'deletionErrors' : ROAttr(ApiHBaseSnapshotError),
}
class ApiHdfsSnapshotResult(BaseApiObject):
_ATTRIBUTES = {
'processedPathCount' : ROAttr(),
'processedPaths' : ROAttr(),
'unprocessedPathCount' : ROAttr(),
'unprocessedPaths' : ROAttr(),
'createdSnapshotCount' : ROAttr(),
'createdSnapshots' : ROAttr(ApiHdfsSnapshot),
'deletedSnapshotCount' : ROAttr(),
'deletedSnapshots' : ROAttr(ApiHdfsSnapshot),
'creationErrorCount' : ROAttr(),
'creationErrors' : ROAttr(ApiHdfsSnapshotError),
'deletionErrorCount' : ROAttr(),
'deletionErrors' : ROAttr(ApiHdfsSnapshotError),
}
class ApiSnapshotCommand(BaseApiObject):
@classmethod
def _get_attributes(cls):
if not cls.__dict__.has_key('_ATTRIBUTES'):
attrs = {
'hdfsResult' : ROAttr(ApiHdfsSnapshotResult),
'hbaseResult' : ROAttr(ApiHBaseSnapshotResult),
}
attrs.update(ApiCommand._get_attributes())
cls._ATTRIBUTES = attrs
return cls._ATTRIBUTES
class ApiSnapshotPolicy(BaseApiObject):
"""
@type name: str
@ivar name: Name of the snapshot policy.
@type description: str
@ivar description: Description of the snapshot policy.
@type hourly_snapshots: int
@ivar hourly_snapshots: Number of hourly snapshots to be retained (default: 0).
@type daily_snapshots: int
@ivar daily_snapshots: Number of daily snapshots to be retained (default: 0).
@type weekly_snapshots: int
@ivar weekly_snapshots: Number of weekly snapshots to be retained (default: 0).
@type monthly_snapshots: int
@ivar monthly_snapshots: Number of monthly snapshots to be retained (default: 0).
@type yearly_snapshots: int
@ivar yearly_snapshots: Number of yearly snapshots to be retained (default: 0).
@type hours_for_hourly_snapshots: list of int
@ivar hours_for_hourly_snapshots: Hours of the day that hourly snapshots should be created.
Valid values are 0 to 23. If this list is empty, then hourly snapshots are
created for every hour.
@type minute_of_hour: int
@ivar minute_of_hour: Minute in the hour that hourly, daily, weekly, monthly and yearly
snapshots should be created. Valid values are 0 to 59 (default: 0).
@type hour_of_day: int
@ivar hour_of_day: Hour in the day that daily, weekly, monthly and yearly snapshots should be created.
Valid values are 0 to 23 (default: 0).
@type day_of_week: int
@ivar day_of_week: Day of the week that weekly snapshots should be created.
Valid values are 1 to 7, 1 representing Sunday (default: 1).
@type day_of_month: int
@ivar day_of_month: Day of the month that monthly and yearly snapshots should be created.
Values from 1 to 31 are allowed. Additionally 0 to -30 can be used to
specify offsets from the last day of the month (default: 1).
@type month_of_year: int
@ivar month_of_year: Month of the year that yearly snapshots should be created.
Valid values are 1 to 12, 1 representing January (default: 1).
@ivar alert_on_start: whether to generate alerts on start of snapshot creation/deletion activity.
@ivar alert_on_success: whether to generate alerts on successful completion of snapshot creation/deletion activity.
@ivar alert_on_fail: whether to generate alerts on failure of snapshot creation/deletion activity.
@ivar alert_on_abort: whether to generate alerts on abort of snapshot creation/deletion activity.
@ivar paused: whether to run the policy on schedule
@type hbaseArguments: ApiHBaseSnapshotPolicyArguments
@ivar hbaseArguments: HBase specific arguments for the replication job.
@type hdfsArguments: ApiHdfsSnapshotPolicyArguments
@ivar hdfsArguments: HDFS specific arguments for the replication job.
"""
_ATTRIBUTES = {
'name' : None,
'description' : None,
'hourlySnapshots' : None,
'dailySnapshots' : None,
'weeklySnapshots' : None,
'monthlySnapshots' : None,
'yearlySnapshots' : None,
'minuteOfHour' : None,
'hourOfDay' : None,
'dayOfWeek' : None,
'dayOfMonth' : None,
'monthOfYear' : None,
'hoursForHourlySnapshots' : None,
'alertOnStart' : None,
'alertOnSuccess' : None,
'alertOnFail' : None,
'alertOnAbort' : None,
'paused' : None,
'hbaseArguments' : Attr(ApiHBaseSnapshotPolicyArguments),
'hdfsArguments' : Attr(ApiHdfsSnapshotPolicyArguments),
'lastCommand' : ROAttr(ApiSnapshotCommand),
'lastSuccessfulCommand' : ROAttr(ApiSnapshotCommand),
}
#
# Batch.
#
class ApiBatchRequestElement(BaseApiObject):
"""One element in a batch request."""
_ATTRIBUTES = {
'method' : None,
'url' : None,
'body' : None,
'contentType' : None,
'acceptType' : None,
}
class ApiBatchResponseElement(BaseApiObject):
"""One element in a batch response."""
_ATTRIBUTES = {
'statusCode' : ROAttr(),
'response' : ROAttr(),
}
class ApiBatchResponseList(ApiList):
"""A list of batch response objects."""
_ATTRIBUTES = {
'success' : ROAttr(),
}
_MEMBER_CLASS = ApiBatchResponseElement
#
# Configuration helpers.
#
class ApiConfig(BaseApiObject):
_ATTRIBUTES = {
'name' : None,
'value' : None,
'required' : ROAttr(),
'default' : ROAttr(),
'displayName' : ROAttr(),
'description' : ROAttr(),
'relatedName' : ROAttr(),
'validationState' : ROAttr(),
'validationMessage' : ROAttr(),
'validationWarningsSuppressed' : ROAttr()
}
def __init__(self, resource_root, name=None, value=None):
BaseApiObject.init(self, resource_root, locals())
def __str__(self):
return "<ApiConfig>: %s = %s" % (self.name, self.value)
class ApiImpalaQuery(BaseApiObject):
_ATTRIBUTES = {
'queryId' : ROAttr(),
'queryState' : ROAttr(),
'queryType' : ROAttr(),
'statement' : ROAttr(),
'database' : ROAttr(),
'rowsProduced' : ROAttr(),
'coordinator' : ROAttr(ApiHostRef),
'user' : ROAttr(),
'startTime' : ROAttr(datetime.datetime),
'endTime' : ROAttr(datetime.datetime),
'detailsAvailable' : ROAttr(),
'attributes' : ROAttr(),
'durationMillis' : ROAttr()
}
def __str__(self):
return "<ApiImpalaQuery>: %s" % (self.queryId)
class ApiImpalaQueryResponse(BaseApiObject):
_ATTRIBUTES = {
'queries' : ROAttr(ApiImpalaQuery),
'warnings' : ROAttr()
}
class ApiImpalaQueryDetailsResponse(BaseApiObject):
_ATTRIBUTES = {
'details' : ROAttr()
}
def __str__(self):
return "<AipImpalaQueryDetailsResponse> %s" % self.details
class ApiImpalaCancelResponse(BaseApiObject):
_ATTRIBUTES = {
'warning' : ROAttr()
}
def __str__(self):
return "<ApiImpalaCancelResponse> %s" % self.warning
class ApiImpalaQueryAttribute(BaseApiObject):
_ATTRIBUTES = {
'name' : ROAttr(),
'type' : ROAttr(),
'displayName' : ROAttr(),
'supportsHistograms' : ROAttr(),
'description' : ROAttr()
}
def __str__(self):
return "<ApiImpalaQueryAttribute> %s" % name
class ApiMr2AppInformation(BaseApiObject):
_ATTRIBUTES = {
'jobState' : ROAttr()
}
def __str__(self):
return "<ApiMr2AppInformation>: %s" % (self.jobState)
class ApiYarnApplication(BaseApiObject):
_ATTRIBUTES = {
'applicationId' : ROAttr(),
'name' : ROAttr(),
'user' : ROAttr(),
'startTime' : ROAttr(datetime.datetime),
'endTime' : ROAttr(datetime.datetime),
'pool' : ROAttr(),
'state' : ROAttr(),
'progress' : ROAttr(),
'mr2AppInformation' : ROAttr(ApiMr2AppInformation),
'attributes' : ROAttr(),
'allocatedMB' : ROAttr(),
'allocatedVCores' : ROAttr(),
'runningContainers' : ROAttr(),
'applicationTags' : ROAttr(),
'allocatedMemorySeconds' : ROAttr(),
'allocatedVcoreSeconds' : ROAttr(),
'containerUsedMemorySeconds' : ROAttr(),
'containerUsedCpuSeconds' : ROAttr(),
'containerUsedVcoreSeconds' : ROAttr(),
'containerAllocatedMemorySeconds' : ROAttr(),
'containerAllocatedVcoreSeconds' : ROAttr(),
}
def __str__(self):
return "<ApiYarnApplication>: %s" % (self.applicationId)
class ApiYarnApplicationResponse(BaseApiObject):
_ATTRIBUTES = {
'applications' : ROAttr(ApiYarnApplication),
'warnings' : ROAttr()
}
class ApiYarnKillResponse(BaseApiObject):
_ATTRIBUTES = {
'warning' : ROAttr()
}
def __str__(self):
return "<ApiYarnKillResponse> %s" % self.warning
class ApiYarnApplicationAttribute(BaseApiObject):
_ATTRIBUTES = {
'name' : ROAttr(),
'type' : ROAttr(),
'displayName' : ROAttr(),
'supportsHistograms' : ROAttr(),
'description' : ROAttr()
}
def __str__(self):
return "<ApiYarnApplicationAttribute> %s" % name
class ApiTimeSeriesRequest(BaseApiObject):
_ATTRIBUTES = {
'query' : None,
'from' : None,
'to' : None,
'contentType' : None,
'desiredRollup' : None,
'mustUseDesiredRollup' : None
}
def __str__(self):
return "<ApiTimeSeriesRequest>: %s" % (self.query)
class ApiProductVersion(BaseApiObject):
_ATTRIBUTES = {
'version' : None,
'product' : None,
}
class ApiClusterTemplateConfig(BaseApiObject):
_ATTRIBUTES = {
'name' : None,
'value' : None,
'ref' : None,
'variable' : None,
'autoConfig' : None,
}
class ApiClusterTemplateRoleConfigGroup(BaseApiObject):
_ATTRIBUTES = {
'refName' : None,
'roleType' : None,
'base' : None,
'displayName' : None,
'configs' : Attr(ApiClusterTemplateConfig),
}
class ApiClusterTemplateRole(BaseApiObject):
_ATTRIBUTES = {
'refName' : None,
'roleType' : None,
}
class ApiClusterTemplateHostTemplate(BaseApiObject):
_ATTRIBUTES = {
'refName' : None,
'cardinality' : None,
'roleConfigGroupsRefNames' : None,
}
class ApiClusterTemplateHostInfo(BaseApiObject):
_ATTRIBUTES = {
'hostName' : None,
'hostNameRange' : None,
'rackId' : None,
'hostTemplateRefName' : None,
'roleRefNames' : None,
}
class ApiClusterTemplateVariable(BaseApiObject):
_ATTRIBUTES = {
'name' : None,
'value' : None,
}
class ApiClusterTemplateRoleConfigGroupInfo(BaseApiObject):
_ATTRIBUTES = {
'rcgRefName' : None,
'name' : None,
}
class ApiClusterTemplateInstantiator(BaseApiObject):
_ATTRIBUTES = {
'clusterName' : None,
'hosts' : Attr(ApiClusterTemplateHostInfo),
'variables' : Attr(ApiClusterTemplateVariable),
'roleConfigGroups' : Attr(ApiClusterTemplateRoleConfigGroupInfo),
}
class ApiClusterTemplateService(BaseApiObject):
_ATTRIBUTES = {
'refName' : None,
'serviceType' : None,
'serviceConfigs' : Attr(ApiClusterTemplateConfig),
'roleConfigGroups' : Attr(ApiClusterTemplateRoleConfigGroup),
'displayName' : None,
'roles' : Attr(ApiClusterTemplateRole),
}
class ApiClusterTemplate(BaseApiObject):
_ATTRIBUTES = {
'cdhVersion' : None,
'displayName' : None,
'cmVersion' : None,
"repositories" : None,
'products' : Attr(ApiProductVersion),
'services' : Attr(ApiClusterTemplateService),
'hostTemplates' : Attr(ApiClusterTemplateHostTemplate),
'instantiator' : Attr(ApiClusterTemplateInstantiator),
}
def config_to_api_list(dic):
"""
Converts a python dictionary into a list containing the proper
ApiConfig encoding for configuration data.
@param dic: Key-value pairs to convert.
@return: JSON dictionary of an ApiConfig list (*not* an ApiList).
"""
config = [ ]
for k, v in dic.iteritems():
config.append({ 'name' : k, 'value': v })
return { ApiList.LIST_KEY : config }
def config_to_json(dic):
"""
Converts a python dictionary into a JSON payload.
The payload matches the expected "apiConfig list" type used to update
configuration parameters using the API.
@param dic: Key-value pairs to convert.
@return: String with the JSON-encoded data.
"""
return json.dumps(config_to_api_list(dic))
def json_to_config(dic, full = False):
"""
Converts a JSON-decoded config dictionary to a python dictionary.
When materializing the full view, the values in the dictionary will be
instances of ApiConfig, instead of strings.
@param dic: JSON-decoded config dictionary.
@param full: Whether to materialize the full view of the config data.
@return: Python dictionary with config data.
"""
config = { }
for entry in dic['items']:
k = entry['name']
if full:
config[k] = ApiConfig.from_json_dict(entry, None)
else:
config[k] = entry.get('value')
return config
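# Round-trip sketch for the helpers above:
#   config_to_api_list({'a': '1'})
#     -> {'items': [{'name': 'a', 'value': '1'}]}
#   json_to_config({'items': [{'name': 'a', 'value': '1'}]})
#     -> {'a': '1'}
# With full=True, json_to_config returns ApiConfig instances instead of plain values.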
generic_wavefront.py
import numpy, copy
import scipy.constants as codata
from srxraylib.util.data_structures import ScaledArray, ScaledMatrix
from wofry.propagator.wavefront import Wavefront, WavefrontDimension
from wofry.propagator.util.gaussian_schell_model import GaussianSchellModel1D
# needed for h5 i/o
import os
import sys
import time
from wofry.propagator.polarization import Polarization
try:
import h5py
except ImportError:
raise ImportError("h5py not available: input/output to files not working")
# --------------------------------------------------
# Wavefront 1D
# --------------------------------------------------
class GenericWavefront1D(Wavefront):
def __init__(self, wavelength=1e-10, electric_field_array=None, electric_field_array_pi=None):
self._wavelength = wavelength
self._electric_field_array = electric_field_array
self._electric_field_array_pi = electric_field_array_pi
def get_dimension(self):
return WavefrontDimension.ONE
def is_polarized(self):
if self._electric_field_array_pi is None:
return False
else:
return True
def duplicate(self):
if self.is_polarized():
return GenericWavefront1D(wavelength=self._wavelength,
electric_field_array=ScaledArray(np_array=copy.copy(self._electric_field_array.np_array),
scale=copy.copy(self._electric_field_array.scale)),
electric_field_array_pi=ScaledArray(np_array=copy.copy(self._electric_field_array_pi.np_array),
scale=copy.copy(self._electric_field_array_pi.scale)) )
else:
return GenericWavefront1D(wavelength=self._wavelength,
electric_field_array=ScaledArray(np_array=copy.copy(self._electric_field_array.np_array),
scale=copy.copy(self._electric_field_array.scale)))
@classmethod
def initialize_wavefront(cls, wavelength=1e-10, number_of_points=1000, polarization=Polarization.SIGMA):
sA = ScaledArray.initialize(np_array=numpy.full(number_of_points, (1.0 + 0.0j), dtype=complex))
if ((polarization == Polarization.PI) or (polarization == Polarization.TOTAL)):
sA_pi = ScaledArray.initialize(np_array=numpy.full(number_of_points, (0.0 + 0.0j), dtype=complex))
else:
sA_pi = None
return GenericWavefront1D(wavelength, sA, sA_pi)
@classmethod
def initialize_wavefront_from_steps(cls, x_start=-1.0, x_step=0.002, number_of_points=1000, wavelength=1e-10, polarization=Polarization.SIGMA):
sA = ScaledArray.initialize_from_steps(np_array=numpy.full(number_of_points, (1.0 + 0.0j), dtype=complex),
initial_scale_value=x_start,
scale_step=x_step)
if ((polarization == Polarization.PI) or (polarization == Polarization.TOTAL)):
sA_pi = ScaledArray.initialize_from_steps(np_array=numpy.full(number_of_points, (0.0 + 0.0j), dtype=complex),
initial_scale_value=x_start,
scale_step=x_step)
else:
sA_pi = None
return GenericWavefront1D(wavelength, sA, sA_pi)
@classmethod
def initialize_wavefront_from_range(cls, x_min=0.0, x_max=0.0, number_of_points=1000, wavelength=1e-10, polarization=Polarization.SIGMA ):
sA = ScaledArray.initialize_from_range(np_array=numpy.full(number_of_points, (1.0 + 0.0j), dtype=complex),
min_scale_value=x_min,
max_scale_value=x_max)
if ((polarization == Polarization.PI) or (polarization == Polarization.TOTAL)):
sA_pi = ScaledArray.initialize_from_range(np_array=numpy.full(number_of_points, (0.0 + 0.0j), dtype=complex),
min_scale_value=x_min,
max_scale_value=x_max)
else:
sA_pi = None
return GenericWavefront1D(wavelength, sA, sA_pi )
@classmethod
def initialize_wavefront_from_arrays(cls, x_array, y_array, y_array_pi=None, wavelength=1e-10):
if x_array.size != y_array.size:
raise Exception("Unmatched shapes for x and y")
sA = ScaledArray.initialize_from_steps(np_array=y_array,
initial_scale_value=x_array[0],
scale_step=(x_array[1]-x_array[0]))
if y_array_pi is not None:
sA_pi = ScaledArray.initialize_from_steps(np_array=y_array_pi,
initial_scale_value=x_array[0],
scale_step=(x_array[1]-x_array[0]))
else:
sA_pi = None
return GenericWavefront1D(wavelength, sA, sA_pi)
# main parameters
# grid
def size(self):
return self._electric_field_array.size()
def delta(self):
return self._electric_field_array.delta()
def offset(self):
return self._electric_field_array.offset()
def get_abscissas(self):
return self._electric_field_array.scale
def get_mesh_x(self):
return self.get_abscissas()
# photon energy
def get_wavelength(self):
return self._wavelength
def get_wavenumber(self):
return 2*numpy.pi/self._wavelength
def get_photon_energy(self):
m2ev = codata.c * codata.h / codata.e # lambda(m) = m2eV / energy(eV)
return m2ev / self._wavelength
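    # Sanity-check sketch: m2ev = h*c/e ~ 1.2398e-6 m*eV, so a 1 Angstrom
    # (1e-10 m) wavelength corresponds to ~12398 eV:
    #   wf = GenericWavefront1D.initialize_wavefront(wavelength=1e-10)
    #   wf.get_photon_energy()   # -> ~12398.4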
# wavefront
def get_complex_amplitude(self, polarization=Polarization.SIGMA):
if polarization == Polarization.SIGMA:
return self._electric_field_array.np_array
elif polarization == Polarization.PI:
if self.is_polarized():
return self._electric_field_array_pi.np_array
else:
raise Exception("Wavefront is not polarized.")
else:
raise Exception("Only 0=SIGMA and 1=PI are valid polarization values.")
def get_amplitude(self, polarization=Polarization.SIGMA):
return numpy.absolute(self.get_complex_amplitude(polarization=polarization))
def get_phase(self,from_minimum_intensity=0.0,unwrap=0, polarization=Polarization.SIGMA):
phase = numpy.angle(self.get_complex_amplitude(polarization=polarization))
if (from_minimum_intensity > 0.0):
intensity = self.get_intensity()
intensity /= intensity.max()
bad_indices = numpy.where(intensity < from_minimum_intensity )
phase[bad_indices] = 0.0
if unwrap:
phase = numpy.unwrap(phase)
return phase
def get_intensity(self, polarization=Polarization.SIGMA):
if polarization == Polarization.TOTAL:
if self.is_polarized():
return self.get_amplitude(polarization=Polarization.SIGMA)**2 + \
self.get_amplitude(polarization=Polarization.PI)**2
else:
return self.get_amplitude(polarization=Polarization.SIGMA)**2
else:
return self.get_amplitude(polarization=polarization)**2
def get_integrated_intensity(self, polarization=Polarization.SIGMA):
return self.get_intensity(polarization=polarization).sum() * (self.get_abscissas()[1] - self.get_abscissas()[0])
# interpolated values
def get_interpolated_complex_amplitude(self, abscissa_value, polarization=Polarization.SIGMA): # singular
if polarization == Polarization.SIGMA:
return self._electric_field_array.interpolate_value(abscissa_value)
elif polarization == Polarization.PI:
if self.is_polarized():
return self._electric_field_array_pi.interpolate_value(abscissa_value)
else:
raise Exception("Wavefront is not polarized.")
else:
raise Exception("Only 0=SIGMA and 1=PI are valid polarization values.")
def get_interpolated_complex_amplitudes(self, abscissa_values, polarization=Polarization.SIGMA): # plural
if polarization == Polarization.SIGMA:
return self._electric_field_array.interpolate_values(abscissa_values)
elif polarization == Polarization.PI:
if self.is_polarized():
return self._electric_field_array_pi.interpolate_values(abscissa_values)
else:
raise Exception("Wavefront is not polarized.")
else:
raise Exception("Only 0=SIGMA and 1=PI are valid polarization values.")
def get_interpolated_amplitude(self, abscissa_value, polarization=Polarization.SIGMA): # singular!
return numpy.absolute(self.get_interpolated_complex_amplitude(abscissa_value,polarization=polarization))
def get_interpolated_amplitudes(self, abscissa_values, polarization=Polarization.SIGMA): # plural!
return numpy.absolute(self.get_interpolated_complex_amplitudes(abscissa_values,polarization=polarization))
def get_interpolated_phase(self, abscissa_value, polarization=Polarization.SIGMA): # singular!
complex_amplitude = self.get_interpolated_complex_amplitude(abscissa_value, polarization=polarization)
return numpy.arctan2(numpy.imag(complex_amplitude), numpy.real(complex_amplitude))
def get_interpolated_phases(self, abscissa_values, polarization=Polarization.SIGMA): # plural!
complex_amplitudes = self.get_interpolated_complex_amplitudes(abscissa_values, polarization=polarization)
return numpy.arctan2(numpy.imag(complex_amplitudes), numpy.real(complex_amplitudes))
def get_interpolated_intensity(self, abscissa_value, polarization=Polarization.SIGMA):
if polarization == Polarization.TOTAL:
interpolated_complex_amplitude = self.get_interpolated_amplitude(abscissa_value,polarization=Polarization.SIGMA)
if self.is_polarized():
interpolated_complex_amplitude_pi = self.get_interpolated_amplitude(abscissa_value,polarization=Polarization.PI)
return numpy.abs(interpolated_complex_amplitude)**2 + numpy.abs(interpolated_complex_amplitude_pi)**2
else:
return numpy.abs(interpolated_complex_amplitude)**2
elif polarization == Polarization.SIGMA:
interpolated_complex_amplitude = self.get_interpolated_amplitude(abscissa_value,polarization=Polarization.SIGMA)
return numpy.abs(interpolated_complex_amplitude)**2
elif polarization == Polarization.PI:
interpolated_complex_amplitude_pi = self.get_interpolated_amplitude(abscissa_value,polarization=Polarization.PI)
return numpy.abs(interpolated_complex_amplitude_pi)**2
else:
raise Exception("Wrong polarization value.")
def get_interpolated_intensities(self, abscissa_values, polarization=Polarization.SIGMA):
# return self.get_interpolated_amplitudes(abscissa_values,polarization=Polarization.SIGMA)**2
if polarization == Polarization.TOTAL:
interpolated_complex_amplitude = self.get_interpolated_complex_amplitude(abscissa_values,polarization=Polarization.SIGMA)
if self.is_polarized():
interpolated_complex_amplitude_pi = self.get_interpolated_complex_amplitude(abscissa_values,polarization=Polarization.PI)
return numpy.abs(interpolated_complex_amplitude)**2 + numpy.abs(interpolated_complex_amplitude_pi)**2
else:
return numpy.abs(interpolated_complex_amplitude)**2
elif polarization == Polarization.SIGMA:
interpolated_complex_amplitude = self.get_interpolated_complex_amplitude(abscissa_values,polarization=Polarization.SIGMA)
return numpy.abs(interpolated_complex_amplitude)**2
elif polarization == Polarization.PI:
interpolated_complex_amplitude_pi = self.get_interpolated_complex_amplitude(abscissa_values,polarization=Polarization.PI)
return numpy.abs(interpolated_complex_amplitude_pi)**2
else:
raise Exception("Wrong polarization value.")
# modifiers
def set_wavelength(self, wavelength):
self._wavelength = wavelength
def set_wavenumber(self, wavenumber):
self._wavelength = 2 * numpy.pi / wavenumber
def set_photon_energy(self, photon_energy):
m2ev = codata.c * codata.h / codata.e # lambda(m) = m2eV / energy(eV)
self._wavelength = m2ev / photon_energy
def set_complex_amplitude(self, complex_amplitude, complex_amplitude_pi=None):
if complex_amplitude.size != self._electric_field_array.size():
raise Exception("Complex amplitude array has different dimension")
self._electric_field_array.np_array = complex_amplitude
if complex_amplitude_pi is not None:
if self.is_polarized():
if complex_amplitude_pi.size != self._electric_field_array_pi.size():
raise Exception("Complex amplitude array has different dimension")
self._electric_field_array_pi.np_array = complex_amplitude_pi
else:
raise Exception('Cannot set PI-polarized complex amplitude to a non-polarized wavefront.')
def set_pi_complex_amplitude_to_zero(self):
if self.is_polarized():
self._electric_field_array_pi.np_array *= 0.0
def set_plane_wave_from_complex_amplitude(self, complex_amplitude=(1.0 + 0.0j), inclination=0.0):
self._electric_field_array.np_array = numpy.full(self._electric_field_array.size(), complex_amplitude, dtype=complex)
if inclination != 0.0:
self.add_phase_shifts( self.get_wavenumber() * self._electric_field_array.scale * numpy.tan(inclination) )
# if polarized, set arbitrary PI component to zero
self.set_pi_complex_amplitude_to_zero()
def set_plane_wave_from_amplitude_and_phase(self, amplitude=1.0, phase=0.0, inclination=0.0):
self.set_plane_wave_from_complex_amplitude(amplitude*numpy.cos(phase) + 1.0j*amplitude*numpy.sin(phase))
if inclination != 0.0:
self.add_phase_shifts( self.get_wavenumber() * self._electric_field_array.scale * numpy.tan(inclination) )
# if polarized, set arbitrary PI component to zero
self.set_pi_complex_amplitude_to_zero()
def set_spherical_wave(self, radius=1.0, center=0.0, complex_amplitude=1.0):
if radius == 0: raise Exception("Radius cannot be zero")
self._electric_field_array.np_array = complex_amplitude * numpy.exp(-1.0j * self.get_wavenumber() *
( (self._electric_field_array.scale - center)** 2) / (-2 * radius))
# if polarized, set arbitrary PI component to zero
self.set_pi_complex_amplitude_to_zero()
def set_gaussian_hermite_mode(self, sigma_x, mode_x, amplitude=1.0, shift=0.0, beta=100.0):
a1D = GaussianSchellModel1D(amplitude, sigma_x, beta*sigma_x)
real_amplitude = a1D.phi(mode_x, self.get_abscissas() - shift)
eigenvalue = a1D.beta(mode_x)
self.set_complex_amplitude(numpy.sqrt(eigenvalue)*real_amplitude+0.0j)
# if polarized, set arbitrary PI component to zero
self.set_pi_complex_amplitude_to_zero()
# note that amplitude is for "amplitude" not for intensity!
def set_gaussian(self, sigma_x, amplitude=1.0, shift=0.0):
self.set_gaussian_hermite_mode(sigma_x, 0, amplitude=amplitude, shift=shift)
# if polarized, set arbitrary PI component to zero
self.set_pi_complex_amplitude_to_zero()
def add_phase_shift(self, phase_shift, polarization=Polarization.SIGMA):
if polarization == Polarization.SIGMA:
self._electric_field_array.np_array *= numpy.exp(1.0j * phase_shift)
elif polarization == Polarization.PI:
if self.is_polarized():
self._electric_field_array_pi.np_array *= numpy.exp(1.0j * phase_shift)
else:
raise Exception("Wavefront is not polarized")
else:
raise Exception("Invalid polarization value (only 0=SIGMA or 1=PI are valid)")
def add_phase_shifts(self, phase_shifts, polarization=Polarization.SIGMA):
if polarization == Polarization.SIGMA:
if phase_shifts.size != self._electric_field_array.size():
raise Exception("Phase Shifts array has different dimension")
self._electric_field_array.np_array = numpy.multiply(self._electric_field_array.np_array, numpy.exp(1.0j * phase_shifts))
elif polarization == Polarization.PI:
if self.is_polarized():
if phase_shifts.size != self._electric_field_array_pi.size():
raise Exception("Phase Shifts array has different dimension")
self._electric_field_array_pi.np_array = numpy.multiply(self._electric_field_array_pi.np_array, numpy.exp(1.0j * phase_shifts))
else:
raise Exception("Wavefront is not polarized")
else:
raise Exception("Invalid polarization value (only 0=SIGMA or 1=PI are valid)")
def rescale_amplitude(self, factor, polarization=Polarization.SIGMA):
if polarization == Polarization.SIGMA:
self._electric_field_array.np_array *= factor
elif polarization == Polarization.PI:
if self.is_polarized():
self._electric_field_array_pi.np_array *= factor
else:
raise Exception("Wavefront is not polarized")
elif polarization == Polarization.TOTAL:
self.rescale_amplitude(factor, polarization=Polarization.SIGMA)
self.rescale_amplitude(factor, polarization=Polarization.PI)
else:
raise Exception("Invalid polarization value (only 0=SIGMA, 1=PI or 3=TOTAL are valid)")
def rescale_amplitudes(self, factors, polarization=Polarization.SIGMA):
if polarization == Polarization.SIGMA:
if factors.size != self._electric_field_array.size(): raise Exception("Factors array has different dimension")
self._electric_field_array.np_array = numpy.multiply(self._electric_field_array.np_array, factors)
elif polarization == Polarization.PI:
if self.is_polarized():
if factors.size != self._electric_field_array_pi.size(): raise Exception("Factors array has different dimension")
self._electric_field_array_pi.np_array = numpy.multiply(self._electric_field_array_pi.np_array, factors)
else:
raise Exception("Wavefront is not polarized")
elif polarization == Polarization.TOTAL:
self.rescale_amplitudes(factors, polarization=Polarization.SIGMA)
self.rescale_amplitudes(factors, polarization=Polarization.PI)
else:
raise Exception("Invalid polarization value (only 0=SIGMA, 1=PI or 3=TOTAL are valid)")
def clip(self, x_min, x_max, negative=False):
window = numpy.ones(self._electric_field_array.size())
if not negative:
lower_window = numpy.where(self.get_abscissas() < x_min)
upper_window = numpy.where(self.get_abscissas() > x_max)
if len(lower_window) > 0: window[lower_window] = 0
if len(upper_window) > 0: window[upper_window] = 0
else:
window_indices = numpy.where((self.get_abscissas() >= x_min) & (self.get_abscissas() <= x_max))
if len(window_indices) > 0:
window[window_indices] = 0.0
if self.is_polarized():
self.rescale_amplitudes(window,polarization=Polarization.TOTAL)
else:
self.rescale_amplitudes(window,polarization=Polarization.SIGMA)
def is_identical(self,wfr,decimal=7):
from numpy.testing import assert_array_almost_equal
try:
assert_array_almost_equal(self.get_complex_amplitude(),wfr.get_complex_amplitude(),decimal)
assert(self.is_polarized() == wfr.is_polarized())
if self.is_polarized():
assert_array_almost_equal(self.get_complex_amplitude(polarization=Polarization.PI),
wfr.get_complex_amplitude(polarization=Polarization.PI),decimal)
assert_array_almost_equal(self.get_abscissas(),wfr.get_abscissas(),decimal)
assert_array_almost_equal(self.get_photon_energy(),wfr.get_photon_energy(),decimal)
except:
return False
return True
#
# auxiliary methods get main wavefront phase curvature (radius)
#
def _figure_of_merit(self,radius,weight_with_intensity=True):
"""
Computes a "figure of merit" for finding the wavefront curvature.
        A low value of the figure of merit means that the radius under test is
        close to the actual curvature radius of the wavefront.
        If the wavefront is polarized, the pi component is ignored.
:param radius:
:param weight_with_intensity:
:return: a positive scalar with the figure of merit
"""
x = self.get_abscissas()
new_phase = 1.0 * self.get_wavenumber() * (x**2) / (-2 * radius)
wavefront2 = self.duplicate()
wavefront2.add_phase_shifts(new_phase)
if weight_with_intensity:
out = numpy.abs(wavefront2.get_phase()*wavefront2.get_intensity()).sum()
else:
out = numpy.abs(wavefront2.get_phase()).sum()
return out
def scan_wavefront_curvature(self,rmin=-10000.0,rmax=10000.0,rpoints=100):
radii = numpy.linspace(rmax,rmin,rpoints)
fig_of_mer = numpy.zeros_like(radii)
for i,radius in enumerate(radii):
fig_of_mer[i] =self._figure_of_merit(radius)
return radii,fig_of_mer
def guess_wavefront_curvature(self,rmin=-10000.0,rmax=10000.0,rpoints=100):
from scipy.optimize import minimize
radii,fig_of_mer = self.scan_wavefront_curvature(rmin=rmin,rmax=rmax,rpoints=rpoints)
        # _figure_of_merit is a bound method, so no extra args are needed
        res = minimize(self._figure_of_merit, radii[numpy.argmin(fig_of_mer)],
                       method='powell', options={'xtol': 1e-8, 'disp': True})
return res.x
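    # Usage sketch: build a spherical wave of known radius and recover it;
    # the scan range (rmin, rmax) should bracket the expected radius, since
    # the minimizer is seeded with the best scanned value.
    #   wf = GenericWavefront1D.initialize_wavefront_from_range(-1e-3, 1e-3, 1000)
    #   wf.set_spherical_wave(radius=5.0)
    #   wf.guess_wavefront_curvature(rmin=1.0, rmax=10.0)   # -> ~5.0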
#
# auxiliary function to dump h5 files
#
def _dump_arr_2_hdf5(self,_arr,_calculation, _filename, _subgroupname):
"""
Auxiliary routine to save_h5_file
        :param _arr: (usually 1D here) array to be saved in the hdf5 file inside _subgroupname
        :param _calculation: name of the dataset to be created inside _subgroupname
:param _filename: path to file for saving the wavefront
:param _subgroupname: container mechanism by which HDF5 files are organised
"""
sys.stdout.flush()
f = h5py.File(_filename, 'a')
try:
f1 = f.create_group(_subgroupname)
except:
f1 = f[_subgroupname]
fdata = f1.create_dataset(_calculation, data=_arr)
f.close()
def save_h5_file(self,filename,subgroupname="wfr",intensity=True,phase=False,overwrite=True,verbose=False):
"""
        Auxiliary function to write wavefront data into a generic hdf5 file.
        When using the append mode to write h5 files, overwriting does not work and makes the code crash. To avoid
        this issue, try/except is used. If a file must be overwritten, it is first deleted and re-written.
        :param self: input / output resulting Wavefront structure (instance of GenericWavefront1D);
        :param filename: path to file for saving the wavefront
        :param subgroupname: container mechanism by which HDF5 files are organised
        :param intensity: writes intensity for sigma and pi polarisation (default=True)
        :param phase: writes phase data (default=False)
        :param overwrite: flag that should always be set to True to avoid an infinite loop in the recursive part of the function.
:param verbose: if True, print some file i/o messages
"""
if overwrite:
try:
os.remove(filename)
except:
pass
try:
if not os.path.isfile(filename): # if file doesn't exist, create it.
sys.stdout.flush()
f = h5py.File(filename, 'w')
# point to the default data to be plotted
f.attrs['default'] = 'entry'
# give the HDF5 root some more attributes
f.attrs['file_name'] = filename
f.attrs['file_time'] = time.time()
f.attrs['creator'] = 'oasys-wofry'
f.attrs['HDF5_Version'] = h5py.version.hdf5_version
f.attrs['h5py_version'] = h5py.version.version
f.close()
# always writes complex amplitude
x_polarization = self.get_complex_amplitude() # sigma
self._dump_arr_2_hdf5(x_polarization, "wfr_complex_amplitude_s", filename, subgroupname)
if self.is_polarized():
y_polarization = self.get_complex_amplitude(polarization=Polarization.PI) # pi
self._dump_arr_2_hdf5(y_polarization, "wfr_complex_amplitude_p", filename, subgroupname)
if intensity:
if self.is_polarized():
self._dump_arr_2_hdf5(self.get_intensity(polarization=Polarization.TOTAL),"intensity/wfr_intensity", filename, subgroupname)
self._dump_arr_2_hdf5(self.get_intensity(polarization=Polarization.SIGMA),"intensity/wfr_intensity_s", filename, subgroupname)
self._dump_arr_2_hdf5(self.get_intensity(polarization=Polarization.PI),"intensity/wfr_intensity_p", filename, subgroupname)
else:
self._dump_arr_2_hdf5(self.get_intensity(),"intensity/wfr_intensity", filename, subgroupname)
if phase:
if self.is_polarized():
self._dump_arr_2_hdf5(self.get_phase(polarization=Polarization.SIGMA)-self.get_phase(polarization=Polarization.PI),
"phase/wfr_phase", filename, subgroupname)
self._dump_arr_2_hdf5(self.get_phase(polarization=Polarization.SIGMA),"phase/wfr_phase_s", filename, subgroupname)
self._dump_arr_2_hdf5(self.get_phase(polarization=Polarization.PI),"phase/wfr_phase_p", filename, subgroupname)
else:
self._dump_arr_2_hdf5(self.get_phase(polarization=Polarization.SIGMA),"phase/wfr_phase", filename, subgroupname)
# add mesh and SRW information
f = h5py.File(filename, 'a')
f1 = f[subgroupname]
# point to the default data to be plotted
f1.attrs['NX_class'] = 'NXentry'
f1.attrs['default'] = 'intensity'
# TODO: add self interpreting decoder
# f1["wfr_method"] = "WOFRY"
f1["wfr_dimension"] = 1
f1["wfr_photon_energy"] = self.get_photon_energy()
x = self.get_abscissas()
| f1["wfr_mesh"] = numpy.array([x[0],x[-1],x.size])
            # Add NXdata attributes for automatic plotting with silx view
myflags = [intensity,phase]
mylabels = ['intensity','phase']
for i,label in enumerate(mylabels):
if myflags[i]:
f2 = f1[mylabels[i]]
f2.attrs['NX_class'] = 'NXdata'
f2.attrs['signal'] = 'wfr_%s'%(mylabels[i])
f2.attrs['axes'] = b'axis_x'
f3 = f2["wfr_%s"%(mylabels[i])]
# axis data
ds = f2.create_dataset('axis_x', data=1e6*x)
ds.attrs['units'] = 'microns'
                    ds.attrs['long_name'] = 'Pixel Size (microns)' # suggested X axis plot label
f.close()
except:
# TODO: check exit??
if overwrite is not True:
raise Exception("Bad input argument")
os.remove(filename)
if verbose: print("save_h5_file: file deleted %s"%filename)
self.save_h5_file(filename,subgroupname, intensity=intensity, phase=phase, overwrite=False)
if verbose: print("save_h5_file: written/updated %s data in file: %s"%(subgroupname,filename))
@classmethod
def load_h5_file(cls,filename,filepath="wfr"):
try:
f = h5py.File(filename, 'r')
mesh = f[filepath+"/wfr_mesh"][()]
complex_amplitude_s = f[filepath+"/wfr_complex_amplitude_s"][()]
energy = f[filepath + "/wfr_photon_energy"][()]
try:
complex_amplitude_p = f[filepath + "/wfr_complex_amplitude_p"][()]
except:
complex_amplitude_p = None
f.close()
except:
raise Exception("Failed to load 2D wavefront from h5 file: "+filename)
wfr = cls.initialize_wavefront_from_arrays(
numpy.linspace(mesh[0],mesh[1],int(mesh[2])),
complex_amplitude_s,complex_amplitude_p)
wfr.set_photon_energy(energy)
return wfr
if __name__ == "__main__":
# w = GenericWavefront1D.initialize_wavefront_from_steps(polarization=Polarization.TOTAL)
# w.save_h5_file("/tmp/wf.h5",subgroupname="wfr",intensity=True,phase=False,overwrite=True,verbose=True)
# w2 = GenericWavefront1D.load_h5_file("/tmp/wf.h5",filepath="wfr")
# assert(w2.is_identical(w))
x = numpy.linspace(100e-6, -100e-6, 100)
y = numpy.exp( -(x-50e-6)**2 / 2 / (10e-6)**2 )
wfr = GenericWavefront1D.initialize_wavefront_from_arrays(x, numpy.sqrt(y) + 0j)
from srxraylib.plot.gol import plot
plot(1e6 * x,y,
1e6 * wfr.get_abscissas(), wfr.get_intensity(),
         legend=["original", "wfr"])
AdvancedOptionsView.tsx
import React from 'react';
import Grid from '@material-ui/core/Grid';
import TextField from '@material-ui/core/TextField';
import Button from '@material-ui/core/Button';
import Radio from '@material-ui/core/Radio';
import Typography from '@material-ui/core/Typography';
import FormControlLabel from '@material-ui/core/FormControlLabel';
import SaveIcon from '@material-ui/icons/Save';
import RadioGroup from '@material-ui/core/RadioGroup';
import Divider from '@material-ui/core/Divider';
import Switch from '@material-ui/core/Switch';
import Tooltip from '@material-ui/core/Tooltip';
// redux
import { useSelector, useDispatch } from 'react-redux';
import { RootState } from '../redux/store';
// my imports
import * as controller from '../controller';
import useStyles from '../styles';
import {
MeanType,
setMeanType,
toggleMultiLabel,
setZerosRight,
setZerosLeft,
} from '../redux/App.store';
const AdvancedOptionsView = () => {
const classes = useStyles();
const meanType = useSelector((state: RootState) => state.app.meanType);
const multiLabel = useSelector((state: RootState) => state.app.multiLabel);
const zerosLeft = useSelector((state: RootState) => state.app.zerosLeft);
const zerosRight = useSelector((state: RootState) => state.app.zerosRight);
const dispatch = useDispatch();
// functions
const handleSaveClicked = () => {
const options: controller.CommonOptions = {
meanType: meanType === MeanType.geo ? 'geo' : 'ari',
zerosLeft: zerosLeft,
zerosRight: zerosRight,
logScale: true,
multiLabel: multiLabel,
};
controller.setConfig(options);
};
return (
<>
<Grid item container>
<Grid item container xs={12}>
<Grid item>
<Tooltip title="O manual do MasterSizer recomenda a utilização da média geométrica.">
<Typography variant="h6">Tipo de média:</Typography>
</Tooltip>
</Grid>
</Grid>
<Grid item className={classes.typeDiaRadio}>
<RadioGroup
value={meanType === MeanType.geo ? 'geo' : 'ari'}
onChange={(e) =>
dispatch(
setMeanType(
e.target.value === 'geo' ? MeanType.geo : MeanType.ari
)
)
}
>
<FormControlLabel
value="geo"
control={<Radio color="primary" />}
label="Geométrica"
labelPlacement="end"
/>
<FormControlLabel
value="ari"
control={<Radio color="primary" />}
label="Aritimética"
labelPlacement="end"
/>
</RadioGroup>
</Grid>
<Grid item xs={12} className={classes.divider}>
<Divider />
</Grid>
<Grid item xs>
<FormControlLabel
          control={<Switch color="primary" />}
label="Não colocar legendas nos gráficos de múltiplos arquivos"
checked={!multiLabel}
onChange={() => dispatch(toggleMultiLabel())}
/>
</Grid>
<Grid item xs={12} className={classes.divider}>
<Divider />
</Grid>
<Grid item container spacing={1}>
<Grid container item xs={12}>
<Grid item>
<Tooltip title="Número de zeros a serem ignorados. Recomenda-se o menor valor possível; muitos zeros influenciam na otimização dos paramêtros do modelo.">
<Typography variant="h6">Ignorar zeros:</Typography>
</Tooltip>
</Grid>
</Grid>
<Grid container item className={classes.ZerosIgnore} spacing={2}>
<Grid item xs={12}>
<TextField
label="Zeros à esquerda"
type="number"
required
value={zerosLeft}
InputLabelProps={{
shrink: true,
}}
onChange={(e) => dispatch(setZerosLeft(Number(e.target.value)))}
/>
</Grid>
<Grid item xs={12}>
<TextField
label="Zeros à direita"
type="number"
required
value={zerosRight}
InputLabelProps={{
shrink: true,
}}
onChange={(e) =>
dispatch(setZerosRight(Number(e.target.value)))
}
/>
</Grid>
</Grid>
</Grid>
<Grid item container xs={12} justify={'flex-end'}>
        <Grid item>
          <Button
            variant="contained"
            color="secondary"
            startIcon={<SaveIcon />}
            onClick={handleSaveClicked}
disabled={true}
>
Salvar configurações atuais
</Button>
</Grid>
</Grid>
</Grid>
</>
);
};
export default AdvancedOptionsView;
sliderSubroutines.js
import UserInpSliderOpenArrow from "../Images/InputSliderOpenArr.svg";
import UserInpSliderCloseArrow from "../Images/CloseInputSliderArr.svg";
export function UserInpSliderSubroutine(state, ref1, ref2) {
if (state === 0) {
ref1.current.style.marginLeft = "1138px";
ref2.current.src = UserInpSliderCloseArrow;
return 1;
} else {
ref1.current.style.marginLeft = "1320px";
ref2.current.src = UserInpSliderOpenArrow;
return 0;
}
}
export function SliderBackgroundUpdator(refSlider, colour, value) {
if (value === undefined) {
value = refSlider.current.value;
}
let temp =
"linear-gradient(to right, #clr 0%, #clr #value%, #EFEFF1 #value%, #EFEFF1 100%)";
temp = temp.split("#clr").join(colour);
temp = temp.split("#value").join(value);
refSlider.current.style.background = temp;
}
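// Example (sketch): SliderBackgroundUpdator(refSlider, "#B83B5E", 40) sets
// refSlider.current.style.background to
// "linear-gradient(to right, #B83B5E 0%, #B83B5E 40%, #EFEFF1 40%, #EFEFF1 100%)"
// (the colour and value here are illustrative only).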
useProductAssociations.ts
import { ref, computed, ComputedRef, Ref, unref } from "vue-demi";
import {
Product,
CrossSelling,
} from "@shopware-pwa/commons/interfaces/models/content/product/Product";
import {
invokeGet,
invokePost,
getProductDetailsEndpoint,
} from "@shopware-pwa/shopware-6-client";
import { getApplicationContext } from "@shopware-pwa/composables";
/**
* interface for {@link IUseProductAssociations} composable
* @beta
*/
export interface IUseProductAssociations {
/**
* Start loading resources
*/
  loadAssociations: (params?: {
    params?: unknown;
    method?: "post" | "get";
  }) => Promise<void>;
/**
* If it's loading - indicator
*/
isLoading: ComputedRef<boolean>;
productAssociations: ComputedRef<CrossSelling[]>;
}
/**
* Get product association entity. Options - {@link IUseProductAssociations}
*
* @example
* Example of possibilities:
*
* ```ts
 * const { isLoading, loadAssociations, productAssociations } = useProductAssociations({product, associationContext: "cross-selling"})
 * if (!productAssociations.value.length) {
 *  await loadAssociations()
* }
* ```
* @beta
*/
export function useProductAssociations(params: {
product: Ref<Product> | Product;
associationContext: "cross-selling" | "reviews";
}): IUseProductAssociations {
const COMPOSABLE_NAME = "useProductAssociations";
const contextName = COMPOSABLE_NAME;
const product = unref(params.product);
const association = params.associationContext;
const { apiInstance } = getApplicationContext({ contextName });
const isLoading = ref(false);
const associations = ref([]);
interface loadAssociationsParams {
params?: unknown;
method?: "post" | "get";
}
const loadAssociations = async ({
method,
params,
}: loadAssociationsParams = {}) => {
isLoading.value = true;
try {
if (method && method === "get") {
const response = await invokeGet(
{
address: `${getProductDetailsEndpoint(product.id)}/${association}${
params ? params : ""
}`,
},
apiInstance
);
associations.value = response?.data;
return;
}
const response = await invokePost(
{
address: `${getProductDetailsEndpoint(product.id)}/${association}`,
payload: params,
},
apiInstance
);
associations.value = response?.data;
} catch (error) {
console.error(
"[useProductAssociations][loadAssociations][error]:",
error
);
} finally {
isLoading.value = false;
}
};
return {
isLoading: computed(() => isLoading.value),
productAssociations: computed(() => associations.value || []),
loadAssociations,
};
}
user.go
package external
import (
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/jmoiron/sqlx"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
pb "github.com/brocaar/chirpstack-api/go/v3/as/external/api"
"github.com/brocaar/chirpstack-application-server/internal/api/external/auth"
"github.com/brocaar/chirpstack-application-server/internal/api/helpers"
"github.com/brocaar/chirpstack-application-server/internal/storage"
)
// UserAPI exports the User related functions.
type UserAPI struct {
validator auth.Validator
}
// InternalUserAPI exports the internal User related functions.
type InternalUserAPI struct {
validator auth.Validator
}
// NewUserAPI creates a new UserAPI.
func NewUserAPI(validator auth.Validator) *UserAPI {
return &UserAPI{
validator: validator,
}
}
// Create creates the given user.
func (a *UserAPI) Create(ctx context.Context, req *pb.CreateUserRequest) (*pb.CreateUserResponse, error) {
if req.User == nil {
return nil, grpc.Errorf(codes.InvalidArgument, "user must not be nil")
}
if err := a.validator.Validate(ctx,
auth.ValidateUsersAccess(auth.Create)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
// validate if the client has admin rights for the given organizations
// to which the user must be linked
for _, org := range req.Organizations {
if err := a.validator.Validate(ctx,
auth.ValidateIsOrganizationAdmin(org.OrganizationId)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
}
user := storage.User{
Username: req.User.Username,
SessionTTL: req.User.SessionTtl,
IsAdmin: req.User.IsAdmin,
IsActive: req.User.IsActive,
Email: req.User.Email,
Note: req.User.Note,
}
isAdmin, err := a.validator.GetIsAdmin(ctx)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
if !isAdmin {
// non-admin users are not able to modify the fields below
user.IsAdmin = false
user.IsActive = true
user.SessionTTL = 0
}
var userID int64
err = storage.Transaction(func(tx sqlx.Ext) error {
userID, err = storage.CreateUser(ctx, tx, &user, req.Password)
if err != nil {
return err
}
for _, org := range req.Organizations {
if err := storage.CreateOrganizationUser(ctx, tx, org.OrganizationId, userID, org.IsAdmin, org.IsDeviceAdmin, org.IsGatewayAdmin); err != nil {
return err
}
}
return nil
})
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
return &pb.CreateUserResponse{Id: userID}, nil
}
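// Usage sketch (illustrative, not from the original source): invoking Create
// over gRPC; client construction and authentication metadata are assumed.
//
//	resp, err := client.Create(ctx, &pb.CreateUserRequest{
//		User:     &pb.User{Username: "alice", Email: "alice@example.com", IsActive: true},
//		Password: "secret",
//	})
//	// resp.Id holds the ID of the newly created user.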
// Get returns the user matching the given ID.
func (a *UserAPI) Get(ctx context.Context, req *pb.GetUserRequest) (*pb.GetUserResponse, error) {
if err := a.validator.Validate(ctx,
auth.ValidateUserAccess(req.Id, auth.Read)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
user, err := storage.GetUser(ctx, storage.DB(), req.Id)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
resp := pb.GetUserResponse{
User: &pb.User{
Id: user.ID,
Username: user.Username,
SessionTtl: user.SessionTTL,
IsAdmin: user.IsAdmin,
IsActive: user.IsActive,
Email: user.Email,
Note: user.Note,
},
}
resp.CreatedAt, err = ptypes.TimestampProto(user.CreatedAt)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
resp.UpdatedAt, err = ptypes.TimestampProto(user.UpdatedAt)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
return &resp, nil
}
// List lists the users.
func (a *UserAPI) List(ctx context.Context, req *pb.ListUserRequest) (*pb.ListUserResponse, error) {
if err := a.validator.Validate(ctx,
auth.ValidateUsersAccess(auth.List)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
users, err := storage.GetUsers(ctx, storage.DB(), int(req.Limit), int(req.Offset), req.Search)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
totalUserCount, err := storage.GetUserCount(ctx, storage.DB(), req.Search)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
resp := pb.ListUserResponse{
TotalCount: int64(totalUserCount),
}
for _, u := range users {
row := pb.UserListItem{
Id: u.ID,
Username: u.Username,
SessionTtl: u.SessionTTL,
IsAdmin: u.IsAdmin,
IsActive: u.IsActive,
}
row.CreatedAt, err = ptypes.TimestampProto(u.CreatedAt)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
row.UpdatedAt, err = ptypes.TimestampProto(u.UpdatedAt)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
resp.Result = append(resp.Result, &row)
}
return &resp, nil
}
// Update updates the given user.
func (a *UserAPI) Update(ctx context.Context, req *pb.UpdateUserRequest) (*empty.Empty, error) {
if req.User == nil {
return nil, grpc.Errorf(codes.InvalidArgument, "user must not be nil")
}
if err := a.validator.Validate(ctx,
auth.ValidateUserAccess(req.User.Id, auth.Update)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
userUpdate := storage.UserUpdate{
ID: req.User.Id,
Username: req.User.Username,
IsAdmin: req.User.IsAdmin,
IsActive: req.User.IsActive,
SessionTTL: req.User.SessionTtl,
Email: req.User.Email,
Note: req.User.Note,
}
err := storage.UpdateUser(ctx, storage.DB(), userUpdate)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
return &empty.Empty{}, nil
}
// Delete deletes the user matching the given ID.
func (a *UserAPI) Delete(ctx context.Context, req *pb.DeleteUserRequest) (*empty.Empty, error) {
if err := a.validator.Validate(ctx,
auth.ValidateUserAccess(req.Id, auth.Delete)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
err := storage.DeleteUser(ctx, storage.DB(), req.Id)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
return &empty.Empty{}, nil
}
// UpdatePassword updates the password for the user matching the given ID.
func (a *UserAPI) UpdatePassword(ctx context.Context, req *pb.UpdateUserPasswordRequest) (*empty.Empty, error) {
if err := a.validator.Validate(ctx,
auth.ValidateUserAccess(req.UserId, auth.UpdateProfile)); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
err := storage.UpdatePassword(ctx, storage.DB(), req.UserId, req.Password)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
return &empty.Empty{}, nil
}
// NewInternalUserAPI creates a new InternalUserAPI.
func NewInternalUserAPI(validator auth.Validator) *InternalUserAPI {
return &InternalUserAPI{
validator: validator,
}
}
// Login validates the login request and returns a JWT token.
func (a *InternalUserAPI) Login(ctx context.Context, req *pb.LoginRequest) (*pb.LoginResponse, error) {
jwt, err := storage.LoginUser(ctx, storage.DB(), req.Username, req.Password)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
return &pb.LoginResponse{Jwt: jwt}, nil
}
type claims struct {
Username string `json:"username"`
}
// Profile returns the user profile.
func (a *InternalUserAPI) Profile(ctx context.Context, req *empty.Empty) (*pb.ProfileResponse, error) {
if err := a.validator.Validate(ctx,
auth.ValidateActiveUser()); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
username, err := a.validator.GetUsername(ctx)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
// Get the user id based on the username.
user, err := storage.GetUserByUsername(ctx, storage.DB(), username)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
prof, err := storage.GetProfile(ctx, storage.DB(), user.ID)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
resp := pb.ProfileResponse{
User: &pb.User{
Id: prof.User.ID,
Username: prof.User.Username,
SessionTtl: prof.User.SessionTTL,
IsAdmin: prof.User.IsAdmin,
IsActive: prof.User.IsActive,
},
Settings: &pb.ProfileSettings{
DisableAssignExistingUsers: auth.DisableAssignExistingUsers,
},
}
for _, org := range prof.Organizations {
row := pb.OrganizationLink{
OrganizationId: org.ID,
OrganizationName: org.Name,
IsAdmin: org.IsAdmin,
IsDeviceAdmin: org.IsDeviceAdmin,
IsGatewayAdmin: org.IsGatewayAdmin,
}
row.CreatedAt, err = ptypes.TimestampProto(org.CreatedAt)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
row.UpdatedAt, err = ptypes.TimestampProto(org.UpdatedAt)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
resp.Organizations = append(resp.Organizations, &row)
}
return &resp, nil
}
// Branding returns UI branding.
func (a *InternalUserAPI) Branding(ctx context.Context, req *empty.Empty) (*pb.BrandingResponse, error) {
resp := pb.BrandingResponse{
Logo: brandingHeader,
Registration: brandingRegistration,
Footer: brandingFooter,
}
return &resp, nil
}
// GlobalSearch performs a global search.
func (a *InternalUserAPI) GlobalSearch(ctx context.Context, req *pb.GlobalSearchRequest) (*pb.GlobalSearchResponse, error) {
if err := a.validator.Validate(ctx,
auth.ValidateActiveUser()); err != nil {
return nil, grpc.Errorf(codes.Unauthenticated, "authentication failed: %s", err)
}
isAdmin, err := a.validator.GetIsAdmin(ctx)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
username, err := a.validator.GetUsername(ctx)
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
results, err := storage.GlobalSearch(ctx, storage.DB(), username, isAdmin, req.Search, int(req.Limit), int(req.Offset))
if err != nil {
return nil, helpers.ErrToRPCError(err)
}
var out pb.GlobalSearchResponse
for _, r := range results {
res := pb.GlobalSearchResult{
Kind: r.Kind,
Score: float32(r.Score),
}
if r.OrganizationID != nil {
res.OrganizationId = *r.OrganizationID
}
if r.OrganizationName != nil {
res.OrganizationName = *r.OrganizationName
}
if r.ApplicationID != nil {
res.ApplicationId = *r.ApplicationID
}
if r.ApplicationName != nil {
res.ApplicationName = *r.ApplicationName
}
if r.DeviceDevEUI != nil {
res.DeviceDevEui = r.DeviceDevEUI.String()
}
if r.DeviceName != nil {
res.DeviceName = *r.DeviceName
}
if r.GatewayMAC != nil {
res.GatewayMac = r.GatewayMAC.String()
}
if r.GatewayName != nil {
res.GatewayName = *r.GatewayName
}
out.Result = append(out.Result, &res)
}
return &out, nil
}
App.js | class App extends React.Component {
constructor() {
super()
this.state = {
img: 'https://wangwenyue.github.io/Music_Player/pics/3.jpg',
song: 'Chasing Pavement',
artist: 'Adele',
src: 'https://raw.githubusercontent.com/eddy0/ReactExpress/master/static/music/4.mp3',
isPlay: false,
mode: 'normal',
canPlay: false,
}
}
fetchData = () => {
let music = window.localStorage.getItem('music')
if (music && music.length > 0) {
music = JSON.parse(music)
this.setState(() => {
return {
...music
}})
}
fetchAlbum().then((data) => {
this.album = data
})
}
componentWillMount() {
this.fetchData()
}
saveData = () => {
let {src,song, currentTime, duration, artist, img, mode} = this.state
log('data', artist, song, )
localStorage.clear()
let data = JSON.stringify({src, song, currentTime, duration, artist, img, mode})
if (currentTime !== undefined) {
localStorage.music = data
}
}
componentDidMount() {
let isOnIOS = navigator.userAgent.match(/iPhone/i)
let eventName = isOnIOS ? "pagehide" : "beforeunload";
window.addEventListener( eventName , () => {
this.saveData()
})
if (this.state.currentTime ) {
let time = this.state.currentTime
this.audio.currentTime = time
// Safari needs to download the music first before currentTime can be set
if (this.audio.currentTime === 0 && time !== 0) {
this.audio.oncanplay = () => {
this.audio.currentTime = this.state.currentTime
}
}
}
}
componentWillUnmount() {
// this.saveData()
}
handlePlay = (status) => {
this.setState((prevState) => ({
isLoading: false,
isPlay: !prevState.isPlay,
canPlay: true,
}))
if (status === true) {
this.audio.play()
} else {
this.audio.pause()
}
}
handleLoad = () => {
this.setState((prevState) => ({
isLoading: true,
isPlay: false,
}))
this.audio.addEventListener('canplay', () => {
if (this.state.canPlay === true && this.audio.readyState === 4) {
this.audio.play()
this.setState((prevState) => ({
isLoading: false,
isPlay: true,
}))
}
})
}
handleSwitch = (offset) => {
let index = this.album.findIndex((song) => song.song === this.state.song )
if (index > -1) {
let nextIndex = (this.album.length + index + offset) % this.album.length
this.setState(() => ({
...this.album[nextIndex],
currentTime: 0
}))
this.audio.currentTime = 0
}
}
handleMode = () => {
let modes = ['repeat', 'shuffle', 'normal']
let index = modes.findIndex((mode) => this.state.mode === mode )
if (index > -1) {
let nextIndex =(index + 1 ) % modes.length
this.setState(() => ({
mode: modes[nextIndex]
}))
}
}
handleRepeatMode = () => {
this.audio.currentTime = 0
}
handleNormalMode = () => {
let index = this.album.findIndex((song) => song.song === this.state.song)
if (index > -1) {
let nextIndex = (index + 1) % this.album.length
this.setState(() => ({
...this.album[nextIndex]
}))
}
}
handleShuffleMode = () => {
let index = Math.floor( Math.random() * this.album.length )
this.setState(() => ({
...this.album[index]
}))
}
handleEnd = () => {
const map = {
'repeat': this.handleRepeatMode,
'normal': this.handleNormalMode,
'shuffle': this.handleShuffleMode,
}
let mode = this.state.mode
map[mode]()
}
updateTime = () => {
let audio = this.audio
let currentTime = audio.currentTime
let duration = audio.duration
this.setState(() => {
return {
currentTime,
duration,
audio,
}
})
}
changeBar = (percentage) => {
// this.audio.currentTime = this.audio.duration * percentage / 100
this.audio.currentTime = percentage
}
render() {
return(
<React.Fragment>
<div className='mask' style={{backgroundImage: `url(${this.state.img})`}}>
</div>
<div className="container" >
<Img img={this.state.img} />
<div className="music__info mt--md" >
<div className="info__song">{this.state.song}</div>
<div className="info__artist">{this.state.artist}</div>
</div>
<Progress currentTime = {this.state.currentTime}
duration={this.state.duration}
changeBar={this.changeBar}
audio={this.state.audio}
/>
<Controller {...this.state}
audio={this.state.audio }
handlePlay={this.handlePlay}
handleMode={this.handleMode}
handleSwitch={this.handleSwitch}
/>
</div>
<audio src={this.state.src}
ref={(audio) => this.audio = audio}
onTimeUpdate={this.updateTime}
onEnded={this.handleEnd}
onCanPlay={this.autoPlay}
onLoadStart={this.handleLoad}
/>
</React.Fragment>
)
}
}
c_path.rs | //! c_path.rs |
pdfassembler.ts | import { PDFDocument } from 'pdfjs-dist/lib/core/document';
// import { Jbig2Stream } from 'pdfjs-dist/lib/core/jbig2_stream';
// import { JpegStream } from 'pdfjs-dist/lib/core/jpeg_stream';
// import { Lexer, Parser } from 'pdfjs-dist/lib/core/parser';
import { PDFManager, LocalPdfManager } from 'pdfjs-dist/lib/core/pdf_manager';
import { Dict, Name, Ref } from 'pdfjs-dist/lib/core/primitives';
import {
DecodeStream, Stream, FlateStream, PredictorStream, DecryptStream,
Ascii85Stream, RunLengthStream, LZWStream
} from 'pdfjs-dist/lib/core/stream';
import { arraysToBytes, bytesToString } from 'pdfjs-dist/lib/shared/util';
import { deflate } from 'pako';
import * as queue from 'promise-queue';
export type TypedArray = Int8Array | Uint8Array | Int16Array | Uint16Array |
Int32Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array;
export type BinaryFile = Blob | File | ArrayBuffer | TypedArray;
export class PDFAssembler {
pdfManager: PDFManager = null;
userPassword = '';
ownerPassword = '';
nextNodeNum = 1;
pdfTree: any = Object.create(null);
recoveryMode = false;
objCache: any = Object.create(null);
objCacheQueue: any = Object.create(null);
pdfManagerArrays = [];
pdfAssemblerArrays = [];
promiseQueue: any = new queue(1);
indent: boolean|string|number = false;
compress = true;
encrypt = false; // not yet implemented
groupPages = true;
pageGroupSize = 16;
pdfVersion = '1.7';
constructor(inputData?: BinaryFile|Object, userPassword = '') {
if (userPassword.length) { this.userPassword = userPassword; }
if (typeof inputData === 'object') {
if (inputData instanceof Blob || inputData instanceof ArrayBuffer || inputData instanceof Uint8Array) {
this.promiseQueue.add(() => this.toArrayBuffer(inputData)
.then(arrayBuffer => this.pdfManager = new LocalPdfManager(1, arrayBuffer, userPassword, {}, ''))
.then(() => this.pdfManager.ensureDoc('checkHeader', []))
.then(() => this.pdfManager.ensureDoc('parseStartXRef', []))
.then(() => this.pdfManager.ensureDoc('parse', [this.recoveryMode]))
.then(() => this.pdfManager.ensureDoc('numPages'))
.then(() => this.pdfManager.ensureDoc('fingerprint'))
.then(() => {
this.pdfTree['/Root'] = this.resolveNodeRefs();
const infoDict = new Dict();
infoDict._map = this.pdfManager.pdfDocument.documentInfo;
this.pdfTree['/Info'] = this.resolveNodeRefs(infoDict) || {};
delete this.pdfTree['/Info']['/IsAcroFormPresent'];
delete this.pdfTree['/Info']['/IsXFAPresent'];
delete this.pdfTree['/Info']['/PDFFormatVersion'];
this.pdfTree['/Info']['/Producer'] = '(PDF Assembler)';
this.pdfTree['/Info']['/ModDate'] = '(' + this.toPdfDate() + ')';
this.flattenPageTree();
})
);
} else {
this.pdfTree = inputData;
}
} else {
this.pdfTree = {
'documentInfo': {},
'/Info': {
'/Producer': '(PDF Assembler)',
'/CreationDate': '(' + this.toPdfDate() + ')',
'/ModDate': '(' + this.toPdfDate() + ')',
},
'/Root': {
'/Type': '/Catalog',
'/Pages': {
'/Type': '/Pages',
'/Count': 1,
'/Kids': [ {
'/Type': '/Page',
'/MediaBox': [ 0, 0, 612, 792 ], // 8.5" x 11"
'/Contents': [],
'/Resources': {},
// To make a "hello world" pdf, replace the above two lines with:
// '/Contents': [ { 'stream': '1 0 0 1 72 708 cm BT /Helv 12 Tf (Hello world!) Tj ET' } ],
// '/Resources': { '/Font': { '/Helv': { '/Type': '/Font', '/Subtype': '/Type1', '/BaseFont': '/Helvetica' } } },
} ],
}
},
};
}
}
get pdfDocument(): Promise<PDFDocument> {
return this.promiseQueue.add(() => Promise.resolve(this.pdfManager && this.pdfManager.pdfDocument));
}
get numPages(): Promise<number> {
this.promiseQueue.add(() => this.flattenPageTree());
return this.promiseQueue.add(() => Promise.resolve(this.pdfTree['/Root']['/Pages']['/Count']));
}
get pdfObject() {
return this.promiseQueue.add(() => Promise.resolve(this.pdfTree));
}
getPDFDocument(): Promise<PDFDocument> {
return this.promiseQueue.add(() => Promise.resolve(this.pdfManager && this.pdfManager.pdfDocument));
}
countPages(): Promise<number> {
this.promiseQueue.add(() => this.flattenPageTree());
return this.promiseQueue.add(() => Promise.resolve(this.pdfTree['/Root']['/Pages']['/Count']));
}
getPDFStructure(): Promise<any> {
return this.promiseQueue.add(() => Promise.resolve(this.pdfTree));
}
toArrayBuffer(file: BinaryFile): Promise<ArrayBuffer> {
const typedArrays = [
Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array,
Uint32Array, Uint8ClampedArray, Float32Array, Float64Array
];
return file instanceof ArrayBuffer ? Promise.resolve(file) :
typedArrays.some(typedArray => file instanceof typedArray) ?
Promise.resolve(<ArrayBuffer>(<TypedArray>file).buffer) :
file instanceof Blob ?
new Promise((resolve, reject) => {
const fileReader = new FileReader();
fileReader.onload = () => resolve(fileReader.result);
fileReader.onerror = () => reject(fileReader.error);
fileReader.readAsArrayBuffer(<Blob>file);
}) :
Promise.resolve(new ArrayBuffer(0));
}
resolveNodeRefs(
node = this.pdfManager.pdfDocument.catalog.catDict, name?, parent?, contents = false
) {
if (node instanceof Ref) {
const refKey = `${node.num}-${node.gen}`;
if (this.objCache[refKey] === undefined) {
this.objCache[refKey] = null; // Stops recursive loops
const refNode = this.pdfManager.pdfDocument.xref.fetch(node);
this.objCache[refKey] = this.resolveNodeRefs(refNode, name, parent, contents);
if (
typeof this.objCache[refKey] === 'object' &&
this.objCache[refKey] !== null &&
!(this.objCache[refKey] instanceof Array)
) {
Object.assign(this.objCache[refKey], { num: 0, gen: 0 });
}
if (this.objCacheQueue[refKey] !== undefined) {
Object.keys(this.objCacheQueue[refKey]).forEach(fixName =>
this.objCacheQueue[refKey][fixName].forEach(fixParent =>
fixParent[fixName] = this.objCache[refKey]
)
);
delete this.objCacheQueue[refKey];
}
} else if (this.objCache[refKey] === null) {
if (this.objCacheQueue[refKey] === undefined) { this.objCacheQueue[refKey] = Object.create(null); }
if (this.objCacheQueue[refKey][name] === undefined) { this.objCacheQueue[refKey][name] = []; }
this.objCacheQueue[refKey][name].push(parent);
return node;
}
return this.objCache[refKey];
} else if (node instanceof Name) {
return '/' + node.name;
} else if (typeof node === 'string') {
return `(${node})`;
} else if (node instanceof Array) {
const existingArrayIndex = this.pdfManagerArrays.indexOf(node);
if (existingArrayIndex > -1) {
return this.pdfAssemblerArrays[existingArrayIndex];
} else {
const newArrayNode = [];
this.pdfManagerArrays.push(node);
this.pdfAssemblerArrays.push(newArrayNode);
node.forEach((element, index) => newArrayNode.push(
this.resolveNodeRefs(element, index, newArrayNode, contents)
));
return newArrayNode;
}
} else if (typeof node === 'object' && node !== null) {
const objectNode: any = Object.create(null);
let source = null;
const nodeMap = node.dict instanceof Dict ? node.dict._map : node instanceof Dict ? node._map : null;
if (nodeMap) {
Object.keys(nodeMap).forEach((key) => objectNode[`/${key}`] =
this.resolveNodeRefs(nodeMap[key], `/${key}`, objectNode, !!nodeMap.Contents)
);
}
if (node instanceof DecodeStream || node instanceof Stream) {
const streamsToDecode =
[FlateStream, PredictorStream, DecryptStream, Ascii85Stream, RunLengthStream, LZWStream];
if (objectNode['/Subtype'] !== '/Image' &&
streamsToDecode.some(streamToDecode => node instanceof streamToDecode)
) {
objectNode.stream = node.getBytes();
if (objectNode['/Filter'] instanceof Array && objectNode['/Filter'].length > 1) {
objectNode['/Filter'].shift();
} else {
delete objectNode['/Filter'];
}
}
if (!objectNode.stream) {
for (const checkSource of [
node, node.stream, node.stream && node.stream.str,
node.str, node.str && node.str.str
]) {
if (checkSource instanceof Stream || checkSource instanceof DecryptStream) {
source = checkSource;
break;
}
}
// const checkStream = (streamSource) => {
// if (!source && (
// streamSource instanceof Stream ||
// streamSource instanceof DecryptStream
// )) {
// source = streamSource;
// }
// };
// checkStream(node);
// checkStream(node.stream);
// checkStream(node.stream && node.stream.str);
// checkStream(node.str);
// checkStream(node.str && node.str.str);
if (source) {
source.reset();
objectNode.stream = source.getBytes();
}
}
}
if (objectNode.stream) {
if (contents || objectNode['/Subtype'] === '/XML' ||
(objectNode.stream && objectNode.stream.every(byte => byte < 128))
) {
// TODO: split command stream into array of commands?
objectNode.stream = bytesToString(objectNode.stream);
}
delete objectNode['/Length'];
}
if (node === this.pdfManager.pdfDocument.catalog.catDict) {
const catKey = node.objId.slice(0, -1) + '-0';
this.objCache[catKey] = Object.assign(objectNode, { num: this.nextNodeNum++, gen: 0 });
}
return objectNode;
} else {
return node;
}
}
pad(number, digits): string {
return ('0'.repeat(digits - 1) + parseInt(number, 10)).slice(-digits);
}
toPdfDate(jsDate = new Date()): string {
if (!(jsDate instanceof Date)) { return null; }
const timezoneOffset = jsDate.getTimezoneOffset();
return 'D:' +
jsDate.getFullYear() +
this.pad(jsDate.getMonth() + 1, 2) +
this.pad(jsDate.getDate(), 2) +
this.pad(jsDate.getHours(), 2) +
this.pad(jsDate.getMinutes(), 2) +
this.pad(jsDate.getSeconds(), 2) +
(timezoneOffset < 0 ? '+' : '-') +
this.pad(Math.abs(Math.trunc(timezoneOffset / 60)), 2) + '\'' +
this.pad(Math.abs(timezoneOffset % 60), 2) + '\'';
}
fromPdfDate(pdfDate: string): Date {
if (typeof pdfDate !== 'string') { return null; }
if (pdfDate[0] === '(' && pdfDate[pdfDate.length - 1] === ')') { pdfDate = pdfDate.slice(1, -1); }
if (pdfDate.slice(0, 2) !== 'D:') { return null; }
const part = (start, end, offset = 0) => parseInt(pdfDate.slice(start, end), 10) + offset;
return new Date(
part(2, 6), part(6, 8, -1), part(8, 10), // year, month, day
part(10, 12), part(12, 14), part(14, 16), 0 // hours, minutes, seconds
);
}
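// Worked example (illustrative): at local time 2020-06-15 13:05:00 in a
// UTC-05:00 zone (getTimezoneOffset() === 300), toPdfDate() returns
// "D:20200615130500-05'00'"; fromPdfDate() parses the date and time fields
// back into a Date and ignores the timezone suffix.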
removeRootEntries(entries?: string[]): Promise<any> {
return this.pdfObject.then(tree => {
Object.keys(tree['/Root'])
.filter(key => entries && entries.length ?
// if specific entries specified, remove them
entries.includes(key) :
// otherwise, remove all non-required entries
!['/Type', '/Pages', 'num', 'gen'].includes(key)
)
.forEach(key => delete tree['/Root'][key]);
return tree;
});
}
flattenPageTree(
pageTree = this.pdfTree['/Root']['/Pages']['/Kids'],
parent = this.pdfTree['/Root']['/Pages']
) {
let flatPageTree = [];
pageTree.forEach((page) => flatPageTree = (page && page['/Kids']) ?
[...flatPageTree, ...this.flattenPageTree(page['/Kids'], page)] :
[...flatPageTree, page]
);
['/Resources', '/MediaBox', '/CropBox', '/Rotate']
.filter(attribute => parent[attribute])
.forEach(attribute => {
flatPageTree
.filter(page => !page[attribute])
.forEach(page => page[attribute] = parent[attribute]);
delete parent[attribute];
});
if (pageTree === this.pdfTree['/Root']['/Pages']['/Kids']) {
this.pdfTree['/Root']['/Pages']['/Count'] = flatPageTree.length;
this.pdfTree['/Root']['/Pages']['/Kids'] = flatPageTree;
} else {
return flatPageTree;
}
}
groupPageTree(
pageTree = this.pdfTree['/Root']['/Pages']['/Kids'],
parent = this.pdfTree['/Root']['/Pages'],
groupSize = this.pageGroupSize
) {
let groupedPageTree = [];
if (pageTree.length <= groupSize) {
groupedPageTree = pageTree.map(page => Object.assign(page, { 'num': 0, '/Parent': parent }));
} else {
let branchSize = groupSize, branches = Math.ceil(pageTree.length / branchSize);
if (pageTree.length > groupSize * groupSize) { [branchSize, branches] = [branches, branchSize]; }
for (let i = 0; i < branches; i++) {
const branchPages = pageTree.slice(branchSize * i, branchSize * (i + 1));
if (branchPages.length === 1) {
groupedPageTree.push(Object.assign(branchPages[0], { 'num': 0, '/Parent': parent }));
} else if (branchPages.length > 1) {
const pagesObject = {};
groupedPageTree.push(Object.assign(pagesObject, {
'num': 0, '/Type': '/Pages', '/Parent': parent, '/Count': branchPages.length,
'/Kids': this.groupPageTree(branchPages, pagesObject, groupSize),
}));
}
}
}
// TODO: fix / enable moving duplicate items to parent node
// if (groupedPageTree.every((t, i, g) => !i || t['/Resources'] === g[i - 1]['/Resources'])) {
// parent['/Resources'] = groupedPageTree[0]['/Resources'];
// groupedPageTree.forEach(t => delete t['/Resources']);
// }
// ['/MediaBox', '/CropBox', '/Rotate']
// .filter(attribute => groupedPageTree.every((t, i, g) => t[attribute] &&
// (!i || t[attribute].every((v, j) => v === g[i - 1][attribute][j]))
// ))
// .forEach(attribute => {
// parent[attribute] = groupedPageTree[0][attribute];
// groupedPageTree.forEach(t => delete t[attribute]);
// });
if (pageTree === this.pdfTree['/Root']['/Pages']['/Kids']) {
this.pdfTree['/Root']['/Pages']['/Count'] = pageTree.length;
this.pdfTree['/Root']['/Pages']['/Kids'] = groupedPageTree;
} else {
return groupedPageTree;
}
}
resetObjectIds(node = this.pdfTree['/Root']) {
if (node === this.pdfTree['/Root']) {
this.nextNodeNum = 1;
this.objCache = [];
}
if (!this.objCache.includes(node)) {
this.objCache.push(node);
const toReset = (item) => typeof item === 'object' && item !== null && !this.objCache.includes(item);
if (node instanceof Array) {
node.filter(toReset).forEach(item => this.resetObjectIds(item));
} else {
const makeIndirect = [
'/AcroForm', '/MarkInfo', '/Metadata', '/Names', '/Outlines', '/StructTreeRoot',
'/ViewerPreferences', '/Catalog', '/Pages', '/OCG'
];
if (typeof node.num === 'number' || node.stream || makeIndirect.includes(node['/Type'])) {
Object.assign(node, { num: this.nextNodeNum++, gen: 0 });
}
Object.keys(node)
.filter(key => toReset(node[key]))
.forEach(key => this.resetObjectIds(node[key]));
}
}
}
assemblePdf(nameOrOutputFormat = 'output.pdf'): Promise<File|ArrayBuffer|Uint8Array> {
return this.promiseQueue.add(() => new Promise((resolve, reject) => {
const stringByteMap = [ // encodes string chars by byte code
'\\000', '\\001', '\\002', '\\003', '\\004', '\\005', '\\006', '\\007',
'\\b', '\\t', '\\n', '\\013', '\\f', '\\r', '\\016', '\\017',
'\\020', '\\021', '\\022', '\\023', '\\024', '\\025', '\\026', '\\027',
'\\030', '\\031', '\\032', '\\033', '\\034', '\\035', '\\036', '\\037',
' ', '!', '"', '#', '$', '%', '&', '\'', '\\(', '\\)', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '\\177',
'\\200', '\\201', '\\202', '\\203', '\\204', '\\205', '\\206', '\\207',
'\\210', '\\211', '\\212', '\\213', '\\214', '\\215', '\\216', '\\217',
'\\220', '\\221', '\\222', '\\223', '\\224', '\\225', '\\226', '\\227',
'\\230', '\\231', '\\232', '\\233', '\\234', '\\235', '\\236', '\\237',
'\\240', '¡', '¢', '£', '¤', '¥', '¦', '§', '¨', '©', 'ª', '«', '¬', '\u00ad', '®', '¯',
'°', '±', '²', '³', '´', 'µ', '¶', '·', '¸', '¹', 'º', '»', '¼', '½', '¾', '¿',
'À', 'Á', 'Â', 'Ã', 'Ä', 'Å', 'Æ', 'Ç', 'È', 'É', 'Ê', 'Ë', 'Ì', 'Í', 'Î', 'Ï',
'Ð', 'Ñ', 'Ò', 'Ó', 'Ô', 'Õ', 'Ö', '×', 'Ø', 'Ù', 'Ú', 'Û', 'Ü', 'Ý', 'Þ', 'ß',
'à', 'á', 'â', 'ã', 'ä', 'å', 'æ', 'ç', 'è', 'é', 'ê', 'ë', 'ì', 'í', 'î', 'ï',
'ð', 'ñ', 'ò', 'ó', 'ô', 'õ', 'ö', '÷', 'ø', 'ù', 'ú', 'û', 'ü', 'ý', 'þ', 'ÿ',
];
const space = !this.indent ? '' :
typeof this.indent === 'number' ? ' '.repeat(this.indent) :
typeof this.indent === 'string' ? this.indent :
'\t'; // if this.indent == truthy
const newline = !this.indent ? '' : '\n';
// const newline = '\n';
// TODO: If no indent, break lines longer than 255 characters
this.flattenPageTree();
this.groupPageTree();
this.resetObjectIds();
this.pdfTree['/Root']['/Version'] = `/${this.pdfVersion}`; // default: 1.7
const indirectObjects: any[] = []; // initialize object cache
// create new PDF object from JavaScript object
const newPdfObject = (jsObject, depth = 0, nextIndent: string|boolean = true) => {
if (nextIndent === true) { nextIndent = newline + space.repeat(depth); }
let pdfObject = '';
// detect and encode name or string
if (typeof jsObject === 'string') {
const firstChar = jsObject[0], lastChar = jsObject[jsObject.length - 1];
if (firstChar === '/') { // name
// encode name chars: NUL, TAB, LF, FF, CR, space, #, %, (, ), /, <, >, [, ], {, }
const encodeChar = (char: string) => '\0\t\n\f\r #%()/<>[]{}'.indexOf(char) === -1 ?
char : `#${`0${char.charCodeAt(0).toString(16)}`.slice(-2)}`;
pdfObject = `/${jsObject.slice(1).replace(/./g, encodeChar)}`;
} else if (firstChar === '(' && lastChar === ')') { // string
const byteArray = Array.from(arraysToBytes(jsObject.slice(1, -1)));
const stringEncode = byteArray.map((byte: number) => stringByteMap[byte]).join('');
if (stringEncode.length < byteArray.length * 2) {
pdfObject = `(${stringEncode})`;
} else {
const hexEncode = byteArray.map((byte: number) => `0${byte.toString(16)}`.slice(-2)).join('');
pdfObject = `<${hexEncode}>`;
}
} else {
pdfObject = jsObject;
}
// convert true, false, null, or number to string
} else if (typeof jsObject !== 'object' || jsObject === null) {
pdfObject = jsObject === null || jsObject === undefined ? 'null' :
jsObject === true ? 'true' :
jsObject === false ? 'false' :
jsObject + '';
// format array
} else if (jsObject instanceof Array) {
const arrayItems = jsObject
.map((item, index) => newPdfObject(item, depth + 1, !!space || !!index))
.join('');
pdfObject = `[${arrayItems}${newline}${space.repeat(depth)}]`;
// if an indirect object has already been saved, just return a reference to it
} else if (typeof jsObject.num === 'number' && indirectObjects[jsObject.num] !== undefined) {
pdfObject = `${jsObject.num} ${jsObject.gen} R`;
// format dictionary, as either a direct or indirect object
} else {
// new indirect object
if (typeof jsObject.num === 'number') {
indirectObjects[jsObject.num] = null; // save placeholder to stop recursive loops
pdfObject = `${jsObject.num} ${jsObject.gen} obj${newline}`;
depth = 0;
// compress stream?
if (typeof jsObject.stream !== 'undefined') {
if (jsObject.stream.length) {
if (this.compress && !jsObject['/Filter']) {
// If stream is not already compressed, compress it
const compressedStream = deflate(arraysToBytes([jsObject.stream]));
// but use compressed version only if it is smaller overall
// (+ 19 for additional '/Filter/FlateDecode' dict entry)
if (compressedStream.length + 19 < jsObject.stream.length) {
jsObject.stream = compressedStream;
jsObject['/Filter'] = '/FlateDecode';
}
}
}
jsObject['/Length'] = jsObject.stream.length;
}
}
// format object dictionary entries
const dictItems = Object.keys(jsObject)
.filter((key) => key[0] === '/')
.map(key =>
newPdfObject(key, depth + 1) +
newPdfObject(jsObject[key], depth + 1, !!space ? ' ' : '')
)
.join('');
pdfObject += `<<${dictItems}${newline}${space.repeat(depth)}>>`;
// finish and save indirect object
if (typeof jsObject.num === 'number') {
if (typeof jsObject.stream !== 'undefined') {
if (jsObject.stream.length) {
const streamPrefix = `${pdfObject}${newline}stream\n`;
const streamSuffix = `${newline}endstream\nendobj\n`;
pdfObject = arraysToBytes([streamPrefix, jsObject.stream, streamSuffix]);
} else {
pdfObject += `${newline}stream\nendstream\nendobj\n`;
}
} else {
pdfObject += `${newline}endobj\n`;
}
// save indirect object in object cache
indirectObjects[jsObject.num] = pdfObject;
// return object reference
pdfObject = `${jsObject.num} ${jsObject.gen} R`;
}
// otherwise, return inline object
}
// add indentation or space?
const prefix =
// if nextIndent is set, indent item
nextIndent ? nextIndent :
// otherwise, check if item is first in an array, or starts with a delimiter character
// if not (if nextIndent = ''), add a space to separate it from the previous item
nextIndent === false || ['/', '[', '(', '<'].includes(pdfObject[0]) ? '' : ' ';
return prefix + pdfObject;
};
const rootRef = newPdfObject(this.pdfTree['/Root'], 0, false);
this.pdfTree['/Info'].gen = 0;
this.pdfTree['/Info'].num = this.nextNodeNum++;
const infoRef = this.pdfTree['/Info'] && Object.keys(this.pdfTree['/Info']).length ?
newPdfObject(this.pdfTree['/Info'], 0, false) : null;
const header =
`%PDF-${this.pdfVersion}\n` + // default: 1.7
`%âãÏÓ\n`;
let offset = 0;
const xref =
`xref\n` +
`0 ${indirectObjects.length}\n` +
`0000000000 65535 f \n` +
[header, ...indirectObjects]
.filter(o => o)
.map(o => (`0000000000${offset += o.length} 00000 n \n`).slice(-20))
.slice(0, -1)
.join('');
const trailer =
`trailer\n` +
`<<${newline}` +
`${space}/Root ${rootRef}${newline}` +
(infoRef ? `${space}/Info ${infoRef}${newline}` : '') +
`${space}/Size ${indirectObjects.length}${newline}` +
`>>\n` +
`startxref\n` +
`${offset}\n` +
`%%EOF\n`;
const pdfData = arraysToBytes([header, ...indirectObjects.filter(o => o), xref, trailer]);
switch (nameOrOutputFormat) {
case 'ArrayBuffer': resolve(pdfData.buffer); break;
case 'Uint8Array': resolve(pdfData); break;
default:
if (nameOrOutputFormat.slice(-4) !== '.pdf') { nameOrOutputFormat += '.pdf'; }
resolve(new File([pdfData], nameOrOutputFormat, { type: 'application/pdf' }));
}
}));
}
// utility functions from js.pdf:
arraysToBytes(arrays) {
return arraysToBytes(arrays);
}
bytesToString(bytes) {
return bytesToString(bytes);
}
}
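// Usage sketch (illustrative, based only on the public API above;
// `existingPdfBlob` is assumed to be a PDF Blob or File you already have):
//
//   const assembler = new PDFAssembler(existingPdfBlob);
//   assembler.countPages().then(n => console.log(`pages: ${n}`));
//   assembler.assemblePdf('output.pdf')
//     .then(file => { /* a File with type 'application/pdf'; save or download */ });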
commandline.py | # Natural Language Toolkit CommandLine
# understands the command line interaction
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from optparse import OptionParser
from nltk_contrib.classifier.exceptions import filenotfounderror as fnf, invaliddataerror as inv
from nltk_contrib.classifier import format
import time
D_help = "Used to specify the data format. " \
+ "Options: C45 for C4.5 format. " \
+ "Default: C45. "
l_help = "Used to specify the log file. "
ALGORITHM = 'algorithm'
FILES = 'files'
TRAINING = 'training'
TEST = 'test'
GOLD = 'gold'
DATA_FORMAT = 'data_format'
LOG_FILE = 'log_file'
OPTIONS = 'options'
C45_FORMAT = 'c45'
DATA_FORMAT_MAPPINGS = {C45_FORMAT: format.c45}
class CommandLineInterface(OptionParser):
def __init__(self, alg_choices, alg_default, a_help, f_help, t_help, T_help, g_help, o_help):
OptionParser.__init__(self)
self.add_option("-a", "--algorithm", dest=ALGORITHM, type="choice", \
choices=alg_choices, default=alg_default, help= a_help)
self.add_option("-f", "--files", dest=FILES, type="string", help=f_help)
self.add_option("-t", "--training-file", dest=TRAINING, type="string", help=t_help)
self.add_option("-T", "--test-file", dest=TEST, type="string", help=T_help)
self.add_option("-g", "--gold-file", dest=GOLD, type="string", help=g_help)
self.add_option("-D", "--data-format", dest=DATA_FORMAT, type="choice", choices=DATA_FORMAT_MAPPINGS.keys(), \
default=C45_FORMAT, help=D_help)
self.add_option("-l", "--log-file", dest=LOG_FILE, type="string", help=l_help)
self.add_option("-o", "--options", dest=OPTIONS, type="string", help=o_help)
def get_value(self, name):
return self.values.ensure_value(name, None)
def parse(self, args):
"""
method to aid testing
"""
self.parse_args(args, None)
def execute(self):
"""
Stores values from arguments which are common to all command line interfaces
"""
self.algorithm = self.get_value(ALGORITHM)
self.files = self.get_value(FILES)
self.training_path = self.get_value(TRAINING)
self.test_path = self.get_value(TEST)
self.gold_path = self.get_value(GOLD)
self.options = self.get_value(OPTIONS)
self.data_format = DATA_FORMAT_MAPPINGS[self.get_value(DATA_FORMAT)]
log_file = self.get_value(LOG_FILE)
self.log = None
if log_file is not None:
self.log = open(log_file, 'a')
print >>self.log, '-' * 40
print >>self.log, 'DateTime: ' + time.strftime('%c', time.localtime())
def run(self, args):
"""
Main method which delegates all the work
"""
self.parse(args)
self.execute()
if self.log is not None: self.log.close()
def validate_basic_arguments_are_present(self):
if self.algorithm is None or self.files is None and self.training_path is None:
self.required_arguments_not_present_error()
def validate_files_arg_is_exclusive(self):
if self.files is not None and (self.training_path is not None or self.test_path is not None or self.gold_path is not None):
self.error("Invalid arguments. The files argument cannot exist with training, test or gold arguments.")
def get_instances(self, training_path, test_path, gold_path, ignore_missing = False):
test = gold = None
training = self.data_format.training(training_path)
attributes, klass = self.data_format.metadata(training_path)
test = self.__get_instance(self.data_format.test, test_path, ignore_missing)
gold = self.__get_instance(self.data_format.gold, gold_path, ignore_missing)
return (training, attributes, klass, test, gold)
def __get_instance(self, method, path, ignore_if_missing):
if path is not None:
if ignore_if_missing:
try:
return method(path)
except fnf.FileNotFoundError:
return None
return method(path)
return None
def required_arguments_not_present_error(self):
self.error("Invalid arguments. One or more required arguments are not present.")
def write_to_file(self, suffix, training, attributes, klass, test, gold, include_classification = True):
files_written = []
files_written.append(self.data_format.write_training(training, self.training_path + suffix))
if test is not None: files_written.append(self.data_format.write_test(test, self.test_path + suffix, include_classification))
if gold is not None: files_written.append(self.data_format.write_gold(gold, self.gold_path + suffix, include_classification))
files_written.append(self.data_format.write_metadata(attributes, klass, self.training_path + suffix))
return files_written
def log_common_params(self, name):
if self.log is not None:
print >>self.log, 'Operation: ' + name
print >>self.log, '\nAlgorithm: ' + str(self.algorithm) + '\nTraining: ' + str(self.training_path) + \
'\nTest: ' + str(self.test_path) + '\nGold: ' + str(self.gold_path) + '\nOptions: ' + str(self.options)
def log_created_files(self, files_names, message):
if self.log is None:
print message
else:
print >>self.log, "NumberOfFilesCreated: " + str(len(files_names))
count = 0
for file_name in files_names:
if self.log is None:
print file_name
else:
print >>self.log, "CreatedFile" + str(count) + ": " + file_name
count += 1
def as_integers(name, com_str):
indices = []
if com_str is not None:
for element in com_str.split(','):
try:
indices.append(int(element.strip()))
except ValueError:
raise inv.InvalidDataError('Invalid Data. ' + name + ' should contain integers.')
return indices
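# Usage sketch (illustrative): as_integers('indices', '0, 2,5') returns
# [0, 2, 5]; a non-integer element such as in '0,a' raises InvalidDataError.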
field_mask.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/field_mask.proto
package field_mask // import "google.golang.org/genproto/protobuf/field_mask"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `FieldMask` represents a set of symbolic field paths, for example:
//
// paths: "f.a"
// paths: "f.b.d"
//
// Here `f` represents a field in some root message, `a` and `b`
// fields in the message found in `f`, and `d` a field found in the
// message in `f.b`.
//
// Field masks are used to specify a subset of fields that should be
// returned by a get operation or modified by an update operation.
// Field masks also have a custom JSON encoding (see below).
//
// # Field Masks in Projections
//
// When used in the context of a projection, a response message or
// sub-message is filtered by the API to only contain those fields as
// specified in the mask. For example, if the mask in the previous
// example is applied to a response message as follows:
//
// f {
// a : 22
// b {
// d : 1
// x : 2
// }
// y : 13
// }
// z: 8
//
// The result will not contain specific values for fields x,y and z
// (their value will be set to the default, and omitted in proto text
// output):
//
//
// f {
// a : 22
// b {
// d : 1
// }
// }
//
// A repeated field is not allowed except at the last position of a
// paths string.
//
// If a FieldMask object is not present in a get operation, the
// operation applies to all fields (as if a FieldMask of all fields
// had been specified).
//
// Note that a field mask does not necessarily apply to the
// top-level response message. In case of a REST get operation, the
// field mask applies directly to the response, but in case of a REST
// list operation, the mask instead applies to each individual message
// in the returned resource list. In case of a REST custom method,
// other definitions may be used. Where the mask applies will be
// clearly documented together with its declaration in the API. In
// any case, the effect on the returned resource/resources is required
// behavior for APIs.
//
// # Field Masks in Update Operations
//
// A field mask in update operations specifies which fields of the
// targeted resource are going to be updated. The API is required
// to only change the values of the fields as specified in the mask
// and leave the others untouched. If a resource is passed in to
// describe the updated values, the API ignores the values of all
// fields not covered by the mask.
//
// If a repeated field is specified for an update operation, the existing
// repeated values in the target resource will be overwritten by the new values.
// Note that a repeated field is only allowed in the last position of a `paths`
// string.
//
// If a sub-message is specified in the last position of the field mask for an
// update operation, then the existing sub-message in the target resource is
// overwritten. Given the target message:
//
// f {
// b {
// d : 1
// x : 2
// }
// c : 1
// }
//
// And an update message:
//
// f {
// b {
// d : 10
// }
// }
//
// then if the field mask is:
//
// paths: "f.b"
//
// then the result will be:
//
// f {
// b {
// d : 10
// }
// c : 1
// }
//
// However, if the update mask was:
//
// paths: "f.b.d"
//
// then the result would be:
//
// f {
// b {
// d : 10
// x : 2
// }
// c : 1
// }
//
// In order to reset a field's value to the default, the field must
// be in the mask and set to the default value in the provided resource.
// Hence, in order to reset all fields of a resource, provide a default
// instance of the resource and set all fields in the mask, or do
// not provide a mask as described below.
//
// If a field mask is not present on update, the operation applies to
// all fields (as if a field mask of all fields has been specified).
// Note that in the presence of schema evolution, this may mean that
// fields the client does not know and has therefore not filled into
// the request will be reset to their default. If this is unwanted
// behavior, a specific service may require a client to always specify
// a field mask, producing an error if not.
//
// As with get operations, the location of the resource which
// describes the updated values in the request message depends on the
// operation kind. In any case, the effect of the field mask is
// required to be honored by the API.
//
// ## Considerations for HTTP REST
//
// The HTTP kind of an update operation which uses a field mask must
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
// (PUT must only be used for full updates).
//
// # JSON Encoding of Field Masks
//
// In JSON, a field mask is encoded as a single string where paths are
// separated by a comma. Field names in each path are converted
// to/from lower-camel naming conventions.
//
// As an example, consider the following message declarations:
//
// message Profile {
// User user = 1;
// Photo photo = 2;
// }
// message User {
// string display_name = 1;
// string address = 2;
// }
//
// In proto a field mask for `Profile` may look as such:
//
// mask {
// paths: "user.display_name"
// paths: "photo"
// }
//
// In JSON, the same mask is represented as below:
//
// {
// mask: "user.displayName,photo"
// }
//
// # Field Masks and Oneof Fields
//
// Field masks treat fields in oneofs just as regular fields. Consider the
// following message:
//
// message SampleMessage {
// oneof test_oneof {
// string name = 4;
// SubMessage sub_message = 9;
// }
// }
//
// The field mask can be:
//
// mask {
// paths: "name"
// }
//
// Or:
//
// mask {
// paths: "sub_message"
// }
//
// Note that oneof type names ("test_oneof" in this case) cannot be used in
// paths.
//
// ## Field Mask Verification
//
// The implementation of any API method which has a FieldMask type field in the
// request should verify the included field paths, and return an
// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
type FieldMask struct {
// The set of field mask paths.
Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *FieldMask) Reset() { *m = FieldMask{} }
func (m *FieldMask) String() string { return proto.CompactTextString(m) }
func (*FieldMask) ProtoMessage() {}
func (*FieldMask) Descriptor() ([]byte, []int) {
return fileDescriptor_field_mask_56ec45e1ddac5c77, []int{0}
}
func (m *FieldMask) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FieldMask.Unmarshal(m, b)
}
func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
}
func (dst *FieldMask) XXX_Merge(src proto.Message) {
xxx_messageInfo_FieldMask.Merge(dst, src)
}
func (m *FieldMask) XXX_Size() int {
return xxx_messageInfo_FieldMask.Size(m)
}
func (m *FieldMask) XXX_DiscardUnknown() {
xxx_messageInfo_FieldMask.DiscardUnknown(m)
}
var xxx_messageInfo_FieldMask proto.InternalMessageInfo
func (m *FieldMask) GetPaths() []string {
if m != nil {
return m.Paths
}
return nil
}
func init() {
proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
}
func init() {
proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_56ec45e1ddac5c77)
}
var fileDescriptor_field_mask_56ec45e1ddac5c77 = []byte{
// 171 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x9d, 0x8c,
0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01,
0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa,
0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0x2e, 0x62, 0x62, 0x76, 0x0f, 0x70,
0x5a, 0xc5, 0x24, 0xe7, 0x0e, 0x31, 0x21, 0x00, 0xaa, 0x5a, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b,
0x2f, 0xbf, 0x3c, 0x2f, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x6c, 0x8c, 0x31, 0x20, 0x00,
0x00, 0xff, 0xff, 0x5a, 0xdb, 0x3a, 0xc0, 0xea, 0x00, 0x00, 0x00,
}
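// Usage sketch (illustrative, not part of the generated code): building a
// mask for a partial update and reading it back through the generated
// accessor.
//
//	mask := &FieldMask{Paths: []string{"user.display_name", "photo"}}
//	for _, path := range mask.GetPaths() {
//		fmt.Println(path) // prints "user.display_name", then "photo"
//	}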
test_views.py | import re
import pytest
from django.contrib.auth.models import User
from django.test import override_settings
from django.urls import reverse
from django_dynamic_fixture import get
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.search.tests.utils import (
DATA_TYPES_VALUES,
get_search_query_from_project_file,
)
@pytest.mark.django_db
@pytest.mark.search
class TestProjectSearch:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse('search')
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context['results']
facets = resp.context['facets']
return results, facets
def test_search_by_project_name(self, client, project, all_projects):
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': project.name },
)
assert len(results) == 1
assert project.name == results[0]['name']
for proj in all_projects[1:]:
assert proj.name != results[0]['name']
def test_search_project_have_correct_language_facets(self, client, project):
"""Test that searching project should have correct language facets in the results"""
# Create a project in bn and add it as a translation
get(Project, language='bn', name=project.name)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': project.name },
)
lang_facets = facets['language']
lang_facets_str = [facet[0] for facet in lang_facets]
# There should be 2 languages
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(['en', 'bn'])
for facet in lang_facets:
assert facet[2] == False # because none of the facets are applied
def test_search_project_filter_language(self, client, project):
"""Test that searching project filtered according to language."""
# Create a project in bn and add it as a translation
translate = get(Project, language='bn', name=project.name)
search_params = { 'q': project.name, 'language': 'bn' }
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
# There should be only 1 result
assert len(results) == 1
lang_facets = facets['language']
lang_facets_str = [facet[0] for facet in lang_facets]
# There should be 2 languages because both `en` and `bn` should appear there
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(['en', 'bn'])
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug='docs')
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
# Search for all projects.
'q': ' '.join(project.slug for project in all_projects),
'type': 'project',
},
)
assert len(results) > 0
other_projects = [
project.slug
for project in all_projects
if project.slug != 'docs'
]
for result in results:
assert result['name'] == 'docs'
assert result['name'] not in other_projects
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
# Search for all projects.
'q': ' '.join(project.slug for project in all_projects),
'type': 'project',
},
)
assert len(results) == 0
@pytest.mark.django_db
@pytest.mark.search
@pytest.mark.usefixtures("all_projects")
class TestPageSearch:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse('search')
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context['results']
facets = resp.context['facets']
return results, facets
def _get_highlight(self, result, field, type=None):
# if query is from page title,
# highlighted title is present in 'result.meta.highlight.title'
if not type and field == 'title':
highlight = result['highlights']['title']
# if result is not from page title,
# then results and highlighted results are present inside 'blocks'
else:
blocks = result['blocks']
assert len(blocks) >= 1
# checking first inner_hit
inner_hit_0 = blocks[0]
assert inner_hit_0['type'] == type
highlight = inner_hit_0['highlights'][field]
return highlight
def _get_highlighted_words(self, string):
highlighted_words = re.findall(
'<span>(.*?)</span>',
string
)
return highlighted_words
@pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
@pytest.mark.parametrize('page_num', [0, 1])
def test_file_search(self, client, project, data_type, page_num):
data_type = data_type.split('.')
type, field = None, None
if len(data_type) < 2:
field = data_type[0]
else:
type, field = data_type
query = get_search_query_from_project_file(
project_slug=project.slug,
page_num=page_num,
type=type,
field=field,
)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' }
)
assert len(results) >= 1
# checking first result
result_0 = results[0]
highlight = self._get_highlight(result_0, field, type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
# Make it lower because our search is case insensitive
assert word.lower() in query.lower()
def test_file_search_have_correct_role_name_facets(self, client):
"""Test that searching files should result all role_names."""
# searching for 'celery' to test that
# correct role_names are displayed
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': 'celery', 'type': 'file' }
)
assert len(results) >= 1
role_name_facets = facets['role_name']
role_name_facets_str = [facet[0] for facet in role_name_facets]
expected_role_names = ['py:class', 'py:function', 'py:method']
assert sorted(expected_role_names) == sorted(role_name_facets_str)
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
def test_file_search_filter_role_name(self, client):
"""Test that searching files filtered according to role_names."""
search_params = { 'q': 'celery', 'type': 'file' }
# searching without the filter
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params
)
assert len(results) >= 2 # there are > 1 results without the filter
role_name_facets = facets['role_name']
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
confval_facet = 'py:class'
# checking if 'py:class' facet is present in results
assert confval_facet in [facet[0] for facet in role_name_facets]
# filtering with role_name=py:class
search_params['role_name'] = confval_facet
new_results, new_facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params
)
new_role_names_facets = new_facets['role_name']
# there is only one result with role_name='py:class'
# in `signals` page
assert len(new_results) == 1
first_result = new_results[0] # first result
blocks = first_result['blocks'] # blocks of first results
assert len(blocks) >= 1
inner_hit_0 = blocks[0] # first inner_hit
assert inner_hit_0['type'] == 'domain'
assert inner_hit_0['role'] == confval_facet
for facet in new_role_names_facets:
if facet[0] == confval_facet:
assert facet[2] == True # because the 'py:class' filter is active
else:
assert facet[2] == False
@pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
@pytest.mark.parametrize('case', ['upper', 'lower', 'title'])
def test_file_search_case_insensitive(self, client, project, case, data_type):
"""
Check File search is case insensitive.
It tests with uppercase, lowercase and camelcase.
"""
type, field = None, None
data_type = data_type.split('.')
if len(data_type) < 2:
field = data_type[0]
else:
type, field = data_type
query_text = get_search_query_from_project_file(
project_slug=project.slug,
type=type,
field=field,
)
cased_query = getattr(query_text, case)
query = cased_query()
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' }
)
assert len(results) >= 1
first_result = results[0]
highlight = self._get_highlight(first_result, field, type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_exact_match(self, client, project):
"""
Check quoted query match exact phrase.
Making a query with quoted text like ``"foo bar"`` should match exactly
``foo bar`` phrase.
"""
# `Sphinx` word is present both in `kuma` and `docs` files
# But the phrase `Sphinx uses` is present only in `kuma` docs.
# So search with this phrase to check
query = r'"Sphinx uses"'
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' })
# there must be only 1 result
# because the phrase is present in
# only one project
assert len(results) == 1
assert results[0]['project'] == 'kuma'
assert results[0]['domain'] == 'http://readthedocs.org'
assert results[0]['path'] == '/docs/kuma/en/latest/documentation.html'
blocks = results[0]['blocks']
assert len(blocks) == 1
assert blocks[0]['type'] == 'section'
highlight = self._get_highlight(results[0], 'content', 'section')
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_have_correct_project_facets(self, client, all_projects):
"""Test that file search have correct project facets in results"""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase
query = 'environment'
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' },
)
        # There should be 2 search results
assert len(results) == 2
project_facets = facets['project']
project_facets_str = [facet[0] for facet in project_facets]
assert len(project_facets_str) == 2
        # kuma and docs should be there
assert sorted(project_facets_str) == sorted(['kuma', 'docs'])
def test_file_search_filter_by_project(self, client):
"""Test that search result are filtered according to project."""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase but filter through `kuma` project
search_params = {
'q': 'environment',
'type': 'file',
'project': 'kuma'
}
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
project_facets = facets['project']
resulted_project_facets = [facet[0] for facet in project_facets]
# There should be 1 search result as we have filtered
assert len(results) == 1
        # only kuma should be there
assert 'kuma' == results[0]['project']
# But there should be 2 projects in the project facets
# as the query is present in both projects
assert sorted(resulted_project_facets) == sorted(['kuma', 'docs'])
@pytest.mark.xfail(reason='Versions are not showing correctly! Fixme while rewrite!')
def test_file_search_show_versions(self, client, all_projects, es_index, settings):
# override the settings to index all versions
settings.INDEX_ONLY_LATEST = False
project = all_projects[0]
# Create some versions of the project
versions = [get(Version, project=project) for _ in range(3)]
query = get_search_query_from_project_file(project_slug=project.slug)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' },
)
# Results can be from other projects also
assert len(results) >= 1
version_facets = facets['version']
version_facets_str = [facet[0] for facet in version_facets]
        # There should be 4 versions in total:
        # the latest, plus the other 3 that we created above
assert len(version_facets) == 4
project_versions = [v.slug for v in versions] + [LATEST]
assert sorted(project_versions) == sorted(version_facets_str)
def test_file_search_subprojects(self, client, all_projects, es_index):
"""
TODO: File search should return results from subprojects also.
This is currently disabled because the UX around it is weird.
You filter by a project, and get results for multiple.
"""
project = all_projects[0]
subproject = all_projects[1]
# Add another project as subproject of the project
project.add_subproject(subproject)
# Now search with subproject content but explicitly filter by the parent project
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {
'q': query,
'type': 'file',
'project': project.slug,
}
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
assert len(results) == 0
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug='docs')
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
# Search for the most common english word.
search_params={'q': 'the', 'type': 'file'},
)
assert len(results) > 0
other_projects = [
project.slug
for project in all_projects
if project.slug != 'docs'
]
for result in results:
assert result['project'] == 'docs'
assert result['project'] not in other_projects
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
# Search for the most common english word.
search_params={'q': 'the', 'type': 'file'},
)
        assert len(results) == 0
lib.rs | extern crate xmachine;
use xmachine::{Machine, Value};
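/// Pop a count argument, then collect that many values from the stack into a
/// single List value.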
pub fn xasm_list(xasm: &mut Machine) {
let mut result = vec![];
let val = xasm.get_arg();
let count = i32::from(val.clone());
for _ in 0..count {
result.push(xasm.pop());
}
xasm.return_value(Value::List(result));
}
pub fn xasm_range(xasm: &mut Machine) {
let mut result = vec![];
let lower = i32::from(xasm.get_arg());
let upper = i32::from(xasm.get_arg());
for i in lower..upper {
result.push(Value::number(i));
}
xasm.return_value(Value::List(result));
}
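/// Keep only the items of a list for which the popped function returns a
/// truthy value when called with that item.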
pub fn xasm_filter(xasm: &mut Machine) {
let mut result = vec![];
let list = xasm.get_arg();
let function = xasm.pop();
    if let Value::List(l) = list {
for item in l {
xasm.push(item.clone());
xasm.push(function.clone());
xasm.call();
if bool::from(xasm.get_arg()) {
result.push(item);
}
}
}
xasm.return_value(Value::List(result));
}
pub fn xasm_reverse(xasm: &mut Machine) {
let mut list = xasm.get_arg();
if let Value::List(mut l) = list {
l.reverse();
list = Value::List(l);
}
xasm.return_value(list);
}
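/// Fold a list into one value: the initial accumulator is popped from the
/// stack, then the function is called with (accumulator, item) for each item.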
pub fn xasm_reduce(xasm: &mut Machine) {
let list = xasm.get_arg();
let function = xasm.pop();
let mut accumulator = xasm.pop();
    if let Value::List(l) = list {
for item in l {
xasm.push(accumulator.clone());
xasm.push(item.clone());
xasm.push(function.clone());
xasm.call();
accumulator = xasm.pop();
}
}
xasm.push(accumulator);
}
pub fn xasm_map(xasm: &mut Machine) {
let mut result = vec![];
let list = xasm.get_arg();
let function = xasm.pop();
    if let Value::List(l) = list {
for item in l {
xasm.push(item.clone());
xasm.push(function.clone());
xasm.call();
result.push(xasm.pop());
}
}
xasm.return_value(Value::List(result));
}
pub fn xasm_push(xasm: &mut Machine) {
let list_value = xasm.pop();
if let Value::List(mut l) = (*list_value).clone() {
l.push(xasm.pop());
xasm.return_value(Value::List(l));
}
}
pub fn xasm_pop(xasm: &mut Machine) {
let value = xasm.pop();
if let Value::List(mut l) = (*value).clone() {
let last_value = l[l.len() - 1].clone();
l.pop();
xasm.push(last_value.copy());
xasm.return_value(Value::List(l));
}
}
pub fn xasm_len(xasm: &mut Machine) {
let value = xasm.pop();
xasm.push(
Value::number(
if let Value::List(l) = (*value).clone() {
l.len() as f64
} else if let Value::String(s) = (*value).clone() {
s.len() as f64
} else {
0.0
}
)
);
}
pub fn xasm_format(xasm: &mut Machine) {
let s = Value::string(format!("{}", xasm.pop()));
xasm.push(s);
}
pub fn xasm_debug(xasm: &mut Machine) {
let machine = Value::string(format!("{}", xasm));
xasm.push(machine);
}
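/// Instantiate a class value: call the class, then invoke its `new` method on
/// the result.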
pub fn xasm_new(xasm: &mut Machine) {
let class = xasm.pop();
class.call(xasm);
xasm.push(Value::string("new"));
xasm.method_call();
}
pub fn xasm_add(xasm: &mut Machine) {
let first = xasm.get_arg();
let second = xasm.get_arg();
xasm.return_value(first + second);
}
pub fn xasm_sub(xasm: &mut Machine) {
let first = xasm.get_arg();
let second = xasm.get_arg();
xasm.return_value(first - second);
}
pub fn xasm_mul(xasm: &mut Machine) {
let first = xasm.get_arg();
let second = xasm.get_arg();
xasm.return_value(first * second);
}
pub fn xasm_div(xasm: &mut Machine) {
let first = xasm.get_arg();
let second = xasm.get_arg();
xasm.return_value(first / second);
}
pub fn xasm_rem(xasm: &mut Machine) {
let first = xasm.get_arg();
let second = xasm.get_arg();
xasm.return_value(first % second);
}
pub fn xasm_not(xasm: &mut Machine) {
let value = xasm.get_arg();
xasm.return_value(!value);
}
pub fn xasm_eq(xasm: &mut Machine) {
let first = xasm.get_arg();
let second = xasm.get_arg();
xasm.return_value(Value::from(first == second));
}
fs.ts | import fs from 'fs'
import { promisify } from 'util'
import { I18nError } from './error'
export const readFileAsync = promisify(fs.readFile)
export const existsAsync = promisify(fs.exists)
export const readdirAsync = promisify(fs.readdir)
export const lstatAsync = promisify(fs.lstat)
/**
 * Load a JSON file from the specified path
*
* @param path
*/
export async function readJSONFile(path: string): Promise<object> {
const file = await readFileAsync(path, { encoding: 'utf-8' })
let parsedContent = null
try {
parsedContent = JSON.parse(file)
} catch (err) {
throw I18nError(`invalid locale file ${path}, ensure locale file is in valid JSON format`)
}
return parsedContent
}
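// Example usage (hypothetical locale path):
//   const messages = await readJSONFile('./locales/en.json')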
co_grouped_rdd.rs | use std::any::Any;
use std::collections::HashMap;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker::PhantomData;
use std::sync::Arc;
use crate::error::*;
use crate::rdd::*;
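// Each parent of a co-grouped split is reached either narrowly (the parent
// shares our partitioning, so its partition is read directly) or through a
// shuffle identified by `shuffle_id`.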
#[derive(Clone, Serialize, Deserialize)]
enum CoGroupSplitDep {
NarrowCoGroupSplitDep {
#[serde(with = "serde_traitobject")]
rdd: Arc<dyn RddBase>,
#[serde(with = "serde_traitobject")]
split: Box<dyn Split>,
},
ShuffleCoGroupSplitDep {
shuffle_id: usize,
},
}
#[derive(Clone, Serialize, Deserialize)]
struct CoGroupSplit {
index: usize,
deps: Vec<CoGroupSplitDep>,
}
impl CoGroupSplit {
fn new(index: usize, deps: Vec<CoGroupSplitDep>) -> Self {
CoGroupSplit { index, deps }
}
}
impl Hasher for CoGroupSplit {
fn finish(&self) -> u64 {
self.index as u64
}
fn write(&mut self, bytes: &[u8]) {
for i in bytes {
self.write_u8(*i);
}
}
}
impl Split for CoGroupSplit {
    fn get_index(&self) -> usize {
        self.index
    }
}
#[derive(Clone, Serialize, Deserialize)]
pub struct CoGroupedRdd<K: Data> {
pub(crate) vals: Arc<RddVals>,
pub(crate) rdds: Vec<serde_traitobject::Arc<dyn RddBase>>,
#[serde(with = "serde_traitobject")]
pub(crate) part: Box<dyn Partitioner>,
_marker: PhantomData<K>,
}
impl<K: Data + Eq + Hash> CoGroupedRdd<K> {
pub fn new(
rdds: Vec<serde_traitobject::Arc<dyn RddBase>>,
part: Box<dyn Partitioner>,
) -> Self {
let context = rdds[0].get_context();
let mut vals = RddVals::new(context.clone());
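        // Aggregator that buckets every value for a key into a Vec: a first
        // value creates the bucket, later values are appended, and buckets
        // from different partitions are concatenated.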
let create_combiner = Box::new(Fn!(|v: Box<dyn AnyData>| vec![v]));
fn merge_value(
mut buf: Vec<Box<dyn AnyData>>,
v: Box<dyn AnyData>,
) -> Vec<Box<dyn AnyData>> {
buf.push(v);
buf
}
let merge_value = Box::new(Fn!(|(buf, v)| merge_value(buf, v)));
fn merge_combiners(
mut b1: Vec<Box<dyn AnyData>>,
mut b2: Vec<Box<dyn AnyData>>,
) -> Vec<Box<dyn AnyData>> {
b1.append(&mut b2);
b1
}
let merge_combiners = Box::new(Fn!(|(b1, b2)| merge_combiners(b1, b2)));
trait AnyDataWithEq: AnyData + PartialEq {}
impl<T: AnyData + PartialEq> AnyDataWithEq for T {}
let aggr = Arc::new(
Aggregator::<K, Box<dyn AnyData>, Vec<Box<dyn AnyData>>>::new(
create_combiner,
merge_value,
merge_combiners,
),
);
let mut deps = Vec::new();
for (index, rdd) in rdds.iter().enumerate() {
let part = part.clone();
if rdd
.partitioner()
.map_or(false, |p| p.equals(&part as &dyn Any))
{
let rdd_base = rdd.clone().into();
deps.push(Dependency::NarrowDependency(
Arc::new(OneToOneDependency::new(rdd_base)) as Arc<dyn NarrowDependencyTrait>,
))
} else {
let rdd_base = rdd.clone().into();
info!("creating aggregator inside cogrouprdd");
deps.push(Dependency::ShuffleDependency(
Arc::new(ShuffleDependency::new(
context.new_shuffle_id(),
true,
rdd_base,
aggr.clone(),
part,
)) as Arc<dyn ShuffleDependencyTrait>,
))
}
}
vals.dependencies = deps;
let vals = Arc::new(vals);
CoGroupedRdd {
vals,
rdds,
part,
_marker: PhantomData,
}
}
}
impl<K: Data + Eq + Hash> RddBase for CoGroupedRdd<K> {
fn get_rdd_id(&self) -> usize {
self.vals.id
}
fn get_context(&self) -> Arc<Context> {
self.vals.context.clone()
}
fn get_dependencies(&self) -> Vec<Dependency> {
self.vals.dependencies.clone()
}
fn splits(&self) -> Vec<Box<dyn Split>> {
let first_rdd = self.rdds[0].clone();
let mut splits = Vec::new();
for i in 0..self.part.get_num_of_partitions() {
splits.push(Box::new(CoGroupSplit::new(
i,
self.rdds
.iter()
.enumerate()
.map(|(i, r)| match &self.get_dependencies()[i] {
Dependency::ShuffleDependency(s) => {
CoGroupSplitDep::ShuffleCoGroupSplitDep {
shuffle_id: s.get_shuffle_id(),
}
}
_ => CoGroupSplitDep::NarrowCoGroupSplitDep {
rdd: r.clone().into(),
split: r.splits()[i].clone(),
},
})
.collect(),
)) as Box<dyn Split>)
}
splits
}
fn number_of_splits(&self) -> usize {
self.part.get_num_of_partitions()
}
fn partitioner(&self) -> Option<Box<dyn Partitioner>> {
let part = self.part.clone() as Box<dyn Partitioner>;
Some(part)
}
fn iterator_any(
&self,
split: Box<dyn Split>,
) -> Result<Box<dyn Iterator<Item = Box<dyn AnyData>>>> {
Ok(Box::new(self.iterator(split)?.map(|(k, v)| {
Box::new((k, Box::new(v) as Box<dyn AnyData>)) as Box<dyn AnyData>
})))
}
}
impl<K: Data + Eq + Hash> Rdd for CoGroupedRdd<K> {
type Item = (K, Vec<Vec<Box<dyn AnyData>>>);
fn get_rdd(&self) -> Arc<dyn Rdd<Item = Self::Item>> {
Arc::new(self.clone())
}
fn get_rdd_base(&self) -> Arc<dyn RddBase> {
Arc::new(self.clone()) as Arc<dyn RddBase>
}
#[allow(clippy::type_complexity)]
fn compute(&self, split: Box<dyn Split>) -> Result<Box<dyn Iterator<Item = Self::Item>>> {
if let Ok(split) = split.downcast::<CoGroupSplit>() {
let mut agg: HashMap<K, Vec<Vec<Box<dyn AnyData>>>> = HashMap::new();
for (dep_num, dep) in split.clone().deps.into_iter().enumerate() {
match dep {
CoGroupSplitDep::NarrowCoGroupSplitDep { rdd, split } => {
info!("inside iterator cogrouprdd narrow dep");
for i in rdd.iterator_any(split)? {
info!(
"inside iterator cogrouprdd narrow dep iterator any {:?}",
i
);
let b = i
.into_any()
.downcast::<(Box<dyn AnyData>, Box<dyn AnyData>)>()
.unwrap();
let (k, v) = *b;
let k = *(k.into_any().downcast::<K>().unwrap());
agg.entry(k)
.or_insert_with(|| vec![Vec::new(); self.rdds.len()])[dep_num]
.push(v)
}
}
CoGroupSplitDep::ShuffleCoGroupSplitDep { shuffle_id } => {
info!("inside iterator cogrouprdd shuffle dep agg {:?}", agg);
let merge_pair = |(k, c): (K, Vec<Box<dyn AnyData>>)| {
let temp = agg
.entry(k)
.or_insert_with(|| vec![Vec::new(); self.rdds.len()]);
for v in c {
temp[dep_num].push(v);
}
};
let fetcher = ShuffleFetcher;
fetcher.fetch(
self.vals.context.clone(),
shuffle_id,
split.get_index(),
merge_pair,
);
}
}
}
Ok(Box::new(agg.into_iter().map(|(k, v)| (k, v))))
} else {
panic!("Got split object from different concrete type other than CoGroupSplit")
}
}
}
instrument.py | import logging
from typing import Optional, Any
from opentrons import types
from opentrons.calibration_storage import get
from opentrons.calibration_storage.types import TipLengthCalNotFound
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.protocol_api.labware import Labware, Well
from opentrons.protocols.api_support.types import APIVersion
from opentrons_shared_data.protocol.dev_types import LiquidHandlingCommand, \
BlowoutLocation
def validate_blowout_location(
api_version: APIVersion,
liquid_handling_command: LiquidHandlingCommand,
blowout_location: Optional[Any]) -> None:
"""Validate the blowout location."""
if blowout_location and api_version < APIVersion(2, 8):
raise ValueError(
'Cannot specify blowout location when using api' +
' version below 2.8, current version is {api_version}'
.format(api_version=api_version))
elif liquid_handling_command == 'consolidate' \
and blowout_location == 'source well':
raise ValueError(
"blowout location for consolidate cannot be source well")
elif liquid_handling_command == 'distribute' \
and blowout_location == 'destination well':
raise ValueError(
"blowout location for distribute cannot be destination well")
elif liquid_handling_command == 'transfer' and \
blowout_location and \
blowout_location not in \
[location.value for location in BlowoutLocation]:
raise ValueError(
"blowout location should be either 'source well', " +
" 'destination well', or 'trash'" +
f" but it is {blowout_location}")
def tip_length_for(pipette: PipetteDict, tiprack: Labware) -> float:
""" Get the tip length, including overlap, for a tip from this rack """
    def _build_length_from_overlap() -> float:
tip_overlap = pipette['tip_overlap'].get(
tiprack.uri,
pipette['tip_overlap']['default'])
tip_length = tiprack.tip_length
return tip_length - tip_overlap
try:
return get.load_tip_length_calibration(
pipette['pipette_id'],
tiprack._implementation.get_definition()
).tip_length
except TipLengthCalNotFound:
return _build_length_from_overlap()
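# Tiprack volumes (in uL) that each pipette model prefix is expected to use.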
VALID_PIP_TIPRACK_VOL = {
'p10': [10, 20],
'p20': [10, 20],
'p50': [200, 300],
'p300': [200, 300],
'p1000': [1000]
}
def validate_tiprack(
instrument_name: str,
tiprack: Labware,
log: logging.Logger) -> None:
"""Validate a tiprack logging a warning message."""
# TODO AA 2020-06-24 - we should instead add the acceptable Opentrons
# tipracks to the pipette as a refactor
if tiprack._implementation.get_definition()['namespace'] \
== 'opentrons':
tiprack_vol = tiprack.wells()[0].max_volume
valid_vols = VALID_PIP_TIPRACK_VOL[instrument_name.split('_')[0]]
if tiprack_vol not in valid_vols:
log.warning(
f'The pipette {instrument_name} and its tiprack '
f'{tiprack.load_name} in slot {tiprack.parent} appear to '
'be mismatched. Please check your protocol before running '
'on the robot.')
def determine_drop_target(
api_version: APIVersion,
location: Well,
return_height: float,
        version_breakpoint: Optional[APIVersion] = None) -> types.Location:
"""Determine the drop target based on well and api version."""
version_breakpoint = version_breakpoint or APIVersion(2, 2)
if api_version < version_breakpoint:
bot = location.bottom()
return types.Location(
point=bot.point._replace(z=bot.point.z + 10),
labware=location)
else:
tr = location.parent
assert tr.is_tiprack
z_height = return_height * tr.tip_length
return location.top(-z_height)
def validate_can_aspirate(location: types.Location) -> None:
""" Can one aspirate on the given `location` or not? This method is
pretty basic and will probably remain so (?) as the future holds neat
ambitions for how validation is implemented. And as robots become more
intelligent more rigorous testing will be possible
Args:
location: target for aspiration
Raises:
RuntimeError:
"""
if _is_tiprack(location):
raise RuntimeError("Cannot aspirate a tiprack")
def validate_can_dispense(location: types.Location) -> None:
""" Can one dispense to the given `location` or not? This method is
pretty basic and will probably remain so (?) as the future holds neat
ambitions for how validation is implemented. And as robots become more
intelligent more rigorous testing will be possible
Args:
location: target for dispense
Raises:
RuntimeError:
"""
if _is_tiprack(location):
raise RuntimeError("Cannot dispense to a tiprack")
def _is_tiprack(location: types.Location) -> bool:
labware = location.labware.as_labware()
    return labware.parent and labware.parent.is_tiprack
serializers.py | from rest_framework import serializers
from wallet.models import UserWallet, PaymentMethod, DriverWallet
class UserWalletSerializer(serializers.ModelSerializer):
class Meta:
model = UserWallet
fields = "__all__"
class DriverWalletSerializer(serializers.ModelSerializer):
    class Meta:
        model = DriverWallet
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__" | class Meta: |
main.rs | use zero2prod::configuration::get_configuration;
use zero2prod::startup::Application;
use zero2prod::telemetry::{get_subscriber, init_subscriber};
// Entry point: set up tracing, load configuration, and run the app until stopped.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let subscriber = get_subscriber("zero2prod".into(), "info".into(), std::io::stdout);
init_subscriber(subscriber);
let configuration = get_configuration().expect("Failed to read configuration.");
let application = Application::build(configuration).await?;
application.run_until_stopped().await?;
Ok(())
} |
|
index.js | "use strict";
var path = require("path");
var process = require("process");
var childProcess = require("child_process");
var chalk_1 = require("chalk");
var fs = require("fs");
var micromatch = require("micromatch");
var os = require("os");
var isString = require("lodash/isString");
var isFunction = require("lodash/isFunction");
var CancellationToken_1 = require("./CancellationToken");
var NormalizedMessage_1 = require("./NormalizedMessage");
var defaultFormatter_1 = require("./formatter/defaultFormatter");
var codeframeFormatter_1 = require("./formatter/codeframeFormatter");
var tapable_1 = require("tapable");
var checkerPluginName = 'fork-ts-checker-webpack-plugin';
var customHooks = {
forkTsCheckerServiceBeforeStart: 'fork-ts-checker-service-before-start',
forkTsCheckerCancel: 'fork-ts-checker-cancel',
forkTsCheckerServiceStartError: 'fork-ts-checker-service-start-error',
forkTsCheckerWaiting: 'fork-ts-checker-waiting',
forkTsCheckerServiceStart: 'fork-ts-checker-service-start',
forkTsCheckerReceive: 'fork-ts-checker-receive',
forkTsCheckerServiceOutOfMemory: 'fork-ts-checker-service-out-of-memory',
forkTsCheckerEmit: 'fork-ts-checker-emit',
forkTsCheckerDone: 'fork-ts-checker-done'
};
/**
* ForkTsCheckerWebpackPlugin
 * Runs the TypeScript type checker and linter (tslint) in a separate process.
 * This speeds up the build a lot.
*
* Options description in README.md
*/
var ForkTsCheckerWebpackPlugin = /** @class */ (function () {
    function ForkTsCheckerWebpackPlugin(options) {
options = options || {};
this.options = Object.assign({}, options);
this.tsconfig = options.tsconfig || './tsconfig.json';
this.compilerOptions =
typeof options.compilerOptions === 'object'
? options.compilerOptions
: {};
this.tslint = options.tslint
? options.tslint === true
? './tslint.json'
: options.tslint
: undefined;
this.tslintAutoFix = options.tslintAutoFix || false;
this.watch = isString(options.watch)
? [options.watch]
: options.watch || [];
this.ignoreDiagnostics = options.ignoreDiagnostics || [];
this.ignoreLints = options.ignoreLints || [];
this.reportFiles = options.reportFiles || [];
this.logger = options.logger || console;
this.silent = options.silent === true; // default false
this.async = options.async !== false; // default true
this.checkSyntacticErrors = options.checkSyntacticErrors === true; // default false
this.workersNumber = options.workers || ForkTsCheckerWebpackPlugin.ONE_CPU;
this.memoryLimit =
options.memoryLimit || ForkTsCheckerWebpackPlugin.DEFAULT_MEMORY_LIMIT;
this.useColors = options.colors !== false; // default true
this.colors = new chalk_1.default.constructor({ enabled: this.useColors });
this.formatter =
options.formatter && isFunction(options.formatter)
? options.formatter
: ForkTsCheckerWebpackPlugin.createFormatter(options.formatter || 'default', options.formatterOptions || {});
this.tsconfigPath = undefined;
this.tslintPath = undefined;
this.watchPaths = [];
this.compiler = undefined;
this.started = undefined;
this.elapsed = undefined;
this.cancellationToken = undefined;
this.isWatching = false;
this.checkDone = false;
this.compilationDone = false;
this.diagnostics = [];
this.lints = [];
this.emitCallback = this.createNoopEmitCallback();
this.doneCallback = this.createDoneCallback();
this.typescriptVersion = require('typescript').version;
this.tslintVersion = this.tslint
? require('tslint').Linter.VERSION
: undefined;
this.vue = options.vue === true; // default false
}
ForkTsCheckerWebpackPlugin.createFormatter = function (type, options) {
switch (type) {
case 'default':
return defaultFormatter_1.createDefaultFormatter();
case 'codeframe':
return codeframeFormatter_1.createCodeframeFormatter(options);
default:
throw new Error('Unknown "' + type + '" formatter. Available are: default, codeframe.');
}
};
ForkTsCheckerWebpackPlugin.prototype.apply = function (compiler) {
this.compiler = compiler;
this.tsconfigPath = this.computeContextPath(this.tsconfig);
this.tslintPath = this.tslint
? this.computeContextPath(this.tslint)
: null;
this.watchPaths = this.watch.map(this.computeContextPath.bind(this));
// validate config
var tsconfigOk = fs.existsSync(this.tsconfigPath);
var tslintOk = !this.tslintPath || fs.existsSync(this.tslintPath);
// validate logger
if (this.logger) {
if (!this.logger.error || !this.logger.warn || !this.logger.info) {
throw new Error("Invalid logger object - doesn't provide `error`, `warn` or `info` method.");
}
}
if (tsconfigOk && tslintOk) {
if ('hooks' in compiler) {
this.registerCustomHooks();
}
this.pluginStart();
this.pluginStop();
this.pluginCompile();
this.pluginEmit();
this.pluginDone();
}
else {
if (!tsconfigOk) {
throw new Error('Cannot find "' +
this.tsconfigPath +
'" file. Please check webpack and ForkTsCheckerWebpackPlugin configuration. \n' +
'Possible errors: \n' +
' - wrong `context` directory in webpack configuration' +
' (if `tsconfig` is not set or is a relative path in fork plugin configuration)\n' +
' - wrong `tsconfig` path in fork plugin configuration' +
' (should be a relative or absolute path)');
}
if (!tslintOk) {
throw new Error('Cannot find "' +
this.tslintPath +
'" file. Please check webpack and ForkTsCheckerWebpackPlugin configuration. \n' +
'Possible errors: \n' +
' - wrong `context` directory in webpack configuration' +
' (if `tslint` is not set or is a relative path in fork plugin configuration)\n' +
' - wrong `tslint` path in fork plugin configuration' +
' (should be a relative or absolute path)\n' +
' - `tslint` path is not set to false in fork plugin configuration' +
' (if you want to disable tslint support)');
}
}
};
ForkTsCheckerWebpackPlugin.prototype.computeContextPath = function (filePath) {
return path.isAbsolute(filePath)
? filePath
: path.resolve(this.compiler.options.context, filePath);
};
ForkTsCheckerWebpackPlugin.prototype.pluginStart = function () {
var _this = this;
var run = function (_compiler, callback) {
_this.isWatching = false;
callback();
};
var watchRun = function (_compiler, callback) {
_this.isWatching = true;
callback();
};
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.run.tapAsync(checkerPluginName, run);
this.compiler.hooks.watchRun.tapAsync(checkerPluginName, watchRun);
}
else {
// webpack 2 / 3
this.compiler.plugin('run', run);
this.compiler.plugin('watch-run', watchRun);
}
};
ForkTsCheckerWebpackPlugin.prototype.pluginStop = function () {
var _this = this;
var watchClose = function () {
_this.killService();
};
var done = function (_stats) {
if (!_this.isWatching) {
_this.killService();
}
};
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.watchClose.tap(checkerPluginName, watchClose);
this.compiler.hooks.done.tap(checkerPluginName, done);
}
else {
// webpack 2 / 3
this.compiler.plugin('watch-close', watchClose);
this.compiler.plugin('done', done);
}
process.on('exit', function () {
_this.killService();
});
};
ForkTsCheckerWebpackPlugin.prototype.registerCustomHooks = function () {
if (this.compiler.hooks.forkTsCheckerServiceBeforeStart ||
this.compiler.hooks.forkTsCheckerCancel ||
this.compiler.hooks.forkTsCheckerServiceStartError ||
this.compiler.hooks.forkTsCheckerWaiting ||
this.compiler.hooks.forkTsCheckerServiceStart ||
this.compiler.hooks.forkTsCheckerReceive ||
this.compiler.hooks.forkTsCheckerServiceOutOfMemory ||
this.compiler.hooks.forkTsCheckerDone ||
this.compiler.hooks.forkTsCheckerEmit) {
throw new Error('fork-ts-checker-webpack-plugin hooks are already in use');
}
this.compiler.hooks.forkTsCheckerServiceBeforeStart = new tapable_1.AsyncSeriesHook([]);
this.compiler.hooks.forkTsCheckerCancel = new tapable_1.SyncHook([
'cancellationToken'
]);
this.compiler.hooks.forkTsCheckerServiceStartError = new tapable_1.SyncHook([
'error'
]);
this.compiler.hooks.forkTsCheckerWaiting = new tapable_1.SyncHook(['hasTsLint']);
this.compiler.hooks.forkTsCheckerServiceStart = new tapable_1.SyncHook([
'tsconfigPath',
'tslintPath',
'watchPaths',
'workersNumber',
'memoryLimit'
]);
this.compiler.hooks.forkTsCheckerReceive = new tapable_1.SyncHook([
'diagnostics',
'lints'
]);
this.compiler.hooks.forkTsCheckerServiceOutOfMemory = new tapable_1.SyncHook([]);
this.compiler.hooks.forkTsCheckerEmit = new tapable_1.SyncHook([
'diagnostics',
'lints',
'elapsed'
]);
this.compiler.hooks.forkTsCheckerDone = new tapable_1.SyncHook([
'diagnostics',
'lints',
'elapsed'
]);
// for backwards compatibility
this.compiler._pluginCompat.tap(checkerPluginName, function (options) {
switch (options.name) {
case customHooks.forkTsCheckerServiceBeforeStart:
options.async = true;
break;
case customHooks.forkTsCheckerCancel:
case customHooks.forkTsCheckerServiceStartError:
case customHooks.forkTsCheckerWaiting:
case customHooks.forkTsCheckerServiceStart:
case customHooks.forkTsCheckerReceive:
case customHooks.forkTsCheckerServiceOutOfMemory:
case customHooks.forkTsCheckerEmit:
case customHooks.forkTsCheckerDone:
return true;
}
return undefined;
});
};
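    // Wire the checker into webpack's `compile` hook: cancel any job that is
    // still running, (re)spawn the service if needed, and send a fresh
    // cancellation token for the new compilation.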
ForkTsCheckerWebpackPlugin.prototype.pluginCompile = function () {
var _this = this;
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.compile.tap(checkerPluginName, function () {
_this.compilationDone = false;
_this.compiler.hooks.forkTsCheckerServiceBeforeStart.callAsync(function () {
if (_this.cancellationToken) {
// request cancellation if there is not finished job
_this.cancellationToken.requestCancellation();
_this.compiler.hooks.forkTsCheckerCancel.call(_this.cancellationToken);
}
_this.checkDone = false;
_this.started = process.hrtime();
// create new token for current job
_this.cancellationToken = new CancellationToken_1.CancellationToken(undefined, undefined);
if (!_this.service || !_this.service.connected) {
_this.spawnService();
}
try {
_this.service.send(_this.cancellationToken);
}
catch (error) {
if (!_this.silent && _this.logger) {
_this.logger.error(_this.colors.red('Cannot start checker service: ' +
(error ? error.toString() : 'Unknown error')));
}
_this.compiler.hooks.forkTsCheckerServiceStartError.call(error);
}
});
});
}
else {
// webpack 2 / 3
this.compiler.plugin('compile', function () {
_this.compilationDone = false;
_this.compiler.applyPluginsAsync('fork-ts-checker-service-before-start', function () {
if (_this.cancellationToken) {
// request cancellation if there is not finished job
_this.cancellationToken.requestCancellation();
_this.compiler.applyPlugins('fork-ts-checker-cancel', _this.cancellationToken);
}
_this.checkDone = false;
_this.started = process.hrtime();
// create new token for current job
_this.cancellationToken = new CancellationToken_1.CancellationToken(undefined, undefined);
if (!_this.service || !_this.service.connected) {
_this.spawnService();
}
try {
_this.service.send(_this.cancellationToken);
}
catch (error) {
if (!_this.silent && _this.logger) {
_this.logger.error(_this.colors.red('Cannot start checker service: ' +
(error ? error.toString() : 'Unknown error')));
}
_this.compiler.applyPlugins('fork-ts-checker-service-start-error', error);
}
});
});
}
};
ForkTsCheckerWebpackPlugin.prototype.pluginEmit = function () {
var _this = this;
var emit = function (compilation, callback) {
if (_this.isWatching && _this.async) {
callback();
return;
}
_this.emitCallback = _this.createEmitCallback(compilation, callback);
if (_this.checkDone) {
_this.emitCallback();
}
_this.compilationDone = true;
};
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.emit.tapAsync(checkerPluginName, emit);
}
else {
// webpack 2 / 3
this.compiler.plugin('emit', emit);
}
};
ForkTsCheckerWebpackPlugin.prototype.pluginDone = function () {
var _this = this;
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.done.tap(checkerPluginName, function (_stats) {
if (!_this.isWatching || !_this.async) {
return;
}
if (_this.checkDone) {
_this.doneCallback();
}
else {
if (_this.compiler) {
_this.compiler.hooks.forkTsCheckerWaiting.call(_this.tslint !== false);
}
if (!_this.silent && _this.logger) {
_this.logger.info(_this.tslint
? 'Type checking and linting in progress...'
: 'Type checking in progress...');
}
}
_this.compilationDone = true;
});
}
else {
// webpack 2 / 3
this.compiler.plugin('done', function () {
if (!_this.isWatching || !_this.async) {
return;
}
if (_this.checkDone) {
_this.doneCallback();
}
else {
if (_this.compiler) {
_this.compiler.applyPlugins('fork-ts-checker-waiting', _this.tslint !== false);
}
if (!_this.silent && _this.logger) {
_this.logger.info(_this.tslint
? 'Type checking and linting in progress...'
: 'Type checking in progress...');
}
}
_this.compilationDone = true;
});
}
};
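    // The checker runs in a forked child process (cluster.js when more than
    // one worker is requested, service.js otherwise) and reports results back
    // over the IPC channel.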
ForkTsCheckerWebpackPlugin.prototype.spawnService = function () {
var _this = this;
this.service = childProcess.fork(path.resolve(__dirname, this.workersNumber > 1 ? './cluster.js' : './service.js'), [], {
execArgv: this.workersNumber > 1
? []
: ['--max-old-space-size=' + this.memoryLimit],
env: Object.assign({}, process.env, {
TSCONFIG: this.tsconfigPath,
COMPILER_OPTIONS: JSON.stringify(this.compilerOptions),
TSLINT: this.tslintPath || '',
TSLINTAUTOFIX: this.tslintAutoFix,
WATCH: this.isWatching ? this.watchPaths.join('|') : '',
WORK_DIVISION: Math.max(1, this.workersNumber),
MEMORY_LIMIT: this.memoryLimit,
CHECK_SYNTACTIC_ERRORS: this.checkSyntacticErrors,
VUE: this.vue
}),
stdio: ['inherit', 'inherit', 'inherit', 'ipc']
});
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.forkTsCheckerServiceStart.call(this.tsconfigPath, this.tslintPath, this.watchPaths, this.workersNumber, this.memoryLimit);
}
else {
// webpack 2 / 3
this.compiler.applyPlugins('fork-ts-checker-service-start', this.tsconfigPath, this.tslintPath, this.watchPaths, this.workersNumber, this.memoryLimit);
}
if (!this.silent && this.logger) {
this.logger.info('Starting type checking' +
(this.tslint ? ' and linting' : '') +
' service...');
this.logger.info('Using ' +
this.colors.bold(this.workersNumber === 1
? '1 worker'
: this.workersNumber + ' workers') +
' with ' +
this.colors.bold(this.memoryLimit + 'MB') +
' memory limit');
if (this.watchPaths.length && this.isWatching) {
this.logger.info('Watching:' +
(this.watchPaths.length > 1 ? '\n' : ' ') +
this.watchPaths.map(function (wpath) { return _this.colors.grey(wpath); }).join('\n'));
}
}
this.service.on('message', function (message) {
return _this.handleServiceMessage(message);
});
this.service.on('exit', function (code, signal) {
return _this.handleServiceExit(code, signal);
});
};
ForkTsCheckerWebpackPlugin.prototype.killService = function () {
if (this.service) {
try {
if (this.cancellationToken) {
this.cancellationToken.cleanupCancellation();
}
this.service.kill();
this.service = undefined;
}
catch (e) {
if (this.logger && !this.silent) {
this.logger.error(e);
}
}
}
};
ForkTsCheckerWebpackPlugin.prototype.handleServiceMessage = function (message) {
var _this = this;
if (this.cancellationToken) {
this.cancellationToken.cleanupCancellation();
// job is done - nothing to cancel
this.cancellationToken = undefined;
}
this.checkDone = true;
this.elapsed = process.hrtime(this.started);
this.diagnostics = message.diagnostics.map(NormalizedMessage_1.NormalizedMessage.createFromJSON);
this.lints = message.lints.map(NormalizedMessage_1.NormalizedMessage.createFromJSON);
if (this.ignoreDiagnostics.length) {
this.diagnostics = this.diagnostics.filter(function (diagnostic) {
return _this.ignoreDiagnostics.indexOf(parseInt(diagnostic.getCode(), 10)) === -1;
});
}
if (this.ignoreLints.length) {
this.lints = this.lints.filter(function (lint) { return _this.ignoreLints.indexOf(lint.getCode()) === -1; });
}
if (this.reportFiles.length) {
var reportFilesPredicate = function (diagnostic) {
if (diagnostic.file) {
var relativeFileName = path.relative(_this.compiler.options.context, diagnostic.file);
var matchResult = micromatch([relativeFileName], _this.reportFiles);
if (matchResult.length === 0) {
return false;
}
}
return true;
};
this.diagnostics = this.diagnostics.filter(reportFilesPredicate);
this.lints = this.lints.filter(reportFilesPredicate);
}
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.forkTsCheckerReceive.call(this.diagnostics, this.lints);
}
else {
// webpack 2 / 3
this.compiler.applyPlugins('fork-ts-checker-receive', this.diagnostics, this.lints);
}
if (this.compilationDone) {
this.isWatching && this.async ? this.doneCallback() : this.emitCallback();
}
};
ForkTsCheckerWebpackPlugin.prototype.handleServiceExit = function (_code, signal) {
if (signal === 'SIGABRT') {
// probably out of memory :/
if (this.compiler) {
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.forkTsCheckerServiceOutOfMemory.call();
}
else {
// webpack 2 / 3
this.compiler.applyPlugins('fork-ts-checker-service-out-of-memory');
}
}
if (!this.silent && this.logger) {
this.logger.error(this.colors.red('Type checking and linting aborted - probably out of memory. ' +
'Check `memoryLimit` option in ForkTsCheckerWebpackPlugin configuration.'));
}
}
};
ForkTsCheckerWebpackPlugin.prototype.createEmitCallback = function (compilation, callback) {
return function emitCallback() {
var _this = this;
var elapsed = Math.round(this.elapsed[0] * 1e9 + this.elapsed[1]);
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.forkTsCheckerEmit.call(this.diagnostics, this.lints, elapsed);
}
else {
// webpack 2 / 3
this.compiler.applyPlugins('fork-ts-checker-emit', this.diagnostics, this.lints, elapsed);
}
this.diagnostics.concat(this.lints).forEach(function (message) {
// webpack message format
var formatted = {
rawMessage: message.getSeverity().toUpperCase() +
' ' +
message.getFormattedCode() +
': ' +
message.getContent(),
message: _this.formatter(message, _this.useColors),
location: {
line: message.getLine(),
character: message.getCharacter()
},
file: message.getFile()
};
if (message.isWarningSeverity()) {
compilation.warnings.push(formatted);
}
else {
compilation.errors.push(formatted);
}
});
callback();
};
};
ForkTsCheckerWebpackPlugin.prototype.createNoopEmitCallback = function () {
// tslint:disable-next-line:no-empty
return function noopEmitCallback() { };
};
ForkTsCheckerWebpackPlugin.prototype.createDoneCallback = function () {
return function doneCallback() {
var _this = this;
var elapsed = Math.round(this.elapsed[0] * 1e9 + this.elapsed[1]);
if (this.compiler) {
if ('hooks' in this.compiler) {
// webpack 4
this.compiler.hooks.forkTsCheckerDone.call(this.diagnostics, this.lints, elapsed);
}
else {
// webpack 2 / 3
this.compiler.applyPlugins('fork-ts-checker-done', this.diagnostics, this.lints, elapsed);
}
}
if (!this.silent && this.logger) {
if (this.diagnostics.length || this.lints.length) {
(this.lints || []).concat(this.diagnostics).forEach(function (message) {
var formattedMessage = _this.formatter(message, _this.useColors);
message.isWarningSeverity()
? _this.logger.warn(formattedMessage)
: _this.logger.error(formattedMessage);
});
}
if (!this.diagnostics.length) {
this.logger.info(this.colors.green('No type errors found'));
}
if (this.tslint && !this.lints.length) {
this.logger.info(this.colors.green('No lint errors found'));
}
this.logger.info('Version: typescript ' +
this.colors.bold(this.typescriptVersion) +
(this.tslint
? ', tslint ' + this.colors.bold(this.tslintVersion)
: ''));
this.logger.info('Time: ' +
this.colors.bold(Math.round(elapsed / 1e6).toString()) +
'ms');
}
};
};
ForkTsCheckerWebpackPlugin.DEFAULT_MEMORY_LIMIT = 2048;
ForkTsCheckerWebpackPlugin.ONE_CPU = 1;
ForkTsCheckerWebpackPlugin.ALL_CPUS = os.cpus && os.cpus() ? os.cpus().length : 1;
ForkTsCheckerWebpackPlugin.ONE_CPU_FREE = Math.max(1, ForkTsCheckerWebpackPlugin.ALL_CPUS - 1);
ForkTsCheckerWebpackPlugin.TWO_CPUS_FREE = Math.max(1, ForkTsCheckerWebpackPlugin.ALL_CPUS - 2);
return ForkTsCheckerWebpackPlugin;
}());
module.exports = ForkTsCheckerWebpackPlugin;
run.py | #!/usr/bin/env python3.8
from account import Account
from credential import Credential
from termcolor import colored, cprint
import os
import time
import pickle
# Functions that implement the behaviours in account class.
def create_account(username, fname, lname, p_word):
'''
Function to create new account
'''
new_account = Account(username, fname, lname, p_word)
return new_account
def save_account(account):
'''
Function to save account
'''
account.save_account()
def delete_account(account):
'''
Function to delete an account
'''
account.delete_account()
def check_account_exists(username):
'''
    Function that checks if an account with that username already exists and returns a Boolean
'''
return Account.account_exists(username)
def auth_user(username, password):
'''
    Function to authenticate user during login
'''
return Account.auth_user(username, password)
# Functions that implement the behaviours in credential class.
def create_credential(page, username, password):
'''
Function to create credentials
'''
new_credential = Credential(page, username, password)
return new_credential
def save_credential(credential):
'''
Function to save credential
'''
credential.save_credential()
def delete_credential(credential):
'''
Function to delete credential
'''
credential.delete_credential()
def find_cred_by_pagename(pagename):
"""
Function that finds a credential by pagename and returns the credentials
"""
return Credential.find_by_pagename(pagename)
def copy_cred_pass(pagename):
'''
Function to copy credential password
'''
return Credential.copy_cred_password(pagename)
def check_credential_exists(pagename):
'''
    Function that checks if a credential with that pagename exists and returns a Boolean
    '''
    return Credential.credential_exists(pagename)


def display_credentials():
    '''
Function that returns all the saved credentials
'''
return Credential.display_credentials()
def generate_password(length):
'''
    Function that generates a random password
'''
return Credential.generate_password(length)
def main():
login = False # Set initial login value to false
sign_name = '' # Name of user currently logged in
logged = True
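    # Accounts and credentials are persisted between runs as pickle files
    # ('accounts.pydata' and 'credentials.pydata') in the working directory.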
def load_pickles():
try:
file_object = open('accounts.pydata', 'rb')
Account.accounts_list = pickle.load(file_object)
file_object.close()
print("\nLOADED PICKLES ACCOUNTS")
        except Exception:
            print("\nCOULDN'T LOAD PICKLED ACCOUNTS")
Account.accounts_list = []
try:
file_objectt = open('credentials.pydata', 'rb')
Credential.credentials_list = pickle.load(file_objectt)
            file_objectt.close()
print("\nLOADED PICKLES CREDENTIALS")
        except Exception:
            print("\nCOULDN'T LOAD PICKLED CREDENTIALS")
Credential.credentials_list = []
def pickle_save():
try:
file_object = open('accounts.pydata', 'wb')
pickle.dump(Account.accounts_list, file_object)
file_object.close()
print("\nSAVED ACCOUNTS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T ACCOUNTS SAVE TO PICKLES.")
try:
file_objectt = open('credentials.pydata', 'wb')
pickle.dump(display_credentials(), file_objectt)
file_objectt.close()
print("\nSAVED CREDENTIALS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T CREDENTIALS SAVE TO PICKLES.")
def display_title():
        '''
        Function to display app title bar
        '''
        os.system('clear')
cprint("""
\n\t\t\t\t**********************************************
\t\t**************************************************************************
\t*******************************************************************************************
\n
\t\t\t\t
\t\t\t\t
\t\t\t\t |\ /|
\t\t\t\t | \ / |
\t\t\t\t | \/ |
\n\t\t\t\t*** WELCOME TO PASSWORD LOCKER ***
\n`\t\t\t******************************************************************
""", "magenta")
while logged:
display_title()
load_pickles()
while login == False:
cprint("""
Use the following short codes to manage your password locker account
'ln' - Login
'xx' - Close app
""", "blue")
s_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if s_code == 'ln':
acc_code = input(
colored('\tDo you have an account? Y/N >> ', 'cyan')).upper()
if acc_code == 'Y':
cprint(
                        '\tEnter your username and password to login >>>\n', 'magenta')
login_user_name = input(
colored('\tEnter username >> ', 'cyan'))
login_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in...")
time.sleep(1.5)
if auth_user(login_user_name, login_password):
cprint('\n\t\tLOGIN SUCCESSFUL',
'green', attrs=['bold'])
sign_name = login_user_name
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY',
'red', attrs=['bold'])
elif acc_code == 'N':
cprint(
'\tEnter your username,firstname,lastname and password to register account >>>\n', 'blue')
reg_user_name = input(
colored('\tEnter username >> ', 'cyan'))
reg_f_name = input(
colored('\tEnter firstname >> ', 'cyan'))
reg_l_name = input(colored('\tEnter lastname >> ', 'cyan'))
reg_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tRegistering ...")
time.sleep(1.5)
if check_account_exists(reg_user_name):
cprint(
f"\n\t\tACCOUNT WITH, {reg_user_name.upper()} USERNAME ALREADY CREATED", "red", attrs=['bold'])
else:
new_acc = create_account(
reg_user_name, reg_f_name, reg_l_name, reg_password)
save_account(new_acc)
cprint(
"\n\t\tCONGRATULATIONS, YOUR ACCOUNT HAS BEEN CREATED", "green", attrs=['bold'])
cprint("\n\tSign into your new account", "blue")
sign_username = input(
colored('\n\tEnter username >> ', 'cyan'))
sign_password = input(
colored('\n\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in ...")
time.sleep(1.5)
if auth_user(sign_username, sign_password):
cprint("\n\t\tLOGIN SUCCESSFUL",
"green", attrs=['bold'])
sign_name = sign_username
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY USER',
'red', attrs=['bold'])
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
elif s_code == 'xx':
cprint(f"""\n\t\tTHANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
logged = False
break
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
while login == True:
time.sleep(1.5)
cprint(f"""
{sign_name.upper()}, WELCOME TO YOUR PASSWORD LOCKER:
Use the following commands to navigate the application:
'sc' >> Save existing page credentials
'cc' >> Create new page credentials
'dc' >> Display all credentials saved
'fc' >> Find credential saved by page name
'cp' >> Copy pagename credential password to clipboard
'dl' >> Delete page credential
'lgo' >> Log out
'ex' >> Close App
""", "blue")
app_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if app_code == 'sc':
cprint(
'\tEnter pagename,username and password to save credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tSaving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'cc':
cprint(
'\tEnter pagename,username and password to create and save new page credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
gen_pass_code = input(colored(
'\tWould you like to generate a random password? Y/N >> ', 'cyan')).upper()
pass_word = ''
if gen_pass_code == 'Y':
pass_len = int(input(colored(
'\tHow long would you like your password? Provide numbers only >> ', 'cyan')))
pass_word = generate_password(pass_len)
else:
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tCreating and Saving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'dc':
if len(display_credentials()) > 0:
cprint("\n\t\t"+sign_name.upper() +
", CREDENTIALS", "green", attrs=['bold'])
for credential in display_credentials():
cprint(f'''
-------------------------------------------------------
Page Name >>>> {credential.page_name.upper()}
Page Username >>>> {credential.user_name}
Page Password >>>> {credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint("\n\t\t"+sign_name.upper() +
",HAS NO CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'fc':
search_page = input(
colored('\n\tEnter page name to search credentials >> ', 'cyan')).lower()
print("\n\t\tLoading ...")
time.sleep(1.5)
if check_credential_exists(search_page):
found_credential = find_cred_by_pagename(search_page)
cprint(f'''
-------------------------------------------------------
Page Name >>>> {found_credential.page_name.upper()}
Page Username >>>> {found_credential.user_name}
Page Password >>>> {found_credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint(
f'\n\t\t{search_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'cp':
search_page = input(colored(
'\n\tEnter page name to copy password to clipboard >> ', 'cyan')).lower()
print("\n\t\tSearching ...")
time.sleep(1.5)
if check_credential_exists(search_page):
copy_cred_pass(search_page)
cprint("\n\t\t"+search_page.upper() +
", PASSWORD COPIED TO CLIPBOARD", "green", attrs=['bold'])
else:
cprint(
f'\n\t\t{search_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'dl':
del_page = input(
colored('\n\tEnter page name you want to delete >> ', 'cyan')).lower()
print("\n\t\tDeleting ...")
time.sleep(1.5)
if check_credential_exists(del_page):
found_page = find_cred_by_pagename(del_page)
found_page.delete_credential()
cprint("\n\t\t"+del_page.upper() +
", CREDENTIALS DELETED", "green", attrs=['bold'])
else:
cprint(
f'\n\t\t{del_page.upper()} DOES NOT EXISTS', 'red', attrs=['bold'])
elif app_code == 'lgo':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tLogin out >>>>>
""", "green", attrs=['bold'])
time.sleep(1.5)
login = False
elif app_code == 'ex':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
login = False
logged = False
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
if __name__ == '__main__':
    main()
TagsPanel.tsx | import { getTags } from '@linode/api-v4/lib/tags';
import classNames from 'classnames';
import { withSnackbar, WithSnackbarProps } from 'notistack';
import { clone } from 'ramda';
import * as React from 'react';
import { compose } from 'recompose';
import Plus from 'src/assets/icons/plusSign.svg';
import CircleProgress from 'src/components/CircleProgress';
import {
createStyles,
Theme,
withStyles,
WithStyles,
} from 'src/components/core/styles';
import Typography from 'src/components/core/Typography';
import Select from 'src/components/EnhancedSelect/Select';
import { isRestrictedUser } from 'src/features/Profile/permissionsHelpers';
import Tag from 'src/components/Tag';
import { getErrorStringOrDefault } from 'src/utilities/errorUtils';
type ClassNames =
| 'root'
| 'tag'
| 'addButtonWrapper'
| 'hasError'
| 'errorNotice'
| 'addTagButton'
| 'tagsPanelItemWrapper'
| 'selectTag'
| 'progress'
| 'loading';
const styles = (theme: Theme) =>
createStyles({
'@keyframes fadeIn': {
from: {
opacity: 0,
},
to: {
opacity: 1,
},
},
tag: {
marginTop: theme.spacing(1) / 2,
marginRight: 4,
},
addButtonWrapper: {
display: 'flex',
justifyContent: 'flex-start',
width: '100%',
},
hasError: {
marginTop: 0,
},
errorNotice: {
animation: '$fadeIn 225ms linear forwards',
borderLeft: `5px solid ${theme.palette.status.errorDark}`,
'& .noticeText': {
...theme.typography.body1,
fontFamily: '"LatoWeb", sans-serif',
},
marginTop: 20,
paddingLeft: 10,
textAlign: 'left',
},
addTagButton: {
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
backgroundColor: theme.color.tagButton,
border: 'none',
borderRadius: 3,
color: theme.cmrTextColors.linkActiveLight,
cursor: 'pointer',
fontFamily: theme.font.normal,
fontSize: '0.875rem',
fontWeight: 'bold',
padding: '7px 10px',
whiteSpace: 'nowrap',
'& svg': {
color: theme.color.tagIcon,
marginLeft: 10,
height: 10,
width: 10,
},
},
tagsPanelItemWrapper: {
marginBottom: theme.spacing(),
position: 'relative',
},
selectTag: {
animation: '$fadeIn .3s ease-in-out forwards',
marginTop: -3.5,
minWidth: 275,
position: 'relative',
textAlign: 'left',
width: '100%',
zIndex: 3,
'& .error-for-scroll > div': {
flexDirection: 'row',
flexWrap: 'wrap-reverse',
},
'& .input': {
'& p': {
color: theme.color.grey1,
borderLeft: 'none',
fontSize: '.9rem',
},
},
'& .react-select__input': {
backgroundColor: 'transparent',
color: theme.palette.text.primary,
fontSize: '.9rem',
},
'& .react-select__value-container': {
padding: '6px',
},
},
progress: {
display: 'flex',
justifyContent: 'center',
alignItems: 'center',
position: 'absolute',
height: '100%',
width: '100%',
zIndex: 2,
},
loading: {
opacity: 0.4,
},
});
interface Item {
label: string;
value: string;
}
interface Tag {
label: string;
}
interface ActionMeta {
action: string;
}
interface State {
tagsToSuggest?: Item[];
tagError: string;
isCreatingTag: boolean;
tagInputValue: string;
listDeletingTags: string[];
loading?: boolean;
}
export interface Props {
align?: 'left' | 'right';
tags: string[];
updateTags: (tags: string[]) => Promise<any>;
disabled?: boolean;
}
type CombinedProps = Props & WithStyles<ClassNames> & WithSnackbarProps;
class TagsPanel extends React.Component<CombinedProps, State> {
state: State = {
tagsToSuggest: [],
tagError: '',
isCreatingTag: false,
tagInputValue: '',
listDeletingTags: [],
loading: false,
};
componentDidMount() {
const { tags } = this.props;
if (!isRestrictedUser()) {
getTags()
.then((response) => {
/*
* The end goal is to display to the user a list of auto-suggestions
* when they start typing in a new tag, but we don't want to display
* tags that are already applied because there cannot
* be duplicates.
*/
const filteredTags = response.data.filter((thisTag: Tag) => {
return !tags.some((alreadyAppliedTag: string) => {
return alreadyAppliedTag === thisTag.label;
});
});
/*
* reshaping them for the purposes of being passed to the Select component
*/
const reshapedTags = filteredTags.map((thisTag: Tag) => {
return {
label: thisTag.label,
value: thisTag.label,
};
});
this.setState({ tagsToSuggest: reshapedTags });
})
.catch((e) => e);
}
}
toggleTagInput = () => {
if (!this.props.disabled) {
this.setState({
tagError: '',
isCreatingTag: !this.state.isCreatingTag,
});
}
};
handleDeleteTag = (label: string) => {
const { tags, updateTags } = this.props;
/*
* Add this tag to the current list of tags that are queued for deletion
*/
this.setState(
{
listDeletingTags: [...this.state.listDeletingTags, label],
loading: true,
},
() => {
/*
* Update the new list of tags (which is the previous list but
* with the deleted tag filtered out). It's important to note that the Tag is *not*
* being deleted here - it's just being removed from the list
*/
const tagsWithoutDeletedTag = tags.filter((thisTag: string) => {
return this.state.listDeletingTags.indexOf(thisTag) === -1;
});
updateTags(tagsWithoutDeletedTag)
.then(() => {
/*
* Remove this tag from the current list of tags that are queued for deletion
*/
const cloneTagSuggestions = clone(this.state.tagsToSuggest) || [];
this.setState({
tagsToSuggest: [
{
value: label,
label,
},
...cloneTagSuggestions,
],
listDeletingTags: this.state.listDeletingTags.filter(
(thisTag) => thisTag !== label
),
loading: false,
tagError: '',
});
})
.catch((_) => {
this.props.enqueueSnackbar(`Could not delete Tag: ${label}`, {
variant: 'error',
});
/*
* Remove this tag from the current list of tags that are queued for deletion
*/
this.setState({
listDeletingTags: this.state.listDeletingTags.filter(
(thisTag) => thisTag !== label
),
loading: false,
});
});
}
);
};
handleCreateTag = (value: Item, actionMeta: ActionMeta) => {
const { tagsToSuggest } = this.state;
const { tags, updateTags } = this.props;
const inputValue = value && value.value;
/*
* This comes from the react-select API
* basically, we only want to make a request if the user is either
* hitting the enter button or choosing a selection from the dropdown
*/
if (
actionMeta.action !== 'select-option' &&
actionMeta.action !== 'create-option'
) {
return;
}
const tagExists = (tag: string) => {
return tags.some((el) => {
return el === tag;
});
};
this.toggleTagInput();
if (inputValue.length < 3 || inputValue.length > 50) {
this.setState({
tagError: `Tag "${inputValue}" length must be 3-50 characters`,
});
} else if (tagExists(inputValue)) {
this.setState({
tagError: `Tag "${inputValue}" is a duplicate`,
});
} else {
this.setState({
loading: true,
});
updateTags([...tags, value.label])
.then(() => {
// set the input value to blank on submit
this.setState({ tagInputValue: '' });
          /*
           * Filter the new tag out of the auto-suggestion list,
           * since it can't be attached again
           */
const cloneTagSuggestions = clone(tagsToSuggest) || [];
const filteredTags = cloneTagSuggestions.filter((thisTag: Item) => {
return thisTag.label !== value.label;
});
this.setState({
tagsToSuggest: filteredTags,
loading: false,
});
})
.catch((e) => {
const tagError = getErrorStringOrDefault(
e,
'Error while creating tag'
);
this.setState({ loading: false, tagError });
});
}
};
render() {
const { tags, classes, disabled } = this.props;
const {
isCreatingTag,
tagsToSuggest,
tagInputValue,
tagError,
loading,
} = this.state;
return (
<>
{isCreatingTag ? (
<Select
onChange={this.handleCreateTag}
options={tagsToSuggest}
variant="creatable"
onBlur={this.toggleTagInput}
placeholder="Create or Select a Tag"
label="Create or Select a Tag"
hideLabel
value={tagInputValue}
createOptionPosition="first"
className={classes.selectTag}
escapeClearsValue
blurInputOnSelect
// eslint-disable-next-line
autoFocus
/>
) : (
<div
className={classNames({
[classes.addButtonWrapper]: true,
[classes.hasError]: tagError,
})}
>
<button
className={classes.addTagButton}
title="Add a tag"
onClick={this.toggleTagInput}
>
Add a tag
<Plus />
</button>
</div>
)}
<div className={classes.tagsPanelItemWrapper}>
{loading && (
<div className={classes.progress}>
<CircleProgress mini />
</div>
)}
{tags.map((thisTag) => {
return (
<Tag
key={`tag-item-${thisTag}`}
className={classNames({
[classes.tag]: true,
[classes.loading]: loading,
})}
colorVariant="lightBlue"
label={thisTag}
maxLength={30}
onDelete={
disabled ? undefined : () => this.handleDeleteTag(thisTag)
}
/>
);
})}
{tagError && (
<Typography className={classes.errorNotice}>{tagError}</Typography>
)}
</div>
</>
);
}
}
const styled = withStyles(styles);
export default compose<CombinedProps, Props>(styled, withSnackbar)(TagsPanel); | |
move_semantics2.rs | // move_semantics2.rs
// Make me compile without changing line 13!
// Execute `rustlings hint move_semantics2` for hints :)
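// Solution note: `fill_vec` borrows the vector (`&mut Vec<i32>`) instead of
// taking ownership, so `vec0` remains usable after the call; the function
// then returns an owned copy of the contents via `to_vec()`.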
fn main() {
let mut vec0 = Vec::new();
let mut vec1 = fill_vec(&mut vec0);
// Do not change the following line!
println!("{} has length {} content `{:?}`", "vec0", vec0.len(), vec0);
vec1.push(88);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}
fn fill_vec(vec: &mut Vec<i32>) -> Vec<i32> | {
vec.push(22);
vec.push(44);
vec.push(66);
    vec.to_vec() // copies the borrowed vector's contents into a new, owned Vec
} |
|
audio.py | import os
import scipy.signal
import numpy as np
from soundfile import SoundFile
from pyutils.iolib.video import getFFprobeMeta
from pyutils.cmd import runSystemCMD
# from scikits.audiolab import Sndfile, Format
import tempfile
import resampy
# import librosa
def load_wav(fname, rate=None):
# fp = Sndfile(fname, 'r')
fp = SoundFile(fname, 'r')
    #_signal = fp.read_frames(fp.nframes)
    _signal = fp.buffer_read(dtype="int32")
    # buffer_read returns a raw interleaved buffer; decode it before reshaping
    _signal = np.frombuffer(_signal, dtype=np.int32).reshape((-1, fp.channels))
    _rate = fp.samplerate
    if _signal.ndim == 1:
        _signal = _signal.reshape((-1, 1))  # reshape returns a new array; rebind it
if rate is not None and rate != _rate:
# _num_frames = _signal.shape[0]
# _duration = _num_frames / float(_rate)
# signal = scipy.signal.resample(_signal, int(rate * _duration))
signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_fast')
else:
signal = _signal
rate = _rate
return signal, rate
def save_wav(fname, signal, rate):
    fp = SoundFile(fname, 'w', rate, signal.shape[1])
    fp.write(signal)
    fp.close()  # flush and release the file handle
    # with SoundFile(fname, 'w', rate, signal.shape[1], 'PCM_24') as f:
    #     f.write(signal)
    # Attempt 3 (kept for reference):
    # y, sr = librosa.load(librosa.util.example_audio_file(), duration=5.0)
    # librosa.output.write_wav(fname, signal, rate)
    # fp = SoundFile(fname, 'w', rate, signal.shape[1])
    # # d, sr = fp.read()
    # fp.write(signal)
    # Attempt 4
def convert2wav(inp_fn, out_fn, rate=None):
cmd = ['ffmpeg', '-y',
'-i', inp_fn,
'-map', '0:a',
'-acodec', 'pcm_s16le']
if rate is not None:
cmd += ['-ar', str(rate),]
cmd += [out_fn]
stdout, stderr = runSystemCMD(' '.join(cmd))
    if any(l.startswith('Output file is empty,')
           for l in stderr.split('\n')):
        raise ValueError('Output file is empty.\n' + stderr)
class AudioReader:
def | (self, fn, rate=None, pad_start=0, seek=None, duration=None, rotation=None):
        fp = SoundFile(fn, 'r') if fn.endswith('.wav') else None
if fp is None or (rate is not None and fp.samplerate != rate):
# Convert to wav file
if not os.path.isdir('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/'):
os.makedirs('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/')
snd_file = tempfile.NamedTemporaryFile('w', prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.wav', delete=False)
snd_file.close()
convert2wav(fn, snd_file.name, rate)
self.snd_fn = snd_file.name
self.rm_flag = True
else:
self.snd_fn = fn
self.rm_flag = False
        self.fp = SoundFile(self.snd_fn, 'r')
        self.num_channels = self.fp.channels
        self.rate = self.fp.samplerate
        self.num_frames = self.fp.frames
self.duration = self.num_frames / float(self.rate)
self.k = 0
self.pad = pad_start
if seek is not None and seek > 0:
num_frames = int(seek * self.rate)
            self.fp.seek(num_frames)  # skip ahead instead of reading and discarding
else:
seek = 0
if duration is not None:
self.duration = min(duration, self.duration-seek)
self.num_frames = int(self.duration * self.rate)
if rotation is not None:
assert self.num_channels > 2 # Spatial audio
assert -np.pi <= rotation < np.pi
c = np.cos(rotation)
s = np.sin(rotation)
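            # A horizontal (yaw) rotation of first-order ambisonics in ACN
            # channel order (W, Y, Z, X): only the X/Y pair mixes; W and Z
            # pass through unchanged.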
rot_mtx = np.array([[1, 0, 0, 0], # W' = W
[0, c, 0, s], # Y' = X sin + Y cos
[0, 0, 1, 0], # Z' = Z
[0, -s, 0, c]]) # X' = X cos - Y sin
self.rot_mtx = rot_mtx
else:
self.rot_mtx = None
def __del__(self):
if self.rm_flag:
os.remove(self.snd_fn)
def get_chunk(self, n=1, force_size=False):
if self.k >= self.num_frames:
return None
frames_left = self.num_frames - self.k
if force_size and n > frames_left:
return None
# Pad zeros to start
if self.pad > 0:
pad_size = min(n, self.pad)
pad_chunk = np.zeros((pad_size, self.num_channels))
n -= pad_size
self.pad -= pad_size
else:
pad_chunk = None
# Read frames
chunk_size = min(n, frames_left)
        chunk = self.fp.read(chunk_size)
chunk = chunk.reshape((chunk.shape[0], self.num_channels))
self.k += chunk_size
if pad_chunk is not None:
chunk = np.concatenate((pad_chunk.astype(chunk.dtype), chunk), 0)
if self.rot_mtx is not None:
chunk = np.dot(chunk, self.rot_mtx.T)
return chunk
def loop_chunks(self, n=1, force_size=False):
while True:
            chunk = self.get_chunk(n, force_size=force_size)
if chunk is None:
break
yield chunk
class AudioReader2:
def __init__(self, audio_folder, rate=None,
seek=0, duration=None, rotation=None):
self.audio_folder = audio_folder
fns = os.listdir(audio_folder)
self.num_files = len(fns)
# fp = Sndfile(os.path.join(self.audio_folder, fns[0]), 'r')
fp = SoundFile(os.path.join(self.audio_folder, fns[0]), 'r')
data, fps = load_wav(os.path.join(self.audio_folder, fns[0]))
        # Honour the requested rate (get() resamples via load_wav); otherwise
        # fall back to the file's native rate.
        self.rate = float(rate) if rate is not None else float(fps)
        self.num_channels = fp.channels
        self.duration = self.num_files
        self.num_frames = int(self.duration * self.rate)
self.cur_frame = int(seek * self.rate)
self.time = self.cur_frame / self.rate
self.max_time = self.duration
if duration is not None:
self.max_time = min(seek + duration, self.max_time)
if rotation is not None:
assert self.num_channels > 2 # Spatial audio
assert -np.pi <= rotation < np.pi
c = np.cos(rotation)
s = np.sin(rotation)
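            # A horizontal (yaw) rotation of first-order ambisonics in ACN
            # channel order (W, Y, Z, X): only the X/Y pair mixes; W and Z
            # pass through unchanged.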
rot_mtx = np.array([[1, 0, 0, 0], # W' = W
[0, c, 0, s], # Y' = X sin + Y cos
[0, 0, 1, 0], # Z' = Z
[0, -s, 0, c]]) # X' = X cos - Y sin
self.rot_mtx = rot_mtx
else:
self.rot_mtx = None
def get(self, start_time, size):
index = range(int(start_time), int(start_time + size / self.rate) + 1)
fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))
for i in index]
chunk = []
for fn in fns:
if not os.path.exists(fn):
return None
data, _ = load_wav(fn, self.rate)
chunk.append(data)
chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]
ss = int((start_time - int(start_time)) * self.rate)
chunk = chunk[ss:ss+size, :]
return chunk
def get_chunk(self, n=1, force_size=False):
if self.time >= self.max_time:
return None
frames_left = int((self.max_time - self.time) * self.rate)
if force_size and n > frames_left:
return None
# Read frames
chunk_size = min(n, frames_left)
start_time = self.cur_frame / self.rate
end_frame_no = self.cur_frame + chunk_size - 1
end_time = end_frame_no / self.rate
index = range(int(start_time), int(end_time) + 1)
fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))
for i in index]
chunk = []
for fn in fns:
data, _ = load_wav(fn, self.rate)
chunk.append(data)
chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]
ss = int((self.time - int(self.time)) * self.rate)
chunk = chunk[ss:ss+chunk_size, :]
self.cur_frame += chunk.shape[0]
self.time = self.cur_frame / self.rate
if self.rot_mtx is not None:
chunk = np.dot(chunk, self.rot_mtx.T)
return chunk
def loop_chunks(self, n=1, force_size=False):
while True:
            chunk = self.get_chunk(n, force_size=force_size)
if chunk is None:
break
yield chunk
def test_audio_reader():
reader = AudioReader2('/gpu2_data/morgado/spatialaudiogen/youtube/train/687gkvLi5kI/ambix',
rate=10000, seek=0, duration=5.5)
for s in reader.loop_chunks(10000):
        print(s.shape, s.max(), s.min())
# test_audio_reader()
| __init__ |
accounts_usage_records.go | /*
* Twilio - Api
*
* This is the public Twilio REST API.
*
* API version: 1.24.0
* Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package openapi
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"github.com/NellybettIrahola/twilio-go/client"
)
// Optional parameters for the method 'ListUsageRecord'
type ListUsageRecordParams struct {
// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the UsageRecord resources to read.
PathAccountSid *string `json:"PathAccountSid,omitempty"`
// The [usage category](https://www.twilio.com/docs/usage/api/usage-record#usage-categories) of the UsageRecord resources to read. Only UsageRecord resources in the specified category are retrieved.
Category *string `json:"Category,omitempty"`
// Only include usage that has occurred on or after this date. Specify the date in GMT and format as `YYYY-MM-DD`. You can also specify offsets from the current date, such as: `-30days`, which will set the start date to be 30 days before the current date.
StartDate *string `json:"StartDate,omitempty"`
// Only include usage that occurred on or before this date. Specify the date in GMT and format as `YYYY-MM-DD`. You can also specify offsets from the current date, such as: `+30days`, which will set the end date to 30 days from the current date.
EndDate *string `json:"EndDate,omitempty"`
// Whether to include usage from the master account and all its subaccounts. Can be: `true` (the default) to include usage from the master account and all subaccounts or `false` to retrieve usage from only the specified account.
IncludeSubaccounts *bool `json:"IncludeSubaccounts,omitempty"`
// How many resources to return in each list page. The default is 50, and the maximum is 1000.
PageSize *int `json:"PageSize,omitempty"`
// Max number of records to return.
Limit *int `json:"limit,omitempty"`
}
func (params *ListUsageRecordParams) SetPathAccountSid(PathAccountSid string) *ListUsageRecordParams {
params.PathAccountSid = &PathAccountSid
return params
}
func (params *ListUsageRecordParams) SetCategory(Category string) *ListUsageRecordParams {
params.Category = &Category
return params
}
func (params *ListUsageRecordParams) SetStartDate(StartDate string) *ListUsageRecordParams {
params.StartDate = &StartDate
return params
}
func (params *ListUsageRecordParams) SetEndDate(EndDate string) *ListUsageRecordParams {
params.EndDate = &EndDate
return params
}
func (params *ListUsageRecordParams) SetIncludeSubaccounts(IncludeSubaccounts bool) *ListUsageRecordParams {
params.IncludeSubaccounts = &IncludeSubaccounts
return params
}
func (params *ListUsageRecordParams) SetPageSize(PageSize int) *ListUsageRecordParams {
params.PageSize = &PageSize
return params
}
func (params *ListUsageRecordParams) SetLimit(Limit int) *ListUsageRecordParams {
params.Limit = &Limit
return params
}
// Retrieve a single page of UsageRecord records from the API. Request is executed immediately.
func (c *ApiService) PageUsageRecord(params *ListUsageRecordParams, pageToken, pageNumber string) (*ListUsageRecordResponse, error) {
path := "/2010-04-01/Accounts/{AccountSid}/Usage/Records.json"
if params != nil && params.PathAccountSid != nil {
path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
} else {
path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
}
data := url.Values{}
headers := make(map[string]interface{})
if params != nil && params.Category != nil {
data.Set("Category", *params.Category)
}
if params != nil && params.StartDate != nil {
data.Set("StartDate", fmt.Sprint(*params.StartDate))
}
if params != nil && params.EndDate != nil {
data.Set("EndDate", fmt.Sprint(*params.EndDate))
}
if params != nil && params.IncludeSubaccounts != nil {
data.Set("IncludeSubaccounts", fmt.Sprint(*params.IncludeSubaccounts))
}
if params != nil && params.PageSize != nil {
data.Set("PageSize", fmt.Sprint(*params.PageSize))
}
if pageToken != "" {
data.Set("PageToken", pageToken)
}
if pageNumber != "" {
data.Set("Page", pageNumber)
}
resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &ListUsageRecordResponse{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, err
}
// Lists UsageRecord records from the API as a list. Unlike stream, this operation is eager and loads 'limit' records into memory before returning.
func (c *ApiService) ListUsageRecord(params *ListUsageRecordParams) ([]ApiV2010UsageRecord, error) {
if params == nil {
params = &ListUsageRecordParams{}
}
params.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))
response, err := c.PageUsageRecord(params, "", "")
if err != nil {
return nil, err
}
curRecord := 0
var records []ApiV2010UsageRecord
for response != nil {
records = append(records, response.UsageRecords...)
var record interface{}
if record, err = client.GetNext(c.baseURL, response, &curRecord, params.Limit, c.getNextListUsageRecordResponse); record == nil || err != nil {
return records, err
}
response = record.(*ListUsageRecordResponse)
}
return records, err
}
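// Example usage (illustrative only; "svc" is a hypothetical *ApiService):
//
//	params := &ListUsageRecordParams{}
//	params.SetCategory("calls").SetStartDate("-30days").SetLimit(100)
//	records, err := svc.ListUsageRecord(params)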
// Streams UsageRecord records from the API as a channel stream. This operation lazily loads records as efficiently as possible until the limit is reached.
func (c *ApiService) StreamUsageRecord(params *ListUsageRecordParams) (chan ApiV2010UsageRecord, error) {
if params == nil {
params = &ListUsageRecordParams{}
}
params.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))
response, err := c.PageUsageRecord(params, "", "")
if err != nil {
return nil, err
}
curRecord := 0
//set buffer size of the channel to 1
channel := make(chan ApiV2010UsageRecord, 1)
go func() {
for response != nil {
for item := range response.UsageRecords {
channel <- response.UsageRecords[item]
}
var record interface{}
if record, err = client.GetNext(c.baseURL, response, &curRecord, params.Limit, c.getNextListUsageRecordResponse); record == nil || err != nil {
close(channel)
return
}
response = record.(*ListUsageRecordResponse)
}
close(channel)
}()
return channel, err
}
func (c *ApiService) getNextListUsageRecordResponse(nextPageUrl string) (interface{}, error) { | if nextPageUrl == "" {
return nil, nil
}
resp, err := c.requestHandler.Get(nextPageUrl, nil, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &ListUsageRecordResponse{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, nil
} | |
b66e30eb6816_note.py | """note
Revision ID: b66e30eb6816
Revises: 8add39cb253d
Create Date: 2019-02-26 13:09:27.596374
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'b66e30eb6816'
down_revision = '8add39cb253d'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('collection_note',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('collection_id', sa.Integer,
sa.ForeignKey("collection.id",
name="fk_collection_file_collection_id"),
nullable=False),
sa.Column('note', sa.Text, nullable=False),
sa.Column('stored_at', sa.DateTime(timezone=False), nullable=False),
)
def downgrade():
| op.drop_table('collection_note') |
|
tailscaled_windows.go | // Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main // import "tailscale.com/cmd/tailscaled"
// TODO: check if administrator, like tswin does.
//
// TODO: try to load wintun.dll early at startup, before wireguard/tun
// does (which panics) and if we'd fail (e.g. due to access
// denied, even if administrator), use 'tasklist /m wintun.dll'
// to see if something else is currently using it and tell user.
//
// TODO: check if Tailscale service is already running, and fail early
// like tswin does.
//
// TODO: on failure, check if on a UNC drive and recommend copying it
// to C:\ to run it, like tswin does.
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"time"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.zx2c4.com/wireguard/windows/tunnel/winipcfg"
"inet.af/netaddr"
"tailscale.com/ipn/ipnserver"
"tailscale.com/logpolicy"
"tailscale.com/net/dns"
"tailscale.com/net/tstun"
"tailscale.com/safesocket"
"tailscale.com/types/logger"
"tailscale.com/util/winutil"
"tailscale.com/version"
"tailscale.com/wf"
"tailscale.com/wgengine"
"tailscale.com/wgengine/netstack"
"tailscale.com/wgengine/router"
)
const serviceName = "Tailscale"
func | () bool {
v, err := svc.IsWindowsService()
if err != nil {
log.Fatalf("svc.IsWindowsService failed: %v", err)
}
return v
}
func runWindowsService(pol *logpolicy.Policy) error {
return svc.Run(serviceName, &ipnService{Policy: pol})
}
type ipnService struct {
Policy *logpolicy.Policy
}
// Called by Windows to execute the windows service.
func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
changes <- svc.Status{State: svc.StartPending}
svcAccepts := svc.AcceptStop
if winutil.GetRegInteger("FlushDNSOnSessionUnlock", 0) != 0 {
svcAccepts |= svc.AcceptSessionChange
}
ctx, cancel := context.WithCancel(context.Background())
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
args := []string{"/subproc", service.Policy.PublicID.String()}
ipnserver.BabysitProc(ctx, args, log.Printf)
}()
changes <- svc.Status{State: svc.Running, Accepts: svcAccepts}
for ctx.Err() == nil {
select {
case <-doneCh:
case cmd := <-r:
switch cmd.Cmd {
case svc.Stop:
cancel()
case svc.Interrogate:
changes <- cmd.CurrentStatus
case svc.SessionChange:
handleSessionChange(cmd)
changes <- cmd.CurrentStatus
}
}
}
changes <- svc.Status{State: svc.StopPending}
return false, windows.NO_ERROR
}
func beWindowsSubprocess() bool {
if beFirewallKillswitch() {
return true
}
if len(os.Args) != 3 || os.Args[1] != "/subproc" {
return false
}
logid := os.Args[2]
log.Printf("Program starting: v%v: %#v", version.Long, os.Args)
log.Printf("subproc mode: logid=%v", logid)
go func() {
b := make([]byte, 16)
for {
_, err := os.Stdin.Read(b)
if err != nil {
log.Fatalf("stdin err (parent process died): %v", err)
}
}
}()
err := startIPNServer(context.Background(), logid)
if err != nil {
log.Fatalf("ipnserver: %v", err)
}
return true
}
func beFirewallKillswitch() bool {
if len(os.Args) != 3 || os.Args[1] != "/firewall" {
return false
}
log.SetFlags(0)
log.Printf("killswitch subprocess starting, tailscale GUID is %s", os.Args[2])
guid, err := windows.GUIDFromString(os.Args[2])
if err != nil {
log.Fatalf("invalid GUID %q: %v", os.Args[2], err)
}
luid, err := winipcfg.LUIDFromGUID(&guid)
if err != nil {
log.Fatalf("no interface with GUID %q: %v", guid, err)
}
start := time.Now()
fw, err := wf.New(uint64(luid))
if err != nil {
log.Fatalf("failed to enable firewall: %v", err)
}
log.Printf("killswitch enabled, took %s", time.Since(start))
	// Note(maisem): when local LAN access is toggled, tailscaled needs to
	// inform the firewall to let local routes through. The set of routes
	// is passed in via stdin, encoded as JSON.
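	// An illustrative stdin message (a JSON array of netaddr.IPPrefix
	// values): ["192.168.1.0/24", "10.0.0.0/8"]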
dcd := json.NewDecoder(os.Stdin)
for {
var routes []netaddr.IPPrefix
if err := dcd.Decode(&routes); err != nil {
log.Fatalf("parent process died or requested exit, exiting (%v)", err)
}
if err := fw.UpdatePermittedRoutes(routes); err != nil {
log.Fatalf("failed to update routes (%v)", err)
}
}
}
func startIPNServer(ctx context.Context, logid string) error {
var logf logger.Logf = log.Printf
getEngineRaw := func() (wgengine.Engine, error) {
dev, devName, err := tstun.New(logf, "Tailscale")
if err != nil {
return nil, fmt.Errorf("TUN: %w", err)
}
r, err := router.New(logf, dev, nil)
if err != nil {
dev.Close()
return nil, fmt.Errorf("router: %w", err)
}
if wrapNetstack {
r = netstack.NewSubnetRouterWrapper(r)
}
d, err := dns.NewOSConfigurator(logf, devName)
if err != nil {
r.Close()
dev.Close()
return nil, fmt.Errorf("DNS: %w", err)
}
eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
Tun: dev,
Router: r,
DNS: d,
ListenPort: 41641,
})
if err != nil {
r.Close()
dev.Close()
return nil, fmt.Errorf("engine: %w", err)
}
ns, err := newNetstack(logf, eng)
if err != nil {
return nil, fmt.Errorf("newNetstack: %w", err)
}
ns.ProcessLocalIPs = false
ns.ProcessSubnets = wrapNetstack
if err := ns.Start(); err != nil {
return nil, fmt.Errorf("failed to start netstack: %w", err)
}
return wgengine.NewWatchdog(eng), nil
}
type engineOrError struct {
Engine wgengine.Engine
Err error
}
engErrc := make(chan engineOrError)
t0 := time.Now()
go func() {
const ms = time.Millisecond
for try := 1; ; try++ {
logf("tailscaled: getting engine... (try %v)", try)
t1 := time.Now()
eng, err := getEngineRaw()
			d, dt := time.Since(t1).Round(ms), time.Since(t0).Round(ms)
if err != nil {
logf("tailscaled: engine fetch error (try %v) in %v (total %v, sysUptime %v): %v",
try, d, dt, windowsUptime().Round(time.Second), err)
} else {
if try > 1 {
logf("tailscaled: got engine on try %v in %v (total %v)", try, d, dt)
} else {
logf("tailscaled: got engine in %v", d)
}
}
timer := time.NewTimer(5 * time.Second)
engErrc <- engineOrError{eng, err}
if err == nil {
timer.Stop()
return
}
<-timer.C
}
}()
// getEngine is called by ipnserver to get the engine. It's
// not called concurrently and is not called again once it
// successfully returns an engine.
getEngine := func() (wgengine.Engine, error) {
if msg := os.Getenv("TS_DEBUG_WIN_FAIL"); msg != "" {
return nil, fmt.Errorf("pretending to be a service failure: %v", msg)
}
for {
res := <-engErrc
if res.Engine != nil {
return res.Engine, nil
}
if time.Since(t0) < time.Minute || windowsUptime() < 10*time.Minute {
// Ignore errors during early boot. Windows 10 auto logs in the GUI
// way sooner than the networking stack components start up.
// So the network will fail for a bit (and require a few tries) while
// the GUI is still fine.
continue
}
// Return nicer errors to users, annotated with logids, which helps
// when they file bugs.
return nil, fmt.Errorf("%w\n\nlogid: %v", res.Err, logid)
}
}
store, err := ipnserver.StateStore(statePathOrDefault(), logf)
if err != nil {
return err
}
ln, _, err := safesocket.Listen(args.socketpath, safesocket.WindowsLocalPort)
if err != nil {
return fmt.Errorf("safesocket.Listen: %v", err)
}
err = ipnserver.Run(ctx, logf, ln, store, logid, getEngine, ipnServerOpts())
if err != nil {
logf("ipnserver.Run: %v", err)
}
return err
}
func handleSessionChange(chgRequest svc.ChangeRequest) {
if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK {
return
}
log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.")
go func() {
err := dns.Flush()
if err != nil {
log.Printf("Error flushing DNS on session unlock: %v", err)
}
}()
}
var (
kernel32 = windows.NewLazySystemDLL("kernel32.dll")
getTickCount64Proc = kernel32.NewProc("GetTickCount64")
)
func windowsUptime() time.Duration {
r, _, _ := getTickCount64Proc.Call()
return time.Duration(int64(r)) * time.Millisecond
}
| isWindowsService |
types.ts | import * as chain from '../chain/types'
export type ComparisonType = {
total: number,
percentageOfMarketCap?: number
}
export type AssetType = {
id: chain.AssetType,
value: number,
marketCap: number,
compare?: (targetAsset: AssetType) => ComparisonType
}
export type WalletType = chain.WalletType & {
ratioOfMarketCap?: number
}
export type TableType = {
[K: string]: any,
asset?: {
[A in chain.AssetType]: AssetType
} | }
export type ContractStateType = {
assets: {
[A in chain.AssetType]?: number
},
table: TableType
}
export type ContractType = 'Contract' | 'CoreContract' | 'ContentContract' | 'ForgeContract' | 'PortfolioContract' | 'PredictionContract' | 'SenseContract' | 'SmithContract'
export type ContractInfo = {
id: string,
type: ContractType,
version: number
}
export type Dependencies = {
[id: string]: {
id?: string,
type: ContractType,
version: number
}
}
export type DependencyMap = {
[id: string]: string
}
export interface IContract {
info: ContractInfo,
assets: ContractStateType['assets'],
table: ContractStateType['table']
}
export type ConfigureParams = {
asset: chain.AssetType,
token?: chain.AssetType,
baseAsset?: number
} | |
SelectorSeRegistrar.ts | import * as Audio from '../../audios';
import * as Asset from '../../assets';
import * as Ui from '../../ui';
import { ISelector } from './ISelector';
/**
 * Every key specified here must be an audio key already registered in the cache.
 */
type SelectorSeConfig = {
rootGroupCanceled?: string;
groupCanceled?: string;
goNext?: string;
select?: string;
};
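/*
 * Illustrative usage: override only the "select" SE and keep the other
 * defaults ('se_ui_select_2' is a hypothetical cache key):
 *
 *   SelectorSeRegistrar.regist(selector, audioManager, {
 *     select: Asset.AssetCacheKey.audio('se_ui_select_2'),
 *   });
 */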
export class SelectorSeRegistrar {
private static readonly defaultR | Asset.AssetCacheKey.audio('se_ui_cancel');
private static readonly defaultGroupCanceled = Asset.AssetCacheKey.audio('se_ui_cancel');
private static readonly defaultGoNext = Asset.AssetCacheKey.audio('se_ui_curor_move');
private static readonly defaultSelect = Asset.AssetCacheKey.audio('se_ui_select');
static regist(selector: ISelector, audioManager: Audio.IAudioManager, config?: SelectorSeConfig): void {
config = config ? config : {};
config = {
rootGroupCanceled: config.rootGroupCanceled
? config.rootGroupCanceled
: SelectorSeRegistrar.defaultRootGroupCanceled,
groupCanceled: config.groupCanceled ? config.groupCanceled : SelectorSeRegistrar.defaultGroupCanceled,
goNext: config.goNext ? config.goNext : SelectorSeRegistrar.defaultGoNext,
select: config.select ? config.select : SelectorSeRegistrar.defaultSelect,
};
selector.on(Ui.SelectorEventNames.RootGroupCanceled, () => {
audioManager.playSe(config.rootGroupCanceled, {});
});
selector.on(Ui.SelectorEventNames.GroupCanceled, () => {
audioManager.playSe(config.groupCanceled, {});
});
selector.on(Ui.SelectorEventNames.GoNext, () => {
audioManager.playSe(config.goNext, {});
});
selector.on(Ui.SelectorEventNames.Select, () => {
audioManager.playSe(config.select, {});
});
}
}
| ootGroupCanceled = |
main.go | package main
import (
"errors"
"github.com/tumb1er/go-reloader/reloader"
"github.com/tumb1er/go-reloader/reloader/executable"
"github.com/urfave/cli"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
)
var Version = "0.2.0"
func watch(c *cli.Context) error {
var err error
var child string
r := reloader.NewReloader(c.App.Version)
if logfile := c.String("log"); logfile != "" {
if l, err := os.OpenFile(logfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644); err != nil {
return err
} else {
defer executable.CloseFile(l)
r.SetLogger(log.New(l, "", log.LstdFlags))
}
}
if stdout := c.String("stdout"); stdout != "" {
if w, err := os.OpenFile(stdout, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644); err != nil {
return err
} else {
defer executable.CloseFile(w)
r.SetStdout(w)
}
}
if stderr := c.String("stderr"); stderr != "" {
if w, err := os.OpenFile(stderr, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644); err != nil {
return err
} else {
defer executable.CloseFile(w)
r.SetStderr(w)
}
}
r.SetInterval(c.Duration("interval"))
if err := r.SetStaging(c.String("staging")); err != nil {
return err
}
args := c.Args()
if len(args) == 0 {
return errors.New("no child executable passed")
}
if child, err = filepath.Abs(args[0]); err != nil {
return err
}
if c.Bool("tmp") {
// Copy child executable to temporary file
if child, err = copyToTemp(child); err != nil {
return err
}
defer func() {
if err := os.RemoveAll(filepath.Dir(child)); err != nil {
panic(err)
}
}()
}
if c.Bool("tree") {
r.SetTerminateTree(true)
}
if c.Bool("restart") {
r.SetRestart(true)
}
r.SetChild(child, args[1:]...)
service := c.String("service")
update := c.String("update")
if service == "" {
if update != "" {
return r.Update(update, true)
} else {
return r.Run()
}
} else {
if update != "" {
if err := r.Update(update, false); err != nil {
return err
}
return r.RestartDaemon(service)
}
return r.Daemonize()
}
}
func copyToTemp(child string) (string, error) {
	basename := filepath.Base(child)
	dir, err := ioutil.TempDir("", strings.Split(basename, ".")[0])
	if err != nil {
		return "", err
	}
	r, err := os.Open(child)
if err != nil {
return "", err
}
defer executable.CloseFile(r)
dst := filepath.Join(dir, basename)
w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, 0751)
if err != nil {
return "", err
}
defer executable.CloseFile(w)
if _, err := io.Copy(w, r); err != nil {
return "", err
}
if err := reloader.SetExecutable(dst); err != nil {
return "", err
}
return dst, nil
}
func main() | {
app := cli.NewApp()
app.Name = "reloader"
app.Usage = "reloads an executable after an update"
app.Version = Version
app.ArgsUsage = "<cmd> [<arg>...]"
app.UsageText = "reloader [options...] <cmd> [<arg>...]"
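	// Illustrative invocation (not from the project docs):
	//   reloader --interval=30s --staging=staging --restart ./myapp --port 8080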
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "update",
Usage: "perform update of executable and exit",
},
&cli.DurationFlag{
Name: "interval",
Value: time.Minute,
Usage: "update check interval",
},
&cli.StringFlag{
Name: "staging",
Value: "staging",
Usage: "staging directory path",
},
&cli.StringFlag{
Name: "service",
Usage: "daemon/service name",
},
&cli.StringFlag{
Name: "log",
Value: "",
Usage: "reloader log file",
},
&cli.StringFlag{
Name: "stdout",
Value: "",
Usage: "child process stdout file",
},
&cli.StringFlag{
Name: "stderr",
Value: "",
Usage: "child process stderr file",
},
&cli.BoolFlag{
Name: "tmp",
Usage: "copy executable binary to temporary directory before start",
},
&cli.BoolFlag{
Name: "tree",
Usage: "terminate child process and it's process tree",
},
&cli.BoolFlag{
Name: "restart",
Usage: "restart child process after exit",
},
}
app.Action = watch
err := app.Run(os.Args)
if err != nil {
log.Fatal(err)
}
} |
|
handle_test.go | package errorfmt
import (
"errors"
"testing"
)
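// These tests pin down the assumed contract of Handlef(format, &err):
// it wraps a non-nil *err in place using the %w verb, leaves nil errors
// untouched, and panics when the error-pointer argument is missing or nil.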
func TestNil(t *testing.T) {
var err error
Handlef("foo: %w", &err)
if err != nil {
t.Errorf("err == %v, want nil", err)
}
}
func TestWrap(t *testing.T) |
func TestPanicMissingArg(t *testing.T) {
defer func() {
recover()
}()
Handlef("foo")
t.Errorf("want panic")
}
func TestPanicNil(t *testing.T) {
defer func() {
recover()
}()
var badPointer *error
Handlef("foo", badPointer)
t.Errorf("want panic")
}
| {
err := errors.New("value")
Handlef("foo: %w", &err)
g := err.Error()
w := "foo: value"
if g != w {
t.Errorf("g == %q, want %q", g, w)
}
} |
oauth2.py | # -*- coding: utf-8 -*-
from __future__ import print_function
__all__ = [
'is_token_expired',
'SpotifyClientCredentials',
'SpotifyOAuth',
'SpotifyOauthError'
]
import base64
import json
import os
import sys
import time
import requests
# Workaround to support both python 2 & 3
import six
import six.moves.urllib.parse as urllibparse
class SpotifyOauthError(Exception):
pass
def _make_authorization_headers(client_id, client_secret):
auth_header = base64.b64encode(
six.text_type(
client_id +
':' +
client_secret).encode('ascii'))
return {'Authorization': 'Basic %s' % auth_header.decode('ascii')}
def is_token_expired(token_info):
now = int(time.time())
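    # Treat the token as expired 60 seconds early so it can't lapse
    # mid-request.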
return token_info['expires_at'] - now < 60
class SpotifyClientCredentials(object):
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token'
def __init__(self, client_id=None, client_secret=None, proxies=None):
"""
You can either provide a client_id and client_secret to the
constructor or set SPOTIPY_CLIENT_ID and SPOTIPY_CLIENT_SECRET
environment variables
"""
if not client_id:
client_id = os.getenv('SPOTIPY_CLIENT_ID')
if not client_secret:
client_secret = os.getenv('SPOTIPY_CLIENT_SECRET')
if not client_id:
raise SpotifyOauthError('No client id')
if not client_secret:
raise SpotifyOauthError('No client secret')
self.client_id = client_id
self.client_secret = client_secret
self.token_info = None
self.proxies = proxies
def get_access_token(self):
"""
If a valid access token is in memory, returns it
        Else fetches a new token and returns it
"""
if self.token_info and not self.is_token_expired(self.token_info):
return self.token_info['access_token']
token_info = self._request_access_token()
token_info = self._add_custom_values_to_token_info(token_info)
self.token_info = token_info
return self.token_info['access_token']
def _request_access_token(self):
"""Gets client credentials access token """
payload = {'grant_type': 'client_credentials'}
headers = _make_authorization_headers(
self.client_id, self.client_secret)
response = requests.post(self.OAUTH_TOKEN_URL, data=payload,
headers=headers, verify=True,
proxies=self.proxies)
if response.status_code != 200:
raise SpotifyOauthError(response.reason)
token_info = response.json()
return token_info
def is_token_expired(self, token_info):
return is_token_expired(token_info)
def _add_custom_values_to_token_info(self, token_info):
"""
Store some values that aren't directly provided by a Web API
response.
"""
token_info['expires_at'] = int(time.time()) + token_info['expires_in']
return token_info
class SpotifyOAuth(object):
'''
Implements Authorization Code Flow for Spotify's OAuth implementation.
'''
OAUTH_AUTHORIZE_URL = 'https://accounts.spotify.com/authorize'
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token'
def __init__(self, client_id, client_secret, redirect_uri,
state=None, scope=None, cache_path=None, proxies=None):
'''
Creates a SpotifyOAuth object
Parameters:
- client_id - the client id of your app
- client_secret - the client secret of your app
- redirect_uri - the redirect URI of your app
- state - security state
- scope - the desired scope of the request
- cache_path - path to location to save tokens
'''
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.state = state
self.cache_path = cache_path
self.scope = self._normalize_scope(scope)
self.proxies = proxies
def get_cached_token(self):
''' Gets a cached auth token
'''
token_info = None
if self.cache_path:
try:
f = open(self.cache_path)
token_info_string = f.read()
f.close()
token_info = json.loads(token_info_string)
# if scopes don't match, then bail
if 'scope' not in token_info or not self._is_scope_subset(
self.scope, token_info['scope']):
return None
if self.is_token_expired(token_info):
token_info = self.refresh_access_token(
token_info['refresh_token'])
except IOError:
pass
return token_info
def _save_token_info(self, token_info):
if self.cache_path:
try:
f = open(self.cache_path, 'w')
f.write(json.dumps(token_info))
f.close()
except IOError:
self._warn("couldn't write token cache to " + self.cache_path)
def _is_scope_subset(self, needle_scope, haystack_scope):
needle_scope = set(needle_scope.split()) if needle_scope else set()
haystack_scope = set(
haystack_scope.split()) if haystack_scope else set()
return needle_scope <= haystack_scope
def is_token_expired(self, token_info):
return is_token_expired(token_info)
def get_authorize_url(self, state=None, show_dialog=False):
""" Gets the URL to use to authorize this app
"""
payload = {'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': self.redirect_uri}
if self.scope:
payload['scope'] = self.scope
if state is None:
state = self.state
if state is not None:
payload['state'] = state
if show_dialog:
payload['show_dialog'] = True
urlparams = urllibparse.urlencode(payload)
return "%s?%s" % (self.OAUTH_AUTHORIZE_URL, urlparams)
def parse_response_code(self, url):
""" Parse the response code in the given response url | Parameters:
- url - the response url
"""
try:
return url.split("?code=")[1].split("&")[0]
except IndexError:
return None
def _make_authorization_headers(self):
return _make_authorization_headers(self.client_id, self.client_secret)
def get_access_token(self, code):
""" Gets the access token for the app given the code
Parameters:
- code - the response code
"""
payload = {'redirect_uri': self.redirect_uri,
'code': code,
'grant_type': 'authorization_code'}
if self.scope:
payload['scope'] = self.scope
if self.state:
payload['state'] = self.state
headers = self._make_authorization_headers()
response = requests.post(self.OAUTH_TOKEN_URL, data=payload,
headers=headers, verify=True,
proxies=self.proxies)
if response.status_code != 200:
raise SpotifyOauthError(response.reason)
token_info = response.json()
token_info = self._add_custom_values_to_token_info(token_info)
self._save_token_info(token_info)
return token_info
def _normalize_scope(self, scope):
if scope:
scopes = sorted(scope.split())
return ' '.join(scopes)
else:
return None
def refresh_access_token(self, refresh_token):
payload = {'refresh_token': refresh_token,
'grant_type': 'refresh_token'}
headers = self._make_authorization_headers()
response = requests.post(self.OAUTH_TOKEN_URL, data=payload,
headers=headers, proxies=self.proxies)
if response.status_code != 200:
if False: # debugging code
print('headers', headers)
print('request', response.url)
self._warn("couldn't refresh token: code:%d reason:%s"
% (response.status_code, response.reason))
return None
token_info = response.json()
token_info = self._add_custom_values_to_token_info(token_info)
if 'refresh_token' not in token_info:
token_info['refresh_token'] = refresh_token
self._save_token_info(token_info)
return token_info
def _add_custom_values_to_token_info(self, token_info):
'''
Store some values that aren't directly provided by a Web API
response.
'''
token_info['expires_at'] = int(time.time()) + token_info['expires_in']
token_info['scope'] = self.scope
return token_info
def _warn(self, msg):
print('warning:' + msg, file=sys.stderr) | |
verifier.rs | use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use crate::plonk::polynomials::*;
use crate::worker::Worker;
use crate::plonk::domains::*;
use std::marker::PhantomData;
use super::cs::*;
use super::keys::{Proof, VerificationKey};
use crate::source::{DensityTracker, DensityTrackerersChain};
use crate::kate_commitment::*;
use super::utils::*;
use crate::plonk::commitments::transcript::*;
pub fn verify<E: Engine, P: PlonkConstraintSystemParams<E>, T: Transcript<E::Fr>>(
proof: &Proof<E, P>,
verification_key: &VerificationKey<E, P>,
) -> Result<bool, SynthesisError> | {
use crate::pairing::CurveAffine;
use crate::pairing::CurveProjective;
assert!(P::CAN_ACCESS_NEXT_TRACE_STEP);
let mut transcript = T::new();
if proof.n != verification_key.n {
return Err(SynthesisError::MalformedVerifyingKey);
}
if proof.num_inputs != verification_key.num_inputs {
return Err(SynthesisError::MalformedVerifyingKey);
}
let n = proof.n;
let required_domain_size = n + 1;
if required_domain_size.is_power_of_two() == false {
return Err(SynthesisError::MalformedVerifyingKey);
}
let domain = Domain::<E::Fr>::new_for_size(required_domain_size as u64)?;
let selector_q_const_index = P::STATE_WIDTH + 1;
let selector_q_m_index = P::STATE_WIDTH;
let non_residues = make_non_residues::<E::Fr>(P::STATE_WIDTH - 1, &domain);
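    // The k_i coset shifts (non-residues) place each wire column on a
    // distinct coset k_i * H of the evaluation subgroup H for the
    // copy-permutation argument (the `beta*k_1*z` factor in the comments below).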
// Commit public inputs
for inp in proof.input_values.iter() {
transcript.commit_field_element(&inp);
}
// Commit wire values
for w in proof.wire_commitments.iter() {
commit_point_as_xy::<E, _>(&mut transcript, &w);
}
let beta = transcript.get_challenge();
let gamma = transcript.get_challenge();
// commit grand product
commit_point_as_xy::<E, _>(&mut transcript, &proof.grand_product_commitment);
let alpha = transcript.get_challenge();
// Commit parts of the quotient polynomial
for w in proof.quotient_poly_commitments.iter() {
commit_point_as_xy::<E, _>(&mut transcript, &w);
}
let z = transcript.get_challenge();
let mut z_by_omega = z;
z_by_omega.mul_assign(&domain.generator);
// commit every claimed value
for el in proof.wire_values_at_z.iter() {
transcript.commit_field_element(el);
}
for el in proof.wire_values_at_z_omega.iter() {
transcript.commit_field_element(el);
}
for el in proof.permutation_polynomials_at_z.iter() {
transcript.commit_field_element(el);
}
transcript.commit_field_element(&proof.quotient_polynomial_at_z);
transcript.commit_field_element(&proof.linearization_polynomial_at_z);
// do the actual check for relationship at z
{
let mut lhs = proof.quotient_polynomial_at_z;
let vanishing_at_z = evaluate_vanishing_for_size(&z, required_domain_size as u64);
lhs.mul_assign(&vanishing_at_z);
let mut quotient_linearization_challenge = E::Fr::one();
let mut rhs = proof.linearization_polynomial_at_z;
// add public inputs
{
for (idx, input) in proof.input_values.iter().enumerate() {
let mut tmp = evaluate_lagrange_poly_at_point(idx, &domain, z)?;
tmp.mul_assign(&input);
rhs.add_assign(&tmp);
}
}
quotient_linearization_challenge.mul_assign(&alpha);
        // - \alpha * (a + perm_a(z)*beta + gamma) * (...) * (d + gamma) * z(z*omega)
let mut z_part = proof.grand_product_at_z_omega;
for (w, p) in proof.wire_values_at_z.iter().zip(proof.permutation_polynomials_at_z.iter()) {
let mut tmp = *p;
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&w);
z_part.mul_assign(&tmp);
}
// last poly value and gamma
let mut tmp = gamma;
tmp.add_assign(&proof.wire_values_at_z.iter().rev().next().unwrap());
z_part.mul_assign(&tmp);
z_part.mul_assign("ient_linearization_challenge);
rhs.sub_assign(&z_part);
quotient_linearization_challenge.mul_assign(&alpha);
// - L_0(z) * \alpha^2
let mut l_0_at_z = evaluate_l0_at_point(required_domain_size as u64, z)?;
l_0_at_z.mul_assign("ient_linearization_challenge);
rhs.sub_assign(&l_0_at_z);
if lhs != rhs {
return Ok(false);
}
}
let v = transcript.get_challenge();
commit_point_as_xy::<E, _>(&mut transcript, &proof.opening_at_z_proof);
commit_point_as_xy::<E, _>(&mut transcript, &proof.opening_at_z_omega_proof);
let u = transcript.get_challenge();
let z_in_domain_size = z.pow(&[required_domain_size as u64]);
    // First reconstruct the linearization polynomial from homomorphic
    // commitments, and simultaneously add (through the separation scalar "u")
    // the part for the opening of z(X) at z*omega.
// calculate the power to add z(X) commitment that is opened at x*omega
// it's r(X) + witness + all permutations + 1
let v_power_for_standalone_z_x_opening = 1 + 1 + P::STATE_WIDTH + (P::STATE_WIDTH-1);
let virtual_commitment_for_linearization_poly = {
let mut r = E::G1::zero();
// main gate. Does NOT include public inputs
{
// Q_const(x)
r.add_assign_mixed(&verification_key.selector_commitments[selector_q_const_index]);
for i in 0..P::STATE_WIDTH {
// Q_k(X) * K(z)
r.add_assign(&verification_key.selector_commitments[i].mul(proof.wire_values_at_z[i].into_repr()));
}
// Q_m(X) * A(z) * B(z)
let mut scalar = proof.wire_values_at_z[0];
scalar.mul_assign(&proof.wire_values_at_z[1]);
r.add_assign(&verification_key.selector_commitments[selector_q_m_index].mul(scalar.into_repr()));
// Q_d_next(X) * D(z*omega)
r.add_assign(&verification_key.next_step_selector_commitments[0].mul(proof.wire_values_at_z_omega[0].into_repr()));
}
// v * [alpha * (a + beta*z + gamma)(b + beta*k_1*z + gamma)()() * z(X) -
// - \alpha * (a*perm_a(z)*beta + gamma)()()*beta*z(z*omega) * perm_d(X) +
// + alpha^2 * L_0(z) * z(X) ] +
// + v^{P} * u * z(X)
// and join alpha^2 * L_0(z) and v^{P} * u into the first term containing z(X)
// [alpha * (a + beta*z + gamma)(b + beta*k_1*z + gamma)()() + alpha^2 * L_0(z)] * z(X)
let grand_product_part_at_z = {
let mut scalar = E::Fr::one();
// permutation part
for (wire, non_res) in proof.wire_values_at_z.iter()
.zip(Some(E::Fr::one()).iter().chain(&non_residues))
{
let mut tmp = z;
tmp.mul_assign(&non_res);
tmp.mul_assign(&beta);
tmp.add_assign(&wire);
tmp.add_assign(&gamma);
scalar.mul_assign(&tmp);
}
scalar.mul_assign(&alpha);
let l_0_at_z = evaluate_l0_at_point(required_domain_size as u64, z)?;
// + L_0(z) * alpha^2
let mut tmp = l_0_at_z;
tmp.mul_assign(&alpha);
tmp.mul_assign(&alpha);
scalar.add_assign(&tmp);
// * v
// scalar.mul_assign(&v);
scalar
};
// v^{P} * u * z(X)
let grand_product_part_at_z_omega = {
// + v^{P} * u
let mut tmp = v.pow(&[v_power_for_standalone_z_x_opening as u64]);
tmp.mul_assign(&u);
tmp
};
// \alpha * (a*perm_a(z)*beta + gamma)()()*beta*z(z*omega) * perm_d(X)
let last_permutation_part_at_z = {
let mut scalar = E::Fr::one();
// permutation part
for (wire, perm_at_z) in proof.wire_values_at_z.iter()
.zip(&proof.permutation_polynomials_at_z)
{
let mut tmp = beta;
tmp.mul_assign(&perm_at_z);
tmp.add_assign(&wire);
tmp.add_assign(&gamma);
scalar.mul_assign(&tmp);
}
scalar.mul_assign(&beta);
scalar.mul_assign(&proof.grand_product_at_z_omega);
scalar.mul_assign(&alpha);
// scalar.mul_assign(&v);
scalar
};
{
let mut tmp = proof.grand_product_commitment.mul(grand_product_part_at_z.into_repr());
tmp.sub_assign(&verification_key.permutation_commitments.last().unwrap().mul(last_permutation_part_at_z.into_repr()));
r.add_assign(&tmp);
}
r.mul_assign(v.into_repr());
r.add_assign(&proof.grand_product_commitment.mul(grand_product_part_at_z_omega.into_repr()));
r
};
// now check the openings
let mut multiopening_challenge = E::Fr::one();
// reassemble a homomorphic commitment
// aggregate t(X) from parts
let mut commitments_aggregation = proof.quotient_poly_commitments[0].into_projective();
let mut current = z_in_domain_size;
for part in proof.quotient_poly_commitments.iter().skip(1) {
commitments_aggregation.add_assign(&part.mul(current.into_repr()));
current.mul_assign(&z_in_domain_size);
}
// do the same for linearization
multiopening_challenge.mul_assign(&v); // to preserve sequence
commitments_aggregation.add_assign(&virtual_commitment_for_linearization_poly); // v^1 is contained inside
debug_assert_eq!(multiopening_challenge, v.pow(&[1 as u64]));
// do the same for wires
for com in proof.wire_commitments.iter() {
multiopening_challenge.mul_assign(&v); // v^{1+STATE_WIDTH}
let tmp = com.mul(multiopening_challenge.into_repr());
commitments_aggregation.add_assign(&tmp);
}
debug_assert_eq!(multiopening_challenge, v.pow(&[1 + 4 as u64]));
// and for all permutation polynomials except the last one
assert_eq!(verification_key.permutation_commitments.len(), proof.permutation_polynomials_at_z.len() + 1);
for com in verification_key.permutation_commitments[0..(verification_key.permutation_commitments.len() - 1)].iter() {
multiopening_challenge.mul_assign(&v); // v^{1+STATE_WIDTH + STATE_WIDTH - 1}
let tmp = com.mul(multiopening_challenge.into_repr());
commitments_aggregation.add_assign(&tmp);
}
multiopening_challenge.mul_assign(&v); // we skip z(X) at z
// aggregate last wire commitment (that is opened at z*omega)
// using multiopening challenge and u
multiopening_challenge.mul_assign(&v);
let mut scalar = multiopening_challenge;
scalar.mul_assign(&u);
commitments_aggregation.add_assign(&proof.wire_commitments.last().unwrap().mul(scalar.into_repr()));
// subtract the opening value using one multiplication
let mut multiopening_challenge_for_values = E::Fr::one();
let mut aggregated_value = proof.quotient_polynomial_at_z;
for value_at_z in Some(proof.linearization_polynomial_at_z).iter()
.chain(&proof.wire_values_at_z)
.chain(&proof.permutation_polynomials_at_z)
{
multiopening_challenge_for_values.mul_assign(&v);
let mut tmp = *value_at_z;
tmp.mul_assign(&multiopening_challenge_for_values);
aggregated_value.add_assign(&tmp);
}
// add parts that are opened at z*omega using `u`
{
multiopening_challenge_for_values.mul_assign(&v);
let mut scalar = multiopening_challenge_for_values;
scalar.mul_assign(&u);
let mut tmp = proof.grand_product_at_z_omega;
tmp.mul_assign(&scalar);
aggregated_value.add_assign(&tmp);
}
{
multiopening_challenge_for_values.mul_assign(&v);
let mut scalar = multiopening_challenge_for_values;
scalar.mul_assign(&u);
let mut tmp = proof.wire_values_at_z_omega[0];
tmp.mul_assign(&scalar);
aggregated_value.add_assign(&tmp);
}
assert_eq!(multiopening_challenge, multiopening_challenge_for_values);
// make equivalent of (f(x) - f(z))
commitments_aggregation.sub_assign(&E::G1Affine::one().mul(aggregated_value.into_repr()));
// now check that
// e(proof_for_z + u*proof_for_z_omega, g2^x) = e(z*proof_for_z + z*omega*u*proof_for_z_omega + (aggregated_commitment - aggregated_opening), g2^1)
// with a corresponding change of sign
let mut pair_with_generator = commitments_aggregation;
pair_with_generator.add_assign(&proof.opening_at_z_proof.mul(z.into_repr()));
let mut scalar = z_by_omega;
scalar.mul_assign(&u);
pair_with_generator.add_assign(&proof.opening_at_z_omega_proof.mul(scalar.into_repr()));
let mut pair_with_x = proof.opening_at_z_omega_proof.mul(u.into_repr());
pair_with_x.add_assign_mixed(&proof.opening_at_z_proof);
pair_with_x.negate();
let valid = E::final_exponentiation(
&E::miller_loop(&[
(&pair_with_generator.into_affine().prepare(), &verification_key.g2_elements[0].prepare()),
(&pair_with_x.into_affine().prepare(), &verification_key.g2_elements[1].prepare())
])
).unwrap() == E::Fqk::one();
Ok(valid)
} |
|
generator.rs | use std::collections::{BTreeMap, BTreeSet};
use std::iter;
use rand::Rng;
use serde::{Deserialize, Serialize};
use crate::num_basic::Field;
use crate::renderer::Renderer;
use crate::symbol::{shortest_symbolifications, SymbolTable, SymbolTableEntryId};
use crate::vecutils::{pad, Reversible};
use crate::weighted_sampler::WeightedSampler;
#[derive(Debug, PartialEq)]
pub enum GenerationError {
GenericError(String),
}
impl GenerationError {
pub fn generic_error<T: Into<String>>(v: T) -> GenerationError {
GenerationError::GenericError(v.into())
}
}
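// Illustrative example: with words = [("ab", 1.0), ("bc", 1.0)] and n = 2,
// the returned map holds the 1-grams "a", "c" (weight 1.0), "b" (weight 2.0,
// seen in both words) and the 2-grams "ab", "bc" (weight 1.0 each).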
pub fn create_ngrams<T, W, D>(words: &[(W, D)], n: usize) -> BTreeMap<Vec<T>, D>
where
D: Field,
W: AsRef<[T]>,
T: Clone + Ord,
{
assert!(n > 0);
let mut ngrams = BTreeMap::new();
//TODO: Does the fact that we get n-grams of the form ^^ cause a problem?
for nn in 1..=n {
for (s, w) in words {
let s = s.as_ref();
for ww in s.windows(nn) {
*ngrams.entry(ww.to_vec()).or_insert_with(D::zero) += *w;
}
}
}
ngrams
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TransitionTable<T, D>
where
T: Ord,
{
n: usize,
weights_table: BTreeMap<Vec<T>, WeightedSampler<T, D>>,
}
impl<T, D> TransitionTable<T, D>
where
T: Ord + Clone,
D: Field,
{
pub fn new(counts: BTreeMap<Vec<T>, D>, n: usize) -> TransitionTable<T, D> {
let mut weights_table: BTreeMap<Vec<T>, WeightedSampler<T, D>> = BTreeMap::new();
for (v, w) in counts.into_iter() {
weights_table
.entry(v[0..v.len() - 1].to_vec())
.or_default()
.add_symbol_with_weight(v[v.len() - 1].clone(), w);
}
TransitionTable { n, weights_table }
}
pub fn to_ngrams_and_weights(&self) -> BTreeMap<Vec<T>, D> {
let mut result = BTreeMap::new();
for (k, ws) in &self.weights_table {
for (s, w) in &ws.counts {
let mut v = k.clone();
v.push(s.clone());
result.insert(v, *w);
}
}
result
}
pub fn sample<R: Rng>(&self, key: &[T], katz_coefficient: Option<D>, rng: &mut R) -> Option<T> {
match katz_coefficient {
Some(katz_coefficient) => {
let mut key = key;
// Until we get a table with enough weight we shrink our key down.
loop {
let m = self.weights_table.get(&key.to_vec());
if let Some(m) = m {
if m.total > katz_coefficient {
if let Some(v) = m.sample_next_symbol(rng) {
return Some(v);
}
}
}
if key.is_empty() {
return None;
}
key = &key[1..];
}
}
None => {
let m = self.weights_table.get(&key.to_vec())?;
m.sample_next_symbol(rng)
}
}
}
pub fn context_length(&self) -> usize {
self.n - 1
}
pub fn get_window_logp(&self, w: &[T], katz_coefficient: Option<D>) -> Option<f32> {
match katz_coefficient {
None => self
.weights_table
.get(&w[0..self.n - 1].to_vec())
.and_then(|ws| ws.logp(&w[self.n - 1])),
Some(katz_coefficient) => {
let mut prefix = &w[0..self.n - 1];
let last = &w[self.n - 1];
// Until we get a table with enough weight we shrink our key down.
loop {
let m = self.weights_table.get(&prefix.to_vec());
if let Some(m) = m {
if m.total > katz_coefficient {
return m.logp(last);
}
}
if prefix.is_empty() {
return None;
}
prefix = &prefix[1..];
}
}
}
}
pub fn calculate_logp(&self, v: &[T], katz_coefficient: Option<D>) -> f32 {
let mut sum_log_p = 0.0;
for w in v.windows(self.n) {
let log_p = self.get_window_logp(w, katz_coefficient);
match log_p {
Some(log_p) => {
sum_log_p += log_p;
}
None => return -f32::INFINITY,
}
}
sum_log_p
}
}
impl<T> TransitionTable<T, f32>
where
T: Ord + Clone,
{
fn map_probabilities<F>(&self, f: F) -> TransitionTable<T, f32>
where
F: Fn(f32) -> f32 + Copy,
{
let mut weights_table: BTreeMap<Vec<T>, WeightedSampler<T, f32>> = BTreeMap::new();
let n = self.n;
for (k, v) in &self.weights_table {
weights_table.insert(k.clone(), v.map_probabilities(f));
}
TransitionTable { n, weights_table }
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackedKeyCollection<T, W>
where
T: PartialEq,
{
pub key_length: usize,
pub prefixes: Vec<T>,
pub prefix_counts: Vec<usize>,
pub last: Vec<T>,
pub weights: Vec<W>,
}
impl<T, W> PackedKeyCollection<T, W>
where
T: PartialEq + Copy,
W: Copy,
{
fn last_prefix(&self) -> Option<&[T]> {
let prefix_length = self.key_length - 1;
        // Note: rely on prefix_counts rather than prefixes here, since a
        // key_length of 1 has a zero-length prefix, in which case
        // prefixes.len() / (key_length - 1) would divide by zero.
let n_prefixes = self.prefix_counts.len();
if n_prefixes > 0 {
let last_prefix_start = (n_prefixes - 1) * prefix_length;
let last_prefix_end = n_prefixes * prefix_length;
Some(&self.prefixes[last_prefix_start..last_prefix_end])
} else {
None
}
}
fn add_entry(&mut self, key: &[T], weight: W) {
assert_eq!(key.len(), self.key_length);
let prefix_length = self.key_length - 1;
let last_prefix = self.last_prefix();
let key_prefix = &key[0..prefix_length];
// If we match the last prefix we need to bump the count
// otherwise we need to register a new prefix
if last_prefix == Some(&key[0..prefix_length]) {
*self.prefix_counts.last_mut().unwrap() += 1;
} else {
for p in key_prefix {
self.prefixes.push(*p);
}
self.prefix_counts.push(1);
}
// Now we need to add the weight and last part of the key
self.weights.push(weight);
self.last.push(key[self.key_length - 1]);
}
fn new(key_length: usize) -> Self
where
T: Clone,
{
PackedKeyCollection {
key_length,
prefixes: vec![],
prefix_counts: vec![],
last: vec![],
weights: vec![],
}
}
fn unpack(&self) -> Vec<(Vec<T>, W)> {
let prefix_length = self.key_length - 1;
let mut result = vec![];
let mut ikey = 0;
for prefix_index in 0..self.prefix_counts.len() {
let prefix_start = prefix_index * prefix_length;
let prefix_end = (prefix_index + 1) * prefix_length;
let prefix = &self.prefixes[prefix_start..prefix_end];
for _i in 0..self.prefix_counts[prefix_index] {
let key: Vec<T> = prefix
.iter()
.chain(std::iter::once(&self.last[ikey]))
.cloned()
.collect();
let weight = self.weights[ikey];
result.push((key, weight));
ikey += 1;
}
}
result
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneratorReprInternal<T, D, ST>
where
ST: PartialEq,
T: Ord + Clone,
D: Clone,
{
pub symbol_table: SymbolTable<T>,
pub key_collections: BTreeMap<usize, PackedKeyCollection<ST, D>>,
pub n: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum GeneratorRepr<T, D>
where
T: Ord + Clone,
D: Clone,
{
GeneratorReprU8(GeneratorReprInternal<T, D, u8>),
GeneratorReprU16(GeneratorReprInternal<T, D, u16>),
GeneratorReprRaw(GeneratorReprInternal<T, D, SymbolTableEntryId>),
}
#[derive(Debug)]
pub struct WeightRange {
pub min_weight: f32,
pub max_weight: f32,
pub mean_weight: f32,
pub count: usize,
}
impl WeightRange {
pub fn update(&mut self, w: f32) {
if w < self.min_weight {
self.min_weight = w;
}
if w > self.max_weight {
self.max_weight = w;
}
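// Incremental mean: recover the running sum, add the new weight,
// then divide by the updated count.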
let mut sum = self.mean_weight * (self.count as f32);
sum += w;
self.count += 1;
self.mean_weight = sum / (self.count as f32);
}
pub fn new(w: f32) -> WeightRange {
WeightRange {
min_weight: w,
max_weight: w,
mean_weight: w,
count: 1,
}
}
}
#[derive(Debug)]
pub struct GeneratorInfo {
pub ngram_weights_by_length: BTreeMap<usize, WeightRange>,
pub prefix_weights_by_length: BTreeMap<usize, WeightRange>,
pub ngram_weight_summaries_by_length: BTreeMap<usize, WeightSummary>,
}
impl GeneratorInfo {
pub fn add_ngram_weight(&mut self, key_length: usize, w: f32) {
self.ngram_weights_by_length
.entry(key_length)
.and_modify(|wr| wr.update(w))
.or_insert_with(|| WeightRange::new(w));
}
pub fn add_prefix_weight(&mut self, key_length: usize, w: f32) {
self.prefix_weights_by_length
.entry(key_length)
.and_modify(|wr| wr.update(w))
.or_insert_with(|| WeightRange::new(w));
}
pub fn new() -> GeneratorInfo {
GeneratorInfo {
ngram_weights_by_length: BTreeMap::new(),
prefix_weights_by_length: BTreeMap::new(),
ngram_weight_summaries_by_length: BTreeMap::new(),
}
}
}
impl Default for GeneratorInfo {
fn default() -> Self {
Self::new()
}
}
#[derive(Debug)]
pub struct WeightQuantile {
pub q: f64,
pub w: f64,
pub sym: Vec<SymbolTableEntryId>,
}
#[derive(Debug)]
pub struct WeightSummary {
pub quantiles: Vec<WeightQuantile>,
}
impl WeightSummary {
pub fn from_weights_and_quantiles(
weights: &[(f64, Vec<SymbolTableEntryId>)],
quantiles: &[f64],
) -> WeightSummary {
let n = weights.len();
let quantiles: Vec<WeightQuantile> = quantiles
.iter()
.map(|&q| {
let i = ((n as f64) * q).round();
let idx = if i <= 0.0 {
0
} else if i >= weights.len() as f64 {
weights.len() - 1
} else {
i as usize
};
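// `idx` is the nearest-rank index clamped to [0, n - 1], so q = 0.0 and
// q = 1.0 map to the extreme weights.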
let (w, sym) = weights[idx].clone();
WeightQuantile { q, w, sym }
})
.collect();
WeightSummary { quantiles }
}
}
// TODO: This serializes "badly" - there's a lot of redundancy.
// It should just be the symbol_table (which tends to be very small)
// plus a list of the symbol-id triples + weights. The transition tables
// can be rebuilt from them quickly and easily.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(from = "GeneratorRepr<T,D>")]
#[serde(into = "GeneratorRepr<T,D>")]
pub struct Generator<T, D>
where
T: Ord + Clone,
D: Field,
{
pub symbol_table: SymbolTable<T>,
transition_table: TransitionTable<SymbolTableEntryId, D>,
rev_transition_table: TransitionTable<SymbolTableEntryId, D>,
}
impl<T, D, P> From<GeneratorReprInternal<T, D, P>> for Generator<T, D>
where
T: Ord + Clone,
D: Field,
P: PackedSymbolId + Copy + PartialEq,
{
fn from(repr: GeneratorReprInternal<T, D, P>) -> Self {
let mut ngrams: BTreeMap<Vec<SymbolTableEntryId>, D> = BTreeMap::new();
for (_key_size, packed_keys) in repr.key_collections {
for (k, w) in packed_keys.unpack() {
let kk = k.into_iter().map(|k| k.unpack()).collect();
ngrams.insert(kk, w);
}
}
Generator::from_ngrams(repr.symbol_table, ngrams)
}
}
impl<T, D> From<GeneratorRepr<T, D>> for Generator<T, D>
where
T: Ord + Clone,
D: Field,
{
fn from(repr: GeneratorRepr<T, D>) -> Self {
match repr {
GeneratorRepr::GeneratorReprU8(r) => Self::from(r),
GeneratorRepr::GeneratorReprU16(r) => Self::from(r),
GeneratorRepr::GeneratorReprRaw(r) => Self::from(r),
}
}
}
pub trait PackedSymbolId {
fn default() -> Self;
fn pack(v: SymbolTableEntryId) -> Self;
fn unpack(self) -> SymbolTableEntryId;
}
impl PackedSymbolId for u8 {
fn default() -> Self {
0
}
fn pack(v: SymbolTableEntryId) -> Self {
assert!(v.0 <= u8::MAX as u64);
v.0 as u8
}
fn unpack(self) -> SymbolTableEntryId {
SymbolTableEntryId(self as u64)
}
}
impl PackedSymbolId for u16 {
fn default() -> Self {
0
}
fn pack(v: SymbolTableEntryId) -> Self {
assert!(v.0 <= u16::MAX as u64);
v.0 as u16
}
fn unpack(self) -> SymbolTableEntryId {
SymbolTableEntryId(self as u64)
}
}
impl PackedSymbolId for SymbolTableEntryId {
fn default() -> Self {
SymbolTableEntryId(0)
}
fn pack(v: SymbolTableEntryId) -> Self {
v
}
fn unpack(self) -> SymbolTableEntryId {
self
}
}
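// Illustrative sketch (not part of the original source): packing is a
// lossless narrowing for ids below the chosen width. Assumes
// SymbolTableEntryId implements PartialEq and Debug, which the Ord-keyed
// maps and Debug derives elsewhere in this file already require.
#[cfg(test)]
mod packed_symbol_id_sketch {
use super::*;
#[test]
fn narrow_ids_round_trip() {
assert_eq!(
<u8 as PackedSymbolId>::pack(SymbolTableEntryId(7)).unpack(),
SymbolTableEntryId(7)
);
assert_eq!(
<u16 as PackedSymbolId>::pack(SymbolTableEntryId(300)).unpack(),
SymbolTableEntryId(300)
);
}
}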
impl<T, D, P> From<Generator<T, D>> for GeneratorReprInternal<T, D, P>
where
T: Ord + Clone,
D: Field,
P: PackedSymbolId + Copy + PartialEq,
{
fn from(v: Generator<T, D>) -> Self {
let ngram_size: usize = v.transition_table.n;
let mut key_collections: BTreeMap<usize, PackedKeyCollection<P, D>> = BTreeMap::new();
for (key, weight) in v.transition_table.to_ngrams_and_weights() {
let key_length = key.len();
let packed_key: Vec<_> = key.into_iter().map(P::pack).collect();
key_collections
.entry(key_length)
.or_insert_with(|| PackedKeyCollection::new(key_length))
.add_entry(&packed_key, weight);
}
GeneratorReprInternal {
symbol_table: v.symbol_table,
key_collections,
n: ngram_size,
}
}
}
impl<T, D> From<Generator<T, D>> for GeneratorRepr<T, D>
where
T: Ord + Clone,
D: Field,
{
fn from(v: Generator<T, D>) -> Self {
let max_symbol_id = v.symbol_table.max_symbol_id();
if max_symbol_id <= u8::MAX as usize {
GeneratorRepr::GeneratorReprU8(v.into())
} else if max_symbol_id <= u16::MAX as usize {
GeneratorRepr::GeneratorReprU16(v.into())
} else {
GeneratorRepr::GeneratorReprRaw(v.into())
}
}
}
impl<T, D> Generator<T, D>
where
D: Field,
T: Ord + Clone,
{
pub fn from_ngrams(
symbol_table: SymbolTable<T>,
ngrams: BTreeMap<Vec<SymbolTableEntryId>, D>,
) -> Generator<T, D> {
let n = ngrams.iter().map(|(k, _w)| k.len()).max().unwrap_or(0);
let rev_ngrams: BTreeMap<Vec<SymbolTableEntryId>, D> = ngrams
.iter()
.map(|(ngram, w)| (ngram.reversed(), *w))
.collect();
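// The reversed copy drives suffix-anchored generation: extending backwards
// from the end symbol reuses the same sampling machinery as forward
// generation.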
let transition_table = TransitionTable::new(ngrams, n);
let rev_transition_table = TransitionTable::new(rev_ngrams, n);
Generator {
symbol_table,
transition_table,
rev_transition_table,
}
}
pub fn into_symbol_table_and_ngrams(
self,
) -> (SymbolTable<T>, BTreeMap<Vec<SymbolTableEntryId>, D>) {
// We assume the forward transition table is correct
(
self.symbol_table,
self.transition_table.to_ngrams_and_weights(),
)
}
pub fn context_length(&self) -> usize {
self.transition_table.context_length()
}
pub fn start_symbol_id(&self) -> SymbolTableEntryId {
self.symbol_table.start_symbol_id()
}
pub fn end_symbol_id(&self) -> SymbolTableEntryId {
self.symbol_table.end_symbol_id()
}
pub fn generate_initial_vector(&self) -> Vec<SymbolTableEntryId> {
vec![self.start_symbol_id(); self.context_length()]
}
pub fn key<'a>(&self, v: &'a [SymbolTableEntryId]) -> &'a [SymbolTableEntryId] {
&v[v.len() - self.context_length()..v.len()]
}
pub fn body<'a>(&self, v: &'a [SymbolTableEntryId]) -> &'a [SymbolTableEntryId] {
&v[self.context_length()..(v.len() - self.context_length())]
}
pub fn augment_prefix(&self, prefix: &[SymbolTableEntryId]) -> Vec<SymbolTableEntryId> {
iter::repeat(self.start_symbol_id())
.take(self.context_length())
.chain(prefix.iter().cloned())
.collect()
}
pub fn log_prob(&self, word: &[T], katz_coefficient: Option<D>) -> f32 {
let ss_logp = self
.symbol_table
.symbolifications(word)
.into_iter()
.map(|w| {
let w: Vec<SymbolTableEntryId> = self.augment_prefix(&w);
let lp = self.transition_table.calculate_logp(&w, katz_coefficient);
(w, lp)
})
.filter(|(_w, lp)| *lp > -f32::INFINITY)
.collect::<Vec<_>>();
if ss_logp.is_empty() {
// They're all impossible...
return -f32::INFINITY;
}
let max_log_p = ss_logp
.iter()
.map(|(_s, lp)| *lp)
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
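// Log-sum-exp trick: log(sum_i p_i) = max_log_p + ln(sum_i exp(logp_i - max_log_p));
// subtracting the max keeps the exponentials from underflowing.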
let mut sump = 0.0;
for (_ss, logp) in ss_logp {
let w = (logp - max_log_p).exp();
sump += w;
}
max_log_p + sump.ln()
}
pub fn augment_and_reverse_suffix(
&self,
suffix: &[SymbolTableEntryId],
) -> Vec<SymbolTableEntryId> {
iter::repeat(self.end_symbol_id())
.take(self.context_length())
.chain(suffix.iter().rev().cloned())
.collect()
}
pub fn build_prefix_sampler(
&self,
katz_coefficient: Option<D>,
prefix: &[T],
) -> WeightedSampler<Vec<SymbolTableEntryId>, f32> {
let symbolified_prefixes = self.symbol_table.symbolifications_prefix(prefix);
let prefixes_with_log_prob: Vec<_> = symbolified_prefixes
.iter()
.map(|prefix| {
let w: Vec<SymbolTableEntryId> = self.augment_prefix(prefix);
let logp: f32 = self.transition_table.calculate_logp(&w, katz_coefficient);
(w, logp)
})
.collect();
let mut sampler = WeightedSampler::<Vec<SymbolTableEntryId>, f32>::new();
if prefixes_with_log_prob
.iter()
.all(|(_k, logp)| *logp == -f32::INFINITY)
{
// They all have zero weight so we just assume all are equally likely
for (ss, _logp) in prefixes_with_log_prob {
sampler.add_symbol_with_weight(ss, 1.0);
}
} else {
let mut min_log_p = 0.0;
for (_, logp) in &prefixes_with_log_prob {
if (*logp < min_log_p) && (*logp > -f32::INFINITY) {
min_log_p = *logp;
}
}
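// Shift by the smallest finite log-prob before exponentiating so the
// weights stay representable; the sampler only needs relative weights,
// so the common shift factor cancels.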
for (ss, logp) in prefixes_with_log_prob {
let w = (logp - min_log_p).exp();
sampler.add_symbol_with_weight(ss, w);
}
}
sampler
}
fn build_suffix_sampler(
&self,
katz_coefficient: Option<D>,
suffix: &[T],
) -> WeightedSampler<Vec<SymbolTableEntryId>, f32> {
// Generate all possible symbolifications of the suffix
// Calculate their probabilities and select one.
// Generate using that symbolified prefix
let symbolified_suffixes = self.symbol_table.symbolifications_suffix(suffix);
let suffixes_with_log_prob: Vec<_> = symbolified_suffixes
.iter()
.map(|suffix| {
let w: Vec<SymbolTableEntryId> = self.augment_and_reverse_suffix(suffix);
let logp: f32 = self
.rev_transition_table
.calculate_logp(&w, katz_coefficient);
(w, logp)
})
.collect();
let mut sampler = WeightedSampler::<Vec<SymbolTableEntryId>, f32>::new();
if suffixes_with_log_prob
.iter()
.all(|(_k, logp)| *logp == -f32::INFINITY)
{
// They all have zero weight so we just assume all are equally likely
for (ss, _logp) in suffixes_with_log_prob {
sampler.add_symbol_with_weight(ss, 1.0);
}
} else {
let mut min_log_p = 0.0;
for (_, logp) in &suffixes_with_log_prob {
if (*logp < min_log_p) && (*logp > -f32::INFINITY) {
min_log_p = *logp;
}
}
for (ss, logp) in suffixes_with_log_prob {
let w = (logp - min_log_p).exp();
sampler.add_symbol_with_weight(ss, w);
}
}
sampler
}
// TODO this probably belongs in TransitionTable
// TODO as does self.key and self.context_length
fn continue_prediction<R: Rng>(
&self,
transition_table: &TransitionTable<SymbolTableEntryId, D>,
terminal: SymbolTableEntryId,
mut v: Vec<SymbolTableEntryId>,
katz_coefficient: Option<D>,
rng: &mut R,
) -> Result<Vec<SymbolTableEntryId>, GenerationError> {
loop {
let next: Option<SymbolTableEntryId> =
transition_table.sample(self.key(&v), katz_coefficient, rng);
let next = next.ok_or_else(|| {
GenerationError::generic_error("Unable to find valid continuation")
})?;
if next == terminal {
v.extend(iter::repeat(terminal).take(self.context_length()));
return Ok(v);
}
v.push(next);
}
}
fn continue_fwd_prediction<R: Rng>(
&self,
v: Vec<SymbolTableEntryId>,
katz_coefficient: Option<D>,
rng: &mut R,
) -> Result<Vec<SymbolTableEntryId>, GenerationError> {
let end_id = self.end_symbol_id();
self.continue_prediction(&self.transition_table, end_id, v, katz_coefficient, rng)
}
fn continue_bwd_prediction<R: Rng>(
&self,
v: Vec<SymbolTableEntryId>,
katz_coefficient: Option<D>,
rng: &mut R,
) -> Result<Vec<SymbolTableEntryId>, GenerationError> {
let start_id = self.start_symbol_id();
self.continue_prediction(
&self.rev_transition_table,
start_id,
v,
katz_coefficient,
rng,
)
}
pub fn generate_multi<R: Rng>(
&self,
prefix: Option<&[T]>,
suffix: Option<&[T]>,
n: usize,
katz_coefficient: Option<D>,
rng: &mut R,
renderer: &impl Renderer,
) -> Result<Vec<String>, GenerationError>
where
T: std::fmt::Debug, // TODO: Only used for error message - would be nice to remove
{
match (prefix, suffix) {
(None, None) => self.generate(n, katz_coefficient, rng, renderer),
(None, Some(suffix)) => {
self.generate_with_suffix(suffix, n, katz_coefficient, rng, renderer)
}
(Some(prefix), None) => {
self.generate_with_prefix(prefix, n, katz_coefficient, rng, renderer)
}
(Some(prefix), Some(suffix)) => self.generate_with_prefix_and_suffix(
prefix,
suffix,
n,
katz_coefficient,
rng,
renderer,
),
}
}
pub fn generate<R: Rng>(
&self,
n: usize,
katz_coefficient: Option<D>,
rng: &mut R,
renderer: &impl Renderer,
) -> Result<Vec<String>, GenerationError> {
// Generate an initial vector.
let mut result = Vec::<String>::with_capacity(n);
for _i in 0..n {
let v = self.generate_initial_vector();
let v = self.continue_fwd_prediction(v, katz_coefficient, rng)?;
result.push(renderer.render(self.body(&v)).unwrap())
}
Ok(result)
}
pub fn generate_with_prefix<R: Rng>(
&self,
prefix: &[T],
n: usize,
katz_coefficient: Option<D>,
rng: &mut R,
renderer: &impl Renderer,
) -> Result<Vec<String>, GenerationError> {
let mut result = Vec::<String>::with_capacity(n);
// Generate all possible symbolifications of the prefix
// Calculate their probabilities
let sampler = self.build_prefix_sampler(katz_coefficient, prefix);
for _i in 0..n {
// Choose one of the prefixes
let chosen_prefix = sampler.sample_next_symbol(rng).unwrap();
// Generate using that symbolified prefix
let v = self.continue_fwd_prediction(chosen_prefix, katz_coefficient, rng)?;
result.push(renderer.render(self.body(&v)).unwrap())
}
Ok(result)
}
pub fn generate_with_suffix<R: Rng>(
&self,
suffix: &[T],
n: usize,
katz_coefficient: Option<D>,
rng: &mut R,
renderer: &impl Renderer,
) -> Result<Vec<String>, GenerationError> {
let mut result = Vec::<String>::with_capacity(n);
// Generate all possible symbolifications of the suffix
// Calculate their probabilities
// NOTE: This sampler generates the suffix *reversed*
let sampler = self.build_suffix_sampler(katz_coefficient, suffix);
for _i in 0..n {
// Choose one of the suffixes
let chosen_suffix = sampler.sample_next_symbol(rng).unwrap();
// Generate using that symbolified prefix
let v = self.continue_bwd_prediction(chosen_suffix, katz_coefficient, rng)?;
// Need to reverse v before we render it.
let mut v = self.body(&v).to_vec();
v.reverse();
result.push(renderer.render(&v).unwrap())
}
Ok(result)
}
pub fn generate_with_prefix_and_suffix<R: Rng>(
&self,
prefix: &[T],
suffix: &[T],
n: usize,
katz_coefficient: Option<D>,
rng: &mut R,
renderer: &impl Renderer,
) -> Result<Vec<String>, GenerationError>
where
T: std::fmt::Debug, // TODO: Only used for error message - would be nice to remove
{
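// Sketch of the approach implemented below:
// 1. Sample n_gen forward completions of the prefix and n_gen backward
//    completions of the suffix.
// 2. Index every window of splice_length symbols in each completion.
// 3. Intersect the forward and backward window sets to find common
//    splice points.
// 4. For each requested result, sample a splice point, then a forward and
//    a backward completion containing it, and join them at that window.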
// TODO: Should we add weights to any of the samplers to get a better result?
let mut result = Vec::<String>::with_capacity(n);
// TODO: Q. How big does N need to be? Currently it is a completely random guess.
let n_gen = (30 * n).max(10);
let splice_length = self.context_length() + 1;
// We generate N forward from prefix_str
// Then store up all the "fwd-splice-points" after prefix
let prefix_sampler = self.build_prefix_sampler(katz_coefficient, prefix);
let mut fwd_completions = Vec::<(usize, Vec<SymbolTableEntryId>)>::with_capacity(n_gen);
for _i in 0..n_gen {
let chosen_prefix = prefix_sampler.sample_next_symbol(rng).unwrap();
let prefix_length = chosen_prefix.len();
let completed_fwd = self
.continue_fwd_prediction(chosen_prefix, katz_coefficient, rng)
.map_err(|e| {
GenerationError::generic_error(format!(
"Unable to generate continuation of prefix '{:?}' - {:?}",
prefix, e
))
})?;
fwd_completions.push((prefix_length, completed_fwd));
}
type StemSampler<'a> = BTreeMap<
&'a [SymbolTableEntryId],
WeightedSampler<(usize, &'a [SymbolTableEntryId]), f32>,
>;
let mut fwd_part_samplers: StemSampler = BTreeMap::new();
for (k, v) in &fwd_completions {
for (i, w) in v[*k..].windows(splice_length).enumerate() {
fwd_part_samplers
.entry(w)
.or_default()
.add_symbol((k + i, v));
}
}
//TODO: How do we handle duplication etc?
// We generate N backward from suffix_str
// Store up all the bwd-splice-points before suffix
let suffix_sampler = self.build_suffix_sampler(katz_coefficient, suffix);
let mut bwd_completions = Vec::<(usize, Vec<SymbolTableEntryId>)>::with_capacity(n_gen);
for _i in 0..n_gen {
let chosen_suffix = suffix_sampler.sample_next_symbol(rng).unwrap();
let suffix_length = chosen_suffix.len();
let mut completed_bwd = self
.continue_bwd_prediction(chosen_suffix, katz_coefficient, rng)
.map_err(|e| {
GenerationError::generic_error(format!(
"Unable to generate backward continuation of suffix '{:?}' - {:?}",
suffix, e
))
})?;
completed_bwd.reverse();
bwd_completions.push((suffix_length, completed_bwd));
}
let mut bwd_part_samplers: StemSampler = BTreeMap::new();
for (k, v) in &bwd_completions {
for (i, w) in v[..v.len() - *k].windows(splice_length).enumerate() {
bwd_part_samplers.entry(w).or_default().add_symbol((i, v));
}
}
// Then we try to match up fwd and bwd splice points.
let fwd_splice_point_keys: BTreeSet<&[SymbolTableEntryId]> =
fwd_part_samplers.keys().cloned().collect();
let bwd_splice_point_keys: BTreeSet<&[SymbolTableEntryId]> =
bwd_part_samplers.keys().cloned().collect();
// These might be one too short.
// println!("fwd_splice_point_keys={:?}", fwd_splice_point_keys.iter().map( |v| self.symbol_table.render(&v) ).collect::<Vec<_>>());
// println!("bwd_splice_point_keys={:?}", bwd_splice_point_keys.iter().map( |v| self.symbol_table.render(&v) ).collect::<Vec<_>>());
let common_splice_point_keys: Vec<_> = fwd_splice_point_keys
.intersection(&bwd_splice_point_keys)
.collect();
// println!("common_splice_point_keys={:?}", common_splice_point_keys.iter().map( |v| self.symbol_table.render(&v) ).collect::<Vec<_>>());
let mut splice_point_sampler: WeightedSampler<&[SymbolTableEntryId], f32> =
WeightedSampler::new();
for sp in &common_splice_point_keys {
splice_point_sampler.add_symbol(*sp)
}
for _i in 0..n {
// Pick a splice point key
let splice_point = splice_point_sampler.sample_next_symbol(rng).unwrap();
// println!("picked splice_point={:?}", self.symbol_table.render(splice_point) );
// Pick a prefix for that key
let fwd_part_sampler = fwd_part_samplers.get(splice_point).unwrap();
let prefix = fwd_part_sampler.sample_next_symbol(rng).unwrap();
// println!("picked prefix u={}, v={}", prefix.0, self.symbol_table.render(prefix.1) );
// Pick a suffix for that key
let bwd_part_sampler = bwd_part_samplers.get(splice_point).unwrap();
let suffix = bwd_part_sampler.sample_next_symbol(rng).unwrap();
// println!("picked suffix u={}, v={}", suffix.0, self.symbol_table.render(suffix.1) );
// Join it all together
// Finally an answer is PREFIX-FWD-SPLICE-BWD-SUFFIX
// TODO We should allow the FWD and BWD to be empty
// TODO The SPLICE should be able to be part of the PREFIX or SUFFIX
let whole = [
&prefix.1[..prefix.0],
//splice_point,
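// (omitted deliberately: the splice window is already the head of
// &suffix.1[suffix.0..], so including splice_point here would duplicate it)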
&suffix.1[suffix.0..],
]
.concat();
// Then we render the answer.
// println!("whole={}", self.symbol_table.render(&whole) );
let text = renderer.render(self.body(&whole)).unwrap();
result.push(text);
}
Ok(result)
}
}
pub fn weight_for_symbolification(v: &[SymbolTableEntryId]) -> f32 {
1.0 / ((v.len() * v.len()) as f32)
}
impl<T> Generator<T, f32>
where
T: Ord + Clone,
{
pub fn map_probabilities<F>(&self, f: F) -> Generator<T, f32>
where
F: Fn(f32) -> f32 + Copy,
{
let tt: TransitionTable<SymbolTableEntryId, f32> =
self.transition_table.map_probabilities(f);
let ngrams = tt.to_ngrams_and_weights();
Generator::from_ngrams(self.symbol_table.clone(), ngrams)
}
}
impl<T> Generator<T, f32>
where
T: Ord + Clone,
{
pub fn get_info(&self) -> GeneratorInfo {
let mut info = GeneratorInfo::new();
for (k, w) in self.transition_table.to_ngrams_and_weights().iter() {
info.add_ngram_weight(k.len(), *w);
}
for (prefix, sampler) in self.transition_table.weights_table.iter() {
info.add_prefix_weight(prefix.len(), sampler.total);
}
// For the more detailed weight summaries we need to actually keep all the weights
// But we trim out the ngrams with start/end keys
let mut weights: BTreeMap<usize, Vec<(f64, Vec<SymbolTableEntryId>)>> = BTreeMap::new();
for (k, w) in self.transition_table.to_ngrams_and_weights().iter() {
//TODO: Only really need to check the first and last entries.
if k.contains(&SymbolTableEntryId(0)) || k.contains(&SymbolTableEntryId(1)) {
continue;
}
weights
.entry(k.len())
.or_default()
.push((*w as f64, k.clone()));
}
for (n, mut ws) in weights {
ws.sort_by(|a, b| a.partial_cmp(b).unwrap());
let summary = WeightSummary::from_weights_and_quantiles(
&ws,
&[0.0, 0.01, 0.05, 0.1, 0.9, 0.95, 0.99, 1.0],
);
info.ngram_weight_summaries_by_length.insert(n, summary);
}
info
}
}
pub trait ToSymbolsAndWeights<T> {
fn to_symbols_and_weights(&self, v: &[T]) -> Vec<(Vec<SymbolTableEntryId>, f32)>;
}
pub struct InverseSquareOfLengthWeighter<'a, T>
where
T: Ord + Clone,
{
symbol_table: &'a SymbolTable<T>,
}
impl<'a, T> InverseSquareOfLengthWeighter<'a, T>
where
T: Ord + Clone,
{
pub fn new(symbol_table: &'a SymbolTable<T>) -> Self {
InverseSquareOfLengthWeighter { symbol_table }
}
}
impl<'a, T> ToSymbolsAndWeights<T> for InverseSquareOfLengthWeighter<'a, T>
where
T: Ord + Clone,
{
fn to_symbols_and_weights(&self, v: &[T]) -> Vec<(Vec<SymbolTableEntryId>, f32)> {
let mut result = Vec::new();
let mut sum_w = 0.0;
for x in self.symbol_table.symbolifications(v) {
let w = weight_for_symbolification(&x);
result.push((x, w));
sum_w += w;
}
for x in &mut result {
x.1 /= sum_w;
}
result
}
}
pub struct ShortestOnlyWeighter<'a, T>
where
T: Ord + Clone,
{
symbol_table: &'a SymbolTable<T>,
}
impl<'a, T> ShortestOnlyWeighter<'a, T>
where
T: Ord + Clone,
{
pub fn new(symbol_table: &'a SymbolTable<T>) -> Self {
ShortestOnlyWeighter { symbol_table }
}
}
impl<'a, T> ToSymbolsAndWeights<T> for ShortestOnlyWeighter<'a, T>
where
T: Ord + Clone,
{
fn to_symbols_and_weights(&self, v: &[T]) -> Vec<(Vec<SymbolTableEntryId>, f32)> {
let ss = shortest_symbolifications(self.symbol_table, v);
let l = 1.0 / (ss.len() as f32);
ss.into_iter().map(|s| (s, l)).collect()
}
}
// TODO: Error if we can't get at least one symbolification
// TODO: Move this into the Symbol table?
// TODO: Provide weight_for_symbolification as argument.
// TODO: Only use the shortest symbolifications?
pub fn augment_and_symbolify<T>(
symbol_table: &SymbolTable<T>,
v: &[T],
n: usize,
) -> Vec<(Vec<SymbolTableEntryId>, f32)>
where
T: Ord + Clone,
{
assert!(n > 1);
let start_id = symbol_table.start_symbol_id();
let end_id = symbol_table.end_symbol_id();
let result = InverseSquareOfLengthWeighter::new(symbol_table).to_symbols_and_weights(v);
let sum_w: f32 = result.iter().map(|(_, w)| w).sum();
result
.into_iter()
.map(|(x, w)| (pad(n - 1, start_id, end_id, x), w / sum_w))
.collect()
}
#[cfg(test)]
mod test {
use super::*;
use crate::renderer::RenderU8;
use crate::symbol::SymbolTableEntry;
fn dumb_u8_symbol_table<T: AsRef<str>>(values: &[T]) -> SymbolTable<u8> {
let mut symbol_table = SymbolTable::new();
symbol_table.add(SymbolTableEntry::Start).unwrap();
symbol_table.add(SymbolTableEntry::End).unwrap();
for s in values {
for c in s.as_ref().bytes() {
symbol_table.add(SymbolTableEntry::Single(c)).unwrap();
}
}
symbol_table
}
fn simple_generator() -> Generator<u8, f32> {
let values = vec!["hello"];
let symbol_table = dumb_u8_symbol_table(&values);
let symbolified_values: Vec<(Vec<SymbolTableEntryId>, f32)> = values
.iter()
.flat_map(|s| augment_and_symbolify(&symbol_table, s.as_bytes(), 3))
.collect();
let ngrams = create_ngrams(&symbolified_values, 3);
Generator::from_ngrams(symbol_table, ngrams)
}
fn simple_generator_2() -> Generator<u8, f32> {
let values = vec!["word"];
let symbol_table = dumb_u8_symbol_table(&values);
let symbolified_values: Vec<(Vec<SymbolTableEntryId>, f32)> = values
.iter()
.flat_map(|s| augment_and_symbolify(&symbol_table, s.as_bytes(), 3))
.collect();
let ngrams = create_ngrams(&symbolified_values, 3);
Generator::from_ngrams(symbol_table, ngrams)
}
fn larger_generator() -> Generator<u8, f32> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let values: Vec<String> =
std::fs::read_to_string(format!("{}/../resources/Moby_Names_M_lc.txt", manifest_dir))
.unwrap()
.lines()
.map(|n| n.trim().to_string())
.filter(|s| s.len() >= 3)
.collect();
let symbol_table = dumb_u8_symbol_table(&values);
let symbolified_values: Vec<(Vec<SymbolTableEntryId>, f32)> = values
.iter()
.flat_map(|s| augment_and_symbolify(&symbol_table, s.as_bytes(), 3))
.collect();
let trigrams = create_ngrams(&symbolified_values, 3);
Generator::from_ngrams(symbol_table, trigrams)
}
#[test]
pub fn test_augment_and_symbolify_hello() {
let v = "hello";
let symbol_table = dumb_u8_symbol_table(&[v]);
let s = augment_and_symbolify(&symbol_table, v.as_bytes(), 3);
assert_eq!(s.len(), 1);
}
#[test]
pub fn | () {
let v = "hello";
let symbol_table = dumb_u8_symbol_table(&[v]);
let ss = symbol_table.symbolifications(v.as_bytes());
assert_eq!(ss.len(), 1);
}
#[test]
pub fn generate_simple() {
let mut rng = rand::thread_rng();
let g = simple_generator();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g.generate(1, None, &mut rng, &renderer).unwrap()[0].clone();
assert_eq!(m, "hello");
}
#[test]
pub fn generate_simple_2() {
let mut rng = rand::thread_rng();
let g = simple_generator_2();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g.generate(1, None, &mut rng, &renderer).unwrap()[0].clone();
assert_eq!(m, "word");
}
#[test]
pub fn generate_prefix() {
let mut rng = rand::thread_rng();
let g = simple_generator();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g
.generate_with_prefix("hel".as_bytes(), 1, None, &mut rng, &renderer)
.unwrap()[0]
.clone();
assert_eq!(m, "hello");
}
#[test]
pub fn generate_prefix_empty() {
let mut rng = rand::thread_rng();
let g = simple_generator();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g
.generate_with_prefix("".as_bytes(), 1, None, &mut rng, &renderer)
.unwrap()[0]
.clone();
assert_eq!(m, "hello");
}
#[test]
pub fn generate_suffix() {
let mut rng = rand::thread_rng();
let g = simple_generator();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g
.generate_with_suffix("llo".as_bytes(), 1, None, &mut rng, &renderer)
.unwrap()[0]
.clone();
assert_eq!(m, "hello");
}
#[test]
pub fn generate_suffix_empty() {
let mut rng = rand::thread_rng();
let g = simple_generator();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g
.generate_with_suffix("".as_bytes(), 1, None, &mut rng, &renderer)
.unwrap()[0]
.clone();
assert_eq!(m, "hello");
}
#[test]
pub fn generate_with_prefix_and_suffix() {
let mut rng = rand::thread_rng();
let g = simple_generator();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m: String = g
.generate_with_prefix_and_suffix(
"h".as_bytes(),
"o".as_bytes(),
1,
None,
&mut rng,
&renderer,
)
.unwrap()[0]
.clone();
assert_eq!(m, "hello");
}
#[test]
pub fn generate_with_prefix_and_suffix_big() {
let mut rng = rand::thread_rng();
let g = larger_generator();
let prefix_str = "h";
let suffix_str = "y";
let prefix = prefix_str.as_bytes();
let suffix = suffix_str.as_bytes();
let renderer = RenderU8 {
table: &g.symbol_table,
start: b"^",
end: b"$",
};
let m = g
.generate_with_prefix_and_suffix(prefix, suffix, 10, None, &mut rng, &renderer)
.unwrap();
for v in m {
assert!(
v.starts_with(prefix_str) && v.ends_with(suffix_str),
"Expected {}..{} but got {}",
prefix_str,
suffix_str,
v,
);
}
}
#[test]
pub fn generate_katz_fallback() {
// A very simple generator that will only work using Katz fallback
// ^^ -> A
// ^A -> A
// no AA -> ?
// A -> B
// AB -> $
let mut symbol_table: SymbolTable<u8> = SymbolTable::new();
let start = symbol_table.add(SymbolTableEntry::Start).unwrap();
let end = symbol_table.add(SymbolTableEntry::End).unwrap();
let a = symbol_table.add(SymbolTableEntry::Single(b'a')).unwrap();
let b = symbol_table.add(SymbolTableEntry::Single(b'b')).unwrap();
let mut ngrams: BTreeMap<Vec<SymbolTableEntryId>, f32> = BTreeMap::new();
ngrams.insert(vec![start, start, a], 1.0);
ngrams.insert(vec![start, a, a], 1.0);
ngrams.insert(vec![a, b], 1.0);
ngrams.insert(vec![a, b, end], 1.0);
let gen = Generator::from_ngrams(symbol_table, ngrams);
let mut rng = rand::thread_rng();
let renderer = RenderU8 {
table: &gen.symbol_table,
start: b"^",
end: b"$",
};
let v = gen.generate(1, None, &mut rng, &renderer);
assert_eq!(
v,
Err(GenerationError::GenericError(
"Unable to find valid continuation".into()
))
);
let v = gen.generate(1, Some(0.0), &mut rng, &renderer);
assert_eq!(v.unwrap()[0], "aab");
}
#[test]
pub fn generate_katz_fallback_2() {
// A very simple generator that will only work using Katz fallback
// ^^ -> A
// ^A -> A
// reject AA -> C as weight is too low.
// A -> B
// AB -> $
let mut symbol_table: SymbolTable<u8> = SymbolTable::new();
let start = symbol_table.add(SymbolTableEntry::Start).unwrap();
let end = symbol_table.add(SymbolTableEntry::End).unwrap();
let a = symbol_table.add(SymbolTableEntry::Single(b'a')).unwrap();
let b = symbol_table.add(SymbolTableEntry::Single(b'b')).unwrap();
let c = symbol_table.add(SymbolTableEntry::Single(b'c')).unwrap();
let mut ngrams: BTreeMap<Vec<SymbolTableEntryId>, f32> = BTreeMap::new();
ngrams.insert(vec![start, start, a], 1.0);
ngrams.insert(vec![start, a, a], 1.0);
ngrams.insert(vec![a, a, c], 0.1);
ngrams.insert(vec![a, b], 1.0);
ngrams.insert(vec![a, b, end], 1.0);
ngrams.insert(vec![a, c, end], 1.0);
let gen = Generator::from_ngrams(symbol_table, ngrams);
let mut rng = rand::thread_rng();
let renderer = RenderU8 {
table: &gen.symbol_table,
start: b"^",
end: b"$",
};
// Without Katz fallback we don't reject AAC
let v = gen.generate(1, None, &mut rng, &renderer).unwrap()[0].clone();
assert_eq!(v, "aac");
// With a low Katz coefficient we don't reject AAC
let v = gen.generate(1, Some(0.05), &mut rng, &renderer).unwrap()[0].clone();
assert_eq!(v, "aac");
// With a high Katz coefficient we do reject AAC
let v = gen.generate(1, Some(0.5), &mut rng, &renderer).unwrap()[0].clone();
assert_eq!(v, "aab");
}
#[test]
pub fn serialize_deserialize() {
let gen = larger_generator();
let s = bincode::serialize(&gen).unwrap();
let gen2: Generator<u8, f32> = bincode::deserialize(&s).unwrap();
assert_eq!(gen, gen2);
}
#[test]
pub fn serialize_deserialize_with_katz() {
let mut symbol_table: SymbolTable<u8> = SymbolTable::new();
symbol_table.add(SymbolTableEntry::Start).unwrap();
symbol_table.add(SymbolTableEntry::End).unwrap();
let a = symbol_table.add(SymbolTableEntry::Single(b'a')).unwrap();
let b = symbol_table.add(SymbolTableEntry::Single(b'b')).unwrap();
let mut ngrams: BTreeMap<Vec<SymbolTableEntryId>, f32> = BTreeMap::new();
ngrams.insert(vec![a, a, a], 1.0);
ngrams.insert(vec![a, b], 1.0);
let gen = Generator::from_ngrams(symbol_table, ngrams);
let s = bincode::serialize(&gen).unwrap();
let gen2: Generator<u8, f32> = bincode::deserialize(&s).unwrap();
assert_eq!(gen, gen2);
}
}
| test_symbolify_hello |
test_pkgrepo.py | # -*- coding: utf-8 -*-
'''
tests for pkgrepo states
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
'''
pkgrepo state tests
'''
@requires_system_grains
def | (self, grains):
'''
Test adding a repo
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
if grains['os_family'] == 'Debian':
try:
from aptsources import sourceslist
except ImportError:
self.skipTest(
'aptsources.sourceslist python module not found'
)
ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/managed.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
def test_pkgrepo_02_absent(self):
'''
Test removing the repo from the above test
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/absent.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@requires_system_grains
def test_pkgrepo_03_with_comments(self, grains):
'''
Test adding a repo with comments
'''
os_family = grains['os_family'].lower()
if os_family in ('redhat',):
kwargs = {
'name': 'examplerepo',
'baseurl': 'http://example.com/repo',
'enabled': False,
'comments': ['This is a comment']
}
elif os_family in ('debian',):
self.skipTest('Debian/Ubuntu test case needed')
else:
self.skipTest("No test case for os_family '{0}'".format(os_family))
try:
# Run the state to add the repo
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
# Run again with modified comments
kwargs['comments'].append('This is another comment')
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertEqual(
ret['changes'],
{
'comments': {
'old': ['This is a comment'],
'new': ['This is a comment',
'This is another comment']
}
}
)
# Run a third time, no changes should be made
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertFalse(ret['changes'])
self.assertEqual(
ret['comment'],
"Package repo '{0}' already configured".format(kwargs['name'])
)
finally:
# Clean up
self.run_state('pkgrepo.absent', name=kwargs['name'])
| test_pkgrepo_01_managed |
conf.py | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright (c) 2019, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
#-------------------------------------------------------------------------------#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('./'))
import tfm_copy_files
# -- Project information -----------------------------------------------------
project = 'TF-M'
copyright = '2017-2019, ARM CE-OSS'
author = 'ARM CE-OSS'
title = 'TF-M user Guide'
# The short X.Y version
version = '1.0.0-Beta'
# The full version, including alpha/beta/rc tags
release = 'Version 1.0.0-Beta'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# | # ones.
extensions = [
'sphinx.ext.imgmath',
'm2r', #Support markdown files. Needed for external code.
'sphinx.ext.autosectionlabel', #Make sphinx generate a label for each section
'sphinxcontrib.plantuml' #Add support for PlantUML drawings
]
#Location of PlantUML
plantuml = '/usr/bin/java -jar /usr/share/plantuml/plantuml.jar'
#Make auto-generated section labels be prefixed with the file name.
autosectionlabel_prefix_document=True
#Add auto section labels for level 1 headers only.
autosectionlabel_maxdepth=1
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
#
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../docs/_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#Disable adding conf.py copyright notice to HTML output
html_show_copyright = False
#Add custom css for HTML. Used to allow full page width rendering
def setup(app):
app.add_stylesheet('css/custom.css')
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TF-M doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TF-M.tex', title,
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# (master_doc, 'tf-m', title,
# [author], 7)
#]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# (master_doc, 'TF-M', title,
# author, 'TF-M', 'Trusted Firmware for Cortex-M',
# 'Miscellaneous'),
#]
# -- Extension configuration ------------------------------------------------- | # needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom |
envoy_filter.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: networking/v1alpha3/envoy_filter.proto
package v1alpha3
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf3 "github.com/gogo/protobuf/types"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type EnvoyFilter_ListenerMatch_ListenerType int32
const (
// All listeners
EnvoyFilter_ListenerMatch_ANY EnvoyFilter_ListenerMatch_ListenerType = 0
// Inbound listener in sidecar
EnvoyFilter_ListenerMatch_SIDECAR_INBOUND EnvoyFilter_ListenerMatch_ListenerType = 1
// Outbound listener in sidecar
EnvoyFilter_ListenerMatch_SIDECAR_OUTBOUND EnvoyFilter_ListenerMatch_ListenerType = 2
// Gateway listener
EnvoyFilter_ListenerMatch_GATEWAY EnvoyFilter_ListenerMatch_ListenerType = 3
)
var EnvoyFilter_ListenerMatch_ListenerType_name = map[int32]string{
0: "ANY",
1: "SIDECAR_INBOUND",
2: "SIDECAR_OUTBOUND",
3: "GATEWAY",
}
var EnvoyFilter_ListenerMatch_ListenerType_value = map[string]int32{
"ANY": 0,
"SIDECAR_INBOUND": 1,
"SIDECAR_OUTBOUND": 2,
"GATEWAY": 3,
}
func (x EnvoyFilter_ListenerMatch_ListenerType) String() string {
return proto.EnumName(EnvoyFilter_ListenerMatch_ListenerType_name, int32(x))
}
func (EnvoyFilter_ListenerMatch_ListenerType) EnumDescriptor() ([]byte, []int) {
return fileDescriptorEnvoyFilter, []int{0, 1, 0}
}
type EnvoyFilter_ListenerMatch_ListenerProtocol int32
const (
// All protocols
EnvoyFilter_ListenerMatch_ALL EnvoyFilter_ListenerMatch_ListenerProtocol = 0
// HTTP or HTTPS (with termination) / HTTP2/gRPC
EnvoyFilter_ListenerMatch_HTTP EnvoyFilter_ListenerMatch_ListenerProtocol = 1
// Any non-HTTP listener
EnvoyFilter_ListenerMatch_TCP EnvoyFilter_ListenerMatch_ListenerProtocol = 2
)
var EnvoyFilter_ListenerMatch_ListenerProtocol_name = map[int32]string{
0: "ALL",
1: "HTTP",
2: "TCP",
}
var EnvoyFilter_ListenerMatch_ListenerProtocol_value = map[string]int32{
"ALL": 0,
"HTTP": 1,
"TCP": 2,
}
func (x EnvoyFilter_ListenerMatch_ListenerProtocol) String() string {
return proto.EnumName(EnvoyFilter_ListenerMatch_ListenerProtocol_name, int32(x))
}
func (EnvoyFilter_ListenerMatch_ListenerProtocol) EnumDescriptor() ([]byte, []int) {
return fileDescriptorEnvoyFilter, []int{0, 1, 1}
}
// Index/position in the filter chain.
type EnvoyFilter_InsertPosition_Index int32
const (
// Insert first
EnvoyFilter_InsertPosition_FIRST EnvoyFilter_InsertPosition_Index = 0
// Insert last
EnvoyFilter_InsertPosition_LAST EnvoyFilter_InsertPosition_Index = 1
// Insert before the named filter.
EnvoyFilter_InsertPosition_BEFORE EnvoyFilter_InsertPosition_Index = 2
// Insert after the named filter.
EnvoyFilter_InsertPosition_AFTER EnvoyFilter_InsertPosition_Index = 3
)
var EnvoyFilter_InsertPosition_Index_name = map[int32]string{
0: "FIRST",
1: "LAST",
2: "BEFORE",
3: "AFTER",
}
var EnvoyFilter_InsertPosition_Index_value = map[string]int32{
"FIRST": 0,
"LAST": 1,
"BEFORE": 2,
"AFTER": 3,
}
func (x EnvoyFilter_InsertPosition_Index) String() string {
return proto.EnumName(EnvoyFilter_InsertPosition_Index_name, int32(x))
}
func (EnvoyFilter_InsertPosition_Index) EnumDescriptor() ([]byte, []int) {
return fileDescriptorEnvoyFilter, []int{0, 2, 0}
}
type EnvoyFilter_Filter_FilterType int32
const (
// placeholder
EnvoyFilter_Filter_INVALID EnvoyFilter_Filter_FilterType = 0
// Http filter
EnvoyFilter_Filter_HTTP EnvoyFilter_Filter_FilterType = 1
// Network filter
EnvoyFilter_Filter_NETWORK EnvoyFilter_Filter_FilterType = 2
)
var EnvoyFilter_Filter_FilterType_name = map[int32]string{
0: "INVALID",
1: "HTTP",
2: "NETWORK",
}
var EnvoyFilter_Filter_FilterType_value = map[string]int32{
"INVALID": 0,
"HTTP": 1,
"NETWORK": 2,
}
func (x EnvoyFilter_Filter_FilterType) String() string {
return proto.EnumName(EnvoyFilter_Filter_FilterType_name, int32(x))
}
func (EnvoyFilter_Filter_FilterType) EnumDescriptor() ([]byte, []int) {
return fileDescriptorEnvoyFilter, []int{0, 3, 0}
}
// `EnvoyFilter` describes Envoy proxy-specific filters that can be used to
// customize the Envoy proxy configuration generated by Istio networking
// subsystem (Pilot). This feature must be used with care, as incorrect
// configurations could potentially destabilize the entire mesh.
//
// NOTE 1: Since this is break glass configuration, there will not be any
// backward compatibility across different Istio releases. In other words,
// this configuration is subject to change based on internal implementation
// of Istio networking subsystem.
//
// NOTE 2: When multiple EnvoyFilters are bound to the same workload, all filter
// configurations will be processed sequentially in order of creation time.
// The behavior is undefined if multiple EnvoyFilter configurations conflict
// with each other.
//
// The following example for Kubernetes enables Envoy's Lua filter for all
// inbound calls arriving at service port 8080 of the reviews service pod with
// labels "app: reviews".
//
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: EnvoyFilter
// metadata:
// name: reviews-lua
// spec:
// workloadLabels:
// app: reviews
// filters:
// - listenerMatch:
// portNumber: 8080
// listenerType: SIDECAR_INBOUND #will match with the inbound listener for reviews:8080
// filterName: envoy.lua
// filterType: HTTP
// filterConfig:
// inlineCode: |
// ... lua code ...
// ```
type EnvoyFilter struct {
// One or more labels that indicate a specific set of pods/VMs whose
// proxies should be configured to use these additional filters. The
// scope of label search is platform dependent. On Kubernetes, for
// example, the scope includes pods running in all reachable
// namespaces. Omitting the selector applies the filter to all proxies in
// the mesh.
// NOTE: There can be only one EnvoyFilter bound to a specific workload.
// The behavior is undefined if multiple EnvoyFilter configurations are
// specified for the same workload.
WorkloadLabels map[string]string `protobuf:"bytes,1,rep,name=workload_labels,json=workloadLabels" json:"workload_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// REQUIRED: Envoy network filters/http filters to be added to matching
// listeners. When adding network filters to http connections, care
// should be taken to ensure that the filter is added before
// envoy.http_connection_manager.
Filters []*EnvoyFilter_Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"`
}
func (m *EnvoyFilter) Reset() { *m = EnvoyFilter{} }
func (m *EnvoyFilter) String() string { return proto.CompactTextString(m) }
func (*EnvoyFilter) ProtoMessage() {}
func (*EnvoyFilter) Descriptor() ([]byte, []int) { return fileDescriptorEnvoyFilter, []int{0} }
func (m *EnvoyFilter) GetWorkloadLabels() map[string]string {
if m != nil {
return m.WorkloadLabels
}
return nil
}
func (m *EnvoyFilter) GetFilters() []*EnvoyFilter_Filter {
if m != nil {
return m.Filters
}
return nil
}
// Select a listener to add the filter to based on the match conditions.
// All conditions specified in the ListenerMatch must be met for the filter
// to be applied to a listener.
type EnvoyFilter_ListenerMatch struct {
// The service port/gateway port to which traffic is being
// sent/received. If not specified, matches all listeners. Even though
// inbound listeners are generated for the instance/pod ports, only
// service ports should be used to match listeners.
PortNumber uint32 `protobuf:"varint,1,opt,name=port_number,json=portNumber,proto3" json:"port_number,omitempty"`
// Instead of using specific port numbers, a set of ports matching a
// given port name prefix can be selected. E.g., "mongo" selects ports
// named mongo-port, mongo, mongoDB, MONGO, etc. Matching is case
// insensitive.
PortNamePrefix string `protobuf:"bytes,2,opt,name=port_name_prefix,json=portNamePrefix,proto3" json:"port_name_prefix,omitempty"`
// Inbound vs outbound sidecar listener or gateway listener. If not specified,
// matches all listeners.
ListenerType EnvoyFilter_ListenerMatch_ListenerType `protobuf:"varint,3,opt,name=listener_type,json=listenerType,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_ListenerMatch_ListenerType" json:"listener_type,omitempty"`
// Selects a class of listeners for the same protocol. If not
// specified, applies to listeners on all protocols. Use the protocol
// selection to select all HTTP listeners (includes HTTP2/gRPC/HTTPS
// where Envoy terminates TLS) or all TCP listeners (includes HTTPS
// passthrough using SNI).
ListenerProtocol EnvoyFilter_ListenerMatch_ListenerProtocol `protobuf:"varint,4,opt,name=listener_protocol,json=listenerProtocol,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_ListenerMatch_ListenerProtocol" json:"listener_protocol,omitempty"`
// One or more IP addresses to which the listener is bound. If
// specified, should match at least one address in the list.
Address []string `protobuf:"bytes,5,rep,name=address" json:"address,omitempty"`
}
func (m *EnvoyFilter_ListenerMatch) Reset() { *m = EnvoyFilter_ListenerMatch{} }
func (m *EnvoyFilter_ListenerMatch) String() string { return proto.CompactTextString(m) }
func (*EnvoyFilter_ListenerMatch) ProtoMessage() {}
func (*EnvoyFilter_ListenerMatch) Descriptor() ([]byte, []int) {
return fileDescriptorEnvoyFilter, []int{0, 1}
}
func (m *EnvoyFilter_ListenerMatch) GetPortNumber() uint32 {
if m != nil {
return m.PortNumber
}
return 0
}
func (m *EnvoyFilter_ListenerMatch) GetPortNamePrefix() string {
if m != nil {
return m.PortNamePrefix
}
return ""
}
func (m *EnvoyFilter_ListenerMatch) GetListenerType() EnvoyFilter_ListenerMatch_ListenerType {
if m != nil {
return m.ListenerType
}
return EnvoyFilter_ListenerMatch_ANY
}
func (m *EnvoyFilter_ListenerMatch) GetListenerProtocol() EnvoyFilter_ListenerMatch_ListenerProtocol {
if m != nil {
return m.ListenerProtocol
}
return EnvoyFilter_ListenerMatch_ALL
}
func (m *EnvoyFilter_ListenerMatch) GetAddress() []string {
if m != nil {
return m.Address
}
return nil
}
// Indicates the relative index in the filter chain where the filter should be inserted.
type EnvoyFilter_InsertPosition struct {
// Position of this filter in the filter chain.
Index EnvoyFilter_InsertPosition_Index `protobuf:"varint,1,opt,name=index,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_InsertPosition_Index" json:"index,omitempty"`
// If BEFORE or AFTER position is specified, specify the name of the
// filter relative to which this filter should be inserted.
RelativeTo string `protobuf:"bytes,2,opt,name=relative_to,json=relativeTo,proto3" json:"relative_to,omitempty"`
}
func (m *EnvoyFilter_InsertPosition) Reset() { *m = EnvoyFilter_InsertPosition{} }
func (m *EnvoyFilter_InsertPosition) String() string { return proto.CompactTextString(m) }
func (*EnvoyFilter_InsertPosition) ProtoMessage() {}
func (*EnvoyFilter_InsertPosition) Descriptor() ([]byte, []int) {
return fileDescriptorEnvoyFilter, []int{0, 2}
}
func (m *EnvoyFilter_InsertPosition) GetIndex() EnvoyFilter_InsertPosition_Index {
if m != nil {
return m.Index
}
return EnvoyFilter_InsertPosition_FIRST
}
func (m *EnvoyFilter_InsertPosition) GetRelativeTo() string {
if m != nil {
return m.RelativeTo
}
return ""
}
// Envoy filters to be added to a network or http filter chain.
type EnvoyFilter_Filter struct {
// Filter will be added to the listener only if the match conditions are true.
// If not specified, the filters will be applied to all listeners.
ListenerMatch *EnvoyFilter_ListenerMatch `protobuf:"bytes,1,opt,name=listener_match,json=listenerMatch" json:"listener_match,omitempty"`
// Insert position in the filter chain. Defaults to FIRST
InsertPosition *EnvoyFilter_InsertPosition `protobuf:"bytes,2,opt,name=insert_position,json=insertPosition" json:"insert_position,omitempty"`
// REQUIRED: The type of filter to instantiate.
FilterType EnvoyFilter_Filter_FilterType `protobuf:"varint,3,opt,name=filter_type,json=filterType,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_Filter_FilterType" json:"filter_type,omitempty"`
// REQUIRED: The name of the filter to instantiate. The name must match a supported
// filter _compiled into_ Envoy.
FilterName string `protobuf:"bytes,4,opt,name=filter_name,json=filterName,proto3" json:"filter_name,omitempty"`
// REQUIRED: Filter specific configuration which depends on the filter being
// instantiated.
FilterConfig *google_protobuf3.Struct `protobuf:"bytes,5,opt,name=filter_config,json=filterConfig" json:"filter_config,omitempty"`
}
func (m *EnvoyFilter_Filter) Reset() { *m = EnvoyFilter_Filter{} }
func (m *EnvoyFilter_Filter) String() string { return proto.CompactTextString(m) }
func (*EnvoyFilter_Filter) ProtoMessage() {}
func (*EnvoyFilter_Filter) Descriptor() ([]byte, []int) { return fileDescriptorEnvoyFilter, []int{0, 3} }
func (m *EnvoyFilter_Filter) GetListenerMatch() *EnvoyFilter_ListenerMatch {
if m != nil {
return m.ListenerMatch
}
return nil
}
func (m *EnvoyFilter_Filter) GetInsertPosition() *EnvoyFilter_InsertPosition {
if m != nil {
return m.InsertPosition
}
return nil
}
func (m *EnvoyFilter_Filter) GetFilterType() EnvoyFilter_Filter_FilterType {
if m != nil {
return m.FilterType
}
return EnvoyFilter_Filter_INVALID
}
func (m *EnvoyFilter_Filter) GetFilterName() string {
if m != nil {
return m.FilterName
}
return ""
}
func (m *EnvoyFilter_Filter) GetFilterConfig() *google_protobuf3.Struct {
if m != nil {
return m.FilterConfig
}
return nil
}
func init() {
proto.RegisterType((*EnvoyFilter)(nil), "istio.networking.v1alpha3.EnvoyFilter")
proto.RegisterType((*EnvoyFilter_ListenerMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.ListenerMatch")
proto.RegisterType((*EnvoyFilter_InsertPosition)(nil), "istio.networking.v1alpha3.EnvoyFilter.InsertPosition")
proto.RegisterType((*EnvoyFilter_Filter)(nil), "istio.networking.v1alpha3.EnvoyFilter.Filter")
proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_ListenerMatch_ListenerType", EnvoyFilter_ListenerMatch_ListenerType_name, EnvoyFilter_ListenerMatch_ListenerType_value)
proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_ListenerMatch_ListenerProtocol", EnvoyFilter_ListenerMatch_ListenerProtocol_name, EnvoyFilter_ListenerMatch_ListenerProtocol_value)
proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_InsertPosition_Index", EnvoyFilter_InsertPosition_Index_name, EnvoyFilter_InsertPosition_Index_value)
proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_Filter_FilterType", EnvoyFilter_Filter_FilterType_name, EnvoyFilter_Filter_FilterType_value)
}
func (m *EnvoyFilter) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *EnvoyFilter) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.WorkloadLabels) > 0 {
for k, _ := range m.WorkloadLabels {
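// Tag byte 0xa = (field 1 << 3) | wire type 2 (length-delimited):
// one map entry per workload label.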
dAtA[i] = 0xa
i++
v := m.WorkloadLabels[k]
mapSize := 1 + len(k) + sovEnvoyFilter(uint64(len(k))) + 1 + len(v) + sovEnvoyFilter(uint64(len(v)))
i = encodeVarintEnvoyFilter(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
if len(m.Filters) > 0 {
for _, msg := range m.Filters {
dAtA[i] = 0x12
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *EnvoyFilter_ListenerMatch) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *EnvoyFilter_ListenerMatch) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PortNumber != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.PortNumber))
}
if len(m.PortNamePrefix) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.PortNamePrefix)))
i += copy(dAtA[i:], m.PortNamePrefix)
}
if m.ListenerType != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.ListenerType))
}
if m.ListenerProtocol != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.ListenerProtocol))
}
if len(m.Address) > 0 {
for _, s := range m.Address {
dAtA[i] = 0x2a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func (m *EnvoyFilter_InsertPosition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *EnvoyFilter_InsertPosition) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Index != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.Index))
}
if len(m.RelativeTo) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.RelativeTo)))
i += copy(dAtA[i:], m.RelativeTo)
}
return i, nil
}
func (m *EnvoyFilter_Filter) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *EnvoyFilter_Filter) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.ListenerMatch != nil {
dAtA[i] = 0xa
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.ListenerMatch.Size()))
n1, err := m.ListenerMatch.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.InsertPosition != nil {
dAtA[i] = 0x12
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.InsertPosition.Size()))
n2, err := m.InsertPosition.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
if m.FilterType != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.FilterType))
}
if len(m.FilterName) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.FilterName)))
i += copy(dAtA[i:], m.FilterName)
}
if m.FilterConfig != nil {
dAtA[i] = 0x2a
i++
i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.FilterConfig.Size()))
n3, err := m.FilterConfig.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n3
}
return i, nil
}
func encodeVarintEnvoyFilter(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
func (m *EnvoyFilter) Size() (n int) {
var l int
_ = l
if len(m.WorkloadLabels) > 0 {
for k, v := range m.WorkloadLabels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovEnvoyFilter(uint64(len(k))) + 1 + len(v) + sovEnvoyFilter(uint64(len(v)))
n += mapEntrySize + 1 + sovEnvoyFilter(uint64(mapEntrySize))
}
}
if len(m.Filters) > 0 {
for _, e := range m.Filters {
l = e.Size()
n += 1 + l + sovEnvoyFilter(uint64(l))
}
}
return n
}
func (m *EnvoyFilter_ListenerMatch) Size() (n int) {
var l int
_ = l
if m.PortNumber != 0 {
n += 1 + sovEnvoyFilter(uint64(m.PortNumber))
}
l = len(m.PortNamePrefix)
if l > 0 {
n += 1 + l + sovEnvoyFilter(uint64(l))
}
if m.ListenerType != 0 {
n += 1 + sovEnvoyFilter(uint64(m.ListenerType))
}
if m.ListenerProtocol != 0 {
n += 1 + sovEnvoyFilter(uint64(m.ListenerProtocol))
}
if len(m.Address) > 0 {
for _, s := range m.Address {
l = len(s)
n += 1 + l + sovEnvoyFilter(uint64(l))
}
}
return n
}
func (m *EnvoyFilter_InsertPosition) Size() (n int) {
var l int
_ = l
if m.Index != 0 {
n += 1 + sovEnvoyFilter(uint64(m.Index))
}
l = len(m.RelativeTo)
if l > 0 {
n += 1 + l + sovEnvoyFilter(uint64(l))
}
return n
}
func (m *EnvoyFilter_Filter) Size() (n int) {
var l int
_ = l
if m.ListenerMatch != nil {
l = m.ListenerMatch.Size()
n += 1 + l + sovEnvoyFilter(uint64(l))
}
if m.InsertPosition != nil {
l = m.InsertPosition.Size()
n += 1 + l + sovEnvoyFilter(uint64(l))
}
if m.FilterType != 0 {
n += 1 + sovEnvoyFilter(uint64(m.FilterType))
}
l = len(m.FilterName)
if l > 0 {
n += 1 + l + sovEnvoyFilter(uint64(l))
}
if m.FilterConfig != nil {
l = m.FilterConfig.Size()
n += 1 + l + sovEnvoyFilter(uint64(l))
}
return n
}
func sovEnvoyFilter(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozEnvoyFilter(x uint64) (n int) {
return sovEnvoyFilter(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
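// Editor's note: sovEnvoyFilter returns the byte length of the base-128 varint
// encoding of x (values 0..127 take 1 byte, 128..16383 take 2 bytes, and so on).
// sozEnvoyFilter applies protobuf zigzag encoding first, so small negative
// values stay short; for example sozEnvoyFilter(uint64(int64(-1))) is 1 rather
// than the 10 bytes a plain varint of -1 would need.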
func (m *EnvoyFilter) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EnvoyFilter: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EnvoyFilter: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field WorkloadLabels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.WorkloadLabels == nil {
m.WorkloadLabels = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthEnvoyFilter
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthEnvoyFilter
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipEnvoyFilter(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEnvoyFilter
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.WorkloadLabels[mapkey] = mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Filters = append(m.Filters, &EnvoyFilter_Filter{})
if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEnvoyFilter(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEnvoyFilter
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *EnvoyFilter_ListenerMatch) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ListenerMatch: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ListenerMatch: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PortNumber", wireType)
}
m.PortNumber = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.PortNumber |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PortNamePrefix", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PortNamePrefix = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ListenerType", wireType)
}
m.ListenerType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ListenerType |= (EnvoyFilter_ListenerMatch_ListenerType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ListenerProtocol", wireType)
}
m.ListenerProtocol = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ListenerProtocol |= (EnvoyFilter_ListenerMatch_ListenerProtocol(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Address = append(m.Address, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEnvoyFilter(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEnvoyFilter
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *EnvoyFilter_InsertPosition) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: InsertPosition: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: InsertPosition: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
}
m.Index = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Index |= (EnvoyFilter_InsertPosition_Index(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RelativeTo", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RelativeTo = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEnvoyFilter(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEnvoyFilter
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *EnvoyFilter_Filter) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Filter: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListenerMatch", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.ListenerMatch == nil {
m.ListenerMatch = &EnvoyFilter_ListenerMatch{}
}
if err := m.ListenerMatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InsertPosition", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.InsertPosition == nil {
m.InsertPosition = &EnvoyFilter_InsertPosition{}
}
if err := m.InsertPosition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FilterType", wireType)
}
m.FilterType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.FilterType |= (EnvoyFilter_Filter_FilterType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field FilterName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.FilterName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field FilterConfig", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEnvoyFilter
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.FilterConfig == nil {
m.FilterConfig = &google_protobuf3.Struct{}
}
if err := m.FilterConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEnvoyFilter(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEnvoyFilter
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipEnvoyFilter(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthEnvoyFilter
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEnvoyFilter
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipEnvoyFilter(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
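// Editor's note: skipEnvoyFilter steps over a single unknown field according to
// its wire type: 0: varint, 1: fixed64 (skip 8 bytes), 2: length-delimited,
// 3/4: deprecated start/end group markers, 5: fixed32 (skip 4 bytes).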
var (
ErrInvalidLengthEnvoyFilter = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowEnvoyFilter = fmt.Errorf("proto: integer overflow")
)
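// Minimal round-trip sketch (editor's addition, not part of the generated code):
//
//	ef := &EnvoyFilter{
//		WorkloadLabels: map[string]string{"app": "reviews"},
//		Filters:        []*EnvoyFilter_Filter{{FilterName: "envoy.lua"}},
//	}
//	raw, err := ef.Marshal() // protobuf wire bytes
//	if err != nil {
//		// handle error
//	}
//	var decoded EnvoyFilter
//	if err := decoded.Unmarshal(raw); err != nil {
//		// handle error
//	}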
func init() { proto.RegisterFile("networking/v1alpha3/envoy_filter.proto", fileDescriptorEnvoyFilter) }
var fileDescriptorEnvoyFilter = []byte{
// 706 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x41, 0x6f, 0xda, 0x4c,
0x10, 0x8d, 0x71, 0x08, 0x1f, 0xe3, 0x40, 0xfc, 0x6d, 0x22, 0xd5, 0x45, 0x55, 0x8a, 0x38, 0x54,
0x5c, 0x6a, 0x5a, 0xd2, 0x4a, 0x51, 0xda, 0x8b, 0x93, 0x98, 0x14, 0x95, 0x02, 0x5d, 0x9c, 0x46,
0x69, 0xa5, 0x5a, 0x06, 0x16, 0xb2, 0x8a, 0xf1, 0x5a, 0xf6, 0x42, 0xc2, 0xcf, 0xea, 0xb1, 0xd7,
0x9e, 0x7a, 0x6c, 0xff, 0x41, 0x95, 0x5f, 0x52, 0x79, 0x6d, 0x08, 0x44, 0xa9, 0x94, 0xa6, 0x27,
0x7b, 0x9f, 0xde, 0xbc, 0x99, 0xd9, 0x79, 0xb3, 0xf0, 0xc4, 0x23, 0xfc, 0x82, 0x05, 0xe7, 0xd4,
0x1b, 0x56, 0x26, 0xcf, 0x1d, 0xd7, 0x3f, 0x73, 0x76, 0x2a, 0xc4, 0x9b, 0xb0, 0xa9, 0x3d, 0xa0,
0x2e, 0x27, 0x81, 0xee, 0x07, 0x8c, 0x33, 0xf4, 0x90, 0x86, 0x9c, 0x32, 0xfd, 0x9a, 0xad, 0xcf,
0xd8, 0x85, 0x47, 0x43, 0xc6, 0x86, 0x2e, 0xa9, 0x08, 0x62, 0x77, 0x3c, 0xa8, 0x84, 0x3c, 0x18,
0xf7, 0x78, 0x1c, 0x58, 0xfa, 0x06, 0xa0, 0x98, 0x91, 0x5e, 0x4d, 0xc8, 0xa1, 0x1e, 0x6c, 0x44,
0x0a, 0x2e, 0x73, 0xfa, 0xb6, 0xeb, 0x74, 0x89, 0x1b, 0x6a, 0x52, 0x51, 0x2e, 0x2b, 0xd5, 0x3d,
0xfd, 0x8f, 0x29, 0xf4, 0x05, 0x01, 0xfd, 0x24, 0x89, 0x6e, 0x88, 0x60, 0xd3, 0xe3, 0xc1, 0x14,
0xe7, 0x2f, 0x96, 0x40, 0x74, 0x04, 0x99, 0xb8, 0xfa, 0x50, 0x4b, 0x09, 0xf1, 0xa7, 0x77, 0x14,
0x8f, 0x3f, 0x78, 0x16, 0x5d, 0x30, 0x60, 0xf3, 0x96, 0x7c, 0x48, 0x05, 0xf9, 0x9c, 0x4c, 0x35,
0xa9, 0x28, 0x95, 0xb3, 0x38, 0xfa, 0x45, 0x5b, 0x90, 0x9e, 0x38, 0xee, 0x98, 0x68, 0x29, 0x81,
0xc5, 0x87, 0xbd, 0xd4, 0xae, 0x54, 0xf8, 0x29, 0x43, 0xae, 0x41, 0x43, 0x4e, 0x3c, 0x12, 0xbc,
0x73, 0x78, 0xef, 0x0c, 0x3d, 0x06, 0xc5, 0x67, 0x01, 0xb7, 0xbd, 0xf1, 0xa8, 0x4b, 0x02, 0xa1,
0x92, 0xc3, 0x10, 0x41, 0x4d, 0x81, 0xa0, 0x32, 0xa8, 0x31, 0xc1, 0x19, 0x11, 0xdb, 0x0f, 0xc8,
0x80, 0x5e, 0x26, 0xba, 0x79, 0xc1, 0x72, 0x46, 0xa4, 0x2d, 0x50, 0x34, 0x80, 0x9c, 0x9b, 0x68,
0xdb, 0x7c, 0xea, 0x13, 0x4d, 0x2e, 0x4a, 0xe5, 0x7c, 0xd5, 0xb8, 0x63, 0xbb, 0x4b, 0x75, 0xcd,
0x4f, 0xd6, 0xd4, 0x27, 0x78, 0xdd, 0x5d, 0x38, 0xa1, 0x00, 0xfe, 0x9f, 0xe7, 0x11, 0x73, 0xed,
0x31, 0x57, 0x5b, 0x15, 0xb9, 0xcc, 0x7f, 0xca, 0xd5, 0x4e, 0xc4, 0xb0, 0xea, 0xde, 0x40, 0x90,
0x06, 0x19, 0xa7, 0xdf, 0x0f, 0x48, 0x18, 0x6a, 0xe9, 0xa2, 0x5c, 0xce, 0xe2, 0xd9, 0xb1, 0xd4,
0x82, 0xf5, 0xc5, 0x5a, 0x51, 0x06, 0x64, 0xa3, 0x79, 0xaa, 0xae, 0xa0, 0x4d, 0xd8, 0xe8, 0xd4,
0x0f, 0xcd, 0x03, 0x03, 0xdb, 0xf5, 0xe6, 0x7e, 0xeb, 0xb8, 0x79, 0xa8, 0x4a, 0x68, 0x0b, 0xd4,
0x19, 0xd8, 0x3a, 0xb6, 0x62, 0x34, 0x85, 0x14, 0xc8, 0x1c, 0x19, 0x96, 0x79, 0x62, 0x9c, 0xaa,
0x72, 0x49, 0x07, 0xf5, 0x66, 0x41, 0x42, 0xb4, 0xd1, 0x50, 0x57, 0xd0, 0x7f, 0xb0, 0xfa, 0xc6,
0xb2, 0xda, 0xaa, 0x14, 0x41, 0xd6, 0x41, 0x5b, 0x4d, 0x15, 0xbe, 0x4a, 0x90, 0xaf, 0x7b, 0x21,
0x09, 0x78, 0x9b, 0x85, 0x94, 0x53, 0xe6, 0xa1, 0xf7, 0x90, 0xa6, 0x5e, 0x9f, 0x5c, 0x8a, 0x71,
0xe6, 0xab, 0xaf, 0xee, 0x78, 0x2b, 0xcb, 0x2a, 0x7a, 0x3d, 0x92, 0xc0, 0xb1, 0x52, 0xe4, 0x93,
0x80, 0xb8, 0x0e, 0xa7, 0x13, 0x62, 0x73, 0x96, 0x38, 0x00, 0x66, 0x90, 0xc5, 0x4a, 0x3b, 0x90,
0x16, 0x01, 0x28, 0x0b, 0xe9, 0x5a, 0x1d, 0x77, 0xac, 0xb8, 0xda, 0x86, 0xd1, 0xb1, 0x54, 0x09,
0x01, 0xac, 0xed, 0x9b, 0xb5, 0x16, 0x36, 0xd5, 0x54, 0x44, 0x30, 0x6a, 0x96, 0x89, 0x55, 0xb9,
0xf0, 0x45, 0x86, 0xb5, 0x64, 0x17, 0x3f, 0x41, 0x7e, 0x3e, 0xd5, 0x51, 0x34, 0x16, 0x51, 0xbc,
0x52, 0x7d, 0x71, 0x9f, 0x91, 0xe2, 0xb9, 0x13, 0x63, 0x97, 0x7f, 0x86, 0x0d, 0x2a, 0x9a, 0xb3,
0xfd, 0xa4, 0x3b, 0xd1, 0x81, 0x52, 0x7d, 0x79, 0xaf, 0xab, 0xc1, 0x79, 0xba, 0x7c, 0xe1, 0xa7,
0xa0, 0xc4, 0x5b, 0xba, 0x68, 0xfc, 0xdd, 0xbf, 0xda, 0xf3, 0xe4, 0x23, 0xfc, 0x0e, 0x83, 0xf9,
0x7f, 0x74, 0xf1, 0x89, 0x74, 0xb4, 0x81, 0xc2, 0xe7, 0xd9, 0x19, 0x21, 0x5a, 0x3e, 0xf4, 0x1a,
0x72, 0x09, 0xa1, 0xc7, 0xbc, 0x01, 0x1d, 0x6a, 0x69, 0xd1, 0xd9, 0x03, 0x3d, 0x7e, 0x0a, 0xf5,
0xd9, 0x53, 0xa8, 0x77, 0xc4, 0x53, 0x88, 0xd7, 0x63, 0xf6, 0x81, 0x20, 0x97, 0x9e, 0x01, 0x5c,
0x27, 0x8e, 0x8c, 0x58, 0x6f, 0x7e, 0x30, 0x1a, 0xf5, 0xc3, 0x25, 0xaf, 0x29, 0x90, 0x69, 0x9a,
0xd6, 0x49, 0x0b, 0xbf, 0x55, 0x53, 0xfb, 0xfa, 0xf7, 0xab, 0x6d, 0xe9, 0xc7, 0xd5, 0xb6, 0xf4,
0xeb, 0x6a, 0x5b, 0xfa, 0x58, 0x8c, 0x7b, 0xa4, 0xac, 0xe2, 0xf8, 0xb4, 0x72, 0xcb, 0x03, 0xde,
0x5d, 0x13, 0x05, 0xec, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x26, 0x48, 0xc9, 0xfc, 0xde, 0x05,
0x00, 0x00,
}
lib.rs
//! Actori tracing - support for tokio tracing with Actori services.
#![deny(rust_2018_idioms, warnings)]
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actori_service::{
apply, dev::ApplyTransform, IntoServiceFactory, Service, ServiceFactory, Transform,
};
use futures_util::future::{ok, Either, Ready};
use tracing_futures::{Instrument, Instrumented};
/// A `Service` implementation that automatically enters/exits tracing spans
/// for the wrapped inner service.
#[derive(Clone)]
pub struct TracingService<S, F> {
inner: S,
make_span: F,
}
impl<S, F> TracingService<S, F> {
pub fn new(inner: S, make_span: F) -> Self {
TracingService { inner, make_span }
}
}
impl<S, F> Service for TracingService<S, F>
where
S: Service,
F: Fn(&S::Request) -> Option<tracing::Span>,
{
type Request = S::Request;
type Response = S::Response;
type Error = S::Error;
type Future = Either<S::Future, Instrumented<S::Future>>;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.inner.poll_ready(ctx)
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let span = (self.make_span)(&req);
let _enter = span.as_ref().map(|s| s.enter());
let fut = self.inner.call(req);
// make a child span to track the future's execution
if let Some(span) = span
.clone()
.map(|span| tracing::span!(parent: &span, tracing::Level::INFO, "future"))
{
Either::Right(fut.instrument(span))
} else {
Either::Left(fut)
}
}
}
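// Editor's note: returning `Either` keeps `call` allocation-free. When the
// closure yields no span, the inner future is returned untouched (`Left`);
// spanned requests get an `Instrumented` wrapper (`Right`) that re-enters the
// child span on every poll.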
/// A `Transform` implementation that wraps services with a [`TracingService`].
///
/// [`TracingService`]: struct.TracingService.html
pub struct TracingTransform<S, U, F> {
make_span: F,
_p: PhantomData<fn(S, U)>,
}
impl<S, U, F> TracingTransform<S, U, F> {
pub fn new(make_span: F) -> Self {
TracingTransform {
make_span,
_p: PhantomData,
}
}
}
impl<S, U, F> Transform<S> for TracingTransform<S, U, F>
where
S: Service,
U: ServiceFactory<
Request = S::Request,
Response = S::Response,
Error = S::Error,
Service = S,
>,
F: Fn(&S::Request) -> Option<tracing::Span> + Clone,
{
type Request = S::Request;
type Response = S::Response;
type Error = S::Error;
type Transform = TracingService<S, F>;
type InitError = U::InitError;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ok(TracingService::new(service, self.make_span.clone()))
}
}
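// Editor's note: `new_transform` is infallible in practice; it simply pairs the
// freshly built service with a clone of the span-factory closure, so a single
// `TracingTransform` can wrap any number of service instances.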
/// Wraps the provided service factory with a transform that automatically
/// enters/exits the given span.
///
/// The span to be entered/exited can be provided via a closure. The closure
/// is passed in a reference to the request being handled by the service.
///
/// For example:
/// ```rust,ignore
/// let traced_service = trace(
/// web_service,
/// |req: &Request| Some(span!(Level::INFO, "request", req.id))
/// );
/// ```
pub fn trace<S, U, F>(
service_factory: U,
make_span: F,
) -> ApplyTransform<TracingTransform<S::Service, S, F>, S>
where
S: ServiceFactory,
F: Fn(&S::Request) -> Option<tracing::Span> + Clone,
U: IntoServiceFactory<S>,
{
apply(
TracingTransform::new(make_span),
service_factory.into_factory(),
)
}
#[cfg(test)]
mod test {
use super::*;
use std::cell::RefCell;
use std::collections::{BTreeMap, BTreeSet};
use std::sync::{Arc, RwLock};
use actori_service::{fn_factory, fn_service};
use slab::Slab;
use tracing::{span, Event, Level, Metadata, Subscriber};
thread_local! {
static SPAN: RefCell<Vec<span::Id>> = RefCell::new(Vec::new());
}
#[derive(Default)]
struct Stats {
entered_spans: BTreeSet<u64>,
exited_spans: BTreeSet<u64>,
events_count: BTreeMap<u64, usize>,
}
#[derive(Default)]
struct Inner {
spans: Slab<&'static Metadata<'static>>,
stats: Stats,
}
#[derive(Clone, Default)]
struct TestSubscriber {
inner: Arc<RwLock<Inner>>,
}
impl Subscriber for TestSubscriber {
fn enabled(&self, _metadata: &Metadata<'_>) -> bool {
true
}
fn new_span(&self, span: &span::Attributes<'_>) -> span::Id {
let id = self.inner.write().unwrap().spans.insert(span.metadata());
span::Id::from_u64(id as u64 + 1)
}
fn record(&self, _span: &span::Id, _values: &span::Record<'_>) {}
fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {}
fn event(&self, event: &Event<'_>) {
let id = event
.parent()
.cloned()
.or_else(|| SPAN.with(|current_span| current_span.borrow().last().cloned()))
.unwrap();
*self
.inner
.write()
.unwrap()
.stats
.events_count
.entry(id.into_u64())
.or_insert(0) += 1;
}
fn enter(&self, span: &span::Id) {
self.inner
.write()
.unwrap()
.stats
.entered_spans
.insert(span.into_u64());
SPAN.with(|current_span| {
current_span.borrow_mut().push(span.clone());
});
}
fn exit(&self, span: &span::Id) {
self.inner
.write()
.unwrap()
.stats
.exited_spans
.insert(span.into_u64());
// we are guaranteed that on any given thread, spans are exited in reverse order
SPAN.with(|current_span| {
let leaving = current_span
.borrow_mut()
.pop()
.expect("told to exit span when not in span");
assert_eq!(
&leaving, span,
"told to exit span that was not most recently entered"
);
});
}
}
#[actori_rt::test]
async fn service_call() {
let service_factory = fn_factory(|| {
ok::<_, ()>(fn_service(|req: &'static str| {
tracing::event!(Level::TRACE, "It's happening - {}!", req);
ok::<_, ()>(())
}))
});
let subscriber = TestSubscriber::default();
let _guard = tracing::subscriber::set_default(subscriber.clone());
let span_svc = span!(Level::TRACE, "span_svc");
let trace_service_factory = trace(service_factory, |_: &&str| Some(span_svc.clone()));
let mut service = trace_service_factory.new_service(()).await.unwrap();
service.call("boo").await.unwrap();
let id = span_svc.id().unwrap().into_u64();
assert!(subscriber
.inner
.read()
.unwrap()
.stats
.entered_spans
.contains(&id));
assert!(subscriber
.inner
.read()
.unwrap()
.stats
.exited_spans
.contains(&id));
assert_eq!(subscriber.inner.read().unwrap().stats.events_count[&id], 1);
}
}
epan.rs
// Copyright 2021-2021, Ivor Wanders and the wireshark_dissector_rs contributors
// SPDX-License-Identifier: GPL-2.0-or-later
// https://www.wireshark.org/docs/wsdg_html/#ChDissectDetails
// /usr/include/wireshark/epan
// https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/epan/dissectors/packet-usb.c
// https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/epan/dissectors/packet-usb-hid.c
// 1.5 Constructing the protocol tree; https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/doc/README.dissector#L713
// 1.5.1 Field Registration; https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/doc/README.dissector#L1270
// 1.7 Calling other dissectors; https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/doc/README.dissector#L2471
// 1.7.1 Dissector Tables; https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/doc/README.dissector#L2540
// 1.5.2 Adding Items and Values to the Protocol Tree. https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/doc/README.dissector#L1351
// Reassembly 2.7.2 Modifying the pinfo struct; https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/doc/README.dissector#L3472
// Yeah, that doesn't work for USB packets... gg.
// https://doc.rust-lang.org/nomicon/ffi.html
// We can probably hook; https://github.com/wireshark/wireshark/blob/ebfbf958f6930b2dad486b33277470e8368dc111/epan/dissectors/packet-usb.c#L3516-L3518
// This seems useful?
// https://stackoverflow.com/a/55323693
#![allow(non_camel_case_types)]
#![allow(dead_code)]
extern crate libc;
// These files follow the same structure as the header files.
pub mod ftypes;
pub mod glib;
pub mod packet;
pub mod packet_info;
pub mod proto;
pub mod range;
pub mod tvbuff;
pub mod value_string;
pub type FieldType = ftypes::ftenum;
pub type FieldDisplay = proto::FieldDisplay;
pub type Encoding = proto::Encoding;
/*
Dissector
get_fields()
get_tree()
get_protocol_name()
get_registration()
ProtoTree
add_**(field_index, tvb, pos, len, encoding, ....) -> returns ProtoItem
add_boolean(field_index,tvb, start,
add_item_ret_uint64 -> returns (ProtoItem, u64)
PacketInfo?
Lets ignore for now.
TVB
// Raw peeking into the buffer.
ProtoItem
// Things like:
proto_item_set_text(proto_item *ti, const char *format, ...) G_GNUC_PRINTF(2,3);
proto_item_add_subtree(tree_index) -> ProtoTree
Todo: switch from pointers to references with proper lifetime if that's possible?
*/
/// Wrapper around the fvalue_t found in the FieldInfo struct
pub struct FValue<'a> {
value: &'a ftypes::fvalue_t,
}
impl FValue<'_> {
/// Create the FValue from the input argument.
pub unsafe fn from(v: &ftypes::fvalue_t) -> FValue {
// This may even be safe??
return FValue { value: v };
}
/// Obtain the enum that represents the type of data held by the value.
pub fn ftenum(&self) -> ftypes::ftenum {
unsafe { ftypes::fvalue_type_ftenum(self.value as *const ftypes::fvalue_t) }
}
/// Retrieve an unsigned integer, should only be called if ftenum returns an integer-type.
pub fn get_uinteger(&self) -> u32 {
unsafe { ftypes::fvalue_get_uinteger(self.value as *const ftypes::fvalue_t) }
}
    /// Retrieve a signed integer, should only be called if ftenum returns a signed integer-type.
pub fn get_sinteger(&self) -> i32 {
unsafe { ftypes::fvalue_get_sinteger(self.value as *const ftypes::fvalue_t) }
}
    /// Retrieve an unsigned 64 bit integer, should only be called if ftenum returns a 64 bit integer-type.
pub fn get_uinteger64(&self) -> u64 {
unsafe { ftypes::fvalue_get_uinteger64(self.value as *const ftypes::fvalue_t) }
}
    /// Retrieve a signed 64 bit integer, should only be called if ftenum returns a 64 bit signed integer-type.
pub fn get_sinteger64(&self) -> i64 {
unsafe { ftypes::fvalue_get_sinteger64(self.value as *const ftypes::fvalue_t) }
}
    /// Retrieve a floating point value, should only be called if ftenum returns a floating point type.
pub fn get_floating(&self) -> f64 {
unsafe { ftypes::fvalue_get_floating(self.value as *const ftypes::fvalue_t) }
}
/*
pub fn get_length(&self) -> usize
{
unsafe
{
// somehow causes a missing symbol?
ftypes::fvalue_length(self.value as *const ftypes::fvalue_t) as usize
}
}
pub fn get(self: &Self) -> &[u8] {
unsafe {
let data_ptr = ftypes::fvalue_get(self.value as *const ftypes::fvalue_t) as *const u8;
return std::slice::from_raw_parts(data_ptr, self.get_length());
};
}
*/
}
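// Usage sketch (editor's addition): dispatch on `ftenum()` before calling a
// typed getter, mirroring the `Debug` impl below:
//
//     match value.ftenum() {
//         ftypes::ftenum::UINT32 => println!("{}", value.get_uinteger()),
//         ftypes::ftenum::INT32 => println!("{}", value.get_sinteger()),
//         _ => {}
//     }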
impl Debug for FValue<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "FValue<'_> {{ ")?;
write!(f, "type: \"{:?}\", ", self.ftenum())?;
match self.ftenum() {
ftypes::ftenum::UINT8 => write!(f, "value: {:?}", self.get_uinteger())?,
ftypes::ftenum::UINT16 => write!(f, "value: {:?}", self.get_uinteger())?,
ftypes::ftenum::UINT32 => write!(f, "value: {:?}", self.get_uinteger())?,
ftypes::ftenum::INT8 => write!(f, "value: {:?}", self.get_sinteger())?,
ftypes::ftenum::INT16 => write!(f, "value: {:?}", self.get_sinteger())?,
ftypes::ftenum::INT32 => write!(f, "value: {:?}", self.get_sinteger())?,
//~ ftypes::ftenum::BYTES => write!(f, "value: {:?}", self.get())?,
_ => write!(f, "value: ...")?,
}
write!(f, "}}")
}
}
/// Enum to specify what type strings to use during the dissection.
#[derive(Debug, Clone)]
pub enum HeaderFieldStrings {
/// No string representation for the decoded value.
None,
/// Lookup using u32 as index.
ValueString(Vec<(u32, String)>),
/// Lookup using u64 as index.
Value64String(Vec<(u64, String)>),
/// Lookup using a range.
RangeString(Vec<((u32, u32), String)>),
// There's some more, that are not supported right now.
// string-string
// ext string
}
/// Trait to represent a Header Field Info. This is what the user should provide to specify the
/// dissection fields. It gets converted to the [`proto::header_field_info`] struct.
pub trait HeaderFieldInfo: Debug {
/// The human readable name for this header field. ('My Integer')
fn name(&self) -> String;
/// The abbreviation used for filters. ("proto.my_integer")
fn abbrev(&self) -> String;
/// The feature type for this entry.
fn feature_type(&self) -> ftypes::ftenum {
Default::default()
}
/// Specifies how this entry should be displayed.
fn display_type(&self) -> FieldDisplay {
FieldDisplay::BASE_NONE
}
/// Strings to look up from after dissection.
fn strings(&self) -> HeaderFieldStrings {
HeaderFieldStrings::None
}
/// Bitmask of interesting bits.
fn bitmask(&self) -> u64 {
0
}
fn blurb(&self) -> Option<String> {
None
}
}
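// Editor's sketch: a minimal `HeaderFieldInfo` implementation for a hypothetical
// 16-bit unsigned field. The type name and abbreviation below are illustrative
// only; the remaining methods fall back to the trait's defaults.
#[derive(Debug)]
struct ExampleU16Field;
impl HeaderFieldInfo for ExampleU16Field {
    fn name(&self) -> String {
        "Example Integer".to_string()
    }
    fn abbrev(&self) -> String {
        "example.my_integer".to_string()
    }
    fn feature_type(&self) -> ftypes::ftenum {
        ftypes::ftenum::UINT16
    }
}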
/// Struct to represent header field information, serves as a read only wrapper around the `header_field_info` C struct.
pub struct WrappedHeaderFieldInfo {
hfi: *const proto::header_field_info,
}
impl WrappedHeaderFieldInfo {
/// Function to make this structure from a raw pointer.
pub unsafe fn from_ptr(header_field_info: *const proto::header_field_info) -> Box<dyn HeaderFieldInfo> {
if header_field_info.is_null() {
panic!("HeaderFieldInfo from nullptr.");
}
return Box::new(WrappedHeaderFieldInfo { hfi: header_field_info });
}
}
impl HeaderFieldInfo for WrappedHeaderFieldInfo {
/// Retrieve the pretty field name
fn name(&self) -> String {
use std::ffi::CStr;
unsafe {
match CStr::from_ptr((*self.hfi).name).to_str() {
Ok(t) => t.to_owned(),
Err(_) => "".to_owned(),
}
}
}
/// Retrieve the field abbreviation.
fn abbrev(&self) -> String {
use std::ffi::CStr;
unsafe {
match CStr::from_ptr((*self.hfi).abbrev).to_str() {
Ok(t) => t.to_owned(),
Err(_) => "".to_owned(),
}
}
}
/// Obtain the field type enum.
fn feature_type(&self) -> ftypes::ftenum {
unsafe {
return (*self.hfi).type_;
}
}
/// Obtain the field display enum.
fn display_type(&self) -> proto::FieldDisplay {
unsafe {
            return (*self.hfi).display.into();
        }
}
}
use core::fmt::Debug;
impl Debug for WrappedHeaderFieldInfo {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "HeaderFieldInfo {{ ")?;
write!(f, "name: \"{}\", ", self.name())?;
write!(f, "abbrev: \"{}\", ", self.abbrev())?;
write!(f, "feature_type: {:?}, ", self.feature_type())?;
//~ write!(f, "display: {:?}", self.display())?; // This segfaults, somewhere in 'gimli'.
write!(f, "}}")
}
}
/// Struct to represent field information, serves as a wrapper around the `field_info` C struct.
pub struct FieldInfo {
fi: *const proto::field_info,
}
impl FieldInfo {
/// Function to make this structure from a raw pointer.
pub unsafe fn from_ptr(field_info: *const proto::field_info) -> FieldInfo {
if field_info.is_null() {
panic!("Field Info from nullptr.");
}
return FieldInfo { fi: field_info };
}
/// Obtain the header field info for this field.
pub fn hfinfo(self: &Self) -> Result<Box<dyn HeaderFieldInfo>, &'static str> {
unsafe {
if (*self.fi).hfinfo.is_null() {
return Err("No hfinfo provided");
}
return Ok(WrappedHeaderFieldInfo::from_ptr((*self.fi).hfinfo));
}
}
/// current start of data in field_info.ds_tvb
pub fn start(self: &Self) -> i32 {
unsafe { (*self.fi).start }
}
/// current data length of item in field_info.ds_tvb
pub fn length(self: &Self) -> i32 {
unsafe { (*self.fi).length }
}
/// data source tvbuff
pub fn ds_tvb(self: &Self) -> Option<TVB> {
unsafe {
if (*self.fi).ds_tvb.is_null() {
return None;
}
return Some(TVB::from_ptr((*self.fi).ds_tvb));
}
}
pub fn value(self: &Self) -> FValue {
unsafe { FValue::from(&(*self.fi).value) }
}
}
impl Debug for FieldInfo {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "FieldInfo {{ ")?;
write!(f, "hfinfo: \"{:?}\", ", self.hfinfo())?;
write!(f, "start: \"{:?}\", ", self.start())?;
write!(f, "length: \"{:?}\", ", self.length())?;
write!(f, "value: \"{:?}\", ", self.value())?;
write!(f, "}}")
}
}
/// Struct to represent a protocol tree, serves as a wrapper around the `proto_tree_*` C functions.
#[derive(Copy, Clone)]
pub struct ProtoTree {
tree: *mut proto::proto_tree,
}
impl ProtoTree {
/// Function to make this structure from a raw pointer.
pub unsafe fn from_ptr(tree: *mut proto::proto_tree) -> ProtoTree {
return ProtoTree { tree: tree };
}
/// Add an item to a proto_tree, using the text label registered to that item.
/// The item is extracted from the tvbuff handed to it.
pub fn add_item(
self: &mut Self,
hfindex: proto::HFIndex,
tvb: &TVB,
start: usize,
length: usize,
encoding: proto::Encoding,
) -> ProtoItem {
unsafe {
ProtoItem {
item: proto::proto_tree_add_item(self.tree, hfindex, tvb.into(), start as i32, length as i32, encoding),
}
}
}
/// Add bits to a proto_tree, using the text label registered to that item. The item is
/// extracted from the tvbuff handed to it.
pub fn add_bits_item(
self: &mut Self,
hfindex: proto::HFIndex,
tvb: &TVB,
bit_offset: usize,
no_of_bits: usize,
encoding: proto::Encoding,
) -> ProtoItem {
unsafe {
ProtoItem {
item: proto::proto_tree_add_bits_item(
self.tree,
hfindex,
tvb.into(),
bit_offset as i32,
no_of_bits as i32,
encoding,
),
}
}
}
/// Add an integer data item to a proto_tree, using the text label registered to that item.
/// The item is extracted from the tvbuff handed to it, and the retrieved
/// value is also returned to so the caller gets it back for other uses.
pub fn add_item_ret_int(
self: &mut Self,
hfindex: proto::HFIndex,
tvb: &TVB,
start: usize,
length: usize,
encoding: proto::Encoding,
) -> (ProtoItem, i32) {
let mut retval: i32 = 0;
unsafe {
return (
ProtoItem {
item: proto::proto_tree_add_item_ret_int(
self.tree,
hfindex,
tvb.into(),
start as i32,
length as i32,
encoding,
&mut retval as *mut i32,
),
},
retval,
);
}
}
/// Function to retrieve all field info's currently associated with the protocol tree.
pub fn all_finfos(self: &mut Self) -> Vec<FieldInfo> {
let mut res: Vec<FieldInfo> = Vec::new();
// see wslua_field.c function wslua_all_field_infos
        // Not too sure when this happens... tree seems to be null when first invoked?
        if self.tree.is_null() {
return res;
}
unsafe {
let fields = proto::proto_all_finfos(self.tree);
for i in 0..(*fields).len() {
let field =
std::mem::transmute::<*mut libc::c_void, *const proto::field_info>((*fields).index(i as isize));
res.push(FieldInfo::from_ptr(field));
}
glib::g_ptr_array_free(fields, true);
// The field info's actually stay in scope, as they are part of the proto datastructure.
// the lua part also persists them after they're gone.
}
return res;
}
}
use std::ffi::CString;
/// Struct to represent a protocol item, serves as a wrapper around the `proto_item_*` C functions.
#[derive(Copy, Clone)]
pub struct ProtoItem {
item: *mut proto::proto_item,
}
impl From<&mut ProtoItem> for *mut proto::proto_item {
fn from(field: &mut ProtoItem) -> Self {
return field.item;
}
}
impl ProtoItem {
/// Replace text of item after it already has been created.
pub fn set_text(self: &mut Self, text: &str) {
let to_add = CString::new(text).unwrap().into_raw();
unsafe {
proto::proto_item_set_text(self.item.into(), to_add);
// and clean up the string again.
let _ = CString::from_raw(to_add);
}
}
/// Append to text of item after it has already been created.
pub fn append_text(self: &mut Self, text: &str) {
let to_add = CString::new(text).unwrap().into_raw();
unsafe {
proto::proto_item_append_text(self.item.into(), to_add);
let _ = CString::from_raw(to_add);
}
}
/// Prepend to text of item after it has already been created.
pub fn prepend_text(self: &mut Self, text: &str) {
let to_add = CString::new(text).unwrap().into_raw();
unsafe {
proto::proto_item_prepend_text(self.item.into(), to_add);
let _ = CString::from_raw(to_add);
}
}
pub fn add_subtree(self: &mut Self, ett_id: proto::ETTIndex) -> ProtoTree {
unsafe { ProtoTree::from_ptr(proto::proto_item_add_subtree(self.item.into(), ett_id)) }
}
}
/// Struct to represent a Testy Virtual Buffer, serves as a wrapper around the `tvb_*` C functions.
#[derive(Copy, Clone)]
pub struct TVB {
tvb: *mut tvbuff::tvbuff_t,
}
impl TVB {
/// Create this structure from a raw pointer.
pub unsafe fn from_ptr(tvb: *mut tvbuff::tvbuff_t) -> TVB {
return TVB { tvb: tvb };
}
/// Function to create a byte slice that can be used to access the data from the tvb.
/// This comes with the following disclaimer in the header:
///
/// This function is possibly expensive, temporarily allocating
/// another copy of the packet data. Furthermore, it's dangerous because once
/// this pointer is given to the user, there's no guarantee that the user will
/// honor the 'length' and not overstep the boundaries of the buffer.
///
/// If you're thinking of using tvb_get_ptr, STOP WHAT YOU ARE DOING
/// IMMEDIATELY. Go take a break. Consider that tvb_get_ptr hands you
/// a raw, unprotected pointer that you can easily use to create a
/// security vulnerability or otherwise crash Wireshark. Then consider
/// that you can probably find a function elsewhere in this file that
/// does exactly what you want in a much more safe and robust manner.
pub fn tvb_get_ptr(&self, offset: usize) -> &[u8] {
unsafe {
let mut available_length = tvbuff::tvb_reported_length_remaining(self.tvb, offset as i32);
if available_length < 0 {
available_length = 0;
}
let data_ptr = tvbuff::tvb_get_ptr(self.tvb, offset as i32, available_length as i32);
return std::slice::from_raw_parts(data_ptr, available_length as usize);
};
}
/// Get reported length of buffer.
pub fn reported_length(&self) -> usize {
unsafe {
return tvbuff::tvb_reported_length(self.tvb) as usize;
}
}
/// Computes bytes of reported packet data to end of buffer, from offset
/// (which can be negative, to indicate bytes from end of buffer). Function
/// returns 0 if offset is either at the end of the buffer or out of bounds.
/// No exception is thrown.
pub fn reported_length_remaining(&self, offset: usize) -> i32 {
unsafe {
return tvbuff::tvb_reported_length_remaining(self.tvb, offset as i32);
}
}
/// Retrieve a block of memory from the buffer.
///
/// Does not suffer from possible
/// expense of tvb_get_ptr(), since this routine is smart enough
/// to copy data in chunks if the request range actually exists in
/// different "real" tvbuffs.
pub fn get_mem(&self, offset: usize, length: usize) -> Vec<u8> {
let mut v: Vec<u8> = vec![0; length];
unsafe {
tvbuff::tvb_memcpy(self.tvb, v.as_mut_ptr() as *mut libc::c_void, offset as i32, length);
}
return v;
}
}
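// Usage sketch (editor's addition): prefer `get_mem` over `tvb_get_ptr` when a
// copy is acceptable, e.g. safely reading a four-byte header:
//
//     let header: Vec<u8> = tvb.get_mem(0, 4);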
impl From<&mut TVB> for *mut tvbuff::tvbuff_t {
fn from(field: &mut TVB) -> Self {
return field.tvb;
}
}
impl From<&TVB> for *const tvbuff::tvbuff_t {
fn from(field: &TVB) -> Self {
return field.tvb;
}
}
public_crudapi.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: ves.io/schema/alert_receiver/public_crudapi.proto
// Alert Receiver
//
// x-displayName: "Alert Receiver"
// Alert Receiver is used to specify a receiver (Slack, PagerDuty, etc.) to send the alert notifications.
// An Alert Receiver may be associated with one or more Alert Policy objects, which defines one or more routes to match
// the incoming alert.
package alert_receiver
import (
context "context"
fmt "fmt"
_ "github.com/gogo/googleapis/google/api"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types "github.com/gogo/protobuf/types"
golang_proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
schema "github.com/volterraedge/terraform-provider-volterra/pbgo/extschema/schema"
_ "github.com/volterraedge/terraform-provider-volterra/pbgo/extschema/schema/vesenv"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strconv "strconv"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = golang_proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// GetResponseFormatCode
//
// x-displayName: "Get Response Format"
// This is the various forms that can be requested to be sent in the GetResponse
type GetResponseFormatCode int32
const (
// x-displayName: "Default Format"
// Default format of returned resource
GET_RSP_FORMAT_DEFAULT GetResponseFormatCode = 0
// x-displayName: "Create request Format"
// Response should be in CreateRequest format
GET_RSP_FORMAT_FOR_CREATE GetResponseFormatCode = 1
// x-displayName: "Replace request format"
// Response should be in ReplaceRequest format
GET_RSP_FORMAT_FOR_REPLACE GetResponseFormatCode = 2
// x-displayName: "Status format"
// Response should be in StatusObject(s) format
GET_RSP_FORMAT_STATUS GetResponseFormatCode = 3
// x-displayName: "GetSpecType format"
// Response should be in format of GetSpecType
GET_RSP_FORMAT_READ GetResponseFormatCode = 4
// x-displayName: "Referring Objects"
// Response should have other objects referring to this object
GET_RSP_FORMAT_REFERRING_OBJECTS GetResponseFormatCode = 5
)
var GetResponseFormatCode_name = map[int32]string{
0: "GET_RSP_FORMAT_DEFAULT",
1: "GET_RSP_FORMAT_FOR_CREATE",
2: "GET_RSP_FORMAT_FOR_REPLACE",
3: "GET_RSP_FORMAT_STATUS",
4: "GET_RSP_FORMAT_READ",
5: "GET_RSP_FORMAT_REFERRING_OBJECTS",
}
var GetResponseFormatCode_value = map[string]int32{
"GET_RSP_FORMAT_DEFAULT": 0,
"GET_RSP_FORMAT_FOR_CREATE": 1,
"GET_RSP_FORMAT_FOR_REPLACE": 2,
"GET_RSP_FORMAT_STATUS": 3,
"GET_RSP_FORMAT_READ": 4,
"GET_RSP_FORMAT_REFERRING_OBJECTS": 5,
}
func (GetResponseFormatCode) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{0}
}
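// Editor's note: the paired maps above convert between enum values and their
// wire names, e.g. GetResponseFormatCode_name[int32(GET_RSP_FORMAT_READ)] yields
// "GET_RSP_FORMAT_READ" and GetResponseFormatCode_value["GET_RSP_FORMAT_STATUS"]
// yields 3.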
// CreateRequest is used to create an instance of alert_receiver
//
// x-displayName: "Create Request"
// This is the input message of the 'Create' RPC
type CreateRequest struct {
// metadata
//
// x-displayName: "Metadata"
// Common attributes that can be set during create for all configuration objects like name, labels etc.
Metadata *schema.ObjectCreateMetaType `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// spec
//
// x-displayName: "Spec"
// A specification of the configuration object to be created
Spec *CreateSpecType `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
}
func (m *CreateRequest) Reset() { *m = CreateRequest{} }
func (*CreateRequest) ProtoMessage() {}
func (*CreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{0}
}
func (m *CreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CreateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CreateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateRequest.Merge(m, src)
}
func (m *CreateRequest) XXX_Size() int {
return m.Size()
}
func (m *CreateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateRequest proto.InternalMessageInfo
func (m *CreateRequest) GetMetadata() *schema.ObjectCreateMetaType {
if m != nil {
return m.Metadata
}
return nil
}
func (m *CreateRequest) GetSpec() *CreateSpecType {
if m != nil {
return m.Spec
}
return nil
}
type CreateResponse struct {
// metadata
//
// x-displayName: "Metadata"
// Common attributes of the object like name, labels etc.
Metadata *schema.ObjectGetMetaType `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// system metadata
//
// x-displayName: "System Metadata"
	// System generated attributes of this object.
SystemMetadata *schema.SystemObjectGetMetaType `protobuf:"bytes,3,opt,name=system_metadata,json=systemMetadata,proto3" json:"system_metadata,omitempty"`
// spec
//
// x-displayName: "Spec"
// A specification of the configuration object created
Spec *GetSpecType `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
}
func (m *CreateResponse) Reset() { *m = CreateResponse{} }
func (*CreateResponse) ProtoMessage() {}
func (*CreateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{1}
}
func (m *CreateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CreateResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CreateResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateResponse.Merge(m, src)
}
func (m *CreateResponse) XXX_Size() int {
return m.Size()
}
func (m *CreateResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CreateResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CreateResponse proto.InternalMessageInfo
func (m *CreateResponse) GetMetadata() *schema.ObjectGetMetaType {
if m != nil {
return m.Metadata
}
return nil
}
func (m *CreateResponse) GetSystemMetadata() *schema.SystemObjectGetMetaType {
if m != nil {
return m.SystemMetadata
}
return nil
}
func (m *CreateResponse) GetSpec() *GetSpecType {
if m != nil {
return m.Spec
}
return nil
}
// ReplaceRequest is used to replace contents of a alert_receiver
//
// x-displayName: "Replace Request"
// This is the input message of the 'Replace' RPC
type ReplaceRequest struct {
// metadata
//
// x-displayName: "Metadata"
// Common attributes that can be set during replace for all configuration objects like labels etc.
Metadata *schema.ObjectReplaceMetaType `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// spec
//
// x-displayName: "Spec"
// A specification of the configuration object to be replaced
Spec *ReplaceSpecType `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
// resource_version
//
// x-displayName: "Resource Version"
// x-example: "42"
// If provided, do the replace operation if the configuration object is still at 'resource_version'
ResourceVersion string `protobuf:"bytes,3,opt,name=resource_version,json=resourceVersion,proto3" json:"resource_version,omitempty"`
}
func (m *ReplaceRequest) Reset() { *m = ReplaceRequest{} }
func (*ReplaceRequest) ProtoMessage() {}
func (*ReplaceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{2}
}
func (m *ReplaceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReplaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReplaceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ReplaceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReplaceRequest.Merge(m, src)
}
func (m *ReplaceRequest) XXX_Size() int {
return m.Size()
}
func (m *ReplaceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReplaceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReplaceRequest proto.InternalMessageInfo
func (m *ReplaceRequest) GetMetadata() *schema.ObjectReplaceMetaType {
if m != nil {
return m.Metadata
}
return nil
}
func (m *ReplaceRequest) GetSpec() *ReplaceSpecType {
if m != nil {
return m.Spec
}
return nil
}
func (m *ReplaceRequest) GetResourceVersion() string {
if m != nil {
return m.ResourceVersion
}
return ""
}
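// Usage sketch (editor's addition): optimistic-concurrency replace. The client
// echoes the resource_version observed on a prior read so the server can reject
// stale writes; the getRsp variable below is illustrative.
//
//	req := &ReplaceRequest{
//		Metadata:        getRsp.ReplaceForm.GetMetadata(),
//		Spec:            getRsp.ReplaceForm.GetSpec(),
//		ResourceVersion: getRsp.ResourceVersion,
//	}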
type ReplaceResponse struct {
}
func (m *ReplaceResponse) Reset() { *m = ReplaceResponse{} }
func (*ReplaceResponse) ProtoMessage() {}
func (*ReplaceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{3}
}
func (m *ReplaceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReplaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReplaceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ReplaceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReplaceResponse.Merge(m, src)
}
func (m *ReplaceResponse) XXX_Size() int {
return m.Size()
}
func (m *ReplaceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ReplaceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ReplaceResponse proto.InternalMessageInfo
// GetRequest is used to get different forms of a alert_receiver
//
// x-displayName: "Get Request"
// This is the input message of the 'Get' RPC. Different forms of a resource
// for e.g. a ReplaceRequest form (for editing), a CreateRequest form (for
// creating new instance of alert_receiver) etc. can be fetched
type GetRequest struct {
// namespace
//
// x-displayName: "Namespace"
// x-example: "ns1"
// The namespace in which the configuration object is present
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
// name
//
// x-displayName: "Name"
// x-example: "name"
// The name of the configuration object to be fetched
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// response_format
//
// x-displayName: "Response Format"
// The format in which the configuration object is to be fetched. This could be for example
// - in GetSpec form for the contents of object
// - in CreateRequest form to create a new similar object
// - to ReplaceRequest form to replace changeable values
ResponseFormat GetResponseFormatCode `protobuf:"varint,3,opt,name=response_format,json=responseFormat,proto3,enum=ves.io.schema.alert_receiver.GetResponseFormatCode" json:"response_format,omitempty"`
}
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{4}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetRequest.Merge(m, src)
}
func (m *GetRequest) XXX_Size() int {
return m.Size()
}
func (m *GetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetRequest proto.InternalMessageInfo
func (m *GetRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
func (m *GetRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *GetRequest) GetResponseFormat() GetResponseFormatCode {
if m != nil {
return m.ResponseFormat
}
return GET_RSP_FORMAT_DEFAULT
}
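// Illustrative sketch, not part of the generated code: a GetRequest built by
// hand, using the x-example values documented on the fields above.
// GET_RSP_FORMAT_DEFAULT is the zero value of GetResponseFormatCode; the
// other format codes are defined with the enum elsewhere in this package.
func exampleGetRequest() *GetRequest {
	return &GetRequest{
		Namespace:      "ns1",
		Name:           "name",
		ResponseFormat: GET_RSP_FORMAT_DEFAULT,
	}
}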
// GetResponse is the shape of a read alert_receiver
//
// x-displayName: "Get Response"
// This is the output message of the 'Get' RPC
type GetResponse struct {
// object
//
// x-displayName: "Object"
Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"`
// create_form
//
// x-displayName: "CreateRequest Format"
// Format used to create a new similar object
CreateForm *CreateRequest `protobuf:"bytes,2,opt,name=create_form,json=createForm,proto3" json:"create_form,omitempty"`
// replace_form
//
// x-displayName: "ReplaceRequest Format"
// Format to replace changeable values in object
ReplaceForm *ReplaceRequest `protobuf:"bytes,3,opt,name=replace_form,json=replaceForm,proto3" json:"replace_form,omitempty"`
// resource_version
//
// x-displayName: "Resource Version"
// x-example: "42"
// Version of the object
ResourceVersion string `protobuf:"bytes,4,opt,name=resource_version,json=resourceVersion,proto3" json:"resource_version,omitempty"`
// metadata
//
// x-displayName: "Metadata"
// Common attributes of the object like name, labels etc.
Metadata *schema.ObjectGetMetaType `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"`
// system_metadata
//
// x-displayName: "System Metadata"
// System generated attributes of this object.
SystemMetadata *schema.SystemObjectGetMetaType `protobuf:"bytes,7,opt,name=system_metadata,json=systemMetadata,proto3" json:"system_metadata,omitempty"`
// spec
//
// x-displayName: "Spec"
// A specification of the configuration object read
Spec *GetSpecType `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"`
// status
//
// x-displayName: "Status"
// The status reported by different services for this configuration object
Status []*StatusObject `protobuf:"bytes,20000,rep,name=status,proto3" json:"status,omitempty"`
// referring_objects
//
// x-displayName: "Referring Objects"
// The set of objects that are referring to this object in their spec
ReferringObjects []*schema.ObjectRefType `protobuf:"bytes,8,rep,name=referring_objects,json=referringObjects,proto3" json:"referring_objects,omitempty"`
}
func (m *GetResponse) Reset() { *m = GetResponse{} }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{5}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GetResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetResponse.Merge(m, src)
}
func (m *GetResponse) XXX_Size() int {
return m.Size()
}
func (m *GetResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetResponse proto.InternalMessageInfo
func (m *GetResponse) GetObject() *Object {
if m != nil {
return m.Object
}
return nil
}
func (m *GetResponse) GetCreateForm() *CreateRequest {
if m != nil {
return m.CreateForm
}
return nil
}
func (m *GetResponse) GetReplaceForm() *ReplaceRequest {
if m != nil {
return m.ReplaceForm
}
return nil
}
func (m *GetResponse) GetResourceVersion() string {
if m != nil {
return m.ResourceVersion
}
return ""
}
func (m *GetResponse) GetMetadata() *schema.ObjectGetMetaType {
if m != nil {
return m.Metadata
}
return nil
}
func (m *GetResponse) GetSystemMetadata() *schema.SystemObjectGetMetaType {
if m != nil {
return m.SystemMetadata
}
return nil
}
func (m *GetResponse) GetSpec() *GetSpecType {
if m != nil {
return m.Spec
}
return nil
}
func (m *GetResponse) GetStatus() []*StatusObject {
if m != nil {
return m.Status
}
return nil
}
func (m *GetResponse) GetReferringObjects() []*schema.ObjectRefType {
if m != nil {
return m.ReferringObjects
}
return nil
}
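// Illustrative sketch, not part of the generated code: the getters above are
// nil-safe, so a response can be inspected without an explicit nil check
// first.
func exampleCountStatuses(rsp *GetResponse) int {
	// Returns 0 when rsp is nil or carries no status objects.
	return len(rsp.GetStatus())
}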
// ListRequest is used to get a collection of alert_receiver
//
// x-displayName: "List Request"
// This is the input message of the 'List' RPC. Fields can be used to control
// scope and filtering of collection.
type ListRequest struct {
// namespace
//
// x-displayName: "Namespace"
// x-example: "ns1"
// Namespace to scope the listing of alert_receiver
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
// label_filter
//
// x-displayName: "Label Filter"
// x-example: "env in (staging, testing), tier in (web, db)"
// A LabelSelectorType expression that every item in list response will satisfy
LabelFilter string `protobuf:"bytes,2,opt,name=label_filter,json=labelFilter,proto3" json:"label_filter,omitempty"`
// report_fields
//
// x-displayName: "Report Fields"
// x-example: ""
// Extra fields to return along with summary fields
ReportFields []string `protobuf:"bytes,3,rep,name=report_fields,json=reportFields,proto3" json:"report_fields,omitempty"`
// report_status_fields
//
// x-displayName: "Report Status Fields"
// x-example: ""
// Extra status fields to return along with summary fields
ReportStatusFields []string `protobuf:"bytes,4,rep,name=report_status_fields,json=reportStatusFields,proto3" json:"report_status_fields,omitempty"`
}
func (m *ListRequest) Reset() { *m = ListRequest{} }
func (*ListRequest) ProtoMessage() {}
func (*ListRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{6}
}
func (m *ListRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ListRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ListRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListRequest.Merge(m, src)
}
func (m *ListRequest) XXX_Size() int {
return m.Size()
}
func (m *ListRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListRequest proto.InternalMessageInfo
func (m *ListRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
func (m *ListRequest) GetLabelFilter() string {
if m != nil {
return m.LabelFilter
}
return ""
}
func (m *ListRequest) GetReportFields() []string {
if m != nil {
return m.ReportFields
}
return nil
}
func (m *ListRequest) GetReportStatusFields() []string {
if m != nil {
return m.ReportStatusFields
}
return nil
}
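// Illustrative sketch, not part of the generated code: a ListRequest scoped
// to a namespace with a label filter taken from the x-example above. The
// report field names are hypothetical placeholders, not a documented set.
func exampleListRequest() *ListRequest {
	return &ListRequest{
		Namespace:    "ns1",
		LabelFilter:  "env in (staging, testing), tier in (web, db)",
		ReportFields: []string{"metadata", "spec"}, // hypothetical field names
	}
}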
// ListResponseItem is an individual item in a collection of alert_receiver
//
// x-displayName: "List Item"
// By default a summary of alert_receiver is returned in 'List'. By setting
// 'report_fields' in the ListRequest, more details of each item can be obtained.
type ListResponseItem struct {
// tenant
//
// x-displayName: "Tenant"
// x-example: "acmecorp"
// The tenant this item belongs to
Tenant string `protobuf:"bytes,6,opt,name=tenant,proto3" json:"tenant,omitempty"`
// namespace
//
// x-displayName: "Namespace"
// x-example: "ns1"
// The namespace this item belongs to
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
// name
//
// x-displayName: "Name"
// x-example: "name"
// The name of this alert_receiver
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// uid
//
// x-displayName: "UID"
// x-example: "d27938ba-967e-40a7-9709-57b8627f9f75"
// The unique uid of this alert_receiver
Uid string `protobuf:"bytes,3,opt,name=uid,proto3" json:"uid,omitempty"`
// description
//
// x-displayName: "Description"
// The description set for this alert_receiver
Description string `protobuf:"bytes,11,opt,name=description,proto3" json:"description,omitempty"`
// disabled
//
// x-displayName: "Disabled"
// A value of true indicates the alert_receiver is administratively disabled
Disabled bool `protobuf:"varint,12,opt,name=disabled,proto3" json:"disabled,omitempty"`
// labels
//
// x-displayName: "Labels"
// The set of labels present on this alert_receiver
Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// annotations
//
// x-displayName: "Annotations"
// The set of annotations present on this alert_receiver
Annotations map[string]string `protobuf:"bytes,10,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// owner_view
//
// x-displayName: "Owner View"
// Reference to the view object that owns this object.
// If there is no view owner, this field will be nil.
// If not nil, this object can only be edited/deleted through the view
OwnerView *schema.ViewRefType `protobuf:"bytes,9,opt,name=owner_view,json=ownerView,proto3" json:"owner_view,omitempty"`
// metadata
//
// x-displayName: "Metadata"
// If list request has report_fields set then metadata will
// contain all the metadata associated with the object.
Metadata *schema.ObjectGetMetaType `protobuf:"bytes,13,opt,name=metadata,proto3" json:"metadata,omitempty"`
// system_metadata
//
// x-displayName: "System Metadata"
// If list request has report_fields set then system_metadata will
// contain all the system generated details of this object.
SystemMetadata *schema.SystemObjectGetMetaType `protobuf:"bytes,14,opt,name=system_metadata,json=systemMetadata,proto3" json:"system_metadata,omitempty"`
// object
//
// x-displayName: "Object"
// If the ListRequest specified any report_fields, they will appear in object
// DEPRECATED by get_spec, metadata and system_metadata
Object *Object `protobuf:"bytes,5,opt,name=object,proto3" json:"object,omitempty"`
// get_spec
//
// x-displayName: "Get Specification"
// If the ListRequest specified any report_fields, they will appear in get_spec
GetSpec *GetSpecType `protobuf:"bytes,7,opt,name=get_spec,json=getSpec,proto3" json:"get_spec,omitempty"`
// status
//
// x-displayName: "Status"
// The status reported by different services for this configuration object
StatusSet []*StatusObject `protobuf:"bytes,8,rep,name=status_set,json=statusSet,proto3" json:"status_set,omitempty"`
}
func (m *ListResponseItem) Reset() { *m = ListResponseItem{} }
func (*ListResponseItem) ProtoMessage() {}
func (*ListResponseItem) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{7}
}
func (m *ListResponseItem) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ListResponseItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ListResponseItem.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ListResponseItem) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListResponseItem.Merge(m, src)
}
func (m *ListResponseItem) XXX_Size() int {
return m.Size()
}
func (m *ListResponseItem) XXX_DiscardUnknown() {
xxx_messageInfo_ListResponseItem.DiscardUnknown(m)
}
var xxx_messageInfo_ListResponseItem proto.InternalMessageInfo
func (m *ListResponseItem) GetTenant() string {
if m != nil {
return m.Tenant
}
return ""
}
func (m *ListResponseItem) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
func (m *ListResponseItem) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ListResponseItem) GetUid() string {
if m != nil {
return m.Uid
}
return ""
}
func (m *ListResponseItem) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *ListResponseItem) GetDisabled() bool {
if m != nil {
return m.Disabled
}
return false
}
func (m *ListResponseItem) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *ListResponseItem) GetAnnotations() map[string]string {
if m != nil {
return m.Annotations
}
return nil
}
func (m *ListResponseItem) GetOwnerView() *schema.ViewRefType {
if m != nil {
return m.OwnerView
}
return nil
}
func (m *ListResponseItem) GetMetadata() *schema.ObjectGetMetaType {
if m != nil {
return m.Metadata
}
return nil
}
func (m *ListResponseItem) GetSystemMetadata() *schema.SystemObjectGetMetaType {
if m != nil {
return m.SystemMetadata
}
return nil
}
func (m *ListResponseItem) GetObject() *Object {
if m != nil {
return m.Object
}
return nil
}
func (m *ListResponseItem) GetGetSpec() *GetSpecType {
if m != nil {
return m.GetSpec
}
return nil
}
func (m *ListResponseItem) GetStatusSet() []*StatusObject {
if m != nil {
return m.StatusSet
}
return nil
}
// ListResponse is the collection of alert_receiver
//
// x-displayName: "List Response"
// This is the output message of 'List' RPC.
type ListResponse struct {
// items
//
// x-displayName: "Items"
// items represents the collection in response
Items []*ListResponseItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
// errors
//
// x-displayName: "Errors"
// Errors (if any) while listing items from the collection
Errors []*schema.ErrorType `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
}
func (m *ListResponse) Reset() { *m = ListResponse{} }
func (*ListResponse) ProtoMessage() {}
func (*ListResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{8}
}
func (m *ListResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ListResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListResponse.Merge(m, src)
}
func (m *ListResponse) XXX_Size() int {
return m.Size()
}
func (m *ListResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListResponse proto.InternalMessageInfo
func (m *ListResponse) GetItems() []*ListResponseItem {
if m != nil {
return m.Items
}
return nil
}
func (m *ListResponse) GetErrors() []*schema.ErrorType {
if m != nil {
return m.Errors
}
return nil
}
// DeleteRequest is used to delete an alert_receiver
//
// x-displayName: "Delete Request"
// This is the input message of the 'Delete' RPC.
type DeleteRequest struct {
// namespace
//
// x-displayName: "Namespace"
// x-example: "ns1"
// Namespace in which the configuration object is present
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
// name
//
// x-displayName: "Name"
// x-example: "name"
// Name of the configuration object
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// fail_if_referred
//
// x-displayName: "Fail-If-Referred"
// Fail the delete operation if this object is being referred by other objects
FailIfReferred bool `protobuf:"varint,3,opt,name=fail_if_referred,json=failIfReferred,proto3" json:"fail_if_referred,omitempty"`
}
func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d7901683cee671bb, []int{9}
}
func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *DeleteRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteRequest.Merge(m, src)
}
func (m *DeleteRequest) XXX_Size() int {
return m.Size()
}
func (m *DeleteRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
func (m *DeleteRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
func (m *DeleteRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *DeleteRequest) GetFailIfReferred() bool {
if m != nil {
return m.FailIfReferred
}
return false
}
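// Illustrative sketch, not part of the generated code: a DeleteRequest that
// asks the server to fail if other objects still refer to this one.
func exampleDeleteRequest() *DeleteRequest {
	return &DeleteRequest{
		Namespace:      "ns1",
		Name:           "name",
		FailIfReferred: true,
	}
}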
func init() {
proto.RegisterEnum("ves.io.schema.alert_receiver.GetResponseFormatCode", GetResponseFormatCode_name, GetResponseFormatCode_value)
golang_proto.RegisterEnum("ves.io.schema.alert_receiver.GetResponseFormatCode", GetResponseFormatCode_name, GetResponseFormatCode_value)
proto.RegisterType((*CreateRequest)(nil), "ves.io.schema.alert_receiver.CreateRequest")
golang_proto.RegisterType((*CreateRequest)(nil), "ves.io.schema.alert_receiver.CreateRequest")
proto.RegisterType((*CreateResponse)(nil), "ves.io.schema.alert_receiver.CreateResponse")
golang_proto.RegisterType((*CreateResponse)(nil), "ves.io.schema.alert_receiver.CreateResponse")
proto.RegisterType((*ReplaceRequest)(nil), "ves.io.schema.alert_receiver.ReplaceRequest")
golang_proto.RegisterType((*ReplaceRequest)(nil), "ves.io.schema.alert_receiver.ReplaceRequest")
proto.RegisterType((*ReplaceResponse)(nil), "ves.io.schema.alert_receiver.ReplaceResponse")
golang_proto.RegisterType((*ReplaceResponse)(nil), "ves.io.schema.alert_receiver.ReplaceResponse")
proto.RegisterType((*GetRequest)(nil), "ves.io.schema.alert_receiver.GetRequest")
golang_proto.RegisterType((*GetRequest)(nil), "ves.io.schema.alert_receiver.GetRequest")
proto.RegisterType((*GetResponse)(nil), "ves.io.schema.alert_receiver.GetResponse")
golang_proto.RegisterType((*GetResponse)(nil), "ves.io.schema.alert_receiver.GetResponse")
proto.RegisterType((*ListRequest)(nil), "ves.io.schema.alert_receiver.ListRequest")
golang_proto.RegisterType((*ListRequest)(nil), "ves.io.schema.alert_receiver.ListRequest")
proto.RegisterType((*ListResponseItem)(nil), "ves.io.schema.alert_receiver.ListResponseItem")
golang_proto.RegisterType((*ListResponseItem)(nil), "ves.io.schema.alert_receiver.ListResponseItem")
proto.RegisterMapType((map[string]string)(nil), "ves.io.schema.alert_receiver.ListResponseItem.AnnotationsEntry")
golang_proto.RegisterMapType((map[string]string)(nil), "ves.io.schema.alert_receiver.ListResponseItem.AnnotationsEntry")
proto.RegisterMapType((map[string]string)(nil), "ves.io.schema.alert_receiver.ListResponseItem.LabelsEntry")
golang_proto.RegisterMapType((map[string]string)(nil), "ves.io.schema.alert_receiver.ListResponseItem.LabelsEntry")
proto.RegisterType((*ListResponse)(nil), "ves.io.schema.alert_receiver.ListResponse")
golang_proto.RegisterType((*ListResponse)(nil), "ves.io.schema.alert_receiver.ListResponse")
proto.RegisterType((*DeleteRequest)(nil), "ves.io.schema.alert_receiver.DeleteRequest")
golang_proto.RegisterType((*DeleteRequest)(nil), "ves.io.schema.alert_receiver.DeleteRequest")
}
func init() {
proto.RegisterFile("ves.io/schema/alert_receiver/public_crudapi.proto", fileDescriptor_d7901683cee671bb)
}
func init() {
	golang_proto.RegisterFile("ves.io/schema/alert_receiver/public_crudapi.proto", fileDescriptor_d7901683cee671bb)
}
var fileDescriptor_d7901683cee671bb = []byte{
// 1535 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x4d, 0x6c, 0x13, 0xd7,
0x16, 0xce, 0xb5, 0x13, 0x27, 0xbe, 0xce, 0x8f, 0xb9, 0xfc, 0x3c, 0xe3, 0x97, 0x37, 0xf2, 0x33,
0xe8, 0x29, 0xc9, 0xc3, 0x63, 0x5e, 0xd0, 0x13, 0x8f, 0x08, 0xc1, 0x73, 0x12, 0x27, 0x4a, 0x15,
0x08, 0x5c, 0x1b, 0x84, 0x50, 0xa5, 0xd1, 0xd8, 0x3e, 0x36, 0x03, 0xb6, 0x67, 0x7a, 0xe7, 0xda,
0x69, 0x54, 0x45, 0x6a, 0x59, 0x74, 0x5d, 0xa9, 0xac, 0x2a, 0x16, 0x6c, 0xaa, 0x76, 0x55, 0x75,
0xd1, 0x45, 0xa5, 0x2c, 0xca, 0x0e, 0x84, 0xaa, 0x0a, 0xd1, 0x0d, 0xcb, 0xe2, 0x74, 0xc1, 0x92,
0x75, 0x57, 0xd5, 0xdc, 0x3b, 0x76, 0x3c, 0x8e, 0x71, 0xec, 0x34, 0x2b, 0xcf, 0x9c, 0x9f, 0x6f,
0xbe, 0x73, 0xcf, 0xcf, 0x1c, 0x0f, 0xfe, 0x4f, 0x1d, 0x6c, 0xd5, 0x30, 0x93, 0x76, 0xfe, 0x1e,
0x54, 0xf4, 0xa4, 0x5e, 0x06, 0xc6, 0x35, 0x06, 0x79, 0x30, 0xea, 0xc0, 0x92, 0x56, 0x2d, 0x57,
0x36, 0xf2, 0x5a, 0x9e, 0xd5, 0x0a, 0xba, 0x65, 0xa8, 0x16, 0x33, 0xb9, 0x49, 0xa6, 0xa5, 0x8b,
0x2a, 0x5d, 0x54, 0xaf, 0x4b, 0x34, 0x51, 0x32, 0xf8, 0xbd, 0x5a, 0x4e, 0xcd, 0x9b, 0x95, 0x64,
0xc9, 0x2c, 0x99, 0x49, 0xe1, 0x94, 0xab, 0x15, 0xc5, 0x9d, 0xb8, 0x11, 0x57, 0x12, 0x2c, 0x3a,
0x5d, 0x32, 0xcd, 0x52, 0x19, 0x92, 0xba, 0x65, 0x24, 0xf5, 0x6a, 0xd5, 0xe4, 0x3a, 0x37, 0xcc,
0xaa, 0xed, 0x6a, 0xff, 0xee, 0x6a, 0x5b, 0x18, 0x50, 0xb1, 0xf8, 0x96, 0xab, 0x9c, 0xed, 0x49,
0xdd, 0xcc, 0xdd, 0x87, 0x3c, 0x77, 0x4d, 0x67, 0x7a, 0x9a, 0xf2, 0x2d, 0x0b, 0x9a, 0x4f, 0x8c,
0x7a, 0x2d, 0x81, 0x31, 0x93, 0xb5, 0xd8, 0x78, 0x75, 0xa6, 0xd5, 0x4e, 0xf5, 0xb4, 0x57, 0xd9,
0x8e, 0x19, 0xf7, 0xaa, 0xea, 0x60, 0x43, 0xb5, 0xde, 0xe1, 0x1e, 0xeb, 0xb0, 0x31, 0x60, 0x53,
0xf3, 0x58, 0xc4, 0xbf, 0x43, 0x78, 0x62, 0x89, 0x81, 0xce, 0x81, 0xc2, 0x47, 0x35, 0xb0, 0x39,
0xb9, 0x8a, 0xc7, 0x2a, 0xc0, 0xf5, 0x82, 0xce, 0xf5, 0x08, 0x8a, 0xa1, 0x99, 0xd0, 0xfc, 0x19,
0xd5, 0x9b, 0x9b, 0x0d, 0x71, 0x08, 0xd2, 0xeb, 0x1a, 0x70, 0x3d, 0xbb, 0x65, 0x01, 0x6d, 0x39,
0x91, 0x9b, 0x78, 0xd8, 0xb6, 0x20, 0x1f, 0xf1, 0x09, 0xe7, 0x73, 0x6a, 0xaf, 0xc4, 0xaa, 0x12,
0x25, 0x63, 0x41, 0xde, 0x41, 0x59, 0x9c, 0xfa, 0x6a, 0x7b, 0xdc, 0x71, 0x56, 0x4b, 0x79, 0xcd,
0xf9, 0xa5, 0x02, 0x6a, 0x21, 0xf8, 0xe2, 0x4a, 0x40, 0x3e, 0x35, 0xfe, 0x07, 0xc2, 0x93, 0x4d,
0xc2, 0xb6, 0x65, 0x56, 0x6d, 0x20, 0x97, 0xf7, 0x31, 0x8e, 0x75, 0x65, 0xbc, 0x0a, 0xbc, 0x0b,
0xdd, 0x0d, 0x3c, 0x65, 0x6f, 0xd9, 0x1c, 0x2a, 0x5a, 0x0b, 0xc4, 0x2f, 0x40, 0xfe, 0xd5, 0x01,
0x92, 0x11, 0x56, 0xfb, 0xa1, 0x26, 0xa5, 0xfb, 0xb5, 0x26, 0xe0, 0x75, 0x4f, 0xfc, 0xb3, 0xbd,
0xe3, 0x5f, 0x05, 0x3e, 0x40, 0xf0, 0x0d, 0x84, 0x27, 0x29, 0x58, 0x65, 0x3d, 0xdf, 0x4a, 0xd7,
0xff, 0xf7, 0x05, 0x7f, 0xb6, 0x6b, 0xf0, 0xae, 0x5b, 0x97, 0x03, 0xa0, 0x1e, 0xbe, 0x89, 0xde,
0x7c, 0x5d, 0x98, 0x03, 0x38, 0x93, 0x24, 0x0e, 0x33, 0xb0, 0xcd, 0x1a, 0xcb, 0x83, 0x56, 0x07,
0x66, 0x1b, 0x66, 0x55, 0x9c, 0x6a, 0x70, 0x71, 0xf8, 0xed, 0x0e, 0x42, 0x74, 0xaa, 0xa9, 0xbd,
0x2d, 0x95, 0xed, 0x41, 0x1e, 0xc3, 0x53, 0xad, 0x18, 0x65, 0x86, 0xe3, 0x8f, 0x11, 0xc6, 0xab,
0xc0, 0x9b, 0x31, 0x4f, 0xe3, 0x60, 0x55, 0xaf, 0x80, 0x6d, 0xe9, 0x79, 0x10, 0x41, 0x07, 0xe9,
0x9e, 0x80, 0x10, 0x3c, 0xec, 0xdc, 0x88, 0x78, 0x82, 0x54, 0x5c, 0x93, 0x0f, 0xb1, 0xf3, 0x44,
0x01, 0xa6, 0x15, 0x4d, 0x56, 0xd1, 0xb9, 0xa0, 0x33, 0x39, 0x7f, 0xe1, 0xc0, 0xf4, 0x34, 0x49,
0xac, 0x08, 0xb7, 0x25, 0xb3, 0x00, 0x74, 0x92, 0x79, 0x64, 0xf1, 0x27, 0x23, 0x38, 0xd4, 0x66,
0x49, 0x52, 0x38, 0x20, 0x07, 0xc5, 0x7b, 0x32, 0xd2, 0xf1, 0x10, 0x19, 0xf7, 0xa2, 0xff, 0xe9,
0x36, 0xa2, 0xae, 0x23, 0x59, 0xc7, 0xa1, 0xbc, 0xa8, 0x72, 0x41, 0xd7, 0xcd, 0xcd, 0xbf, 0xfb,
0xe9, 0x25, 0xf7, 0x90, 0x28, 0x96, 0xfe, 0x0e, 0x45, 0xb2, 0x81, 0xc7, 0x99, 0x3c, 0x52, 0x09,
0xe7, 0xef, 0xa7, 0x35, 0xbd, 0x85, 0x46, 0x43, 0x2e, 0x82, 0x00, 0xec, 0x96, 0xdf, 0xe1, 0x1e,
0xf9, 0xf5, 0xf4, 0xe8, 0xc8, 0x51, 0xf4, 0xe8, 0xe8, 0x91, 0xf4, 0x68, 0xe0, 0x68, 0x7a, 0x94,
0xac, 0xe1, 0x80, 0xcd, 0x75, 0x5e, 0xb3, 0x23, 0x4f, 0x1e, 0xa3, 0x98, 0x7f, 0x26, 0x34, 0x3f,
0xd7, 0x1b, 0x33, 0x23, 0xac, 0x3d, 0x99, 0x97, 0x00, 0x64, 0x0d, 0x1f, 0x63, 0x50, 0x04, 0xc6,
0x8c, 0x6a, 0x49, 0x93, 0xd5, 0x60, 0x47, 0xc6, 0x04, 0xe6, 0xf4, 0x7b, 0x3a, 0xbb, 0x28, 0x62,
0x0c, 0xb7, 0xdc, 0xa4, 0xdc, 0x6e, 0x6f, 0xaa, 0xaf, 0x11, 0x0e, 0xad, 0x1b, 0x76, 0x9f, 0x2d,
0xf4, 0x4f, 0x3c, 0x5e, 0xd6, 0x73, 0x50, 0xd6, 0x8a, 0x46, 0x99, 0x03, 0x73, 0x5b, 0x29, 0x24,
0x64, 0x2b, 0x42, 0x44, 0xce, 0xe0, 0x09, 0x06, 0x96, 0xc9, 0xb8, 0x56, 0x34, 0xa0, 0x5c, 0xb0,
0x23, 0xfe, 0x98, 0x7f, 0x26, 0x48, 0xc7, 0xa5, 0x70, 0x45, 0xc8, 0xc8, 0x79, 0x7c, 0xc2, 0x35,
0x92, 0xc1, 0x35, 0x6d, 0x87, 0x85, 0x2d, 0x91, 0x3a, 0x79, 0x14, 0xd2, 0x23, 0xfe, 0xd9, 0x28,
0x0e, 0x4b, 0x9e, 0xb2, 0x97, 0xd6, 0x38, 0x54, 0xc8, 0x29, 0x1c, 0xe0, 0x50, 0xd5, 0xab, 0x5c,
0xe4, 0x2b, 0x48, 0xdd, 0xbb, 0x43, 0xcc, 0x81, 0x30, 0xf6, 0xd7, 0x8c, 0x82, 0x1c, 0x45, 0xd4,
0xb9, 0x24, 0x31, 0x1c, 0x2a, 0x80, 0x9d, 0x67, 0x86, 0x78, 0x2d, 0x46, 0x42, 0x32, 0xd2, 0x36,
0x11, 0x89, 0xe2, 0xb1, 0x82, 0x61, 0xeb, 0xb9, 0x32, 0x14, 0x22, 0xe3, 0x31, 0x34, 0x33, 0x46,
0x5b, 0xf7, 0x84, 0xe2, 0x80, 0x38, 0x14, 0x19, 0x52, 0x68, 0x7e, 0xa1, 0x77, 0xd6, 0x3b, 0x23,
0x53, 0xd7, 0x85, 0x73, 0xba, 0xca, 0xd9, 0x16, 0x75, 0x91, 0x88, 0x8e, 0x43, 0x6d, 0x3b, 0x4b,
0x04, 0x0b, 0xe0, 0xab, 0x03, 0x02, 0xa7, 0xf6, 0x10, 0x24, 0x7a, 0x3b, 0x26, 0xb9, 0x84, 0xb1,
0xb9, 0x59, 0x05, 0xa6, 0x39, 0x1b, 0x41, 0x24, 0x28, 0x9a, 0x20, 0xda, 0xf1, 0x84, 0xdb, 0x06,
0x6c, 0x36, 0x4b, 0x2b, 0x28, 0xac, 0x1d, 0x89, 0xa7, 0x91, 0x27, 0x8e, 0xa2, 0x91, 0x27, 0xff,
0x52, 0x23, 0xef, 0x8d, 0xda, 0x91, 0xc3, 0x8e, 0xda, 0x65, 0x3c, 0x56, 0x02, 0x2e, 0xba, 0xd9,
0x9d, 0x2a, 0xfd, 0xcf, 0x03, 0x3a, 0x5a, 0x92, 0x37, 0xe4, 0x3a, 0xc6, 0x6e, 0x8d, 0xdb, 0xc0,
0xdd, 0x7e, 0x1d, 0x78, 0x06, 0x04, 0x25, 0x44, 0x06, 0x78, 0xf4, 0x12, 0x0e, 0xb5, 0x15, 0x87,
0x53, 0xb8, 0x0f, 0x60, 0xcb, 0x2d, 0x72, 0xe7, 0x92, 0x9c, 0xc0, 0x23, 0x75, 0xbd, 0x5c, 0x6b,
0xd6, 0xb7, 0xbc, 0x59, 0xf0, 0xfd, 0x0f, 0x45, 0xaf, 0xe0, 0x70, 0x67, 0xfa, 0x07, 0xf1, 0x8f,
0x7f, 0x8e, 0xf0, 0x78, 0x7b, 0x41, 0x91, 0x65, 0x3c, 0x62, 0x70, 0xa8, 0xd8, 0x11, 0x39, 0xda,
0xd4, 0xc1, 0x6a, 0x91, 0x4a, 0x67, 0x72, 0x1e, 0x07, 0xe4, 0xe2, 0x1b, 0xf1, 0x09, 0x98, 0x48,
0x07, 0x4c, 0xda, 0x51, 0x8a, 0x43, 0x75, 0xed, 0xe2, 0x0f, 0xf0, 0xc4, 0x32, 0x94, 0x61, 0x6f,
0x37, 0x1d, 0xbc, 0xe1, 0x67, 0x70, 0xb8, 0xa8, 0x1b, 0x65, 0xcd, 0x28, 0x6a, 0x72, 0x3c, 0x82,
0xec, 0xfe, 0x31, 0x3a, 0xe9, 0xc8, 0xd7, 0x8a, 0xd4, 0x95, 0xce, 0xfd, 0x8c, 0xf0, 0xc9, 0xae,
0xaf, 0x7b, 0x12, 0xc5, 0xa7, 0x56, 0xd3, 0x59, 0x8d, 0x66, 0x6e, 0x68, 0x2b, 0x1b, 0xf4, 0x5a,
0x2a, 0xab, 0x2d, 0xa7, 0x57, 0x52, 0xb7, 0xd6, 0xb3, 0xe1, 0x21, 0xf2, 0x0f, 0x7c, 0xba, 0x43,
0xb7, 0xb2, 0x41, 0xb5, 0x25, 0x9a, 0x4e, 0x65, 0xd3, 0x61, 0x44, 0x14, 0x1c, 0xed, 0xa2, 0xa6,
0xe9, 0x1b, 0xeb, 0xa9, 0xa5, 0x74, 0xd8, 0x47, 0x4e, 0xe3, 0x93, 0x1d, 0xfa, 0x4c, 0x36, 0x95,
0xbd, 0x95, 0x09, 0xfb, 0xc9, 0xdf, 0xf0, 0xf1, 0x0e, 0x15, 0x4d, 0xa7, 0x96, 0xc3, 0xc3, 0xe4,
0x2c, 0x8e, 0xed, 0x53, 0xac, 0xa4, 0x29, 0x5d, 0xbb, 0xbe, 0xaa, 0x6d, 0x2c, 0x7e, 0x90, 0x5e,
0xca, 0x66, 0xc2, 0x23, 0xf3, 0x8f, 0xc6, 0xb0, 0x3f, 0x75, 0x63, 0x8d, 0xfc, 0x84, 0x70, 0x40,
0x2e, 0x06, 0x64, 0x90, 0xf5, 0x21, 0x7a, 0xae, 0x3f, 0x63, 0x77, 0x41, 0xbb, 0xdb, 0x78, 0x16,
0x39, 0x51, 0x07, 0x3b, 0x61, 0x98, 0x89, 0x12, 0x54, 0x81, 0xe9, 0xe5, 0xc4, 0x26, 0x33, 0x38,
0x3c, 0xfc, 0xf5, 0xf7, 0x2f, 0x7d, 0x97, 0xe3, 0x17, 0xdd, 0x3f, 0x7d, 0xc9, 0x56, 0xb6, 0xec,
0xe4, 0x27, 0xcd, 0x11, 0xa0, 0xb6, 0x84, 0xdb, 0x1d, 0x7f, 0xa2, 0xec, 0x05, 0x34, 0x47, 0x7e,
0x40, 0xd8, 0xbf, 0x0a, 0x9c, 0xcc, 0xf4, 0xb1, 0xaa, 0x49, 0xee, 0xb3, 0x7d, 0x2f, 0x75, 0xf1,
0x3b, 0xcf, 0x7f, 0xf4, 0xa1, 0xc6, 0xb3, 0xc8, 0xf1, 0x0e, 0xf2, 0x0c, 0xf4, 0x82, 0xe0, 0x7e,
0x91, 0xfc, 0xb7, 0x1b, 0xf7, 0xf7, 0x53, 0x96, 0xba, 0x6d, 0xf2, 0x3d, 0xc2, 0xc3, 0x4e, 0x2b,
0x90, 0xd9, 0x7e, 0xda, 0x45, 0x12, 0x9f, 0xeb, 0xbf, 0xb3, 0xe2, 0x37, 0x0f, 0x62, 0x7e, 0x9e,
0xa8, 0x83, 0x31, 0x27, 0xaf, 0x10, 0x1e, 0x75, 0xb7, 0x3e, 0x32, 0xd0, 0x72, 0x18, 0x4d, 0xf4,
0x69, 0xed, 0x72, 0xbf, 0xdf, 0xb3, 0x5c, 0xd6, 0xa3, 0xab, 0x87, 0x2c, 0x97, 0x0e, 0xa3, 0x6d,
0xa7, 0x7c, 0xbe, 0x41, 0x38, 0x20, 0xa7, 0xc8, 0x41, 0x0d, 0xe0, 0x99, 0x35, 0xd1, 0x53, 0xaa,
0xfc, 0x4c, 0xa0, 0x36, 0x3f, 0x13, 0xa8, 0xe9, 0x8a, 0xc5, 0xb7, 0xe2, 0x77, 0x7a, 0x72, 0x5f,
0x98, 0x3b, 0x5c, 0xb9, 0x2c, 0xa0, 0xb9, 0x68, 0xee, 0xe9, 0x0e, 0xf2, 0xbd, 0xda, 0x41, 0x67,
0xfa, 0x78, 0x85, 0xbd, 0xde, 0x41, 0xa8, 0xb1, 0x83, 0x66, 0xfb, 0x7e, 0xc1, 0x3c, 0xfc, 0x25,
0xe2, 0x0b, 0xa3, 0xc5, 0x47, 0xe8, 0xc5, 0x95, 0x91, 0x25, 0x7a, 0x6b, 0x79, 0xfd, 0xe5, 0x1b,
0x65, 0xe8, 0xf5, 0x1b, 0x65, 0xe8, 0xdd, 0x1b, 0x05, 0x7d, 0xda, 0x50, 0xd0, 0xb7, 0x0d, 0x05,
0x3d, 0x6f, 0x28, 0xe8, 0x65, 0x43, 0x41, 0xbf, 0x35, 0x14, 0xf4, 0xb6, 0xa1, 0x0c, 0xbd, 0x6b,
0x28, 0xe8, 0x8b, 0x5d, 0x65, 0xe8, 0xe9, 0xae, 0x82, 0x5e, 0xee, 0x2a, 0x43, 0xaf, 0x77, 0x95,
0xa1, 0xbb, 0xb4, 0x64, 0x5a, 0x0f, 0x4a, 0x6a, 0xdd, 0x74, 0xf6, 0x40, 0xa6, 0xab, 0x35, 0x3b,
0x29, 0x2e, 0x9c, 0x7f, 0x16, 0x09, 0x8b, 0x99, 0x75, 0xa3, 0x00, 0x2c, 0xd1, 0x54, 0x27, 0xad,
0x5c, 0xc9, 0x4c, 0xc2, 0xc7, 0xdc, 0xfd, 0x06, 0xd1, 0xf5, 0x63, 0x49, 0x2e, 0x20, 0x0e, 0xf9,
0xc2, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x66, 0x55, 0x8f, 0x39, 0x12, 0x00, 0x00,
}
func (x GetResponseFormatCode) String() string {
s, ok := GetResponseFormatCode_name[int32(x)]
if ok {
return s
}
return strconv.Itoa(int(x))
}
func (this *CreateRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CreateRequest)
if !ok {
that2, ok := that.(CreateRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !this.Metadata.Equal(that1.Metadata) {
return false
}
if !this.Spec.Equal(that1.Spec) {
return false
}
return true
}
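// Illustrative sketch, not part of the generated code: Equal takes an
// interface{} and accepts either a *CreateRequest or a CreateRequest value;
// the two calls below exercise both branches of the type switch above.
func exampleEqual(a *CreateRequest, b CreateRequest) bool {
	return a.Equal(&b) && a.Equal(b)
}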
func (this *CreateResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CreateResponse)
if !ok {
that2, ok := that.(CreateResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !this.Metadata.Equal(that1.Metadata) {
return false
}
if !this.SystemMetadata.Equal(that1.SystemMetadata) {
return false
}
if !this.Spec.Equal(that1.Spec) {
return false
}
return true
}
func (this *ReplaceRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ReplaceRequest)
if !ok {
that2, ok := that.(ReplaceRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !this.Metadata.Equal(that1.Metadata) {
return false
}
if !this.Spec.Equal(that1.Spec) {
return false
}
if this.ResourceVersion != that1.ResourceVersion {
return false
}
return true
}
func (this *ReplaceResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ReplaceResponse)
if !ok {
that2, ok := that.(ReplaceResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
return true
}
func (this *GetRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*GetRequest)
if !ok {
that2, ok := that.(GetRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Namespace != that1.Namespace {
return false
}
if this.Name != that1.Name {
return false
}
if this.ResponseFormat != that1.ResponseFormat {
return false
}
return true
}
func (this *GetResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*GetResponse)
if !ok {
that2, ok := that.(GetResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !this.Object.Equal(that1.Object) {
return false
}
if !this.CreateForm.Equal(that1.CreateForm) {
return false
}
if !this.ReplaceForm.Equal(that1.ReplaceForm) {
return false
}
if this.ResourceVersion != that1.ResourceVersion {
return false
}
if !this.Metadata.Equal(that1.Metadata) {
return false
}
if !this.SystemMetadata.Equal(that1.SystemMetadata) {
return false
}
if !this.Spec.Equal(that1.Spec) {
return false
}
if len(this.Status) != len(that1.Status) {
return false
}
for i := range this.Status {
if !this.Status[i].Equal(that1.Status[i]) {
return false
}
}
if len(this.ReferringObjects) != len(that1.ReferringObjects) {
return false
}
for i := range this.ReferringObjects {
if !this.ReferringObjects[i].Equal(that1.ReferringObjects[i]) {
return false
}
}
return true
}
func (this *ListRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ListRequest)
if !ok {
that2, ok := that.(ListRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Namespace != that1.Namespace {
return false
}
if this.LabelFilter != that1.LabelFilter {
return false
}
if len(this.ReportFields) != len(that1.ReportFields) {
return false
}
for i := range this.ReportFields {
if this.ReportFields[i] != that1.ReportFields[i] {
return false
}
}
if len(this.ReportStatusFields) != len(that1.ReportStatusFields) {
return false
}
for i := range this.ReportStatusFields {
if this.ReportStatusFields[i] != that1.ReportStatusFields[i] {
return false
}
}
return true
}
func (this *ListResponseItem) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ListResponseItem)
if !ok {
that2, ok := that.(ListResponseItem)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Tenant != that1.Tenant {
return false
}
if this.Namespace != that1.Namespace {
return false
}
if this.Name != that1.Name {
return false
}
if this.Uid != that1.Uid {
return false
}
if this.Description != that1.Description {
return false
}
if this.Disabled != that1.Disabled {
return false
}
if len(this.Labels) != len(that1.Labels) {
return false
}
for i := range this.Labels {
if this.Labels[i] != that1.Labels[i] {
return false
}
}
if len(this.Annotations) != len(that1.Annotations) {
return false
}
for i := range this.Annotations {
if this.Annotations[i] != that1.Annotations[i] {
return false
}
}
if !this.OwnerView.Equal(that1.OwnerView) {
return false
}
if !this.Metadata.Equal(that1.Metadata) {
return false
}
if !this.SystemMetadata.Equal(that1.SystemMetadata) {
return false
}
if !this.Object.Equal(that1.Object) {
return false
}
if !this.GetSpec.Equal(that1.GetSpec) {
return false
}
if len(this.StatusSet) != len(that1.StatusSet) {
return false
}
for i := range this.StatusSet {
if !this.StatusSet[i].Equal(that1.StatusSet[i]) {
return false
}
}
return true
}
func (this *ListResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ListResponse)
if !ok {
that2, ok := that.(ListResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if len(this.Items) != len(that1.Items) {
return false
}
for i := range this.Items {
if !this.Items[i].Equal(that1.Items[i]) {
return false
}
}
if len(this.Errors) != len(that1.Errors) {
return false
}
for i := range this.Errors {
if !this.Errors[i].Equal(that1.Errors[i]) {
return false
}
}
return true
}
func (this *DeleteRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*DeleteRequest)
if !ok {
that2, ok := that.(DeleteRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Namespace != that1.Namespace {
return false
}
if this.Name != that1.Name {
return false
}
if this.FailIfReferred != that1.FailIfReferred {
return false
}
return true
}
func (this *CreateRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&alert_receiver.CreateRequest{")
if this.Metadata != nil {
s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
}
if this.Spec != nil {
s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *CreateResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&alert_receiver.CreateResponse{")
if this.Metadata != nil {
s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
}
if this.SystemMetadata != nil {
s = append(s, "SystemMetadata: "+fmt.Sprintf("%#v", this.SystemMetadata)+",\n")
}
if this.Spec != nil {
s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ReplaceRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&alert_receiver.ReplaceRequest{")
if this.Metadata != nil {
s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
}
if this.Spec != nil {
s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
}
s = append(s, "ResourceVersion: "+fmt.Sprintf("%#v", this.ResourceVersion)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ReplaceResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 4)
s = append(s, "&alert_receiver.ReplaceResponse{")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *GetRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&alert_receiver.GetRequest{")
s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "ResponseFormat: "+fmt.Sprintf("%#v", this.ResponseFormat)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *GetResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 13)
s = append(s, "&alert_receiver.GetResponse{")
if this.Object != nil {
s = append(s, "Object: "+fmt.Sprintf("%#v", this.Object)+",\n")
}
if this.CreateForm != nil {
s = append(s, "CreateForm: "+fmt.Sprintf("%#v", this.CreateForm)+",\n")
}
if this.ReplaceForm != nil {
s = append(s, "ReplaceForm: "+fmt.Sprintf("%#v", this.ReplaceForm)+",\n")
}
s = append(s, "ResourceVersion: "+fmt.Sprintf("%#v", this.ResourceVersion)+",\n")
if this.Metadata != nil {
s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
}
if this.SystemMetadata != nil {
s = append(s, "SystemMetadata: "+fmt.Sprintf("%#v", this.SystemMetadata)+",\n")
}
if this.Spec != nil {
s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
}
if this.Status != nil {
s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n")
}
if this.ReferringObjects != nil {
s = append(s, "ReferringObjects: "+fmt.Sprintf("%#v", this.ReferringObjects)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ListRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&alert_receiver.ListRequest{")
s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n")
s = append(s, "LabelFilter: "+fmt.Sprintf("%#v", this.LabelFilter)+",\n")
s = append(s, "ReportFields: "+fmt.Sprintf("%#v", this.ReportFields)+",\n")
s = append(s, "ReportStatusFields: "+fmt.Sprintf("%#v", this.ReportStatusFields)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ListResponseItem) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 18)
s = append(s, "&alert_receiver.ListResponseItem{")
s = append(s, "Tenant: "+fmt.Sprintf("%#v", this.Tenant)+",\n")
s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "Uid: "+fmt.Sprintf("%#v", this.Uid)+",\n")
s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n")
keysForLabels := make([]string, 0, len(this.Labels))
for k := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
}
mapStringForLabels += "}"
if this.Labels != nil {
s = append(s, "Labels: "+mapStringForLabels+",\n")
}
keysForAnnotations := make([]string, 0, len(this.Annotations))
for k := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%#v: %#v,", k, this.Annotations[k])
}
mapStringForAnnotations += "}"
if this.Annotations != nil {
s = append(s, "Annotations: "+mapStringForAnnotations+",\n")
}
if this.OwnerView != nil {
s = append(s, "OwnerView: "+fmt.Sprintf("%#v", this.OwnerView)+",\n")
}
if this.Metadata != nil {
s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
}
if this.SystemMetadata != nil {
s = append(s, "SystemMetadata: "+fmt.Sprintf("%#v", this.SystemMetadata)+",\n")
}
if this.Object != nil {
s = append(s, "Object: "+fmt.Sprintf("%#v", this.Object)+",\n")
}
if this.GetSpec != nil {
s = append(s, "GetSpec: "+fmt.Sprintf("%#v", this.GetSpec)+",\n")
}
if this.StatusSet != nil {
s = append(s, "StatusSet: "+fmt.Sprintf("%#v", this.StatusSet)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ListResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&alert_receiver.ListResponse{")
if this.Items != nil {
s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n")
}
if this.Errors != nil {
s = append(s, "Errors: "+fmt.Sprintf("%#v", this.Errors)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *DeleteRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&alert_receiver.DeleteRequest{")
s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "FailIfReferred: "+fmt.Sprintf("%#v", this.FailIfReferred)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringPublicCrudapi(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// APIClient is the client API for API service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type APIClient interface {
// Create Alert Receiver
//
// x-displayName: "Create Alert Receiver"
// Creates a new Alert Receiver object
Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error)
// Get Alert Receiver
//
// x-displayName: "Get Alert Receiver"
// Get the Alert Receiver object
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
// List
//
// x-displayName: "List Alert Receiver"
// List the set of alert_receiver in a namespace
List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
// Replace Alert Receiver
//
// x-displayName: "Replace Alert Receiver"
// Replaces the content of an Alert Receiver object
Replace(ctx context.Context, in *ReplaceRequest, opts ...grpc.CallOption) (*ReplaceResponse, error)
// Delete
//
// x-displayName: "Delete Alert Receiver"
// Delete the specified alert_receiver
Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error)
}
type aPIClient struct {
cc *grpc.ClientConn
}
func NewAPIClient(cc *grpc.ClientConn) APIClient {
return &aPIClient{cc}
}
func (c *aPIClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) {
out := new(CreateResponse)
err := c.cc.Invoke(ctx, "/ves.io.schema.alert_receiver.API/Create", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
out := new(GetResponse)
err := c.cc.Invoke(ctx, "/ves.io.schema.alert_receiver.API/Get", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
out := new(ListResponse)
err := c.cc.Invoke(ctx, "/ves.io.schema.alert_receiver.API/List", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) Replace(ctx context.Context, in *ReplaceRequest, opts ...grpc.CallOption) (*ReplaceResponse, error) {
out := new(ReplaceResponse)
err := c.cc.Invoke(ctx, "/ves.io.schema.alert_receiver.API/Replace", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) {
out := new(types.Empty)
err := c.cc.Invoke(ctx, "/ves.io.schema.alert_receiver.API/Delete", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
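// Illustrative sketch, not part of the generated code: dialing a server and
// issuing a Get through the generated client. The target address is a
// placeholder, and grpc.WithInsecure is used only to keep the sketch short;
// a real deployment would configure transport credentials.
func exampleClientGet(ctx context.Context) (*GetResponse, error) {
	cc, err := grpc.Dial("localhost:9000", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	defer cc.Close()
	client := NewAPIClient(cc)
	return client.Get(ctx, &GetRequest{Namespace: "ns1", Name: "name"})
}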
// APIServer is the server API for API service.
type APIServer interface {
// Create Alert Receiver
//
// x-displayName: "Create Alert Receiver"
// Creates a new Alert Receiver object
Create(context.Context, *CreateRequest) (*CreateResponse, error)
// Get Alert Receiver
//
// x-displayName: "Get Alert Receiver"
// Get the Alert Receiver object
Get(context.Context, *GetRequest) (*GetResponse, error)
// List
//
// x-displayName: "List Alert Receiver"
// List the set of alert_receiver in a namespace
List(context.Context, *ListRequest) (*ListResponse, error)
// Replace Alert Receiver
//
// x-displayName: "Replace Alert Receiver"
// Replaces the content of an Alert Receiver object
Replace(context.Context, *ReplaceRequest) (*ReplaceResponse, error)
// Delete
//
// x-displayName: "Delete Alert Receiver"
// Delete the specified alert_receiver
Delete(context.Context, *DeleteRequest) (*types.Empty, error)
}
// UnimplementedAPIServer can be embedded to have forward compatible implementations.
type UnimplementedAPIServer struct {
}
func (*UnimplementedAPIServer) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
}
func (*UnimplementedAPIServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
}
func (*UnimplementedAPIServer) List(ctx context.Context, req *ListRequest) (*ListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (*UnimplementedAPIServer) Replace(ctx context.Context, req *ReplaceRequest) (*ReplaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Replace not implemented")
}
func (*UnimplementedAPIServer) Delete(ctx context.Context, req *DeleteRequest) (*types.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
}
func RegisterAPIServer(s *grpc.Server, srv APIServer) {
s.RegisterService(&_API_serviceDesc, srv)
}
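// Illustrative sketch, not part of the generated code: a partial server that
// embeds UnimplementedAPIServer for forward compatibility and overrides only
// Get; every other RPC falls through to the codes.Unimplemented stubs above.
type exampleServer struct {
	UnimplementedAPIServer
}

func (s *exampleServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
	// A real implementation would look the object up by req.Namespace and req.Name.
	return &GetResponse{}, nil
}

func exampleRegisterServer(s *grpc.Server) {
	RegisterAPIServer(s, &exampleServer{})
}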
func _API_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).Create(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/ves.io.schema.alert_receiver.API/Create",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).Create(ctx, req.(*CreateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).Get(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/ves.io.schema.alert_receiver.API/Get",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).Get(ctx, req.(*GetRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).List(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/ves.io.schema.alert_receiver.API/List",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).List(ctx, req.(*ListRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_Replace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReplaceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).Replace(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/ves.io.schema.alert_receiver.API/Replace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).Replace(ctx, req.(*ReplaceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _API_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).Delete(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/ves.io.schema.alert_receiver.API/Delete",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).Delete(ctx, req.(*DeleteRequest))
}
return interceptor(ctx, in, info, handler)
}
var _API_serviceDesc = grpc.ServiceDesc{
ServiceName: "ves.io.schema.alert_receiver.API",
HandlerType: (*APIServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Create",
Handler: _API_Create_Handler,
},
{
MethodName: "Get",
Handler: _API_Get_Handler,
},
{
MethodName: "List",
Handler: _API_List_Handler,
},
{
MethodName: "Replace",
Handler: _API_Replace_Handler,
},
{
MethodName: "Delete",
Handler: _API_Delete_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "ves.io/schema/alert_receiver/public_crudapi.proto",
}
func (m *CreateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
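// Illustrative sketch, not part of the generated code: Marshal and the
// generated Unmarshal (called via XXX_Unmarshal above) round-trip a message
// through the proto wire format.
func exampleRoundTrip(m *CreateRequest) (*CreateRequest, error) {
	b, err := m.Marshal()
	if err != nil {
		return nil, err
	}
	out := &CreateRequest{}
	if err := out.Unmarshal(b); err != nil {
		return nil, err
	}
	return out, nil
}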
func (m *CreateRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
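	// Note (added for readability, not generated): gogo marshalers fill the
	// buffer from the end toward the start; i starts at len(dAtA) and each
	// field body is written before its varint length prefix. Emitting fields
	// in descending tag order here yields ascending tag order on the wire.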
if m.Spec != nil {
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.Metadata != nil {
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *CreateResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CreateResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *CreateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.SystemMetadata != nil {
{
size, err := m.SystemMetadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if m.Spec != nil {
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.Metadata != nil {
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ReplaceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReplaceRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ReplaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceVersion) > 0 {
i -= len(m.ResourceVersion)
copy(dAtA[i:], m.ResourceVersion)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.ResourceVersion)))
i--
dAtA[i] = 0x1a
}
if m.Spec != nil {
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.Metadata != nil {
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ReplaceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReplaceResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ReplaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
return len(dAtA) - i, nil
}
func (m *GetRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.ResponseFormat != 0 {
i = encodeVarintPublicCrudapi(dAtA, i, uint64(m.ResponseFormat))
i--
dAtA[i] = 0x18
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
}
if len(m.Namespace) > 0 {
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *GetResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
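	// Note (added for readability, not generated): field 20000 (status) needs
	// a multi-byte tag: (20000<<3)|2 = 0x27102, whose varint encoding is
	// 0x82 0xe2 0x09. The bytes below are written in reverse because this
	// buffer is filled back-to-front.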
if len(m.Status) > 0 {
for iNdEx := len(m.Status) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Status[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x9
i--
dAtA[i] = 0xe2
i--
dAtA[i] = 0x82
}
}
if len(m.ReferringObjects) > 0 {
for iNdEx := len(m.ReferringObjects) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ReferringObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x42
}
}
if m.SystemMetadata != nil {
{
size, err := m.SystemMetadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
if m.Spec != nil {
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
if m.Metadata != nil {
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
if len(m.ResourceVersion) > 0 {
i -= len(m.ResourceVersion)
copy(dAtA[i:], m.ResourceVersion)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.ResourceVersion)))
i--
dAtA[i] = 0x22
}
if m.ReplaceForm != nil {
{
size, err := m.ReplaceForm.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if m.CreateForm != nil {
{
size, err := m.CreateForm.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.Object != nil {
{
size, err := m.Object.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ListRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ListRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ReportStatusFields) > 0 {
for iNdEx := len(m.ReportStatusFields) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.ReportStatusFields[iNdEx])
copy(dAtA[i:], m.ReportStatusFields[iNdEx])
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.ReportStatusFields[iNdEx])))
i--
dAtA[i] = 0x22
}
}
if len(m.ReportFields) > 0 {
for iNdEx := len(m.ReportFields) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.ReportFields[iNdEx])
copy(dAtA[i:], m.ReportFields[iNdEx])
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.ReportFields[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
if len(m.LabelFilter) > 0 {
i -= len(m.LabelFilter)
copy(dAtA[i:], m.LabelFilter)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.LabelFilter)))
i--
dAtA[i] = 0x12
}
if len(m.Namespace) > 0 {
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ListResponseItem) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ListResponseItem) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ListResponseItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.SystemMetadata != nil {
{
size, err := m.SystemMetadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x72
}
if m.Metadata != nil {
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6a
}
if m.Disabled {
i--
if m.Disabled {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x60
}
if len(m.Description) > 0 {
i -= len(m.Description)
copy(dAtA[i:], m.Description)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Description)))
i--
dAtA[i] = 0x5a
}
if len(m.Annotations) > 0 {
for k := range m.Annotations {
v := m.Annotations[k]
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(k)
copy(dAtA[i:], k)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(k)))
i--
dAtA[i] = 0xa
i = encodeVarintPublicCrudapi(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x52
}
}
if m.OwnerView != nil {
{
size, err := m.OwnerView.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
}
if len(m.StatusSet) > 0 {
for iNdEx := len(m.StatusSet) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.StatusSet[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x42
}
}
if m.GetSpec != nil {
{
size, err := m.GetSpec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
if len(m.Tenant) > 0 {
i -= len(m.Tenant)
copy(dAtA[i:], m.Tenant)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Tenant)))
i--
dAtA[i] = 0x32
}
if m.Object != nil {
{
size, err := m.Object.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
if len(m.Labels) > 0 {
for k := range m.Labels {
v := m.Labels[k]
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(k)
copy(dAtA[i:], k)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(k)))
i--
dAtA[i] = 0xa
i = encodeVarintPublicCrudapi(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x22
}
}
if len(m.Uid) > 0 {
i -= len(m.Uid)
copy(dAtA[i:], m.Uid)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Uid)))
i--
dAtA[i] = 0x1a
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
}
if len(m.Namespace) > 0 {
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ListResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Errors) > 0 {
for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Errors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPublicCrudapi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *DeleteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.FailIfReferred {
i--
if m.FailIfReferred {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x18
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
}
if len(m.Namespace) > 0 {
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintPublicCrudapi(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
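// encodeVarintPublicCrudapi writes v as a standard little-endian base-128
// varint into the sovPublicCrudapi(v) bytes ending just before offset, and
// returns the (smaller) offset of the first byte written.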
func encodeVarintPublicCrudapi(dAtA []byte, offset int, v uint64) int {
offset -= sovPublicCrudapi(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
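// The Size methods below compute the exact number of bytes Marshal will
// produce: for each set field, the tag width plus, for length-delimited
// fields, a varint length prefix and the payload. Marshal relies on this to
// allocate the output buffer in a single allocation.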
func (m *CreateRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Metadata != nil {
l = m.Metadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.Spec != nil {
l = m.Spec.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
return n
}
func (m *CreateResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Metadata != nil {
l = m.Metadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.Spec != nil {
l = m.Spec.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.SystemMetadata != nil {
l = m.SystemMetadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
return n
}
func (m *ReplaceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Metadata != nil {
l = m.Metadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.Spec != nil {
l = m.Spec.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.ResourceVersion)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
return n
}
func (m *ReplaceResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
return n
}
func (m *GetRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Namespace)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.ResponseFormat != 0 {
n += 1 + sovPublicCrudapi(uint64(m.ResponseFormat))
}
return n
}
func (m *GetResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Object != nil {
l = m.Object.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.CreateForm != nil {
l = m.CreateForm.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.ReplaceForm != nil {
l = m.ReplaceForm.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.ResourceVersion)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.Metadata != nil {
l = m.Metadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.Spec != nil {
l = m.Spec.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.SystemMetadata != nil {
l = m.SystemMetadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if len(m.ReferringObjects) > 0 {
for _, e := range m.ReferringObjects {
l = e.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
}
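// Status uses field number 20000, whose tag encodes to 3 varint bytes,
// hence the constant 3 below instead of the usual 1.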
if len(m.Status) > 0 {
for _, e := range m.Status {
l = e.Size()
n += 3 + l + sovPublicCrudapi(uint64(l))
}
}
return n
}
func (m *ListRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Namespace)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.LabelFilter)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if len(m.ReportFields) > 0 {
for _, s := range m.ReportFields {
l = len(s)
n += 1 + l + sovPublicCrudapi(uint64(l))
}
}
if len(m.ReportStatusFields) > 0 {
for _, s := range m.ReportStatusFields {
l = len(s)
n += 1 + l + sovPublicCrudapi(uint64(l))
}
}
return n
}
func (m *ListResponseItem) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Namespace)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.Uid)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovPublicCrudapi(uint64(len(k))) + 1 + len(v) + sovPublicCrudapi(uint64(len(v)))
n += mapEntrySize + 1 + sovPublicCrudapi(uint64(mapEntrySize))
}
}
if m.Object != nil {
l = m.Object.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.Tenant)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.GetSpec != nil {
l = m.GetSpec.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if len(m.StatusSet) > 0 {
for _, e := range m.StatusSet {
l = e.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
}
if m.OwnerView != nil {
l = m.OwnerView.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if len(m.Annotations) > 0 {
for k, v := range m.Annotations {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovPublicCrudapi(uint64(len(k))) + 1 + len(v) + sovPublicCrudapi(uint64(len(v)))
n += mapEntrySize + 1 + sovPublicCrudapi(uint64(mapEntrySize))
}
}
l = len(m.Description)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.Disabled {
n += 2
}
if m.Metadata != nil {
l = m.Metadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.SystemMetadata != nil {
l = m.SystemMetadata.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
return n
}
func (m *ListResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
}
if len(m.Errors) > 0 {
for _, e := range m.Errors {
l = e.Size()
n += 1 + l + sovPublicCrudapi(uint64(l))
}
}
return n
}
func (m *DeleteRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Namespace)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovPublicCrudapi(uint64(l))
}
if m.FailIfReferred {
n += 2
}
return n
}
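// sovPublicCrudapi returns the number of bytes needed to varint-encode x:
// one byte per started group of 7 bits. The x|1 guards the x == 0 case so
// that zero still counts as one byte.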
func sovPublicCrudapi(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
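// sozPublicCrudapi sizes a zigzag-encoded (sint32/sint64) value, mapping
// signed integers to unsigned ones so that small negative numbers stay
// small on the wire.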
func sozPublicCrudapi(x uint64) (n int) {
return sovPublicCrudapi(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
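// The String methods below are gogo/protobuf debug stringers: they render a
// message in a stable, human-readable form (map keys are sorted first) and
// are not a serialization format.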
func (this *CreateRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CreateRequest{`,
`Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "ObjectCreateMetaType", "schema.ObjectCreateMetaType", 1) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "CreateSpecType", "CreateSpecType", 1) + `,`,
`}`,
}, "")
return s
}
func (this *CreateResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CreateResponse{`,
`Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "ObjectGetMetaType", "schema.ObjectGetMetaType", 1) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "GetSpecType", "GetSpecType", 1) + `,`,
`SystemMetadata:` + strings.Replace(fmt.Sprintf("%v", this.SystemMetadata), "SystemObjectGetMetaType", "schema.SystemObjectGetMetaType", 1) + `,`,
`}`,
}, "")
return s
}
func (this *ReplaceRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ReplaceRequest{`,
`Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "ObjectReplaceMetaType", "schema.ObjectReplaceMetaType", 1) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ReplaceSpecType", "ReplaceSpecType", 1) + `,`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
`}`,
}, "")
return s
}
func (this *ReplaceResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ReplaceResponse{`,
`}`,
}, "")
return s
}
func (this *GetRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&GetRequest{`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`ResponseFormat:` + fmt.Sprintf("%v", this.ResponseFormat) + `,`,
`}`,
}, "")
return s
}
func (this *GetResponse) String() string {
if this == nil {
return "nil"
}
repeatedStringForReferringObjects := "[]*ObjectRefType{"
for _, f := range this.ReferringObjects {
repeatedStringForReferringObjects += strings.Replace(fmt.Sprintf("%v", f), "ObjectRefType", "schema.ObjectRefType", 1) + ","
}
repeatedStringForReferringObjects += "}"
repeatedStringForStatus := "[]*StatusObject{"
for _, f := range this.Status {
repeatedStringForStatus += strings.Replace(fmt.Sprintf("%v", f), "StatusObject", "StatusObject", 1) + ","
}
repeatedStringForStatus += "}"
s := strings.Join([]string{`&GetResponse{`,
`Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "Object", "Object", 1) + `,`,
`CreateForm:` + strings.Replace(this.CreateForm.String(), "CreateRequest", "CreateRequest", 1) + `,`,
`ReplaceForm:` + strings.Replace(this.ReplaceForm.String(), "ReplaceRequest", "ReplaceRequest", 1) + `,`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
`Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "ObjectGetMetaType", "schema.ObjectGetMetaType", 1) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "GetSpecType", "GetSpecType", 1) + `,`,
`SystemMetadata:` + strings.Replace(fmt.Sprintf("%v", this.SystemMetadata), "SystemObjectGetMetaType", "schema.SystemObjectGetMetaType", 1) + `,`,
`ReferringObjects:` + repeatedStringForReferringObjects + `,`,
`Status:` + repeatedStringForStatus + `,`,
`}`,
}, "")
return s
}
func (this *ListRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ListRequest{`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`LabelFilter:` + fmt.Sprintf("%v", this.LabelFilter) + `,`,
`ReportFields:` + fmt.Sprintf("%v", this.ReportFields) + `,`,
`ReportStatusFields:` + fmt.Sprintf("%v", this.ReportStatusFields) + `,`,
`}`,
}, "")
return s
}
func (this *ListResponseItem) String() string {
if this == nil {
return "nil"
}
repeatedStringForStatusSet := "[]*StatusObject{"
for _, f := range this.StatusSet {
repeatedStringForStatusSet += strings.Replace(fmt.Sprintf("%v", f), "StatusObject", "StatusObject", 1) + ","
}
repeatedStringForStatusSet += "}"
keysForLabels := make([]string, 0, len(this.Labels))
for k := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
keysForAnnotations := make([]string, 0, len(this.Annotations))
for k := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
}
mapStringForAnnotations += "}"
s := strings.Join([]string{`&ListResponseItem{`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Uid:` + fmt.Sprintf("%v", this.Uid) + `,`,
`Labels:` + mapStringForLabels + `,`,
`Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "Object", "Object", 1) + `,`,
`Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`,
`GetSpec:` + strings.Replace(fmt.Sprintf("%v", this.GetSpec), "GetSpecType", "GetSpecType", 1) + `,`,
`StatusSet:` + repeatedStringForStatusSet + `,`,
`OwnerView:` + strings.Replace(fmt.Sprintf("%v", this.OwnerView), "ViewRefType", "schema.ViewRefType", 1) + `,`,
`Annotations:` + mapStringForAnnotations + `,`,
`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
`Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`,
`Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "ObjectGetMetaType", "schema.ObjectGetMetaType", 1) + `,`,
`SystemMetadata:` + strings.Replace(fmt.Sprintf("%v", this.SystemMetadata), "SystemObjectGetMetaType", "schema.SystemObjectGetMetaType", 1) + `,`,
`}`,
}, "")
return s
}
func (this *ListResponse) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]*ListResponseItem{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(f.String(), "ListResponseItem", "ListResponseItem", 1) + ","
}
repeatedStringForItems += "}"
repeatedStringForErrors := "[]*ErrorType{"
for _, f := range this.Errors {
repeatedStringForErrors += strings.Replace(fmt.Sprintf("%v", f), "ErrorType", "schema.ErrorType", 1) + ","
}
repeatedStringForErrors += "}"
s := strings.Join([]string{`&ListResponse{`,
`Items:` + repeatedStringForItems + `,`,
`Errors:` + repeatedStringForErrors + `,`,
`}`,
}, "")
return s
}
func (this *DeleteRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&DeleteRequest{`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`FailIfReferred:` + fmt.Sprintf("%v", this.FailIfReferred) + `,`,
`}`,
}, "")
return s
}
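// valueToStringPublicCrudapi formats a pointer field for the stringers
// above, printing "nil" for nil pointers and the dereferenced value
// otherwise.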
func valueToStringPublicCrudapi(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
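// The Unmarshal methods below implement the standard protobuf decode loop:
// read a varint tag, split it into fieldNum (tag >> 3) and wireType
// (tag & 0x7), dispatch on the field number, and skip unknown fields via
// skipPublicCrudapi. A bare end-group tag (wire type 4) is rejected, and
// every length read is bounds-checked against the input before slicing.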
func (m *CreateRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = &schema.ObjectCreateMetaType{}
}
if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Spec == nil {
m.Spec = &CreateSpecType{}
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CreateResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = &schema.ObjectGetMetaType{}
}
if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Spec == nil {
m.Spec = &GetSpecType{}
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SystemMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SystemMetadata == nil {
m.SystemMetadata = &schema.SystemObjectGetMetaType{}
}
if err := m.SystemMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ReplaceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReplaceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ReplaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = &schema.ObjectReplaceMetaType{}
}
if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Spec == nil {
m.Spec = &ReplaceSpecType{}
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceVersion = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ReplaceResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReplaceResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ReplaceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *GetRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Namespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ResponseFormat", wireType)
}
m.ResponseFormat = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ResponseFormat |= GetResponseFormatCode(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
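// A minimal round-trip sketch (illustrative only; the field values are
// made up):
//
//	req := &GetRequest{Namespace: "default", Name: "example"}
//	buf, err := req.Marshal()
//	if err != nil {
//		// handle the error
//	}
//	var decoded GetRequest
//	if err := decoded.Unmarshal(buf); err != nil {
//		// handle the error
//	}
//	// decoded now matches req field for field.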
func (m *GetResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Object == nil {
m.Object = &Object{}
}
if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CreateForm", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.CreateForm == nil {
m.CreateForm = &CreateRequest{}
}
if err := m.CreateForm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ReplaceForm", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.ReplaceForm == nil {
m.ReplaceForm = &ReplaceRequest{}
}
if err := m.ReplaceForm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceVersion = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = &schema.ObjectGetMetaType{}
}
if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Spec == nil {
m.Spec = &GetSpecType{}
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SystemMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SystemMetadata == nil {
m.SystemMetadata = &schema.SystemObjectGetMetaType{}
}
if err := m.SystemMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ReferringObjects", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ReferringObjects = append(m.ReferringObjects, &schema.ObjectRefType{})
if err := m.ReferringObjects[len(m.ReferringObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 20000:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Status = append(m.Status, &StatusObject{})
if err := m.Status[len(m.Status)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ListRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ListRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Namespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LabelFilter", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LabelFilter = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ReportFields", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ReportFields = append(m.ReportFields, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ReportStatusFields", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ReportStatusFields = append(m.ReportStatusFields, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ListResponseItem) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ListResponseItem: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ListResponseItem: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Namespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Uid = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Labels == nil {
m.Labels = make(map[string]string)
}
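// Protobuf maps arrive as repeated nested messages: each entry carries the
// key as field 1 and the value as field 2, parsed by the inner loop below.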
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthPublicCrudapi
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthPublicCrudapi
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Labels[mapkey] = mapvalue
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Object == nil {
m.Object = &Object{}
}
if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Tenant = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field GetSpec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.GetSpec == nil {
m.GetSpec = &GetSpecType{}
}
if err := m.GetSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StatusSet", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StatusSet = append(m.StatusSet, &StatusObject{})
if err := m.StatusSet[len(m.StatusSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OwnerView", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.OwnerView == nil {
m.OwnerView = &schema.ViewRefType{}
}
if err := m.OwnerView.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Annotations == nil {
m.Annotations = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthPublicCrudapi
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthPublicCrudapi
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Annotations[mapkey] = mapvalue
iNdEx = postIndex
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Description = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Disabled = bool(v != 0)
case 13:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = &schema.ObjectGetMetaType{}
}
if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 14:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SystemMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SystemMetadata == nil {
m.SystemMetadata = &schema.SystemObjectGetMetaType{}
}
if err := m.SystemMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ListResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ListResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, &ListResponseItem{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Errors = append(m.Errors, &schema.ErrorType{})
if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *DeleteRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Namespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPublicCrudapi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPublicCrudapi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FailIfReferred", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.FailIfReferred = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipPublicCrudapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthPublicCrudapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipPublicCrudapi(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPublicCrudapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthPublicCrudapi
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupPublicCrudapi
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthPublicCrudapi
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthPublicCrudapi = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowPublicCrudapi = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupPublicCrudapi = fmt.Errorf("proto: unexpected end of group")
)
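// Illustrative sketch (not part of the generated code): every unmarshal loop
// above decodes a protobuf base-128 varint. A minimal standalone version of
// that decoding, assuming `data` holds a well-formed varint starting at `i`:
//
//	func readVarint(data []byte, i int) (v uint64, n int, err error) {
//		for shift := uint(0); ; shift += 7 {
//			if shift >= 64 {
//				return 0, 0, fmt.Errorf("varint overflows uint64")
//			}
//			if i+n >= len(data) {
//				return 0, 0, io.ErrUnexpectedEOF
//			}
//			b := data[i+n]
//			n++
//			v |= uint64(b&0x7F) << shift
//			if b < 0x80 { // high bit clear marks the last byte
//				return v, n, nil
//			}
//		}
//	}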
| {
golang_proto.RegisterFile("ves.io/schema/alert_receiver/public_crudapi.proto", fileDescriptor_d7901683cee671bb)
} |
fake_client.go | // Code generated by counterfeiter. DO NOT EDIT.
package githubfakes
import (
"net/http"
"sync"
"github.com/concourse/atc/auth/github"
)
type FakeClient struct {
CurrentUserStub func(*http.Client) (string, error)
currentUserMutex sync.RWMutex
currentUserArgsForCall []struct {
arg1 *http.Client
}
currentUserReturns struct {
result1 string
result2 error
}
currentUserReturnsOnCall map[int]struct {
result1 string
result2 error
}
OrganizationsStub func(*http.Client) ([]string, error)
organizationsMutex sync.RWMutex
organizationsArgsForCall []struct {
arg1 *http.Client
}
organizationsReturns struct {
result1 []string
result2 error
}
organizationsReturnsOnCall map[int]struct {
result1 []string
result2 error
}
TeamsStub func(*http.Client) (github.OrganizationTeams, error)
teamsMutex sync.RWMutex
teamsArgsForCall []struct {
arg1 *http.Client
}
teamsReturns struct {
result1 github.OrganizationTeams
result2 error
}
teamsReturnsOnCall map[int]struct {
result1 github.OrganizationTeams
result2 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeClient) CurrentUser(arg1 *http.Client) (string, error) {
fake.currentUserMutex.Lock()
ret, specificReturn := fake.currentUserReturnsOnCall[len(fake.currentUserArgsForCall)]
fake.currentUserArgsForCall = append(fake.currentUserArgsForCall, struct {
arg1 *http.Client
}{arg1})
fake.recordInvocation("CurrentUser", []interface{}{arg1})
fake.currentUserMutex.Unlock()
if fake.CurrentUserStub != nil {
return fake.CurrentUserStub(arg1)
}
if specificReturn {
return ret.result1, ret.result2
}
return fake.currentUserReturns.result1, fake.currentUserReturns.result2
}
func (fake *FakeClient) CurrentUserCallCount() int {
fake.currentUserMutex.RLock()
defer fake.currentUserMutex.RUnlock()
return len(fake.currentUserArgsForCall)
}
func (fake *FakeClient) CurrentUserArgsForCall(i int) *http.Client {
fake.currentUserMutex.RLock()
defer fake.currentUserMutex.RUnlock()
return fake.currentUserArgsForCall[i].arg1
}
func (fake *FakeClient) CurrentUserReturns(result1 string, result2 error) {
fake.CurrentUserStub = nil
fake.currentUserReturns = struct {
result1 string
result2 error |
func (fake *FakeClient) CurrentUserReturnsOnCall(i int, result1 string, result2 error) {
fake.CurrentUserStub = nil
if fake.currentUserReturnsOnCall == nil {
fake.currentUserReturnsOnCall = make(map[int]struct {
result1 string
result2 error
})
}
fake.currentUserReturnsOnCall[i] = struct {
result1 string
result2 error
}{result1, result2}
}
func (fake *FakeClient) Organizations(arg1 *http.Client) ([]string, error) {
fake.organizationsMutex.Lock()
ret, specificReturn := fake.organizationsReturnsOnCall[len(fake.organizationsArgsForCall)]
fake.organizationsArgsForCall = append(fake.organizationsArgsForCall, struct {
arg1 *http.Client
}{arg1})
fake.recordInvocation("Organizations", []interface{}{arg1})
fake.organizationsMutex.Unlock()
if fake.OrganizationsStub != nil {
return fake.OrganizationsStub(arg1)
}
if specificReturn {
return ret.result1, ret.result2
}
return fake.organizationsReturns.result1, fake.organizationsReturns.result2
}
func (fake *FakeClient) OrganizationsCallCount() int {
fake.organizationsMutex.RLock()
defer fake.organizationsMutex.RUnlock()
return len(fake.organizationsArgsForCall)
}
func (fake *FakeClient) OrganizationsArgsForCall(i int) *http.Client {
fake.organizationsMutex.RLock()
defer fake.organizationsMutex.RUnlock()
return fake.organizationsArgsForCall[i].arg1
}
func (fake *FakeClient) OrganizationsReturns(result1 []string, result2 error) {
fake.OrganizationsStub = nil
fake.organizationsReturns = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeClient) OrganizationsReturnsOnCall(i int, result1 []string, result2 error) {
fake.OrganizationsStub = nil
if fake.organizationsReturnsOnCall == nil {
fake.organizationsReturnsOnCall = make(map[int]struct {
result1 []string
result2 error
})
}
fake.organizationsReturnsOnCall[i] = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeClient) Teams(arg1 *http.Client) (github.OrganizationTeams, error) {
fake.teamsMutex.Lock()
ret, specificReturn := fake.teamsReturnsOnCall[len(fake.teamsArgsForCall)]
fake.teamsArgsForCall = append(fake.teamsArgsForCall, struct {
arg1 *http.Client
}{arg1})
fake.recordInvocation("Teams", []interface{}{arg1})
fake.teamsMutex.Unlock()
if fake.TeamsStub != nil {
return fake.TeamsStub(arg1)
}
if specificReturn {
return ret.result1, ret.result2
}
return fake.teamsReturns.result1, fake.teamsReturns.result2
}
func (fake *FakeClient) TeamsCallCount() int {
fake.teamsMutex.RLock()
defer fake.teamsMutex.RUnlock()
return len(fake.teamsArgsForCall)
}
func (fake *FakeClient) TeamsArgsForCall(i int) *http.Client {
fake.teamsMutex.RLock()
defer fake.teamsMutex.RUnlock()
return fake.teamsArgsForCall[i].arg1
}
func (fake *FakeClient) TeamsReturns(result1 github.OrganizationTeams, result2 error) {
fake.TeamsStub = nil
fake.teamsReturns = struct {
result1 github.OrganizationTeams
result2 error
}{result1, result2}
}
func (fake *FakeClient) TeamsReturnsOnCall(i int, result1 github.OrganizationTeams, result2 error) {
fake.TeamsStub = nil
if fake.teamsReturnsOnCall == nil {
fake.teamsReturnsOnCall = make(map[int]struct {
result1 github.OrganizationTeams
result2 error
})
}
fake.teamsReturnsOnCall[i] = struct {
result1 github.OrganizationTeams
result2 error
}{result1, result2}
}
func (fake *FakeClient) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.currentUserMutex.RLock()
defer fake.currentUserMutex.RUnlock()
fake.organizationsMutex.RLock()
defer fake.organizationsMutex.RUnlock()
fake.teamsMutex.RLock()
defer fake.teamsMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeClient) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
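// Illustrative test usage (hypothetical variables; the fake is generated by
// counterfeiter, the wiring below is an assumption about how a caller uses it):
//
//	fake := new(FakeClient)
//	fake.CurrentUserReturns("some-user", nil)
//	user, err := fake.CurrentUser(httpClient) // yields the canned values
//	// fake.CurrentUserCallCount() == 1
//	// fake.CurrentUserArgsForCall(0) == httpClient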
var _ github.Client = new(FakeClient) | }{result1, result2}
} |
multiaddr.py | # -*- coding: utf-8 -*-
import binascii
from copy import copy
from .codec import size_for_addr
from .codec import string_to_bytes
from .codec import bytes_to_string
from .codec import protocol_with_name
from .protocols import protocol_with_code
from .protocols import read_varint_code
class ProtocolNotFoundException(Exception):
pass
class Multiaddr(object):
"""Multiaddr is a representation of multiple nested internet addresses.
Multiaddr is a cross-protocol, cross-platform format for representing
internet addresses. It emphasizes explicitness and self-description.
Learn more here: https://github.com/jbenet/multiaddr
Multiaddrs have both a binary and string representation.
>>> from multiaddr import Multiaddr
>>> addr = Multiaddr("/ip4/1.2.3.4/tcp/80")
Multiaddr objects are immutable, so `encapsulate` and `decapsulate`
return new objects rather than modify internal state.
"""
def __init__(self, addr):
"""Instantiate a new Multiaddr.
Args:
addr : A string-encoded or a byte-encoded Multiaddr
"""
if isinstance(addr, str):
self._bytes = string_to_bytes(addr)
elif isinstance(addr, bytes):
self._bytes = addr
else:
raise ValueError("Invalid address type, must be bytes or str")
def __eq__(self, other):
"""Checks if two Multiaddr objects are exactly equal."""
return self._bytes == other._bytes
def __ne__(self, other):
return not (self == other)
def __str__(self):
"""Return the string representation of this Multiaddr.
May raise an exception if the internal state of the Multiaddr is
corrupted."""
try:
return bytes_to_string(self._bytes)
except Exception:
raise ValueError(
"multiaddr failed to convert back to string. corrupted?")
def __repr__(self):
return "<Multiaddr %s>" % str(self)
def to_bytes(self):
"""Returns the byte array representation of this Multiaddr."""
return self._bytes
def protocols(self):
"""Returns a list of Protocols this Multiaddr includes."""
buf = binascii.unhexlify(self.to_bytes())
protos = []
while buf:
code, num_bytes_read = read_varint_code(buf)
proto = protocol_with_code(code)
protos.append(proto)
buf = buf[num_bytes_read:]
size = size_for_addr(proto, buf)
buf = buf[size:]
return protos
def encapsulate(self, other):
"""Wrap this Multiaddr around another.
For example:
/ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80
"""
mb = self.to_bytes()
ob = other.to_bytes()
return Multiaddr(b''.join([mb, ob]))
def decapsulate(self, other):
"""Remove a Multiaddr wrapping.
For example:
/ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80
"""
s1 = str(self)
s2 = str(other)
try:
idx = s1.rindex(s2)
except ValueError:
# If the other multiaddr is not contained in this one, return a copy.
return copy(self)
try:
return Multiaddr(s1[:idx])
except Exception as ex:
raise ValueError(
"Multiaddr.decapsulate incorrect byte boundaries: %s"
% str(ex))
def value_for_protocol(self, code):
| """Return the value (if any) following the specified protocol."""
from .util import split
if isinstance(code, str):
protocol = protocol_with_name(code)
code = protocol.code
for sub_addr in split(self):
if sub_addr.protocols()[0].code == code:
addr_parts = str(sub_addr).split("/")
if len(addr_parts) > 3:
raise ValueError("Unknown Protocol format")
elif len(addr_parts) == 3:
# If we have an address, return it
return addr_parts[2]
elif len(addr_parts) == 2:
# We were given something like '/utp', which doesn't have
# an address, so return ''
return ''
raise ProtocolNotFoundException() |
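# Usage sketch (illustrative addresses, mirroring the docstring examples):
#
#     base = Multiaddr("/ip4/1.2.3.4")
#     full = base.encapsulate(Multiaddr("/tcp/80"))  # /ip4/1.2.3.4/tcp/80
#     full.value_for_protocol("tcp")                 # -> '80'
#     full.decapsulate(Multiaddr("/tcp/80"))         # back to /ip4/1.2.3.4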
|
mod.rs | use std::collections::HashMap;
use std::env;
use std::fs;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::iter::Peekable;
use std::mem;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::str::Chars;
use std::thread;
use crate::config::{Color, Config, EmitMode, FileName, NewlineStyle, ReportTactic};
use crate::formatting::{ReportedErrors, SourceFile};
use crate::rustfmt_diff::{make_diff, print_diff, DiffLine, Mismatch, ModifiedChunk, OutputWriter};
use crate::source_file;
use crate::{is_nightly_channel, FormatReport, FormatReportFormatterBuilder, Input, Session};
use rustfmt_config_proc_macro::nightly_only_test;
mod configuration_snippet;
mod mod_resolver;
mod parser;
const DIFF_CONTEXT_SIZE: usize = 3;
// A list of files on which we want to skip testing.
const SKIP_FILE_WHITE_LIST: &[&str] = &[
// We want to make sure that the `skip_children` is correctly working,
// so we do not want to test this file directly.
"configs/skip_children/foo/mod.rs",
"issue-3434/no_entry.rs",
"issue-3665/sub_mod.rs",
// Testing for issue-3779
"issue-3779/ice.rs",
// These files and directory are a part of modules defined inside `cfg_if!`.
"cfg_if/mod.rs",
"cfg_if/detect",
"issue-3253/foo.rs",
"issue-3253/bar.rs",
"issue-3253/paths",
// These files and directory are a part of modules defined inside `cfg_attr(..)`.
"cfg_mod/dir",
"cfg_mod/bar.rs",
"cfg_mod/foo.rs",
"cfg_mod/wasm32.rs",
"skip/foo.rs",
];
fn init_log() {
let _ = env_logger::builder().is_test(true).try_init();
}
struct TestSetting {
/// The size of the stack of the thread that run tests.
stack_size: usize,
}
impl Default for TestSetting {
fn default() -> Self {
TestSetting {
stack_size: 8_388_608, // 8MB
}
}
}
fn run_test_with<F>(test_setting: &TestSetting, f: F)
where
F: FnOnce(),
F: Send + 'static,
{
thread::Builder::new()
.stack_size(test_setting.stack_size)
.spawn(f)
.expect("Failed to create a test thread")
.join()
.expect("Failed to join a test thread")
}
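// Returns true if `subpath` occurs as a contiguous run of components anywhere
// inside `path` (e.g. `a/b/c` contains `b/c` but not `a/c`).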
fn is_subpath<P>(path: &Path, subpath: &P) -> bool
where
P: AsRef<Path>,
{
(0..path.components().count())
.map(|i| {
path.components()
.skip(i)
.take(subpath.as_ref().components().count())
})
.any(|c| c.zip(subpath.as_ref().components()).all(|(a, b)| a == b))
}
fn is_file_skip(path: &Path) -> bool {
SKIP_FILE_WHITE_LIST
.iter()
.any(|file_path| is_subpath(path, file_path))
}
// Returns a `Vec` containing `PathBuf`s of files with an `rs` extension in the
// given path. The `recursive` argument controls if files from subdirectories
// are also returned.
fn get_test_files(path: &Path, recursive: bool) -> Vec<PathBuf> {
let mut files = vec![];
if path.is_dir() {
for entry in fs::read_dir(path).expect(&format!(
"couldn't read directory {}",
path.to_str().unwrap()
)) {
let entry = entry.expect("couldn't get `DirEntry`");
let path = entry.path();
if path.is_dir() && recursive {
files.append(&mut get_test_files(&path, recursive));
} else if path.extension().map_or(false, |f| f == "rs") && !is_file_skip(&path) {
files.push(path);
}
}
}
files
}
fn verify_config_used(path: &Path, config_name: &str) {
for entry in fs::read_dir(path).expect(&format!(
"couldn't read {} directory",
path.to_str().unwrap()
)) {
let entry = entry.expect("couldn't get directory entry");
let path = entry.path();
if path.extension().map_or(false, |f| f == "rs") {
// check if "// rustfmt-<config_name>:" appears in the file.
let filebuf = BufReader::new(
fs::File::open(&path)
.unwrap_or_else(|_| panic!("couldn't read file {}", path.display())),
);
assert!( | .lines()
.map(Result::unwrap)
.take_while(|l| l.starts_with("//"))
.any(|l| l.starts_with(&format!("// rustfmt-{}", config_name))),
"config option file {} does not contain expected config name",
path.display()
);
}
}
}
#[test]
fn verify_config_test_names() {
init_log();
for path in &[
Path::new("tests/source/configs"),
Path::new("tests/target/configs"),
] {
for entry in fs::read_dir(path).expect("couldn't read configs directory") {
let entry = entry.expect("couldn't get directory entry");
let path = entry.path();
if path.is_dir() {
let config_name = path.file_name().unwrap().to_str().unwrap();
// Make sure that config name is used in the files in the directory.
verify_config_used(&path, config_name);
}
}
}
}
// This writes to the terminal using the same approach (via `term::stdout` or
// `println!`) that is used by `rustfmt::rustfmt_diff::print_diff`. Writing
// using only one or the other will cause the output order to differ when
// `print_diff` selects the approach not used.
fn write_message(msg: &str) {
let mut writer = OutputWriter::new(Color::Auto);
writer.writeln(msg, None);
}
// Integration tests. The files in `tests/source` are formatted and compared
// to their equivalent in `tests/target`. The target file and config can be
// overridden by annotations in the source file. The input and output must match
// exactly.
#[test]
fn system_tests() {
init_log();
run_test_with(&TestSetting::default(), || {
// Get all files in the tests/source directory.
let files = get_test_files(Path::new("tests/source"), true);
let (_reports, count, fails) = check_files(files, &None);
// Display results.
println!("Ran {} system tests.", count);
assert_eq!(fails, 0, "{} system tests failed", fails);
assert!(
count >= 300,
"Expected a minimum of {} system tests to be executed",
300
)
});
}
// Do the same for tests/coverage-source directory.
// The only difference is the coverage mode.
#[test]
fn coverage_tests() {
init_log();
let files = get_test_files(Path::new("tests/coverage/source"), true);
let (_reports, count, fails) = check_files(files, &None);
println!("Ran {} tests in coverage mode.", count);
assert_eq!(fails, 0, "{} tests failed", fails);
}
#[test]
fn checkstyle_test() {
init_log();
let filename = "tests/writemode/source/fn-single-line.rs";
let expected_filename = "tests/writemode/target/checkstyle.xml";
assert_output(Path::new(filename), Path::new(expected_filename));
}
#[test]
fn json_test() {
init_log();
let filename = "tests/writemode/source/json.rs";
let expected_filename = "tests/writemode/target/output.json";
assert_output(Path::new(filename), Path::new(expected_filename));
}
#[test]
fn modified_test() {
init_log();
use std::io::BufRead;
// Test "modified" output
let filename = "tests/writemode/source/modified.rs";
let mut data = Vec::new();
let mut config = Config::default();
config
.set()
.emit_mode(crate::config::EmitMode::ModifiedLines);
{
let mut session = Session::new(config, Some(&mut data));
session.format(Input::File(filename.into())).unwrap();
}
let mut lines = data.lines();
let mut chunks = Vec::new();
while let Some(Ok(header)) = lines.next() {
// Parse the header line
let values: Vec<_> = header
.split(' ')
.map(|s| s.parse::<u32>().unwrap())
.collect();
assert_eq!(values.len(), 3);
let line_number_orig = values[0];
let lines_removed = values[1];
let num_added = values[2];
let mut added_lines = Vec::new();
for _ in 0..num_added {
added_lines.push(lines.next().unwrap().unwrap());
}
chunks.push(ModifiedChunk {
line_number_orig,
lines_removed,
lines: added_lines,
});
}
assert_eq!(
chunks,
vec![
ModifiedChunk {
line_number_orig: 4,
lines_removed: 4,
lines: vec!["fn blah() {}".into()],
},
ModifiedChunk {
line_number_orig: 9,
lines_removed: 6,
lines: vec!["#[cfg(a, b)]".into(), "fn main() {}".into()],
},
],
);
}
// Helper function for comparing the results of rustfmt
// to a known output file generated by one of the write modes.
fn assert_output(source: &Path, expected_filename: &Path) {
let config = read_config(source);
let (_, source_file, _) = format_file(source, config.clone());
// Populate output by writing to a vec.
let mut out = vec![];
let _ = source_file::write_all_files(&source_file, &mut out, &config);
let output = String::from_utf8(out).unwrap();
let mut expected_file = fs::File::open(&expected_filename).expect("couldn't open target");
let mut expected_text = String::new();
expected_file
.read_to_string(&mut expected_text)
.expect("Failed reading target");
let compare = make_diff(&expected_text, &output, DIFF_CONTEXT_SIZE);
if !compare.is_empty() {
let mut failures = HashMap::new();
failures.insert(source.to_owned(), compare);
print_mismatches_default_message(failures);
panic!("Text does not match expected output");
}
}
// Helper function for comparing the results of rustfmt
// to a known output generated by one of the write modes.
fn assert_stdin_output(
source: &Path,
expected_filename: &Path,
emit_mode: EmitMode,
has_diff: bool,
) {
let mut config = Config::default();
config.set().newline_style(NewlineStyle::Unix);
config.set().emit_mode(emit_mode);
let mut source_file = fs::File::open(&source).expect("couldn't open source");
let mut source_text = String::new();
source_file
.read_to_string(&mut source_text)
.expect("Failed reading target");
let input = Input::Text(source_text);
// Populate output by writing to a vec.
let mut buf: Vec<u8> = vec![];
{
let mut session = Session::new(config, Some(&mut buf));
session.format(input).unwrap();
let errors = ReportedErrors {
has_diff: has_diff,
..Default::default()
};
assert_eq!(session.errors, errors);
}
let mut expected_file = fs::File::open(&expected_filename).expect("couldn't open target");
let mut expected_text = String::new();
expected_file
.read_to_string(&mut expected_text)
.expect("Failed reading target");
let output = String::from_utf8(buf).unwrap();
let compare = make_diff(&expected_text, &output, DIFF_CONTEXT_SIZE);
if !compare.is_empty() {
let mut failures = HashMap::new();
failures.insert(source.to_owned(), compare);
print_mismatches_default_message(failures);
panic!("Text does not match expected output");
}
}
// Idempotence tests. Files in tests/target are checked to be unaltered by
// rustfmt.
#[nightly_only_test]
#[test]
fn idempotence_tests() {
init_log();
run_test_with(&TestSetting::default(), || {
// Get all files in the tests/target directory.
let files = get_test_files(Path::new("tests/target"), true);
let (_reports, count, fails) = check_files(files, &None);
// Display results.
println!("Ran {} idempotent tests.", count);
assert_eq!(fails, 0, "{} idempotent tests failed", fails);
assert!(
count >= 400,
"Expected a minimum of {} idempotent tests to be executed",
400
)
});
}
#[nightly_only_test]
#[test]
fn self_tests() {
let get_exe_path = |name| {
let mut path = env::current_exe().unwrap();
path.pop();
path.set_file_name(format!("{name}{}", env::consts::EXE_SUFFIX));
path
};
let status = Command::new(get_exe_path("cargo-fmt"))
.args(["--check", "--all"])
.env("RUSTFMT", get_exe_path("rustfmt"))
.status()
.unwrap();
assert!(status.success());
}
#[test]
fn format_files_find_new_files_via_cfg_if() {
init_log();
run_test_with(&TestSetting::default(), || {
// To repro issue-4656, it is necessary that these files are parsed
// as a part of the same session (hence this separate test runner).
let files = vec![
Path::new("tests/source/issue-4656/lib2.rs"),
Path::new("tests/source/issue-4656/lib.rs"),
];
let config = Config::default();
let mut session = Session::<io::Stdout>::new(config, None);
let mut write_result = HashMap::new();
for file in files {
assert!(file.exists());
let result = session.format(Input::File(file.into())).unwrap();
assert!(!session.has_formatting_errors());
assert!(!result.has_warnings());
let mut source_file = SourceFile::new();
mem::swap(&mut session.source_file, &mut source_file);
for (filename, text) in source_file {
if let FileName::Real(ref filename) = filename {
write_result.insert(filename.to_owned(), text);
}
}
}
assert_eq!(
3,
write_result.len(),
"Should have uncovered an extra file (format_me_please.rs) via lib.rs"
);
assert!(handle_result(write_result, None).is_ok());
});
}
#[test]
fn stdin_formatting_smoke_test() {
init_log();
let input = Input::Text("fn main () {}".to_owned());
let mut config = Config::default();
config.set().emit_mode(EmitMode::Stdout);
let mut buf: Vec<u8> = vec![];
{
let mut session = Session::new(config, Some(&mut buf));
session.format(input).unwrap();
assert!(session.has_no_errors());
}
#[cfg(not(windows))]
assert_eq!(buf, "<stdin>:\n\nfn main() {}\n".as_bytes());
#[cfg(windows)]
assert_eq!(buf, "<stdin>:\n\nfn main() {}\r\n".as_bytes());
}
#[test]
fn stdin_parser_panic_caught() {
init_log();
// See issue #3239.
for text in ["{", "}"].iter().cloned().map(String::from) {
let mut buf = vec![];
let mut session = Session::new(Default::default(), Some(&mut buf));
let _ = session.format(Input::Text(text));
assert!(session.has_parsing_errors());
}
}
/// Ensures that `EmitMode::ModifiedLines` works with input from `stdin`. Useful
/// when embedding Rustfmt (e.g. inside RLS).
#[test]
fn stdin_works_with_modified_lines() {
init_log();
let input = "\nfn\n some( )\n{\n}\nfn main () {}\n";
let output = "1 6 2\nfn some() {}\nfn main() {}\n";
let input = Input::Text(input.to_owned());
let mut config = Config::default();
config.set().newline_style(NewlineStyle::Unix);
config.set().emit_mode(EmitMode::ModifiedLines);
let mut buf: Vec<u8> = vec![];
{
let mut session = Session::new(config, Some(&mut buf));
session.format(input).unwrap();
let errors = ReportedErrors {
has_diff: true,
..Default::default()
};
assert_eq!(session.errors, errors);
}
assert_eq!(buf, output.as_bytes());
}
/// Ensures that `EmitMode::Json` works with input from `stdin`.
#[test]
fn stdin_works_with_json() {
init_log();
assert_stdin_output(
Path::new("tests/writemode/source/stdin.rs"),
Path::new("tests/writemode/target/stdin.json"),
EmitMode::Json,
true,
);
}
/// Ensures that `EmitMode::Checkstyle` works with input from `stdin`.
#[test]
fn stdin_works_with_checkstyle() {
init_log();
assert_stdin_output(
Path::new("tests/writemode/source/stdin.rs"),
Path::new("tests/writemode/target/stdin.xml"),
EmitMode::Checkstyle,
false,
);
}
#[test]
fn stdin_disable_all_formatting_test() {
init_log();
let input = String::from("fn main() { println!(\"This should not be formatted.\"); }");
let mut child = Command::new(rustfmt().to_str().unwrap())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.arg("--config-path=./tests/config/disable_all_formatting.toml")
.spawn()
.expect("failed to execute child");
{
let stdin = child.stdin.as_mut().expect("failed to get stdin");
stdin
.write_all(input.as_bytes())
.expect("failed to write stdin");
}
let output = child.wait_with_output().expect("failed to wait on child");
assert!(output.status.success());
assert!(output.stderr.is_empty());
assert_eq!(input, String::from_utf8(output.stdout).unwrap());
}
#[test]
fn stdin_generated_files_issue_5172() {
init_log();
let input = Input::Text("//@generated\nfn main() {}".to_owned());
let mut config = Config::default();
config.set().emit_mode(EmitMode::Stdout);
config.set().format_generated_files(false);
config.set().newline_style(NewlineStyle::Unix);
let mut buf: Vec<u8> = vec![];
{
let mut session = Session::new(config, Some(&mut buf));
session.format(input).unwrap();
assert!(session.has_no_errors());
}
// N.B. this should be changed once `format_generated_files` is supported with stdin
assert_eq!(
String::from_utf8(buf).unwrap(),
"<stdin>:\n\n//@generated\nfn main() {}\n",
);
}
#[test]
fn format_lines_errors_are_reported() {
init_log();
let long_identifier = String::from_utf8(vec![b'a'; 239]).unwrap();
let input = Input::Text(format!("fn {}() {{}}", long_identifier));
let mut config = Config::default();
config.set().error_on_line_overflow(true);
let mut session = Session::<io::Stdout>::new(config, None);
session.format(input).unwrap();
assert!(session.has_formatting_errors());
}
#[test]
fn format_lines_errors_are_reported_with_tabs() {
init_log();
let long_identifier = String::from_utf8(vec![b'a'; 97]).unwrap();
let input = Input::Text(format!("fn a() {{\n\t{}\n}}", long_identifier));
let mut config = Config::default();
config.set().error_on_line_overflow(true);
config.set().hard_tabs(true);
let mut session = Session::<io::Stdout>::new(config, None);
session.format(input).unwrap();
assert!(session.has_formatting_errors());
}
// For each file, run rustfmt and collect the output.
// Returns the reports, the number of files checked, and the number of failures.
fn check_files(files: Vec<PathBuf>, opt_config: &Option<PathBuf>) -> (Vec<FormatReport>, u32, u32) {
let mut count = 0;
let mut fails = 0;
let mut reports = vec![];
for file_name in files {
let sig_comments = read_significant_comments(&file_name);
if sig_comments.contains_key("unstable") && !is_nightly_channel!() {
debug!(
"Skipping '{}' because it requires unstable \
features which are only available on nightly...",
file_name.display()
);
continue;
}
debug!("Testing '{}'...", file_name.display());
match idempotent_check(&file_name, opt_config) {
Ok(ref report) if report.has_warnings() => {
print!("{}", FormatReportFormatterBuilder::new(report).build());
fails += 1;
}
Ok(report) => reports.push(report),
Err(err) => {
if let IdempotentCheckError::Mismatch(msg) = err {
print_mismatches_default_message(msg);
}
fails += 1;
}
}
count += 1;
}
(reports, count, fails)
}
fn print_mismatches_default_message(result: HashMap<PathBuf, Vec<Mismatch>>) {
for (file_name, diff) in result {
let mismatch_msg_formatter =
|line_num| format!("\nMismatch at {}:{}:", file_name.display(), line_num);
print_diff(diff, &mismatch_msg_formatter, &Default::default());
}
if let Some(mut t) = term::stdout() {
t.reset().unwrap_or(());
}
}
fn print_mismatches<T: Fn(u32) -> String>(
result: HashMap<PathBuf, Vec<Mismatch>>,
mismatch_msg_formatter: T,
) {
for (_file_name, diff) in result {
print_diff(diff, &mismatch_msg_formatter, &Default::default());
}
if let Some(mut t) = term::stdout() {
t.reset().unwrap_or(());
}
}
fn read_config(filename: &Path) -> Config {
let sig_comments = read_significant_comments(filename);
// Look for a config file. If there is a 'config' property in the significant comments, use
// that. Otherwise, if there are no significant comments at all, look for a config file with
// the same name as the test file.
let mut config = if !sig_comments.is_empty() {
get_config(sig_comments.get("config").map(Path::new))
} else {
get_config(filename.with_extension("toml").file_name().map(Path::new))
};
for (key, val) in &sig_comments {
if key != "target" && key != "config" && key != "unstable" {
config.override_value(key, val);
if config.is_default(key) {
warn!("Default value {} used explicitly for {}", val, key);
}
}
}
// Don't generate warnings for to-do items.
config.set().report_todo(ReportTactic::Never);
config
}
fn format_file<P: Into<PathBuf>>(filepath: P, config: Config) -> (bool, SourceFile, FormatReport) {
let filepath = filepath.into();
let input = Input::File(filepath);
let mut session = Session::<io::Stdout>::new(config, None);
let result = session.format(input).unwrap();
let parsing_errors = session.has_parsing_errors();
let mut source_file = SourceFile::new();
mem::swap(&mut session.source_file, &mut source_file);
(parsing_errors, source_file, result)
}
enum IdempotentCheckError {
Mismatch(HashMap<PathBuf, Vec<Mismatch>>),
Parse,
}
fn idempotent_check(
filename: &PathBuf,
opt_config: &Option<PathBuf>,
) -> Result<FormatReport, IdempotentCheckError> {
let sig_comments = read_significant_comments(filename);
let config = if let Some(ref config_file_path) = opt_config {
Config::from_toml_path(Some(config_file_path), None).expect("`rustfmt.toml` not found")
} else {
read_config(filename)
};
let (parsing_errors, source_file, format_report) = format_file(filename, config);
if parsing_errors {
return Err(IdempotentCheckError::Parse);
}
let mut write_result = HashMap::new();
for (filename, text) in source_file {
if let FileName::Real(ref filename) = filename {
write_result.insert(filename.to_owned(), text);
}
}
let target = sig_comments.get("target").map(|x| &(*x)[..]);
handle_result(write_result, target).map(|_| format_report)
}
// Reads test config file using the supplied (optional) file name. If there's no file name or the
// file doesn't exist, just return the default config. Otherwise, the file must be read
// successfully.
fn get_config(config_file: Option<&Path>) -> Config {
let config_file_name = match config_file {
None => return Default::default(),
Some(file_name) => {
let mut full_path = PathBuf::from("tests/config/");
full_path.push(file_name);
if !full_path.exists() {
return Default::default();
};
full_path
}
};
let mut def_config_file = fs::File::open(config_file_name).expect("couldn't open config");
let mut def_config = String::new();
def_config_file
.read_to_string(&mut def_config)
.expect("Couldn't read config");
Config::from_toml(Some(&def_config), Some(Path::new("tests/config/")), None)
.expect("invalid TOML")
}
// Reads significant comments of the form: `// rustfmt-key: value` into a hash map.
fn read_significant_comments(file_name: &Path) -> HashMap<String, String> {
let file = fs::File::open(file_name)
.unwrap_or_else(|_| panic!("couldn't read file {}", file_name.display()));
let reader = BufReader::new(file);
let pattern = r"^\s*//\s*rustfmt-([^:]+):\s*(\S+)";
let regex = regex::Regex::new(pattern).expect("failed creating pattern 1");
// Matches lines containing significant comments or whitespace.
let line_regex = regex::Regex::new(r"(^\s*$)|(^\s*//\s*rustfmt-[^:]+:\s*\S+)")
.expect("failed creating pattern 2");
reader
.lines()
.map(|line| line.expect("failed getting line"))
.filter(|line| line_regex.is_match(line))
.filter_map(|line| {
regex.captures_iter(&line).next().map(|capture| {
(
capture
.get(1)
.expect("couldn't unwrap capture")
.as_str()
.to_owned(),
capture
.get(2)
.expect("couldn't unwrap capture")
.as_str()
.to_owned(),
)
})
})
.collect()
}
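// For illustration (hypothetical file contents), a test file beginning with
//
//     // rustfmt-max_width: 120
//     // rustfmt-config: small_tabs.toml
//     fn main() {}
//
// would yield {"max_width": "120", "config": "small_tabs.toml"} here.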
// Compares the formatted output for each file against its expected target,
// collecting diffs for every mismatch.
// TODO: needs a better name, more explanation.
fn handle_result(
result: HashMap<PathBuf, String>,
target: Option<&str>,
) -> Result<(), IdempotentCheckError> {
let mut failures = HashMap::new();
for (file_name, fmt_text) in result {
// If file is in tests/source, compare to file with same name in tests/target.
let target = get_target(&file_name, target);
let open_error = format!("couldn't open target {:?}", target);
let mut f = fs::File::open(&target).expect(&open_error);
let mut text = String::new();
let read_error = format!("failed reading target {:?}", target);
f.read_to_string(&mut text).expect(&read_error);
// Ignore LF and CRLF difference for Windows.
if !string_eq_ignore_newline_repr(&fmt_text, &text) {
let diff = make_diff(&text, &fmt_text, DIFF_CONTEXT_SIZE);
assert!(
!diff.is_empty(),
"Empty diff? Maybe due to a missing a newline at the end of a file?"
);
failures.insert(file_name, diff);
}
}
if failures.is_empty() {
Ok(())
} else {
Err(IdempotentCheckError::Mismatch(failures))
}
}
// Maps source file paths to their target paths.
fn get_target(file_name: &Path, target: Option<&str>) -> PathBuf {
if let Some(n) = file_name
.components()
.position(|c| c.as_os_str() == "source")
{
let mut target_file_name = PathBuf::new();
for (i, c) in file_name.components().enumerate() {
if i == n {
target_file_name.push("target");
} else {
target_file_name.push(c.as_os_str());
}
}
if let Some(replace_name) = target {
target_file_name.with_file_name(replace_name)
} else {
target_file_name
}
} else {
// This is either an idempotence check or a self check.
file_name.to_owned()
}
}
#[test]
fn rustfmt_diff_make_diff_tests() {
init_log();
let diff = make_diff("a\nb\nc\nd", "a\ne\nc\nd", 3);
assert_eq!(
diff,
vec![Mismatch {
line_number: 1,
line_number_orig: 1,
lines: vec![
DiffLine::Context("a".into()),
DiffLine::Resulting("b".into()),
DiffLine::Expected("e".into()),
DiffLine::Context("c".into()),
DiffLine::Context("d".into()),
],
}]
);
}
#[test]
fn rustfmt_diff_no_diff_test() {
init_log();
let diff = make_diff("a\nb\nc\nd", "a\nb\nc\nd", 3);
assert_eq!(diff, vec![]);
}
// Compare strings without distinguishing between CRLF and LF
fn string_eq_ignore_newline_repr(left: &str, right: &str) -> bool {
let left = CharsIgnoreNewlineRepr(left.chars().peekable());
let right = CharsIgnoreNewlineRepr(right.chars().peekable());
left.eq(right)
}
struct CharsIgnoreNewlineRepr<'a>(Peekable<Chars<'a>>);
impl<'a> Iterator for CharsIgnoreNewlineRepr<'a> {
type Item = char;
fn next(&mut self) -> Option<char> {
self.0.next().map(|c| {
if c == '\r' {
if *self.0.peek().unwrap_or(&'\0') == '\n' {
self.0.next();
'\n'
} else {
'\r'
}
} else {
c
}
})
}
}
#[test]
fn string_eq_ignore_newline_repr_test() {
init_log();
assert!(string_eq_ignore_newline_repr("", ""));
assert!(!string_eq_ignore_newline_repr("", "abc"));
assert!(!string_eq_ignore_newline_repr("abc", ""));
assert!(string_eq_ignore_newline_repr("a\nb\nc\rd", "a\nb\r\nc\rd"));
assert!(string_eq_ignore_newline_repr("a\r\n\r\n\r\nb", "a\n\n\nb"));
assert!(!string_eq_ignore_newline_repr("a\r\nbcd", "a\nbcdefghijk"));
}
struct TempFile {
path: PathBuf,
}
fn make_temp_file(file_name: &'static str) -> TempFile {
use std::env::var;
use std::fs::File;
// Used in the Rust build system.
let target_dir = var("RUSTFMT_TEST_DIR").unwrap_or_else(|_| ".".to_owned());
let path = Path::new(&target_dir).join(file_name);
let mut file = File::create(&path).expect("couldn't create temp file");
let content = "fn main() {}\n";
file.write_all(content.as_bytes())
.expect("couldn't write temp file");
TempFile { path }
}
impl Drop for TempFile {
fn drop(&mut self) {
use std::fs::remove_file;
remove_file(&self.path).expect("couldn't delete temp file");
}
}
fn rustfmt() -> PathBuf {
let mut me = env::current_exe().expect("failed to get current executable");
// Chop off the test name.
me.pop();
// Chop off `deps`.
me.pop();
// If we run `cargo test --release`, we might only have a release build.
if cfg!(release) {
// `../release/`
me.pop();
me.push("release");
}
me.push("rustfmt");
assert!(
me.is_file() || me.with_extension("exe").is_file(),
"{}",
if cfg!(release) {
"no rustfmt bin, try running `cargo build --release` before testing"
} else {
"no rustfmt bin, try running `cargo build` before testing"
}
);
me
}
#[test]
fn verify_check_works() {
init_log();
let temp_file = make_temp_file("temp_check.rs");
Command::new(rustfmt().to_str().unwrap())
.arg("--check")
.arg(temp_file.path.to_str().unwrap())
.status()
.expect("run with check option failed");
}
#[test]
fn verify_check_works_with_stdin() {
init_log();
let mut child = Command::new(rustfmt().to_str().unwrap())
.arg("--check")
.stdin(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.expect("run with check option failed");
{
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
stdin
.write_all("fn main() {}\n".as_bytes())
.expect("Failed to write to rustfmt --check");
}
let output = child
.wait_with_output()
.expect("Failed to wait on rustfmt child");
assert!(output.status.success());
}
#[test]
fn verify_check_l_works_with_stdin() {
init_log();
let mut child = Command::new(rustfmt().to_str().unwrap())
.arg("--check")
.arg("-l")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.expect("run with check option failed");
{
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
stdin
.write_all("fn main()\n{}\n".as_bytes())
.expect("Failed to write to rustfmt --check");
}
let output = child
.wait_with_output()
.expect("Failed to wait on rustfmt child");
assert!(output.status.success());
assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), "<stdin>\n");
} | filebuf |
animation.py | # Native
import os
from math import *
from cmath import *
from time import time
# Installed
import cv2 as cv
# Custom Modules
from assets.utils import float_range
from JuliaSet.core import JuliaSet
class Animation:
def __init__(self, range_from: float or int, range_to: float or int, range_step: float, frames_folder: str = "out/JuliaSet/tmp", vid_name: str = "Animation.mp4", fps: int = 60) -> None:
os.mkdir("out/JuliaSet/tmp") if not os.path.exists("out/JuliaSet/tmp") else None
[os.remove(f"out/JuliaSet/tmp/{x}") for x in os.listdir(frames_folder)] if frames_folder == "out/JuliaSet/tmp" else None
self.rngf = range_from
self.rngt = range_to
self.rngs = range_step
self.framesfd = frames_folder
self.fps = fps
self.vname = vid_name if vid_name.endswith(".mp4") else f"{vid_name}.mp4"
os.mkdir("out/JuliaSet/Video") if not os.path.exists("out/JuliaSet/Video") else None
self.save_path = f"out/JuliaSet/Video/{self.vname}"
(print("This name is already taken... Please find a new one."), quit()) if self.vname in os.listdir("out/JuliaSet/Video") else None
def animation_from_images(self) -> tuple:
print("Making frames into a video...")
s = time()
images = [img for img in os.listdir(self.framesfd) if img.endswith(".png")]
frame = cv.imread(os.path.join(self.framesfd, images[0]))
height, width, layers = frame.shape
video = cv.VideoWriter(self.save_path, 0, self.fps, (width,height))
[(video.write(cv.imread(os.path.join(self.framesfd, img))), print(f"Current IMG: '{img}'")) for img in images]
| video.release()
print(f"Video finished in {round(time() - s, 3)} sec. Cleaning up tmp files...")
[os.remove(f"out/JuliaSet/tmp/{x}") for x in os.listdir(self.framesfd)]
print(f"Cleaning finished. Video saved here: {self.save_path}")
return self.save_path, round(time() - s, 3)
def create_animation(self, mult: int, maxit: int, cmap: str, c_func: str) -> tuple:
inv_step = int(f"1{'0' * (len(str(self.rngs)) - 2)}")
rng = list(float_range(self.rngf, self.rngt, self.rngs))
for x in rng:
c = eval(c_func.replace("X", f"{x}"))
fn = f"{'0' * (len(str(int(rng[-1]*inv_step))) - len(str(int(x*inv_step))))}{int(x*inv_step)}.png"
jset = JuliaSet(c, mult, maxit, auto_name=False, for_anim=True, silent=True)
jset.save(fn=fn, cmaps=[cmap,])
return self.animation_from_images() | cv.destroyAllWindows()
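# Usage sketch (illustrative parameter values; `cmap` is assumed to be a valid
# matplotlib colormap name and `c_func` uses "X" as the sweep placeholder):
#
#     anim = Animation(range_from=-1.0, range_to=1.0, range_step=0.01,
#                      vid_name="julia_sweep", fps=30)
#     path, secs = anim.create_animation(mult=2, maxit=100, cmap="twilight",
#                                        c_func="complex(X, 0.156)")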
|
telemetry.py | """Utility to log usage statistics.
----
Copyright 2019 Data Driven Empathy LLC
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import hashlib
import multiprocessing
import random
import time
INSERT_TEMPLATE = '''INSERT INTO actions (ipAddressHash, userAgent, page, query, timestampStr) VALUES (?, ?, ?, ?, ?)'''
def run_worker_logic(task_queue, db_connection_generator, min_wait, max_wait, use_question_mark):
"""Run worker process logic.
Args:
task_queue: Queue to control records to be written.
db_connection_generator: Function taking no arguments and returning DB API v2 compliant
connection interface through which the record should be created.
min_wait: Minimum millisecond delay before checking for new tasks.
max_wait: Maximum millisecond delay before checking for new tasks.
use_question_mark: Flag indicating if question marks should be used in insert template.
If true, uses ?. If false, uses %s.
"""
if use_question_mark:
insert_sql = INSERT_TEMPLATE
else:
insert_sql = INSERT_TEMPLATE.replace('?', '%s')
def execute_task(task):
"""Inner closure that executes a single task.
Args:
task: Dictionary describing the task to execute.
"""
db_connection = db_connection_generator()
cursor = db_connection.cursor()
ip_address = task['ipAddress']
user_agent = task['userAgent']
page = task['page']
query = task['query']
timestamp_str = task['timestampStr']
hashable_str = ip_address + user_agent
ip_address_hash = hashlib.sha224(hashable_str.encode('utf-8')).hexdigest()
cursor.execute(
insert_sql,
(ip_address_hash, user_agent, page, query, timestamp_str)
)
db_connection.commit()
while True:
task = task_queue.get()
if task is None:
time.sleep(random.randint(min_wait, max_wait) / 1000)
else:
if task['end']:
return
execute_task(task)
class UsageReporter:
"""Utility which runs a reporting subprocess for user actions."""
def __init__(self, db_connection_generator, min_wait=1000, max_wait=5000,
use_question_mark=False):
"""Create a new reporter.
Args:
db_connection_generator: Function taking no arguments and returning DB API v2 compliant
connection interface through which the record should be created.
min_wait: Minimum millisecond delay before processing new tasks.
max_wait: Maximum millisecond delay before processing new tasks.
use_question_mark: Flag indicating if question marks should be used in insert template.
If true, uses ?. If false, uses %s.
"""
self.__db_connection = db_connection_generator()
task_queue = multiprocessing.Queue()
self.__queue = task_queue
self.__inner_process = multiprocessing.Process(
target=run_worker_logic,
args=(task_queue, db_connection_generator, min_wait, max_wait, use_question_mark)
)
self.__inner_process.start()
def report_usage(self, ip_address, user_agent, page, query):
"""Asynchoronously report a user action within the application.
Args:
ip_address: String IP address to be hashed for creating this record.
user_agent: String user agent which will be used as salt for the ip_address hash.
page: String page name.
query: String query or empty if no query.
"""
self.__queue.put({
'ipAddress': ip_address,
'userAgent': user_agent,
'page': page,
'query': query,
'timestampStr': datetime.datetime.utcnow().isoformat(),
'end': False
})
def terminate(self):
| """Terminate the inner subprocess"""
self.__queue.put({'end': True})
self.__inner_process.join()
self.__queue.close()
self.__queue.join_thread() |
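# Usage sketch (illustrative; assumes a sqlite3 database whose `actions` table
# already exists, hence use_question_mark=True for `?` placeholders):
#
#     import sqlite3
#     reporter = UsageReporter(lambda: sqlite3.connect('usage.db'),
#                              use_question_mark=True)
#     reporter.report_usage('203.0.113.7', 'Mozilla/5.0', 'search', 'q=news')
#     reporter.terminate()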
|
cmdrude.py | import numpy as np
from newdust import constants as c
__all__ = ['CmDrude']
RHO_DRUDE = 3.0 # g cm^-3
LAM_MAX = c.hc / 0.01 # maximal wavelength that we will allow for RG-Drude
class CmDrude(object):
"""
| **ATTRIBUTES**
| cmtype : 'Drude'
| rho : grain density [g cm^-3]
| citation : A string containing citation to original work
|
| *functions*
| rp(lam, unit='kev') : Returns real part (unit='kev'|'angs')
| ip(lam, unit='kev') : Returns imaginary part (always 0.0)
| cm(lam, unit='kev') : Complex index of refraction of dtype='complex'
| plot(lam, unit='kev') : Plots Re(m-1)
"""
def __init__(self, rho=RHO_DRUDE): # Returns a CM using the Drude approximation
self.cmtype = 'Drude'
self.rho = rho
self.citation = "Using the Drude approximation.\nBohren, C. F. & Huffman, D. R., 1983, Absorption and Scattering of Light by Small Particles (New York: Wiley)"
def rp(self, lam, unit='kev'):
assert unit in c.ALLOWED_LAM_UNITS
lam_cm = c._lam_cm(lam, unit)
mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)
return mm1 + 1.0
'''# Returns 1 if the wavelength supplied is too low energy (i.e. inappropriate for applying Drude)
mm1 = np.zeros(np.size(lam_cm))
if (np.size(lam_cm) == 1):
if lam_cm >= LAM_MAX:
pass
else:
mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)
else:
ii = (lam_cm <= LAM_MAX)
mm1[ii] = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm[ii], 2)
return mm1 + 1.0'''
def ip(self, lam, unit='kev'):
if np.size(lam) > 1:
return np.zeros(np.size(lam))
else:
return 0.0
def cm(self, lam, unit='kev'): | return self.rp(lam, unit=unit) + 0j
def plot(self, ax, lam, unit='kev', **kwargs):
assert unit in c.ALLOWED_LAM_UNITS
rp = self.rp(lam, unit=unit)
ax.plot(lam, rp-1.0, **kwargs)
ax.set_ylabel("m-1")
if unit == 'kev':
ax.set_xlabel("Energy (keV)")
if unit == 'angs':
ax.set_xlabel("Wavelength (Angstroms)") | |
index.ts | import { BaseModule } from '..';
import { ETHStateProvider } from './api/csp';
import { EthRoutes } from './api/eth-routes';
import { EthVerificationPeer } from './p2p/EthVerificationPeer';
import { EthP2pWorker } from './p2p/p2p';
export default class | extends BaseModule {
constructor(services: BaseModule['bitcoreServices']) {
super(services);
services.P2P.register('ETH', EthP2pWorker);
services.CSP.registerService('ETH', new ETHStateProvider());
services.Api.app.use(EthRoutes);
services.Verification.register('ETH', EthVerificationPeer);
}
}
| ETHModule |
lachnobacteriumbovis.py | """
This file offers the methods to automatically retrieve the graph Lachnobacterium bovis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:51:00.619514
The undirected graph Lachnobacterium bovis has 2717 nodes and 257981 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06992 and has 23 connected components, where the component with most
nodes has 2631 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 163, the mean node degree is 189.90, and
the node degree mode is 8. The top 5 most central nodes are 140626.JHWB01000013_gene537
(degree 1145), 140626.JHWB01000019_gene960 (degree 1118), 140626.JHWB01000009_gene1319
(degree 1020), 140626.JHWB01000011_gene81 (degree 979) and 140626.JHWB01000022_gene2049
(degree 963).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LachnobacteriumBovis
# Then load the graph
graph = LachnobacteriumBovis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def | (
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Lachnobacterium bovis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Lachnobacterium bovis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:51:00.619514
The undirected graph Lachnobacterium bovis has 2717 nodes and 257981 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06992 and has 23 connected components, where the component with most
nodes has 2631 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 163, the mean node degree is 189.90, and
the node degree mode is 8. The top 5 most central nodes are 140626.JHWB01000013_gene537
(degree 1145), 140626.JHWB01000019_gene960 (degree 1118), 140626.JHWB01000009_gene1319
(degree 1020), 140626.JHWB01000011_gene81 (degree 979) and 140626.JHWB01000022_gene2049
(degree 963).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LachnobacteriumBovis
# Then load the graph
graph = LachnobacteriumBovis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="LachnobacteriumBovis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| LachnobacteriumBovis |
redis.js | const redis = require('redis') | // Create the client
const redisClient = redis.createClient(REDIS_CONF.port, REDIS_CONF.host)
redisClient.on('error', err => {
console.error(err)
})
module.exports = redisClient | const { REDIS_CONF } = require('../conf/db.js')
|
mod.rs | use arrow2::{array::FixedSizeBinaryArray, bitmap::Bitmap, buffer::Buffer, datatypes::DataType};
mod mutable;
#[test]
fn basics() {
let array = FixedSizeBinaryArray::from_data(
DataType::FixedSizeBinary(2),
Buffer::from([1, 2, 3, 4, 5, 6]),
Some(Bitmap::from([true, false, true])),
); | assert_eq!(array.value(0), [1, 2]);
assert_eq!(array.value(2), [5, 6]);
let array = array.slice(1, 2);
assert_eq!(array.value(1), [5, 6]);
}
#[test]
fn with_validity() {
let values = Buffer::from([1, 2, 3, 4, 5, 6]);
let a = FixedSizeBinaryArray::from_data(DataType::FixedSizeBinary(2), values, None);
let a = a.with_validity(Some(Bitmap::from([true, false, true])));
assert!(a.validity().is_some());
}
#[test]
fn display() {
let values = Buffer::from([1, 2, 3, 4, 5, 6]);
let a = FixedSizeBinaryArray::from_data(
DataType::FixedSizeBinary(2),
values,
Some(Bitmap::from([true, false, true])),
);
assert_eq!(format!("{}", a), "FixedSizeBinaryArray[[1, 2], , [5, 6]]");
}
#[test]
fn empty() {
let array = FixedSizeBinaryArray::new_empty(DataType::FixedSizeBinary(2));
assert_eq!(array.values().len(), 0);
assert_eq!(array.validity(), None);
}
#[test]
fn from_iter() {
let iter = std::iter::repeat(vec![1u8, 2]).take(2).map(Some);
let a = FixedSizeBinaryArray::from_iter(iter, 2);
assert_eq!(a.len(), 2);
} | assert_eq!(array.size(), 2);
assert_eq!(array.len(), 3);
assert_eq!(array.validity(), Some(&Bitmap::from([true, false, true])));
|
while_let_on_iterator.rs | use super::WHILE_LET_ON_ITERATOR;
use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::higher;
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::{
get_enclosing_loop_or_closure, is_refutable, is_trait_method, match_def_path, paths, visitors::is_res_used,
};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_expr, ErasedMap, NestedVisitorMap, Visitor};
use rustc_hir::{def::Res, Expr, ExprKind, HirId, Local, Mutability, PatKind, QPath, UnOp};
use rustc_lint::LateContext;
use rustc_span::{symbol::sym, Span, Symbol};
pub(super) fn check(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
let (scrutinee_expr, iter_expr, some_pat, loop_expr) = if_chain! {
if let Some(higher::WhileLet { if_then, let_pat, let_expr }) = higher::WhileLet::hir(expr);
// check for `Some(..)` pattern
if let PatKind::TupleStruct(QPath::Resolved(None, pat_path), some_pat, _) = let_pat.kind;
if let Res::Def(_, pat_did) = pat_path.res;
if match_def_path(cx, pat_did, &paths::OPTION_SOME);
// check for call to `Iterator::next`
if let ExprKind::MethodCall(method_name, _, [iter_expr], _) = let_expr.kind;
if method_name.ident.name == sym::next;
if is_trait_method(cx, let_expr, sym::Iterator);
if let Some(iter_expr_struct) = try_parse_iter_expr(cx, iter_expr);
// get the loop containing the match expression
if !uses_iter(cx, &iter_expr_struct, if_then);
then {
(let_expr, iter_expr_struct, some_pat, expr)
} else {
return;
}
};
let mut applicability = Applicability::MachineApplicable;
let loop_var = if let Some(some_pat) = some_pat.first() {
if is_refutable(cx, some_pat) {
// Refutable patterns don't work with for loops.
return;
}
snippet_with_applicability(cx, some_pat.span, "..", &mut applicability)
} else {
"_".into()
};
// If the iterator is a field or the iterator is accessed after the loop is complete it needs to be
// borrowed mutably. TODO: If the struct can be partially moved from and the struct isn't used
// afterwards a mutable borrow of a field isn't necessary.
let ref_mut = if !iter_expr.fields.is_empty() || needs_mutable_borrow(cx, &iter_expr, loop_expr) {
if cx.typeck_results().node_type(iter_expr.hir_id).ref_mutability() == Some(Mutability::Mut) {
// Reborrow for mutable references. It may not be possible to get a mutable reference here.
"&mut *"
} else {
"&mut "
}
} else {
""
};
let iterator = snippet_with_applicability(cx, iter_expr.span, "_", &mut applicability);
span_lint_and_sugg(
cx,
WHILE_LET_ON_ITERATOR,
expr.span.with_hi(scrutinee_expr.span.hi()),
"this loop could be written as a `for` loop",
"try",
format!("for {} in {}{}", loop_var, ref_mut, iterator),
applicability,
);
}
#[derive(Debug)]
struct IterExpr {
/// The span of the whole expression, not just the path and fields stored here.
span: Span,
/// The HIR id of the whole expression, not just the path and fields stored here.
hir_id: HirId,
/// The fields used, in order of child to parent.
fields: Vec<Symbol>,
/// The path being used.
path: Res,
}
/// Parses any expression to find out which field of which variable is used. Will return `None` if
/// the expression might have side effects.
fn try_parse_iter_expr(cx: &LateContext<'_>, mut e: &Expr<'_>) -> Option<IterExpr> {
let span = e.span;
let hir_id = e.hir_id;
let mut fields = Vec::new();
loop {
match e.kind {
ExprKind::Path(ref path) => {
break Some(IterExpr {
span,
hir_id,
fields,
path: cx.qpath_res(path, e.hir_id),
});
},
ExprKind::Field(base, name) => {
fields.push(name.name);
e = base;
},
// Dereferencing a pointer has no side effects and doesn't affect which field is being used.
ExprKind::Unary(UnOp::Deref, base) if cx.typeck_results().expr_ty(base).is_ref() => e = base,
// Shouldn't have side effects, but there's no way to trace which field is used. So forget which fields have
// already been seen.
ExprKind::Index(base, idx) if !idx.can_have_side_effects() => {
fields.clear();
e = base;
},
ExprKind::Unary(UnOp::Deref, base) => {
fields.clear();
e = base;
},
// No effect and doesn't affect which field is being used.
ExprKind::DropTemps(base) | ExprKind::AddrOf(_, _, base) | ExprKind::Type(base, _) => e = base,
_ => break None,
}
}
}
fn is_expr_same_field(cx: &LateContext<'_>, mut e: &Expr<'_>, mut fields: &[Symbol], path_res: Res) -> bool {
loop {
match (&e.kind, fields) {
(&ExprKind::Field(base, name), [head_field, tail_fields @ ..]) if name.name == *head_field => | ,
(ExprKind::Path(path), []) => {
break cx.qpath_res(path, e.hir_id) == path_res;
},
(&(ExprKind::DropTemps(base) | ExprKind::AddrOf(_, _, base) | ExprKind::Type(base, _)), _) => e = base,
_ => break false,
}
}
}
/// Checks if the given expression is the same field as, is a child of, or is the parent of the
/// given field. Used to check if the expression can be used while the given field is borrowed
/// mutably. e.g. if checking for `x.y`, then `x.y`, `x.y.z`, and `x` will all return true, but
/// `x.z`, and `y` will return false.
fn is_expr_same_child_or_parent_field(cx: &LateContext<'_>, expr: &Expr<'_>, fields: &[Symbol], path_res: Res) -> bool {
match expr.kind {
ExprKind::Field(base, name) => {
if let Some((head_field, tail_fields)) = fields.split_first() {
if name.name == *head_field && is_expr_same_field(cx, base, tail_fields, path_res) {
return true;
}
// Check if the expression is a parent field
let mut fields_iter = tail_fields.iter();
while let Some(field) = fields_iter.next() {
if *field == name.name && is_expr_same_field(cx, base, fields_iter.as_slice(), path_res) {
return true;
}
}
}
// Check if the expression is a child field.
let mut e = base;
loop {
match e.kind {
ExprKind::Field(..) if is_expr_same_field(cx, e, fields, path_res) => break true,
ExprKind::Field(base, _) | ExprKind::DropTemps(base) | ExprKind::Type(base, _) => e = base,
ExprKind::Path(ref path) if fields.is_empty() => {
break cx.qpath_res(path, e.hir_id) == path_res;
},
_ => break false,
}
}
},
// If the path matches, this is either an exact match, or the expression is a parent of the field.
ExprKind::Path(ref path) => cx.qpath_res(path, expr.hir_id) == path_res,
ExprKind::DropTemps(base) | ExprKind::Type(base, _) | ExprKind::AddrOf(_, _, base) => {
is_expr_same_child_or_parent_field(cx, base, fields, path_res)
},
_ => false,
}
}
/// Strips off all field and path expressions. This will return true if a field or path has been
/// skipped. Used to skip them after failing to check for equality.
fn skip_fields_and_path(expr: &'tcx Expr<'_>) -> (Option<&'tcx Expr<'tcx>>, bool) {
let mut e = expr;
let e = loop {
match e.kind {
ExprKind::Field(base, _) | ExprKind::DropTemps(base) | ExprKind::Type(base, _) => e = base,
ExprKind::Path(_) => return (None, true),
_ => break e,
}
};
(Some(e), e.hir_id != expr.hir_id)
}
/// Checks if the given expression uses the iterator.
fn uses_iter(cx: &LateContext<'tcx>, iter_expr: &IterExpr, container: &'tcx Expr<'_>) -> bool {
struct V<'a, 'b, 'tcx> {
cx: &'a LateContext<'tcx>,
iter_expr: &'b IterExpr,
uses_iter: bool,
}
impl Visitor<'tcx> for V<'_, '_, 'tcx> {
type Map = ErasedMap<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, e: &'tcx Expr<'_>) {
if self.uses_iter {
// return
} else if is_expr_same_child_or_parent_field(self.cx, e, &self.iter_expr.fields, self.iter_expr.path) {
self.uses_iter = true;
} else if let (e, true) = skip_fields_and_path(e) {
if let Some(e) = e {
self.visit_expr(e);
}
} else if let ExprKind::Closure(_, _, id, _, _) = e.kind {
if is_res_used(self.cx, self.iter_expr.path, id) {
self.uses_iter = true;
}
} else {
walk_expr(self, e);
}
}
}
let mut v = V {
cx,
iter_expr,
uses_iter: false,
};
v.visit_expr(container);
v.uses_iter
}
#[allow(clippy::too_many_lines)]
fn needs_mutable_borrow(cx: &LateContext<'tcx>, iter_expr: &IterExpr, loop_expr: &'tcx Expr<'_>) -> bool {
struct AfterLoopVisitor<'a, 'b, 'tcx> {
cx: &'a LateContext<'tcx>,
iter_expr: &'b IterExpr,
loop_id: HirId,
after_loop: bool,
used_iter: bool,
}
impl Visitor<'tcx> for AfterLoopVisitor<'_, '_, 'tcx> {
type Map = ErasedMap<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, e: &'tcx Expr<'_>) {
if self.used_iter {
return;
}
if self.after_loop {
if is_expr_same_child_or_parent_field(self.cx, e, &self.iter_expr.fields, self.iter_expr.path) {
self.used_iter = true;
} else if let (e, true) = skip_fields_and_path(e) {
if let Some(e) = e {
self.visit_expr(e);
}
} else if let ExprKind::Closure(_, _, id, _, _) = e.kind {
self.used_iter = is_res_used(self.cx, self.iter_expr.path, id);
} else {
walk_expr(self, e);
}
} else if self.loop_id == e.hir_id {
self.after_loop = true;
} else {
walk_expr(self, e);
}
}
}
struct NestedLoopVisitor<'a, 'b, 'tcx> {
cx: &'a LateContext<'tcx>,
iter_expr: &'b IterExpr,
local_id: HirId,
loop_id: HirId,
after_loop: bool,
found_local: bool,
used_after: bool,
}
impl Visitor<'tcx> for NestedLoopVisitor<'a, 'b, 'tcx> {
type Map = ErasedMap<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_local(&mut self, l: &'tcx Local<'_>) {
if !self.after_loop {
l.pat.each_binding_or_first(&mut |_, id, _, _| {
if id == self.local_id {
self.found_local = true;
}
});
}
if let Some(e) = l.init {
self.visit_expr(e);
}
}
fn visit_expr(&mut self, e: &'tcx Expr<'_>) {
if self.used_after {
return;
}
if self.after_loop {
if is_expr_same_child_or_parent_field(self.cx, e, &self.iter_expr.fields, self.iter_expr.path) {
self.used_after = true;
} else if let (e, true) = skip_fields_and_path(e) {
if let Some(e) = e {
self.visit_expr(e);
}
} else if let ExprKind::Closure(_, _, id, _, _) = e.kind {
self.used_after = is_res_used(self.cx, self.iter_expr.path, id);
} else {
walk_expr(self, e);
}
} else if e.hir_id == self.loop_id {
self.after_loop = true;
} else {
walk_expr(self, e);
}
}
}
if let Some(e) = get_enclosing_loop_or_closure(cx.tcx, loop_expr) {
// The iterator expression will be used on the next iteration (for loops), or on the next call (for
// closures) unless it is declared within the enclosing expression. TODO: Check for closures
// used where an `FnOnce` type is expected.
let local_id = match iter_expr.path {
Res::Local(id) => id,
_ => return true,
};
let mut v = NestedLoopVisitor {
cx,
iter_expr,
local_id,
loop_id: loop_expr.hir_id,
after_loop: false,
found_local: false,
used_after: false,
};
v.visit_expr(e);
v.used_after || !v.found_local
} else {
let mut v = AfterLoopVisitor {
cx,
iter_expr,
loop_id: loop_expr.hir_id,
after_loop: false,
used_iter: false,
};
v.visit_expr(&cx.tcx.hir().body(cx.enclosing_body.unwrap()).value);
v.used_iter
}
}
| {
e = base;
fields = tail_fields;
} |
sublist.py | def check_lists(l1, l2):
def | (l1, l2):
| contains |
db.rs | extern crate env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate edge;
extern crate rusqlite;
use edge::{json, Edge, Router, Request, Response, Result, Status};
use edge::json::value::ToJson;
use rusqlite::Connection;
use rusqlite::Error;
use std::collections::BTreeMap;
use std::path::Path;
use std::{fs, result};
struct User {
id: i32,
name: String
}
#[derive(Default)]
struct Db;
impl Db {
fn home(&mut self, req: &Request, _res: &mut Response) -> Result {
let mut user_id = req.param("user_id").unwrap().to_string();
user_id = user_id.trim().to_string(); // trim() returns a borrowed slice; reassign so the trimmed value is kept
if user_id.is_empty() {
user_id = "1".to_string();
}
let connection = try!(Connection::open("db/demo.db").map_err(|e| (Status::InternalServerError, e.to_string())));
let user = try!(connection.query_row("SELECT * FROM users WHERE user_id = ?", &[&user_id], |row|
User {
id: row.get(0),
name: row.get(1)
}
).map_err(|e| (Status::InternalServerError, match e {
Error::QueryReturnedNoRows => format!("no user known with id {}", user_id),
_ => format!("error when requesting user: {}", e)
})));
let mut data = BTreeMap::new();
data.insert("id", json::to_value(&user.id));
data.insert("name", json::to_value(&user.name));
ok!("db", data.to_json())
}
}
fn check_db() -> result::Result<(), Error> {
let db = Path::new("db");
if !db.exists() {
fs::create_dir(db).unwrap();
}
let connection = try!(Connection::open("db/demo.db"));
try!(connection.execute_batch("CREATE TABLE IF NOT EXISTS users(user_id INTEGER PRIMARY KEY, name TEXT);"));
let num_users: i32 = try!(connection.query_row("SELECT COUNT(*) FROM users", &[], |row| row.get(0)));
if num_users == 0 {
assert!(try!(connection.execute("INSERT INTO USERS VALUES(1, 'John Doe')", &[])) == 1);
}
Ok(())
}
fn main() {
env_logger::init().unwrap();
check_db().unwrap();
let mut edge = Edge::new("0.0.0.0:3000");
let mut router = Router::new();
router.get("/:user_id", Db::home); | edge.mount("/", router);
edge.register_template("db");
edge.start().unwrap();
} |
|
db_log_app.go | package models
import (
// "github.com/rakutentech/dotconf-assist/src/backend/settings"
)
func SaveApp(app App) error {
// user, err := GetUser(userName)
// if err != nil {
// return err
// }
// app.UserID = user.ID
// app.SplunkHostID = -1
app.DeployStatus = -1 // not configured
res := mysqldb.Save(&app)
if res.Error != nil {
return res.Error
}
return BindAppWithInputs(&app)
}
func BindAppWithInputs(app *App) error {
var inputIDs []int
if !app.UnixApp {
for _, ids := range app.FileInputIDs {
inputIDs = append(inputIDs, ids.ID)
}
UpdateInputFields(inputIDs, []string{"app_id"}, []interface{}{app.ID}, "file_inputs")
inputIDs = inputIDs[:0]
for _, ids := range app.ScriptInputIDs {
inputIDs = append(inputIDs, ids.ID)
}
UpdateInputFields(inputIDs, []string{"app_id"}, []interface{}{app.ID}, "script_inputs")
} else {
for _, ids := range app.UnixAppInputIDs {
inputIDs = append(inputIDs, ids.ID)
}
UpdateInputFields(inputIDs, []string{"app_id"}, []interface{}{app.ID}, "unix_app_inputs")
}
return nil
}
func unbindAppFromInputs(unixApp bool, appID int) error {
if !unixApp {
UpdateInputsFieldsByAppID(appID, []string{"app_id"}, []interface{}{-1}, "file_inputs")
UpdateInputsFieldsByAppID(appID, []string{"app_id"}, []interface{}{-1}, "script_inputs")
} else {
UpdateInputsFieldsByAppID(appID, []string{"app_id"}, []interface{}{-1}, "unix_app_inputs")
}
return nil
}
func GetApps(envUser []string, getInputs bool, getServerClasses bool, getForwarders bool, isAdmin bool) ([]App, error) {
var apps []App
// user, err := GetUser(envUser[1])
// if err != nil {
// return nil, err
// }
// serverClass := map[int][]ServerClass{}
| if res.Error != nil { // res.Error is nil even if no record found
return nil, res.Error
}
} else {
// res := mysqldb.Where("env = ? AND user_id = ?", envUser[0], user.ID).Order("id").Find(&apps)
res := mysqldb.Where("env = ? AND user_name = ?", envUser[0], envUser[1]).Order("id").Find(&apps)
if res.Error != nil { // res.Error is nil even if no record found
return nil, res.Error
}
}
if getInputs {
for i, app := range apps {
if !app.UnixApp {
inputs, err := GetInputsByAppID(envUser, app.ID, "file", isAdmin)
if err != nil {
return nil, err
}
apps[i].FileInputs = inputs.([]FileInput)
inputs, err = GetInputsByAppID(envUser, app.ID, "script", isAdmin)
if err != nil {
return nil, err
}
apps[i].ScriptInputs = inputs.([]ScriptInput)
} else {
inputs, err := GetInputsByAppID(envUser, app.ID, "unixapp", isAdmin)
if err != nil {
return nil, err
}
apps[i].UnixAppInputs = inputs.([]UnixAppInput)
}
}
}
if getServerClasses {
for i, app := range apps {
scIDs, err := GetServerClassIDsByAppID(app.ID, getForwarders)
if err != nil {
return nil, err
}
apps[i].ServerClass, err = GetServerClassesByIDs(scIDs, true)
if err != nil {
return nil, err
}
}
}
return apps, nil
}
func GetApp(envUser []string, appID int, getInputs bool, isAdmin bool) (App, error) {
var app App
res := mysqldb.Where("id = ?", appID).Find(&app)
if res.Error != nil { //record not found
return App{}, res.Error
}
if getInputs {
if !app.UnixApp { // file/script inputs apply to non-unix apps, matching GetApps above
inputs, err := GetInputsByAppID(envUser, app.ID, "file", isAdmin)
if err != nil {
return app, err
}
app.FileInputs = inputs.([]FileInput)
inputs, err = GetInputsByAppID(envUser, app.ID, "script", isAdmin)
if err != nil {
return app, err
}
app.ScriptInputs = inputs.([]ScriptInput)
} else {
inputs, err := GetInputsByAppID(envUser, app.ID, "unixapp", isAdmin)
if err != nil {
return app, err
}
app.UnixAppInputs = inputs.([]UnixAppInput)
}
}
return app, nil
}
func GetAppName(appID int) (string, error) {
var app App
res := mysqldb.Select("name").Where("id = ?", appID).Find(&app)
if res.Error != nil { //record not found
return "", res.Error
}
return app.Name, nil
}
func UpdateApp(appID int, newApp App) error {
var app App
res := mysqldb.Where("id = ?", appID).Find(&app)
if res.Error != nil { //record not found
return res.Error
}
oldAppName := app.Name
app.Name = newApp.Name
res = mysqldb.Save(&app)
if res.Error != nil {
return res.Error
}
if oldAppName == newApp.Name { //name not changed, update list
newApp.ID = app.ID
unbindAppFromInputs(app.UnixApp, appID)
BindAppWithInputs(&newApp)
}
return nil
}
func UpdateAppFieldsByID(appID int, fields []string, values []interface{}, tableName string) error { // called when the app status changes
keyValue := map[string]interface{}{}
for i := range fields {
keyValue[fields[i]] = values[i]
}
res := mysqldb.Table(tableName).Where("id = ?", appID).Updates(keyValue)
if res.Error != nil {
return res.Error
}
return nil
}
func DeleteApp(appID int) error {
app, err := GetApp([]string{"", ""}, appID, false, false)
if err != nil {
return err
}
unbindAppFromInputs(app.UnixApp, appID)
return mysqldb.Delete(&app).Error
} | if isAdmin {
res := mysqldb.Where("env = ?", envUser[0]).Order("id").Find(&apps) |
benchmarking.rs | // This file is part of Webb.
// Copyright (C) 2021 Webb Technologies Inc.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Merkle Tree pallet benchmarking.
#![cfg(feature = "runtime-benchmarks")]
use super::*;
use webb_primitives::traits::merkle_tree::TreeInterface;
use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller};
use frame_support::traits::Currency;
use frame_system::RawOrigin;
use sp_runtime::traits::Bounded;
use sp_std::vec;
type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
fn assert_last_event<T: Config>(generic_event: <T as Config>::Event) {
frame_system::Pallet::<T>::assert_last_event(generic_event.into());
}
const SEED: u32 = 0;
benchmarks! {
create {
let d in 1..T::MaxTreeDepth::get() as u32;
let caller: T::AccountId = whitelisted_caller();
<<T as Config>::Currency as Currency<T::AccountId>>::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
let tree_id = Pallet::<T>::next_tree_id();
}:_(RawOrigin::Signed(caller.clone()), d as u8)
verify {
assert_last_event::<T>(Event::TreeCreation{tree_id: tree_id, who: caller}.into())
}
insert {
let caller: T::AccountId = whitelisted_caller();
let tree_id: T::TreeId = <Pallet<T> as TreeInterface<_,_,_>>::create(caller.clone(), T::MaxTreeDepth::get()).unwrap();
let leaf_index = Pallet::<T>::next_leaf_index(tree_id);
let element: T::Element = T::DefaultZeroElement::get();
}:_(RawOrigin::Signed(caller.clone()), tree_id, element)
verify {
assert_last_event::<T>(Event::LeafInsertion{tree_id, leaf_index, leaf: element}.into()) | set_maintainer {
let caller: T::AccountId = whitelisted_caller();
let new_maintainer: T::AccountId = account("maintainer", 0, SEED);
Maintainer::<T>::put::<T::AccountId>(caller.clone());
}:_(RawOrigin::Signed(caller.clone()), new_maintainer.clone())
verify {
assert_last_event::<T>(Event::MaintainerSet{old_maintainer: caller, new_maintainer: new_maintainer.into()}.into());
}
force_set_maintainer {
let new_maintainer: T::AccountId = account("maintainer", 0, SEED);
}:_(RawOrigin::Root, new_maintainer.clone())
verify {
assert_last_event::<T>(Event::MaintainerSet{old_maintainer: Default::default(), new_maintainer: new_maintainer.into()}.into());
}
force_set_default_hashes {
let p in 1..T::MaxTreeDepth::get() as u32;
let default_hashes = vec![T::DefaultZeroElement::get();p as usize];
}:_(RawOrigin::Root, default_hashes)
verify {
assert_eq!(DefaultHashes::<T>::get().len(), p as usize)
}
}
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); | }
|
net_error_test.go | package neterror_test
import (
"errors"
"fmt"
"net"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/mec07/neterror"
"gotest.tools/assert"
)
func TestGetNetError(t *testing.T) {
// The error from os.Stat for a non-existent file does not satisfy the
// net.Error interface (*fs.PathError has Timeout but no Temporary), so
// GetNetError is expected to reject it.
_, err := os.Stat("non-existent-file")
var invalidAddrErr *net.InvalidAddrError
var unknownNetworkError *net.UnknownNetworkError
table := []struct {
name string
err error
shouldSucceed bool
}{
{
name: "top level DNS error",
err: &net.DNSError{},
shouldSucceed: true,
},
{
name: "wrapped DNS error",
err: fmt.Errorf("stuff to wrap with: %w", &net.DNSError{}),
shouldSucceed: true,
},
{
name: "DNS config error",
err: &net.DNSConfigError{},
shouldSucceed: true,
},
{
name: "Address error",
err: &net.AddrError{},
shouldSucceed: true,
},
{
name: "Invalid address error",
err: invalidAddrErr,
shouldSucceed: true,
},
{
name: "Operation error",
err: &net.OpError{},
shouldSucceed: true,
},
{ | err: unknownNetworkError,
shouldSucceed: true,
},
{
name: "random error",
err: errors.New("hello world"),
shouldSucceed: false,
},
{
name: "nil error",
err: nil,
shouldSucceed: false,
},
{
name: "os.PathError",
err: err,
shouldSucceed: false,
},
}
for _, test := range table {
test := test
t.Run(test.name, func(t *testing.T) {
_, ok := neterror.GetNetError(test.err)
assert.Equal(t, test.shouldSucceed, ok)
})
}
}
func ExampleGetNetError() {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(time.Second)
w.WriteHeader(200)
})
server := httptest.NewServer(handler)
client := http.Client{Timeout: time.Millisecond}
req, err := http.NewRequest("GET", server.URL, nil)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
return
}
_, err = client.Do(req)
netError, ok := neterror.GetNetError(err)
if !ok {
fmt.Println("Expected a net.Error")
return
}
if netError.Temporary() && netError.Timeout() {
fmt.Println("Temporary Timeout error")
return
}
// Output: Temporary Timeout error
} | name: "Unknown network error", |
urls.py | """
Top-level URL lookup for InvenTree application.
Passes URL lookup downstream to each app as required.
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from qr_code import urls as qr_code_urls
from company.urls import company_urls
from company.urls import supplier_part_urls
from company.urls import price_break_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import po_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import IndexView, SearchView, SettingsView, EditUserView, SetPasswordView
from .views import InfoView
from users.urls import user_urls, user_api_urls
from access.urls import access_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^common/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^po/', include(po_api_urls)),
# User URLs
url(r'^user/', include(user_api_urls)),
# InvenTree information endpoint
url(r'^$', InfoView.as_view(), name='inventree-info'),
]
settings_urls = [
url(r'^user/?', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings-user'),
url(r'^currency/?', SettingsView.as_view(template_name='InvenTree/settings/currency.html'), name='settings-currency'),
url(r'^part/?', SettingsView.as_view(template_name='InvenTree/settings/part.html'), name='settings-part'),
url(r'^other/?', SettingsView.as_view(template_name='InvenTree/settings/other.html'), name='settings-other'),
# Catch any other urls
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings'),
]
urlpatterns = [
# User URLs
url(r'^user/', include(user_urls)),
url(r'^access/', include(access_urls)),
url(r'^part/', include(part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^price-break/', include(price_break_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^login/', auth_views.LoginView.as_view(), name='login'),
url(r'^logout/', auth_views.LogoutView.as_view(template_name='registration/logout.html'), name='logout'),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^admin/', admin.site.urls, name='inventree-admin'), | url(r'^search/', SearchView.as_view(), name='search'),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
]
# Static file access
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
# Media file access
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Send any unknown URLs to the parts page
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')] |
url(r'^qr_code/', include(qr_code_urls, namespace='qr_code')),
url(r'^index/', IndexView.as_view(), name='index'), |
client.go | package client
import (
"context"
"encoding/json"
"fmt"
etcd "github.com/coreos/etcd/clientv3"
"github.com/mesanine/gaffer/config"
"github.com/mesanine/gaffer/host"
"github.com/mesanine/gaffer/log"
"time"
)
const (
RegistrationKey = "gaffer_host_"
DialTimeout = 5 * time.Second
RegistrationLeaseTTL = 60
)
// Client is an HTTP client for
// interacting with a Gaffer cluster.
type Client struct {
etcd *etcd.Client
} | Endpoints: cfg.Endpoints,
DialTimeout: DialTimeout,
})
if err != nil {
return nil, err
}
return &Client{etcd: cli}, nil
}
func (s Client) Close() error { return s.etcd.Close() }
func (c Client) Register() error {
self, err := host.Self()
if err != nil {
return err
}
raw, err := json.Marshal(self)
if err != nil {
return err
}
lease, err := c.etcd.Grant(context.TODO(), RegistrationLeaseTTL)
if err != nil {
return err
}
key := fmt.Sprintf("%s_%s", RegistrationKey, self.Mac)
_, err = c.etcd.Put(context.TODO(), key, string(raw), etcd.WithLease(lease.ID))
if err != nil {
return err
}
log.Log.Debug(fmt.Sprintf("registered self: %s", key))
return nil
}
func (c Client) Hosts() ([]*host.Host, error) {
resp, err := c.etcd.Get(context.TODO(), RegistrationKey, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
if err != nil {
return nil, err
}
hosts := []*host.Host{}
for _, kv := range resp.Kvs {
host := &host.Host{}
err = json.Unmarshal(kv.Value, host)
if err != nil {
return nil, err
}
hosts = append(hosts, host)
}
return hosts, nil
} |
func New(cfg config.Config) (*Client, error) {
cli, err := etcd.New(etcd.Config{ |
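Register grants a 60-second lease but nothing in this file renews it, so a host's registration key expires unless Register is called again before the TTL lapses. A hedged sketch of a keep-alive loop using the same clientv3 API (keepLeaseAlive is hypothetical and assumes Register is extended to return lease.ID; it relies on the context and etcd imports already present in this file):

// keepLeaseAlive renews the registration lease until ctx is cancelled or the
// lease expires. clientv3.Client embeds the Lease API, so KeepAlive is
// available directly on c.etcd.
func (c Client) keepLeaseAlive(ctx context.Context, id etcd.LeaseID) error {
	ch, err := c.etcd.KeepAlive(ctx, id)
	if err != nil {
		return err
	}
	for range ch {
		// Each response confirms the lease TTL was extended; the channel
		// closes when ctx ends or the lease cannot be kept alive.
	}
	return nil
}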