file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k) |
---|---|---|---|
mutable.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_arrow::arrow::bitmap::MutableBitmap;
use common_exception::Result;
use crate::columns::mutable::MutableColumn;
use crate::types::DataTypePtr;
use crate::ColumnRef;
use crate::NullableColumn;
pub struct MutableNullableColumn {
values: MutableBitmap,
inner: Box<dyn MutableColumn>,
data_type: DataTypePtr,
}
impl MutableColumn for MutableNullableColumn {
fn as_any(&self) -> &dyn std::any::Any {
self
}
fn as_mut_any(&mut self) -> &mut dyn std::any::Any {
self
}
fn data_type(&self) -> DataTypePtr {
self.data_type.clone()
}
fn shrink_to_fit(&mut self) {
self.inner.shrink_to_fit()
}
fn append_default(&mut self) {
self.values.push(false);
self.inner.append_default();
}
fn len(&self) -> usize {
self.values.len()
}
fn to_column(&mut self) -> ColumnRef {
let col = self.inner.to_column();
let validity = std::mem::take(&mut self.values);
Arc::new(NullableColumn::new(col, validity.into()))
}
fn append_data_value(&mut self, value: crate::DataValue) -> Result<()> {
self.values.push(true);
self.inner.append_data_value(value)
}
}
impl MutableNullableColumn {
pub fn new(inner: Box<dyn MutableColumn>, data_type: DataTypePtr) -> Self {
Self {
inner,
values: MutableBitmap::with_capacity(0),
data_type,
}
}
#[inline]
pub fn append_value(&mut self, value: bool) |
pub fn inner_mut(&mut self) -> &mut Box<dyn MutableColumn> {
&mut self.inner
}
pub fn validity_mut(&mut self) -> &mut MutableBitmap {
&mut self.values
}
}
| {
self.values.push(value);
} |
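The row above shows `MutableNullableColumn` pairing a validity bitmap with an inner builder. A minimal sketch of driving that API from the same module, assuming the caller supplies an inner builder and a matching `DataValue` (the helper name is hypothetical):

```rust
use common_exception::Result;
use crate::{ColumnRef, DataValue};
use crate::columns::mutable::MutableColumn;
use crate::types::DataTypePtr;

// Hypothetical helper: append one concrete value and one NULL, then freeze the builder.
fn build_two_rows(
    inner: Box<dyn MutableColumn>,
    data_type: DataTypePtr,
    value: DataValue,
) -> Result<ColumnRef> {
    let mut builder = MutableNullableColumn::new(inner, data_type);
    builder.append_data_value(value)?; // pushes `true` into the validity bitmap
    builder.append_default();          // pushes `false`: a NULL row with the inner default
    assert_eq!(builder.len(), 2);
    Ok(builder.to_column())            // Arc<NullableColumn> over the frozen inner column
}
```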
schema.py | import graphene
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql_relay.node.node import from_global_id
from . import models
class TransactionNode(DjangoObjectType):
class Meta:
model = models.Transaction
filter_fields = {
'payee': ['exact'],
'date': ['exact', 'lt', 'lte', 'gt', 'gte']
}
interfaces = (graphene.relay.Node, )
class EntryNode(DjangoObjectType):
class | :
model = models.Entry
filter_fields = ['account', 'is_cleared']
interfaces = (graphene.relay.Node, )
class Query(object):
transaction_list = DjangoFilterConnectionField(
TransactionNode,
ledger_id=graphene.ID(required=True)
)
def resolve_transaction_list(self, info, **kwargs):
node, ledger_id = from_global_id(kwargs.get('ledger_id'))
assert node == 'LedgerNode'
return models.Transaction.objects.filter(
ledger_id=ledger_id,
ledger__creator=info.context.user
).order_by('-date', 'id')
| Meta |
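The resolver above decodes a relay-style global ID for `ledger_id`. A short sketch of how such an ID is produced and decoded with `graphql_relay` (the numeric primary key is illustrative):

```python
from graphql_relay.node.node import from_global_id, to_global_id

ledger_global_id = to_global_id("LedgerNode", 7)   # base64 of "LedgerNode:7"
node_type, pk = from_global_id(ledger_global_id)
assert (node_type, pk) == ("LedgerNode", "7")      # matches the resolver's assertion
```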
logger.py | # Last Updated: 2.2
from datetime import datetime
from util.diagMessage import DiagMessage
debug = False  # module-level switch for console diagnostics while writing the log
# Logger class
# Buffers and writes messages to a file
class Logger:
BUFFER_MAX = 10
DEFAULT_FN = "../log.txt"
# Constructor for logger class
# Params: fn - file name to use or leave default | # Return: Logger instance
def __init__(self, fn = DEFAULT_FN, log = True):
#{{{
self.keep_log = log
self.fn = fn
self.log_buffer = []
if self.keep_log:
self.log(DiagMessage("LOG0000I"))
#}}}
# Append line to internal log buffer, flush if needed
# Params: diag - DiagMessage to log
# flush - bool flag for flushing buffer early
# Return: None
def log(self, diag, flush=False):
#{{{
if self.keep_log:
self.log_buffer.append(str(datetime.now()) + " - " + diag.msg)
if len(self.log_buffer) >= self.BUFFER_MAX or flush:
self._write()
elif not flush:
print(diag.msg)
#}}}
# Write contents of buffer out to file
# Params: None
# Return: None
def _write(self):
#{{{
print("Writing log...") if debug else None
with open(self.fn,'a') as logfile:
for line in self.log_buffer:
try:
logfile.write(line)
except TypeError:
logfile.write(str(datetime.now())+" - LOG ERR")
except UnicodeEncodeError:
logfile.write(str(line.encode("utf-8","replace")))
logfile.write("\n")
del self.log_buffer[:]
#}}} | # log - flag to keep a log file or not |
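A brief usage sketch of the `Logger` above, reusing the `DiagMessage` code that already appears in its constructor; the file name is the class default:

```python
from util.diagMessage import DiagMessage

logger = Logger(fn="../log.txt", log=True)        # constructor already logs LOG0000I
logger.log(DiagMessage("LOG0000I"))               # buffered until BUFFER_MAX entries pile up
logger.log(DiagMessage("LOG0000I"), flush=True)   # force an early write to the file
```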
util.go | package pcscommand
import (
"fmt"
"github.com/iikira/BaiduPCS-Go/baidupcs"
"github.com/iikira/BaiduPCS-Go/baidupcs/pcserror"
"github.com/iikira/BaiduPCS-Go/pcspath"
"github.com/iikira/BaiduPCS-Go/pcsutil/waitgroup"
fpath "path"
"regexp"
"strings"
)
var (
// wildcards only match ? and *
patternRE = regexp.MustCompile(`[\*\?]`)
) | MaxRetry int // maximum number of retries
retry int // number of retries after task failure
}
// getAllAbsPaths resolves all of the given paths to absolute paths
func getAllAbsPaths(paths ...string) (absPaths []string, err error) {
for k := range paths {
p, err := parsePath(paths[k])
if err != nil {
return nil, err
}
absPaths = append(absPaths, p...)
}
return
}
// getAbsPath resolves a path that may contain wildcards; it returns the first match, or the original path together with the error on failure
func getAbsPath(path string) (first string, err error) {
p, err := parsePath(path)
if err != nil {
return path, err
}
if len(p) > 0 {
return p[0], nil
}
return path, fmt.Errorf("未找到路径")
}
// parsePath expands wildcards in the given path
func parsePath(path string) (paths []string, err error) {
pcsPath := pcspath.NewPCSPath(&GetActiveUser().Workdir, path)
path = pcsPath.AbsPathNoMatch()
if patternRE.MatchString(path) {
// recurse
paths, err = recurseParsePath(path)
if err != nil {
return nil, err
}
if len(paths) == 0 {
return nil, fmt.Errorf("文件路径匹配失败, 请检查通配符")
}
return paths, nil
}
paths = []string{path}
return
}
// recurseParsePath recursively expands wildcards
func recurseParsePath(path string) (paths []string, err pcserror.Error) {
if !patternRE.MatchString(path) {
// check whether the path exists
_, err = GetBaiduPCS().FilesDirectoriesMeta(path)
if err != nil {
return nil, nil
}
paths = []string{path}
return
}
names := pcspath.SplitAll(path)
namesLen := len(names)
for k := range names {
if !patternRE.MatchString(names[k]) {
continue
}
pfiles, err := GetBaiduPCS().FilesDirectoriesList(strings.Join(names[:k], ""), baidupcs.DefaultOrderOptions)
if err != nil {
return nil, err
}
// fetch metadata concurrently
wg := waitgroup.NewWaitGroup(10)
for k2 := range pfiles {
wg.AddDelta()
go func(k2 int) {
ok, _ := fpath.Match(pcspath.EscapeBracketOne(names[k]), "/"+pfiles[k2].Filename)
if ok {
if k >= namesLen-1 {
wg.Lock()
paths = append(paths, pfiles[k2].Path) // collect the match
wg.Unlock()
} else if pfiles[k2].Isdir {
recPaths, goerr := recurseParsePath(pfiles[k2].Path + strings.Join(names[k+1:], ""))
if goerr != nil {
err = goerr
return
}
wg.Lock()
paths = append(paths, recPaths...) // collect recursive matches
wg.Unlock()
}
}
wg.Done()
}(k2)
}
wg.Wait()
break
}
return
} |
// ListTask queue task state (base type)
type ListTask struct {
ID int // task ID |
map.js | import GoogleMapsLoader from 'google-maps';
import { getJSON, getCoDataEngineData } from '../helpers/dataHelpers';
import { defaultZoom, denverLatitude, denverLongitude } from '../constants/graphConstants';
import tinycolor from 'tinycolor2';
import {
Metro_Denver_Federally_Subsidized_Affordable_Housing_2014_id
} from '../constants/datasetConstants';
GoogleMapsLoader.VERSION = '3.23';
const limitRadius = 10000;
const numberOfBlocksToGet = 10000;
const geoJSONUrl = 'https://data.colorado.gov/resource/49x6-nvb5.geojson' +
`?$where=within_circle(the_geom,${denverLatitude},${denverLongitude},${limitRadius})&$limit=${numberOfBlocksToGet}`;
const blocksPromise = getJSON(geoJSONUrl);
const dataPromise = getCoDataEngineData(Metro_Denver_Federally_Subsidized_Affordable_Housing_2014_id);
function initMap(mapEl) {
return new Promise((resolve, reject) => {
GoogleMapsLoader.load((google) => {
const map = window.map = new google.maps.Map(mapEl, {
center: { lat: denverLatitude, lng: denverLongitude },
zoom: defaultZoom
});
resolve({ google, map });
});
});
}
function addDataToMap({ google, map, data }) {
const markers = [];
data.forEach(point => {
const loc = {
lng: parseFloat(point['affhousing_metro_fedsubsidized_2014.x']),
lat: parseFloat(point['affhousing_metro_fedsubsidized_2014.y'])
};
markers.push(new google.maps.Marker({
position: loc,
map,
title: `Total Subsidized Units: ${point['affhousing_metro_fedsubsidized_2014.restunit']}`
}));
});
}
function getDataForGeoId(geoId, dataSet, dataSetKey) {
return dataSet.find(datum => datum[dataSetKey] === geoId);
}
export function getColorFromNumber(number) {
const hueScale = 200;
return tinycolor({
h: number * 100 / hueScale,
s: 100,
v: 100
}).toHexString();
}
function addGeoJsonToMap({ google, map, geoJson, data }) {
const filteredFeatures = geoJson.features.filter(feature => {
return getDataForGeoId(feature.properties.geoidblock, data, 'affhousing_metro_fedsubsidized_2014.geoid10');
});
geoJson.features = filteredFeatures;
map.data.addGeoJson(geoJson); | map.data.setStyle(feature => {
const featureData = getDataForGeoId(feature.getProperty('geoidblock'), data, 'affhousing_metro_fedsubsidized_2014.geoid10');
const affordableUnits = featureData && featureData['affhousing_metro_fedsubsidized_2014.restunit'];
const color = getColorFromNumber(affordableUnits);
return {
fillColor: color,
strokeWeight: 1
};
});
}
export default function makeMap() {
const mapEl = document.getElementById('map');
Promise.all([
initMap(mapEl),
dataPromise
]).then(([{ google, map }, data]) => {
addDataToMap({ google, map, data });
blocksPromise.then(geoJson => {
addGeoJsonToMap({ google, map, geoJson, data });
});
});
} | |
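`getColorFromNumber` maps a unit count to a hue at full saturation and value, so blocks with more restricted units get a different fill color. A quick sketch of calling it (the import path is an assumption):

```js
import { getColorFromNumber } from './map';

// Hue is number * 100 / 200, so the hue rises with the count.
console.log(getColorFromNumber(0));    // '#ff0000' (hue 0)
console.log(getColorFromNumber(120));  // '#ffff00' (hue 60)
```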
main.js | import TheMovieThumb from './components/TheMovieThumbnailComponent.js';
import HomePage from './components/TheHomePageComponent.js';
import HeaderComponent from './components/HeaderComponent.js';
import HomeComponent from './components/TheHomeComponent.js';
import FooterComponent from './components/FooterComponent.js';
import LoginPage from './components/TheLoginComponent.js';
import Protected from './components/TheProtectedComponent.js';
import AllUsers from './components/TheAllUsersComponent.js';
import MediaView from './components/MediaViewComponent.js'
// import e from 'express';
(() => {
console.log('fired!');
// Register VueRouter plugin with Vue
// https://stackoverflow.com/questions/50438605/vue-warn-unknown-custom-element-router-view-did-you-register-the-compone
Vue.use(VueRouter);
// add our Vue Router here
const router = new VueRouter({
routes: [{
path: "/",
name: 'root',
component: LoginPage,
beforeEnter: (to, from, next) => {
//if you're authenticated (set in localstorage), then go to the home page.
if (localStorage.getItem('cacheduser')) {
let user = JSON.parse(localStorage.getItem('cacheduser'));
// already signed in: skip the login page and go to the home page
next({ name: 'home', params: { user } });
} else {
next();
}
}
},
{ path: "/login", component: LoginPage },
{ path: "/users", name: 'users', component: AllUsers },
{ path: '/home', name: 'home', component: HomeComponent, props: true },
{
path: '/view/:mediaType/:id',
component: MediaView,
props: true
}
//only access this route or section if you're logged in /authenticated
// {
// path: "/admin",
// component: Protected,
// beforeEnter: (to, from, next) => {
// if (!vm.authenticated) {
// next('/login');
// } else {
// next();
// }
// }
// }
]
})
const vm = new Vue({
data: {
allMovies: [],
message: "Hello!",
authenticated: false,
user: "",
isAdmin: false,
currentUser: JSON.parse(window.localStorage.getItem('cacheduser')) || undefined | watch: {
currentUser: (newValue, oldValue) => console.log(newValue, oldValue)
},
created: function() {
let cachedUser = window.localStorage.getItem('cacheduser');
if (cachedUser) {
this.cachedUser = true;
console.log(cachedUser)
}
if (window.localStorage.getItem("creds")) {
this.authenticated = true;
this.user = JSON.parse(window.localStorage.getItem("creds")).name;
fetch('api/movies')
.then(res => res.json())
.then(data => {
console.table(data);
this.allMovies = data;
})
.catch(err => console.error(err));
}
},
methods: {
logout() {
//remove the cached user from local storage, if it exists
if (localStorage.getItem('cacheduser')) {
localStorage.removeItem('cacheduser');
}
// return to log in page
this.$router.push({ name: 'root' });
this.currentUser = undefined;
},
authenticateuser(user) {
// debugger;
console.log('YEEEEEEEEEEEHAAAAW', user);
this.currentUser = user;
}
},
components: {
'footer-component': FooterComponent,
'header-component': HeaderComponent
},
router
}).$mount('#app')
})(); | }, |
config-map.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package config
import (
"path/filepath"
"kubevirt.io/kubevirt/pkg/api/v1"
)
// GetConfigMapSourcePath returns a path to ConfigMap mounted on a pod
func | (volumeName string) string {
return filepath.Join(ConfigMapSourceDir, volumeName)
}
// GetConfigMapDiskPath returns a path to ConfigMap iso image created based on a volume name
func GetConfigMapDiskPath(volumeName string) string {
return filepath.Join(ConfigMapDisksDir, volumeName+".iso")
}
// CreateConfigMapDisks creates ConfigMap iso disks which are attached to vmis
func CreateConfigMapDisks(vmi *v1.VirtualMachineInstance) error {
for _, volume := range vmi.Spec.Volumes {
if volume.ConfigMap != nil {
var filesPath []string
filesPath, err := getFilesLayout(GetConfigMapSourcePath(volume.Name))
if err != nil {
return err
}
err = createIsoConfigImage(GetConfigMapDiskPath(volume.Name), filesPath)
if err != nil {
return err
}
}
}
return nil
}
| GetConfigMapSourcePath |
index.json.ts | import type { Request, Response } from "@tinyhttp/app";
import posts from "./_posts.js";
const contents = JSON.stringify(
posts.map((post) => {
return {
title: post.title,
slug: post.slug,
};
})
);
export function get(_: Request, res: Response) {
res.writeHead(200, {
"Content-Type": "application/json",
}); | res.end(contents);
} | |
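A hedged sketch of mounting the exported `get` handler in a tinyhttp app; the route path and port are assumptions:

```ts
import { App } from "@tinyhttp/app";
import { get } from "./index.json.js";

const app = new App();
app.get("/blog/index.json", get); // serves the pre-serialized list of post titles and slugs
app.listen(3000);
```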
room_id.rs | //! Matrix room identifiers.
use std::{convert::TryFrom, num::NonZeroU8};
use crate::{Error, ServerName};
/// A Matrix room ID.
///
/// A `RoomId` is generated randomly or converted from a string slice, and can be converted back
/// into a string as needed.
///
/// ```
/// # use std::convert::TryFrom;
/// # use ruma_identifiers::RoomId;
/// assert_eq!(
/// RoomId::try_from("!n8f893n9:example.com").unwrap().as_ref(),
/// "!n8f893n9:example.com"
/// );
/// ```
#[derive(Clone, Debug)]
pub struct RoomId {
pub(crate) full_id: Box<str>,
pub(crate) colon_idx: NonZeroU8,
}
impl RoomId {
/// Attempts to generate a `RoomId` for the given origin server with a localpart consisting of
/// 18 random ASCII characters.
///
/// Fails if the given homeserver cannot be parsed as a valid host.
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
pub fn new(server_name: &ServerName) -> Self {
use crate::generate_localpart;
let full_id = format!("!{}:{}", generate_localpart(18), server_name).into();
Self { full_id, colon_idx: NonZeroU8::new(19).unwrap() }
}
}
impl RoomId {
/// Returns the room's unique ID.
pub fn localpart(&self) -> &str {
&self.full_id[1..self.colon_idx.get() as usize]
}
/// Returns the server name of the room ID.
pub fn server_name(&self) -> &ServerName {
<&ServerName>::try_from(&self.full_id[self.colon_idx.get() as usize + 1..]).unwrap()
}
}
/// Attempts to create a new Matrix room ID from a string representation.
///
/// The string must include the leading ! sigil, the localpart, a literal colon, and a server name.
fn try_from<S>(room_id: S) -> Result<RoomId, Error>
where
S: AsRef<str> + Into<Box<str>>,
{
let colon_idx = ruma_identifiers_validation::room_id::validate(room_id.as_ref())?;
Ok(RoomId { full_id: room_id.into(), colon_idx })
}
common_impls!(RoomId, try_from, "a Matrix room ID");
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
#[cfg(feature = "serde")]
use serde_json::{from_str, to_string};
use super::RoomId;
use crate::Error;
#[test]
fn valid_room_id() {
assert_eq!(
RoomId::try_from("!29fhd83h92h0:example.com")
.expect("Failed to create RoomId.")
.as_ref(),
"!29fhd83h92h0:example.com"
);
}
#[test]
fn empty_localpart() {
assert_eq!(
RoomId::try_from("!:example.com").expect("Failed to create RoomId.").as_ref(),
"!:example.com"
);
}
#[cfg(feature = "rand")]
#[test]
fn generate_random_valid_room_id() {
use crate::ServerName;
let server_name =
<&ServerName>::try_from("example.com").expect("Failed to parse ServerName");
let room_id = RoomId::new(server_name);
let id_str = room_id.as_str();
assert!(id_str.starts_with('!'));
assert_eq!(id_str.len(), 31);
}
#[cfg(feature = "serde")]
#[test]
fn serialize_valid_room_id() {
assert_eq!(
to_string(
&RoomId::try_from("!29fhd83h92h0:example.com").expect("Failed to create RoomId.")
)
.expect("Failed to convert RoomId to JSON."),
r#""!29fhd83h92h0:example.com""#
);
}
#[cfg(feature = "serde")]
#[test]
fn deserialize_valid_room_id() {
assert_eq!(
from_str::<RoomId>(r#""!29fhd83h92h0:example.com""#)
.expect("Failed to convert JSON to RoomId"),
RoomId::try_from("!29fhd83h92h0:example.com").expect("Failed to create RoomId.")
);
}
#[test]
fn valid_room_id_with_explicit_standard_port() {
assert_eq!(
RoomId::try_from("!29fhd83h92h0:example.com:443")
.expect("Failed to create RoomId.")
.as_ref(),
"!29fhd83h92h0:example.com:443"
);
}
#[test]
fn valid_room_id_with_non_standard_port() |
#[test]
fn missing_room_id_sigil() {
assert_eq!(RoomId::try_from("carl:example.com").unwrap_err(), Error::MissingSigil);
}
#[test]
fn missing_room_id_delimiter() {
assert_eq!(RoomId::try_from("!29fhd83h92h0").unwrap_err(), Error::MissingDelimiter);
}
#[test]
fn invalid_room_id_host() {
assert_eq!(RoomId::try_from("!29fhd83h92h0:/").unwrap_err(), Error::InvalidServerName);
}
#[test]
fn invalid_room_id_port() {
assert_eq!(
RoomId::try_from("!29fhd83h92h0:example.com:notaport").unwrap_err(),
Error::InvalidServerName
);
}
}
| {
assert_eq!(
RoomId::try_from("!29fhd83h92h0:example.com:5000")
.expect("Failed to create RoomId.")
.as_ref(),
"!29fhd83h92h0:example.com:5000"
);
} |
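A small sketch of the two accessors above, relying on the `TryFrom` conversion that `common_impls!` generates:

```rust
use std::convert::TryFrom;
use ruma_identifiers::RoomId;

fn main() {
    let room_id = RoomId::try_from("!29fhd83h92h0:example.com").unwrap();
    assert_eq!(room_id.localpart(), "29fhd83h92h0");           // between '!' and the first ':'
    assert_eq!(room_id.server_name().as_str(), "example.com"); // after the first ':'
}
```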
views.py | from django.http import HttpResponse
def | (request):
return HttpResponse("<h1> This is the music app homepage </h1>") | index |
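A minimal sketch of routing the `index` view above, following the standard Django URLconf pattern (the module path is hypothetical):

```python
# music/urls.py (hypothetical location)
from django.urls import path
from . import views

urlpatterns = [
    path("", views.index, name="index"),
]
```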
pkg.rs | use crate::{
lock::Lock,
manifest::{Dependency, Manifest, ManifestFile},
};
use anyhow::{anyhow, bail, Context, Error, Result};
use forc_util::{
find_file_name, git_checkouts_directory, kebab_to_snake_case, print_on_failure,
print_on_success, print_on_success_library, println_yellow_err,
};
use fuels_types::JsonABI;
use petgraph::{
self,
visit::{EdgeRef, IntoNodeReferences},
Directed, Direction,
};
use serde::{Deserialize, Serialize};
use std::{
collections::{hash_map, BTreeSet, HashMap, HashSet},
fmt,
hash::{Hash, Hasher},
path::{Path, PathBuf},
str::FromStr,
};
use sway_core::{
semantic_analysis::namespace, source_map::SourceMap, types::*, BytecodeCompilationResult,
CompileAstResult, CompileError, TreeType,
};
use sway_utils::constants;
use tracing::info;
use url::Url;
type GraphIx = u32;
type Node = Pinned;
type Edge = DependencyName;
pub type Graph = petgraph::stable_graph::StableGraph<Node, Edge, Directed, GraphIx>;
pub type NodeIx = petgraph::graph::NodeIndex<GraphIx>;
pub type PathMap = HashMap<PinnedId, PathBuf>;
/// A unique ID for a pinned package.
///
/// The internal value is produced by hashing the package's name and `SourcePinned`.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct PinnedId(u64);
/// The result of successfully compiling a package.
pub struct Compiled {
pub json_abi: JsonABI,
pub bytecode: Vec<u8>,
pub tree_type: TreeType,
}
/// A package uniquely identified by name along with its source.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Deserialize, Serialize)]
pub struct Pkg {
/// The unique name of the package as declared in its manifest.
pub name: String,
/// Where the package is sourced from.
pub source: Source,
}
/// A package uniquely identified by name along with its pinned source.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct Pinned {
pub name: String,
pub source: SourcePinned,
}
/// Specifies a base source for a package.
///
/// - For registry packages, this includes a base version.
/// - For git packages, this includes a base git reference like a branch or tag.
///
/// Note that a `Source` does not specify a specific, pinned version. Rather, it specifies a source
/// at which the current latest version may be located.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Deserialize, Serialize)]
pub enum Source {
/// Used to refer to the root project.
Root,
/// A git repo with a `Forc.toml` manifest at its root.
Git(SourceGit),
/// A path to a directory with a `Forc.toml` manifest at its root.
Path(PathBuf),
/// A forc project hosted on the official registry.
Registry(SourceRegistry),
}
/// A git repo with a `Forc.toml` manifest at its root.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Deserialize, Serialize)]
pub struct SourceGit {
/// The URL at which the repository is located.
pub repo: Url,
/// A git reference, e.g. a branch or tag.
pub reference: GitReference,
}
/// Used to distinguish between types of git references.
///
/// For the most part, `GitReference` is useful to refine the `refspecs` used to fetch remote
/// repositories.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Deserialize, Serialize)] | pub enum GitReference {
Branch(String),
Tag(String),
Rev(String),
DefaultBranch,
}
/// A package from the official registry.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Deserialize, Serialize)]
pub struct SourceRegistry {
/// The base version specified for the package.
pub version: semver::Version,
}
/// A pinned instance of a git source.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct SourceGitPinned {
/// The git source that is being pinned.
pub source: SourceGit,
/// The hash to which we have pinned the source.
pub commit_hash: String,
}
/// A pinned instance of a path source.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct SourcePathPinned {
/// The ID of the package that is the root of the subgraph of path dependencies that this
/// package is a part of.
///
/// In other words, when traversing the parents of this package, this is the ID of the first
/// non-path ancestor package.
///
/// As a result, this will always be either a git package or the root package.
///
/// This allows for disambiguating path dependencies of the same name that have different path
/// roots.
pub path_root: PinnedId,
}
/// A pinned instance of the registry source.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct SourceRegistryPinned {
/// The registry package with base version.
pub source: SourceRegistry,
/// The pinned version.
pub version: semver::Version,
}
/// A pinned instance of the package source.
///
/// Specifies an exact version to use, or an exact commit in the case of git dependencies. The
/// pinned version or commit is updated upon creation of the lock file and on `forc update`.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub enum SourcePinned {
Root,
Git(SourceGitPinned),
Path(SourcePathPinned),
Registry(SourceRegistryPinned),
}
/// Represents the full build plan for a project.
#[derive(Clone)]
pub struct BuildPlan {
graph: Graph,
path_map: PathMap,
compilation_order: Vec<NodeIx>,
}
/// Parameters to pass through to the `sway_core::BuildConfig` during compilation.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct BuildConfig {
pub print_ir: bool,
pub print_finalized_asm: bool,
pub print_intermediate_asm: bool,
pub silent: bool,
}
/// Error returned upon failed parsing of `PinnedId::from_str`.
#[derive(Clone, Debug)]
pub struct PinnedIdParseError;
/// Error returned upon failed parsing of `SourcePathPinned::from_str`.
#[derive(Clone, Debug)]
pub struct SourcePathPinnedParseError;
/// Error returned upon failed parsing of `SourceGitPinned::from_str`.
#[derive(Clone, Debug)]
pub enum SourceGitPinnedParseError {
Prefix,
Url,
Reference,
CommitHash,
}
/// Error returned upon failed parsing of `SourcePinned::from_str`.
#[derive(Clone, Debug)]
pub struct SourcePinnedParseError;
/// The name specified on the left hand side of the `=` in a dependency declaration under
/// `[dependencies]` within a forc manifest.
///
/// The name of a dependency may differ from the package name in the case that the dependency's
/// `package` field is specified.
///
/// For example, in the following, `foo` is assumed to be both the package name and the dependency
/// name:
///
/// ```toml
/// foo = { git = "https://github.com/owner/repo", branch = "master" }
/// ```
///
/// In the following case however, `foo` is the package name, but the dependency name is `foo-alt`:
///
/// ```toml
/// foo-alt = { git = "https://github.com/owner/repo", branch = "master", package = "foo" }
/// ```
pub type DependencyName = String;
pub struct PkgDiff {
pub added: Vec<(DependencyName, Pkg)>,
pub removed: Vec<(DependencyName, Pkg)>,
}
impl BuildPlan {
/// Create a new build plan for the project by fetching and pinning dependencies.
pub fn new(manifest: &ManifestFile, sway_git_tag: &str, offline: bool) -> Result<Self> {
let path = manifest.dir().to_path_buf();
let (graph, path_map) = fetch_deps(path, manifest, sway_git_tag, offline)?;
let compilation_order = compilation_order(&graph)?;
Ok(Self {
graph,
path_map,
compilation_order,
})
}
/// Create a new build plan from an existing one by applying the given package diff between the manifest and the existing plan.
pub fn apply_pkg_diff(
&self,
pkg_diff: PkgDiff,
sway_git_tag: &str,
offline_mode: bool,
) -> Result<Self> {
let mut graph = self.graph.clone();
let mut path_map = self.path_map.clone();
let proj_node = *self
.compilation_order
.last()
.ok_or_else(|| anyhow!("Invalid Graph"))?;
let PkgDiff { added, removed } = pkg_diff;
remove_deps(&mut graph, &path_map, proj_node, &removed);
let mut visited_map: HashMap<Pinned, NodeIx> = graph
.node_references()
.into_iter()
.map(|(node_index, pinned)| (pinned.clone(), node_index))
.collect();
add_deps(
&mut graph,
&mut path_map,
&self.compilation_order,
&added,
sway_git_tag,
offline_mode,
&mut visited_map,
)?;
let compilation_order = compilation_order(&graph)?;
Ok(Self {
graph,
path_map,
compilation_order,
})
}
/// Attempt to load the build plan from the `Lock`.
pub fn from_lock(proj_path: &Path, lock: &Lock, sway_git_tag: &str) -> Result<Self> {
let graph = lock.to_graph()?;
let compilation_order = compilation_order(&graph)?;
let path_map = graph_to_path_map(proj_path, &graph, &compilation_order, sway_git_tag)?;
Ok(Self {
graph,
path_map,
compilation_order,
})
}
/// Attempt to load the build plan from the `Forc.lock` file.
pub fn from_lock_file(lock_path: &Path, sway_git_tag: &str) -> Result<Self> {
let proj_path = lock_path.parent().unwrap();
let lock = Lock::from_path(lock_path)?;
Self::from_lock(proj_path, &lock, sway_git_tag)
}
/// Ensure that the build plan is valid for the given manifest.
pub fn validate(&self, manifest: &Manifest, sway_git_tag: &str) -> Result<PkgDiff> {
let mut added = vec![];
let mut removed = vec![];
// Retrieve project's graph node.
let proj_node = *self
.compilation_order
.last()
.ok_or_else(|| anyhow!("Invalid Graph"))?;
// Collect dependency `Source`s from graph.
let plan_dep_pkgs: BTreeSet<_> = self
.graph
.edges_directed(proj_node, Direction::Outgoing)
.map(|e| {
let dep_name = e.weight();
let dep_pkg = self.graph[e.target()].unpinned(&self.path_map);
(dep_name, dep_pkg)
})
.collect();
// Collect dependency `Source`s from manifest.
let proj_id = self.graph[proj_node].id();
let proj_path = &self.path_map[&proj_id];
let manifest_dep_pkgs = manifest
.deps()
.map(|(dep_name, dep)| {
// NOTE: Temporarily warn about `version` until we have support for registries.
if let Dependency::Detailed(det) = dep {
if det.version.is_some() {
println_yellow_err(&format!(
" WARNING! Dependency \"{}\" specifies the unused `version` field: \
consider using `branch` or `tag` instead",
dep_name
));
}
}
let name = dep.package().unwrap_or(dep_name).to_string();
let source = dep_to_source(proj_path, dep)?;
let dep_pkg = Pkg { name, source };
Ok((dep_name, dep_pkg))
})
.collect::<Result<BTreeSet<_>>>()?;
// Ensure both `pkg::Source` are equal. If not, produce added and removed.
if plan_dep_pkgs != manifest_dep_pkgs {
added = manifest_dep_pkgs
.difference(&plan_dep_pkgs)
.into_iter()
.map(|pkg| (pkg.0.clone(), pkg.1.clone()))
.collect();
removed = plan_dep_pkgs
.difference(&manifest_dep_pkgs)
.into_iter()
.map(|pkg| (pkg.0.clone(), pkg.1.clone()))
.collect();
}
// Ensure the pkg names of all nodes match their associated manifests.
for node in self.graph.node_indices() {
let pkg = &self.graph[node];
let id = pkg.id();
let path = &self.path_map[&id];
let manifest = ManifestFile::from_dir(path, sway_git_tag)?;
if pkg.name != manifest.project.name {
bail!(
"package name {:?} does not match the associated manifest project name {:?}",
pkg.name,
manifest.project.name,
);
}
}
Ok(PkgDiff { added, removed })
}
/// View the build plan's compilation graph.
pub fn graph(&self) -> &Graph {
&self.graph
}
/// View the build plan's map of pinned package IDs to the path containing a local copy of
/// their source.
pub fn path_map(&self) -> &PathMap {
&self.path_map
}
/// The order in which nodes are compiled, determined via a toposort of the package graph.
pub fn compilation_order(&self) -> &[NodeIx] {
&self.compilation_order
}
}
/// Remove the given set of packages from `graph` along with any dependencies that are no
/// longer required as a result.
fn remove_deps(
graph: &mut Graph,
path_map: &PathMap,
proj_node: NodeIx,
to_remove: &[(DependencyName, Pkg)],
) {
use petgraph::visit::Bfs;
// Do a BFS from the root and remove every node that has no incoming edge or that matches one of the removed dependencies.
let mut bfs = Bfs::new(&*graph, proj_node);
bfs.next(&*graph); // Skip the root node (aka project node).
while let Some(node) = bfs.next(&*graph) {
if graph
.edges_directed(node, Direction::Incoming)
.next()
.is_none()
|| to_remove
.iter()
.any(|removed_dep| removed_dep.1 == graph[node].unpinned(path_map))
{
graph.remove_node(node);
}
}
}
/// Add the given set of packages to `graph`. If a dependency of a newly added package is already
/// pinned, use that. Otherwise fetch and pin it.
fn add_deps(
graph: &mut Graph,
path_map: &mut PathMap,
compilation_order: &[NodeIx],
to_add: &[(DependencyName, Pkg)],
sway_git_tag: &str,
offline_mode: bool,
visited_map: &mut HashMap<Pinned, NodeIx>,
) -> Result<()> {
let proj_node = *compilation_order
.last()
.ok_or_else(|| anyhow!("Invalid Graph"))?;
let proj_id = graph[proj_node].id();
let proj_path = &path_map[&proj_id];
let fetch_ts = std::time::Instant::now();
let fetch_id = fetch_id(proj_path, fetch_ts);
let path_root = proj_id;
for (added_dep_name, added_package) in to_add {
let pinned_pkg = pin_pkg(fetch_id, proj_id, added_package, path_map, sway_git_tag)?;
let manifest = Manifest::from_dir(&path_map[&pinned_pkg.id()], sway_git_tag)?;
let added_package_node = graph.add_node(pinned_pkg.clone());
fetch_children(
fetch_id,
offline_mode,
added_package_node,
&manifest,
path_root,
sway_git_tag,
graph,
path_map,
visited_map,
)?;
graph.add_edge(proj_node, added_package_node, added_dep_name.to_string());
}
Ok(())
}
impl GitReference {
/// Resolves the parsed forc git reference to the associated git ID.
pub fn resolve(&self, repo: &git2::Repository) -> Result<git2::Oid> {
// Find the commit associated with this tag.
fn resolve_tag(repo: &git2::Repository, tag: &str) -> Result<git2::Oid> {
let refname = format!("refs/remotes/origin/tags/{}", tag);
let id = repo.refname_to_id(&refname)?;
let obj = repo.find_object(id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
// Resolve to the target for the given branch.
fn resolve_branch(repo: &git2::Repository, branch: &str) -> Result<git2::Oid> {
let name = format!("origin/{}", branch);
let b = repo
.find_branch(&name, git2::BranchType::Remote)
.with_context(|| format!("failed to find branch `{}`", branch))?;
b.get()
.target()
.ok_or_else(|| anyhow::format_err!("branch `{}` did not have a target", branch))
}
// Use the HEAD commit when default branch is specified.
fn resolve_default_branch(repo: &git2::Repository) -> Result<git2::Oid> {
let head_id = repo.refname_to_id("refs/remotes/origin/HEAD")?;
let head = repo.find_object(head_id, None)?;
Ok(head.peel(git2::ObjectType::Commit)?.id())
}
// Find the commit for the given revision.
fn resolve_rev(repo: &git2::Repository, rev: &str) -> Result<git2::Oid> {
let obj = repo.revparse_single(rev)?;
match obj.as_tag() {
Some(tag) => Ok(tag.target_id()),
None => Ok(obj.id()),
}
}
match self {
GitReference::Tag(s) => {
resolve_tag(repo, s).with_context(|| format!("failed to find tag `{}`", s))
}
GitReference::Branch(s) => resolve_branch(repo, s),
GitReference::DefaultBranch => resolve_default_branch(repo),
GitReference::Rev(s) => resolve_rev(repo, s),
}
}
}
impl Pinned {
/// Retrieve the unique ID for the pinned package.
///
/// The internal value is produced by hashing the package's name and `SourcePinned`.
pub fn id(&self) -> PinnedId {
PinnedId::new(&self.name, &self.source)
}
/// Retrieve the unpinned version of this source.
pub fn unpinned(&self, path_map: &PathMap) -> Pkg {
let id = self.id();
let source = match &self.source {
SourcePinned::Root => Source::Root,
SourcePinned::Git(git) => Source::Git(git.source.clone()),
SourcePinned::Path(_) => Source::Path(path_map[&id].clone()),
SourcePinned::Registry(reg) => Source::Registry(reg.source.clone()),
};
let name = self.name.clone();
Pkg { name, source }
}
}
impl PinnedId {
/// Hash the given name and pinned source to produce a unique pinned package ID.
pub fn new(name: &str, source: &SourcePinned) -> Self {
let mut hasher = hash_map::DefaultHasher::default();
name.hash(&mut hasher);
source.hash(&mut hasher);
Self(hasher.finish())
}
}
impl SourcePathPinned {
pub const PREFIX: &'static str = "path";
}
impl SourceGitPinned {
pub const PREFIX: &'static str = "git";
}
impl fmt::Display for PinnedId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Format the inner `u64` as hex.
write!(f, "{:016X}", self.0)
}
}
impl fmt::Display for SourcePathPinned {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// path+from-root-<id>
write!(f, "{}+from-root-{}", Self::PREFIX, self.path_root)
}
}
impl fmt::Display for SourceGitPinned {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// git+<url/to/repo>?<ref_kind>=<ref_string>#<commit>
write!(
f,
"{}+{}?{}#{}",
Self::PREFIX,
self.source.repo,
self.source.reference,
self.commit_hash
)
}
}
impl fmt::Display for GitReference {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
GitReference::Branch(ref s) => write!(f, "branch={}", s),
GitReference::Tag(ref s) => write!(f, "tag={}", s),
GitReference::Rev(ref _s) => write!(f, "rev"),
GitReference::DefaultBranch => write!(f, "default-branch"),
}
}
}
impl fmt::Display for SourcePinned {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
SourcePinned::Root => write!(f, "root"),
SourcePinned::Path(src) => src.fmt(f),
SourcePinned::Git(src) => src.fmt(f),
SourcePinned::Registry(_reg) => unimplemented!("pkg registries not yet implemented"),
}
}
}
impl FromStr for PinnedId {
type Err = PinnedIdParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(
u64::from_str_radix(s, 16).map_err(|_| PinnedIdParseError)?,
))
}
}
impl FromStr for SourcePathPinned {
type Err = SourcePathPinnedParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// path+from-root-<id>
let s = s.trim();
// Check for prefix at the start.
let prefix_plus = format!("{}+", Self::PREFIX);
if s.find(&prefix_plus) != Some(0) {
return Err(SourcePathPinnedParseError);
}
let s = &s[prefix_plus.len()..];
// Parse the `from-root-*` section.
let path_root = s
.split("from-root-")
.nth(1)
.ok_or(SourcePathPinnedParseError)?
.parse()
.map_err(|_| SourcePathPinnedParseError)?;
Ok(Self { path_root })
}
}
impl FromStr for SourceGitPinned {
type Err = SourceGitPinnedParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// git+<url/to/repo>?<reference>#<commit>
let s = s.trim();
// Check for "git+" at the start.
let prefix_plus = format!("{}+", Self::PREFIX);
if s.find(&prefix_plus) != Some(0) {
return Err(SourceGitPinnedParseError::Prefix);
}
let s = &s[prefix_plus.len()..];
// Parse the `repo` URL.
let repo_str = s.split('?').next().ok_or(SourceGitPinnedParseError::Url)?;
let repo = Url::parse(repo_str).map_err(|_| SourceGitPinnedParseError::Url)?;
let s = &s[repo_str.len() + "?".len()..];
// Parse the git reference and commit hash. This can be any of either:
// - `branch=<branch-name>#<commit-hash>`
// - `tag=<tag-name>#<commit-hash>`
// - `rev#<commit-hash>`
// - `default#<commit-hash>`
let mut s_iter = s.split('#');
let reference = s_iter.next().ok_or(SourceGitPinnedParseError::Reference)?;
let commit_hash = s_iter
.next()
.ok_or(SourceGitPinnedParseError::CommitHash)?
.to_string();
validate_git_commit_hash(&commit_hash)
.map_err(|_| SourceGitPinnedParseError::CommitHash)?;
const BRANCH: &str = "branch=";
const TAG: &str = "tag=";
let reference = if reference.find(BRANCH) == Some(0) {
GitReference::Branch(reference[BRANCH.len()..].to_string())
} else if reference.find(TAG) == Some(0) {
GitReference::Tag(reference[TAG.len()..].to_string())
} else if reference == "rev" {
GitReference::Rev(commit_hash.to_string())
} else if reference == "default-branch" {
GitReference::DefaultBranch
} else {
return Err(SourceGitPinnedParseError::Reference);
};
let source = SourceGit { repo, reference };
Ok(Self {
source,
commit_hash,
})
}
}
impl FromStr for SourcePinned {
type Err = SourcePinnedParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let source = if s == "root" {
SourcePinned::Root
} else if let Ok(src) = SourcePathPinned::from_str(s) {
SourcePinned::Path(src)
} else if let Ok(src) = SourceGitPinned::from_str(s) {
SourcePinned::Git(src)
} else {
// TODO: Try parse registry source.
return Err(SourcePinnedParseError);
};
Ok(source)
}
}
fn validate_git_commit_hash(commit_hash: &str) -> Result<()> {
const LEN: usize = 40;
if commit_hash.len() != LEN {
bail!(
"invalid hash length: expected {}, found {}",
LEN,
commit_hash.len()
);
}
if !commit_hash.chars().all(|c| c.is_ascii_alphanumeric()) {
bail!("hash contains one or more non-ascii-alphanumeric characters");
}
Ok(())
}
impl Default for GitReference {
fn default() -> Self {
Self::DefaultBranch
}
}
/// The `pkg::Graph` is of *a -> b* where *a* depends on *b*. We can determine compilation order by
/// performing a toposort of the graph with reversed weights. The resulting order ensures all
/// dependencies are always compiled before their dependents.
pub fn compilation_order(graph: &Graph) -> Result<Vec<NodeIx>> {
let rev_pkg_graph = petgraph::visit::Reversed(&graph);
petgraph::algo::toposort(rev_pkg_graph, None).map_err(|_| {
// Find strongly connected components
// If the vector has an element with length more than 1, it contains a cyclic path.
let scc = petgraph::algo::kosaraju_scc(&graph);
let mut path = String::new();
scc.iter()
.filter(|path| path.len() > 1)
.for_each(|cyclic_path| {
// We are sure that there is an element in cyclic_path vec.
let starting_node = &graph[*cyclic_path.last().unwrap()];
// Adding first node of the path
path.push_str(&starting_node.name.to_string());
path.push_str(" -> ");
for (node_index, node) in cyclic_path.iter().enumerate() {
path.push_str(&graph[*node].name.to_string());
if node_index != cyclic_path.len() - 1 {
path.push_str(" -> ");
}
}
path.push('\n');
});
anyhow!("dependency cycle detected: {}", path)
})
}
/// Given graph of pinned dependencies and the directory for the root node, produce a path map
/// containing the path to the local source for every node in the graph.
pub fn graph_to_path_map(
proj_manifest_dir: &Path,
graph: &Graph,
compilation_order: &[NodeIx],
sway_git_tag: &str,
) -> Result<PathMap> {
let mut path_map = PathMap::new();
// We resolve all paths in reverse compilation order.
// That is, we follow paths starting from the project root.
let mut path_resolve_order = compilation_order.iter().cloned().rev();
// Add the project's package to the map.
let proj_node = path_resolve_order
.next()
.ok_or_else(|| anyhow!("graph must contain at least the project node"))?;
let proj_id = graph[proj_node].id();
path_map.insert(proj_id, proj_manifest_dir.to_path_buf().canonicalize()?);
// Produce the unique `fetch_id` in case we need to fetch a missing git dep.
let fetch_ts = std::time::Instant::now();
let fetch_id = fetch_id(&path_map[&proj_id], fetch_ts);
// Resolve all following dependencies, knowing their parents' paths will already be resolved.
for dep_node in path_resolve_order {
let dep = &graph[dep_node];
let dep_path = match &dep.source {
SourcePinned::Root => bail!("more than one root package detected in graph"),
SourcePinned::Git(git) => {
let repo_path = git_commit_path(&dep.name, &git.source.repo, &git.commit_hash);
if !repo_path.exists() {
info!(" Fetching {}", git.to_string());
fetch_git(fetch_id, &dep.name, git)?;
}
find_dir_within(&repo_path, &dep.name, sway_git_tag).ok_or_else(|| {
anyhow!(
"failed to find package `{}` in {}",
dep.name,
git.to_string()
)
})?
}
SourcePinned::Path(path) => {
// This is already checked during `Graph::from_lock`, but we check again here just
// in case this is being called with a `Graph` constructed via some other means.
validate_path_root(graph, dep_node, path.path_root)?;
// Retrieve the parent node to construct the relative path.
let (parent_node, dep_name) = graph
.edges_directed(dep_node, Direction::Incoming)
.next()
.map(|edge| (edge.source(), edge.weight().clone()))
.ok_or_else(|| anyhow!("more than one root package detected in graph"))?;
let parent = &graph[parent_node];
// Construct the path relative to the parent's path.
let parent_path = &path_map[&parent.id()];
let parent_manifest = ManifestFile::from_dir(parent_path, sway_git_tag)?;
let detailed = parent_manifest
.dependencies
.as_ref()
.and_then(|deps| deps.get(&dep_name))
.ok_or_else(|| {
anyhow!(
"dependency required for path reconstruction \
has been removed from the manifest"
)
})
.and_then(|dep| match dep {
Dependency::Detailed(detailed) => Ok(detailed),
Dependency::Simple(_) => {
bail!("missing path info for dependency: {}", &dep_name);
}
})?;
let rel_dep_path = detailed
.path
.as_ref()
.ok_or_else(|| anyhow!("missing path info for dependency: {}", dep.name))?;
let path = parent_path.join(rel_dep_path);
if !path.exists() {
bail!("pinned `path` dependency \"{}\" source missing", dep.name);
}
path
}
SourcePinned::Registry(_reg) => {
bail!("registry dependencies are not yet supported");
}
};
path_map.insert(dep.id(), dep_path.canonicalize()?);
}
Ok(path_map)
}
/// Given a `graph`, the node index of a path dependency within that `graph`, and the supposed
/// `path_root` of the path dependency, ensure that the `path_root` is valid.
///
/// See the `path_root` field of the [SourcePathPinned] type for further details.
pub(crate) fn validate_path_root(
graph: &Graph,
path_dep: NodeIx,
path_root: PinnedId,
) -> Result<()> {
let mut node = path_dep;
let invalid_path_root = || {
anyhow!(
"invalid `path_root` for path dependency package {:?}",
&graph[path_dep].name
)
};
loop {
let parent = graph
.edges_directed(node, Direction::Incoming)
.next()
.map(|edge| edge.source())
.ok_or_else(invalid_path_root)?;
let parent_pkg = &graph[parent];
match &parent_pkg.source {
SourcePinned::Path(src) if src.path_root != path_root => bail!(invalid_path_root()),
SourcePinned::Git(_) | SourcePinned::Registry(_) | SourcePinned::Root => {
if parent_pkg.id() != path_root {
bail!(invalid_path_root());
}
return Ok(());
}
_ => node = parent,
}
}
}
/// Fetch all dependencies and produce the dependency graph along with a map from each node's unique
/// ID to its local fetched path.
///
/// This will determine pinned versions and commits for remote dependencies during traversal.
pub(crate) fn fetch_deps(
proj_manifest_dir: PathBuf,
proj_manifest: &Manifest,
sway_git_tag: &str,
offline_mode: bool,
) -> Result<(Graph, PathMap)> {
let mut graph = Graph::new();
let mut path_map = PathMap::new();
// Add the project to the graph as the root node.
let name = proj_manifest.project.name.clone();
let path = proj_manifest_dir.canonicalize()?;
let source = SourcePinned::Root;
let pkg = Pinned { name, source };
let pkg_id = pkg.id();
path_map.insert(pkg_id, path);
let root = graph.add_node(pkg);
// The set of visited packages, starting with the root.
let mut visited = HashMap::new();
visited.insert(graph[root].clone(), root);
// Recursively fetch children and add them to the graph.
// TODO: Convert this recursion to use loop & stack to ensure deps can't cause stack overflow.
let fetch_ts = std::time::Instant::now();
let fetch_id = fetch_id(&path_map[&pkg_id], fetch_ts);
let manifest = Manifest::from_dir(&path_map[&pkg_id], sway_git_tag)?;
let path_root = pkg_id;
fetch_children(
fetch_id,
offline_mode,
root,
&manifest,
path_root,
sway_git_tag,
&mut graph,
&mut path_map,
&mut visited,
)?;
Ok((graph, path_map))
}
/// Produce a unique ID for a particular fetch pass.
///
/// This is used in the temporary git directory and allows for avoiding contention over the git repo directory.
pub fn fetch_id(path: &Path, timestamp: std::time::Instant) -> u64 {
let mut hasher = hash_map::DefaultHasher::new();
path.hash(&mut hasher);
timestamp.hash(&mut hasher);
hasher.finish()
}
/// Fetch children nodes of the given node and add unvisited nodes to the graph.
#[allow(clippy::too_many_arguments)]
fn fetch_children(
fetch_id: u64,
offline_mode: bool,
node: NodeIx,
manifest: &Manifest,
path_root: PinnedId,
sway_git_tag: &str,
graph: &mut Graph,
path_map: &mut PathMap,
visited: &mut HashMap<Pinned, NodeIx>,
) -> Result<()> {
let parent = &graph[node];
let parent_id = parent.id();
let parent_path = path_map[&parent_id].clone();
for (dep_name, dep) in manifest.deps() {
let name = dep.package().unwrap_or(dep_name).to_string();
let source = dep_to_source(&parent_path, dep)?;
if offline_mode && !matches!(source, Source::Path(_)) {
bail!("Unable to fetch pkg {:?} in offline mode", source);
}
let pkg = Pkg { name, source };
let pinned = pin_pkg(fetch_id, path_root, &pkg, path_map, sway_git_tag)?;
let pkg_id = pinned.id();
let path_root = match pkg.source {
Source::Root | Source::Git(_) | Source::Registry(_) => pkg_id,
Source::Path(_) => path_root,
};
let manifest = Manifest::from_dir(&path_map[&pkg_id], sway_git_tag)?;
if pinned.name != manifest.project.name {
bail!(
"dependency name {:?} must match the manifest project name {:?} \
unless `package = {:?}` is specified in the dependency declaration",
pinned.name,
manifest.project.name,
manifest.project.name,
);
}
let dep_node = if let hash_map::Entry::Vacant(entry) = visited.entry(pinned.clone()) {
let node = graph.add_node(pinned);
entry.insert(node);
fetch_children(
fetch_id,
offline_mode,
node,
&manifest,
path_root,
sway_git_tag,
graph,
path_map,
visited,
)?;
node
} else {
visited[&pinned]
};
graph.add_edge(node, dep_node, dep_name.to_string());
}
Ok(())
}
/// The name to use for a package's git repository under the user's forc directory.
fn git_repo_dir_name(name: &str, repo: &Url) -> String {
let repo_url_hash = hash_url(repo);
format!("{}-{:x}", name, repo_url_hash)
}
fn hash_url(url: &Url) -> u64 {
let mut hasher = hash_map::DefaultHasher::new();
url.hash(&mut hasher);
hasher.finish()
}
/// A temporary directory that we can use for cloning a git-sourced package's repo and discovering
/// the current HEAD for the given git reference.
///
/// The resulting directory is:
///
/// ```ignore
/// $HOME/.forc/git/checkouts/tmp/<fetch_id>-name-<repo_url_hash>
/// ```
///
/// A unique `fetch_id` may be specified to avoid contention over the git repo directory in the
/// case that multiple processes or threads may be building different projects that may require
/// fetching the same dependency.
fn tmp_git_repo_dir(fetch_id: u64, name: &str, repo: &Url) -> PathBuf {
let repo_dir_name = format!("{:x}-{}", fetch_id, git_repo_dir_name(name, repo));
git_checkouts_directory().join("tmp").join(repo_dir_name)
}
/// Given a git reference, build a list of `refspecs` required for the fetch operation.
///
/// Also returns whether or not our reference implies we require fetching tags.
fn git_ref_to_refspecs(reference: &GitReference) -> (Vec<String>, bool) {
let mut refspecs = vec![];
let mut tags = false;
match reference {
GitReference::Branch(s) => {
refspecs.push(format!("+refs/heads/{0}:refs/remotes/origin/{0}", s));
}
GitReference::Tag(s) => {
refspecs.push(format!("+refs/tags/{0}:refs/remotes/origin/tags/{0}", s));
}
GitReference::Rev(s) => {
if s.starts_with("refs/") {
refspecs.push(format!("+{0}:{0}", s));
} else {
// We can't fetch the commit directly, so we fetch all branches and tags in order
// to find it.
refspecs.push("+refs/heads/*:refs/remotes/origin/*".to_string());
refspecs.push("+HEAD:refs/remotes/origin/HEAD".to_string());
tags = true;
}
}
GitReference::DefaultBranch => {
refspecs.push("+HEAD:refs/remotes/origin/HEAD".to_string());
}
}
(refspecs, tags)
}
/// Initializes a temporary git repo for the package and fetches only the reference associated with
/// the given source.
fn with_tmp_git_repo<F, O>(fetch_id: u64, name: &str, source: &SourceGit, f: F) -> Result<O>
where
F: FnOnce(git2::Repository) -> Result<O>,
{
// Clear existing temporary directory if it exists.
let repo_dir = tmp_git_repo_dir(fetch_id, name, &source.repo);
if repo_dir.exists() {
let _ = std::fs::remove_dir_all(&repo_dir);
}
// Initialise the repository.
let repo = git2::Repository::init(&repo_dir)
.map_err(|e| anyhow!("failed to init repo at \"{}\": {}", repo_dir.display(), e))?;
// Fetch the necessary references.
let (refspecs, tags) = git_ref_to_refspecs(&source.reference);
// Fetch the refspecs.
let mut fetch_opts = git2::FetchOptions::new();
if tags {
fetch_opts.download_tags(git2::AutotagOption::All);
}
repo.remote_anonymous(source.repo.as_str())?
.fetch(&refspecs, Some(&mut fetch_opts), None)
.with_context(|| format!("failed to fetch `{}`", &source.repo))?;
// Call the user function.
let output = f(repo)?;
// Clean up the temporary directory.
let _ = std::fs::remove_dir_all(&repo_dir);
Ok(output)
}
/// Pin the given git-sourced package.
///
/// This clones the repository to a temporary directory in order to determine the commit at the
/// HEAD of the given git reference.
pub fn pin_git(fetch_id: u64, name: &str, source: SourceGit) -> Result<SourceGitPinned> {
let commit_hash = with_tmp_git_repo(fetch_id, name, &source, |repo| {
// Resolve the reference to the commit ID.
let commit_id = source
.reference
.resolve(&repo)
.with_context(|| "failed to resolve reference".to_string())?;
Ok(format!("{}", commit_id))
})?;
Ok(SourceGitPinned {
source,
commit_hash,
})
}
/// Given a package source, attempt to determine the pinned version or commit.
///
/// Also updates the `path_map` with a path to the local copy of the source.
///
/// The `path_root` is required for `Path` dependencies and must specify the package that is the
/// root of the current subgraph of path dependencies.
fn pin_pkg(
fetch_id: u64,
path_root: PinnedId,
pkg: &Pkg,
path_map: &mut PathMap,
sway_git_tag: &str,
) -> Result<Pinned> {
let name = pkg.name.clone();
let pinned = match &pkg.source {
Source::Root => unreachable!("Root package is \"pinned\" prior to fetching"),
Source::Path(path) => {
let path_pinned = SourcePathPinned { path_root };
let source = SourcePinned::Path(path_pinned);
let pinned = Pinned { name, source };
let id = pinned.id();
path_map.insert(id, path.clone());
pinned
}
Source::Git(ref git_source) => {
let pinned_git = pin_git(fetch_id, &name, git_source.clone())?;
let repo_path =
git_commit_path(&name, &pinned_git.source.repo, &pinned_git.commit_hash);
let source = SourcePinned::Git(pinned_git.clone());
let pinned = Pinned { name, source };
let id = pinned.id();
if let hash_map::Entry::Vacant(entry) = path_map.entry(id) {
// TODO: Here we assume that if the local path already exists, that it contains the full and
// correct source for that commit and hasn't been tampered with. This is probably fine for most
// cases as users should never be touching these directories, however we should add some code
// to validate this. E.g. can we recreate the git hash by hashing the directory or something
// along these lines using git?
if !repo_path.exists() {
info!(" Fetching {}", pinned_git.to_string());
fetch_git(fetch_id, &pinned.name, &pinned_git)?;
}
let path =
find_dir_within(&repo_path, &pinned.name, sway_git_tag).ok_or_else(|| {
anyhow!(
"failed to find package `{}` in {}",
pinned.name,
pinned_git.to_string()
)
})?;
entry.insert(path);
}
pinned
}
Source::Registry(ref _source) => {
// TODO: determine registry pkg git URL, fetch to determine latest available
// semver-compatible version
bail!("registry dependencies are not yet supported");
}
};
Ok(pinned)
}
/// The path to which a git package commit should be checked out.
///
/// The resulting directory is:
///
/// ```ignore
/// $HOME/.forc/git/checkouts/name-<repo_url_hash>/<commit_hash>
/// ```
///
/// where `<repo_url_hash>` is a hash of the source repository URL.
pub fn git_commit_path(name: &str, repo: &Url, commit_hash: &str) -> PathBuf {
let repo_dir_name = git_repo_dir_name(name, repo);
git_checkouts_directory()
.join(repo_dir_name)
.join(commit_hash)
}
/// Fetch the repo at the given git package's URL and checkout the pinned commit.
///
/// Returns the location of the checked out commit.
pub fn fetch_git(fetch_id: u64, name: &str, pinned: &SourceGitPinned) -> Result<PathBuf> {
let path = git_commit_path(name, &pinned.source.repo, &pinned.commit_hash);
// Checkout the pinned hash to the path.
with_tmp_git_repo(fetch_id, name, &pinned.source, |repo| {
// Change HEAD to point to the pinned commit.
let id = git2::Oid::from_str(&pinned.commit_hash)?;
repo.set_head_detached(id)?;
if path.exists() {
let _ = std::fs::remove_dir_all(&path);
}
std::fs::create_dir_all(&path)?;
// Checkout HEAD to the target directory.
let mut checkout = git2::build::CheckoutBuilder::new();
checkout.force().target_dir(&path);
repo.checkout_head(Some(&mut checkout))?;
Ok(())
})?;
Ok(path)
}
/// Given the path to a package and a `Dependency` parsed from one of its forc dependencies,
/// produce the `Source` for that dependency.
fn dep_to_source(pkg_path: &Path, dep: &Dependency) -> Result<Source> {
let source = match dep {
Dependency::Simple(ref ver_str) => {
bail!(
"Unsupported dependency declaration in \"{}\": `{}` - \
currently only `git` and `path` dependencies are supported",
pkg_path.display(),
ver_str
)
}
Dependency::Detailed(ref det) => match (&det.path, &det.version, &det.git) {
(Some(relative_path), _, _) => {
let path = pkg_path.join(relative_path);
Source::Path(path.canonicalize()?)
}
(_, _, Some(repo)) => {
let reference = match (&det.branch, &det.tag, &det.rev) {
(Some(branch), None, None) => GitReference::Branch(branch.clone()),
(None, Some(tag), None) => GitReference::Tag(tag.clone()),
(None, None, Some(rev)) => GitReference::Rev(rev.clone()),
(None, None, None) => GitReference::DefaultBranch,
_ => bail!(
"git dependencies support at most one reference: \
either `branch`, `tag` or `rev`"
),
};
let repo = Url::parse(repo)?;
let source = SourceGit { repo, reference };
Source::Git(source)
}
_ => {
bail!("unsupported set of fields for dependency: {:?}", dep);
}
},
};
Ok(source)
}
/// Given a `forc_pkg::BuildConfig`, produce the necessary `sway_core::BuildConfig` required for
/// compilation.
pub fn sway_build_config(
manifest_dir: &Path,
entry_path: &Path,
build_conf: &BuildConfig,
) -> Result<sway_core::BuildConfig> {
// Prepare the build config to pass through to the compiler.
let file_name = find_file_name(manifest_dir, entry_path)?;
let build_config = sway_core::BuildConfig::root_from_file_name_and_manifest_path(
file_name.to_path_buf(),
manifest_dir.to_path_buf(),
)
.print_finalized_asm(build_conf.print_finalized_asm)
.print_intermediate_asm(build_conf.print_intermediate_asm)
.print_ir(build_conf.print_ir);
Ok(build_config)
}
/// Builds the dependency namespace for the package at the given node index within the graph.
///
/// This function is designed to be called for each node in order of compilation.
pub fn dependency_namespace(
namespace_map: &HashMap<NodeIx, namespace::Module>,
graph: &Graph,
compilation_order: &[NodeIx],
node: NodeIx,
) -> namespace::Module {
use petgraph::visit::{Dfs, Walker};
// Find all nodes that are a dependency of this one with a depth-first search.
let deps: HashSet<NodeIx> = Dfs::new(graph, node).iter(graph).collect();
// In order of compilation, accumulate dependency namespaces as submodules.
let mut namespace = namespace::Module::default();
for &dep_node in compilation_order.iter().filter(|n| deps.contains(n)) {
if dep_node == node {
break;
}
// Add the namespace once for each of its names.
let dep_namespace = &namespace_map[&dep_node];
let dep_names: BTreeSet<_> = graph
.edges_directed(dep_node, Direction::Incoming)
.map(|e| e.weight())
.collect();
for dep_name in dep_names {
let dep_name = kebab_to_snake_case(dep_name);
namespace.insert_submodule(dep_name.to_string(), dep_namespace.clone());
}
}
namespace
}
/// Compiles the given package.
///
/// ## Program Types
///
/// Behaviour differs slightly based on the package's program type.
///
/// ### Library Packages
///
/// A Library package will have JSON ABI generated for all publicly exposed `abi`s. The library's
/// namespace is returned as the second element of the tuple.
///
/// ### Contract
///
/// Contracts will output both their JSON ABI and compiled bytecode.
///
/// ### Script, Predicate
///
/// Scripts and Predicates will be compiled to bytecode and will not emit any JSON ABI.
pub fn compile(
pkg: &Pinned,
manifest: &ManifestFile,
build_config: &BuildConfig,
namespace: namespace::Module,
source_map: &mut SourceMap,
) -> Result<(Compiled, Option<namespace::Root>)> {
let entry_path = manifest.entry_path();
let source = manifest.entry_string()?;
let sway_build_config = sway_build_config(manifest.dir(), &entry_path, build_config)?;
let silent_mode = build_config.silent;
// First, compile to an AST. We'll update the namespace and check for JSON ABI output.
let ast_res = sway_core::compile_to_ast(source, namespace, Some(&sway_build_config));
match &ast_res {
CompileAstResult::Failure { warnings, errors } => {
print_on_failure(silent_mode, warnings, errors);
bail!("Failed to compile {}", pkg.name);
}
CompileAstResult::Success {
typed_program,
warnings,
} => {
let json_abi = typed_program.kind.generate_json_abi();
let tree_type = typed_program.kind.tree_type();
match tree_type {
// If we're compiling a library, we don't need to compile any further.
// Instead, we update the namespace with the library's top-level module.
TreeType::Library { .. } => {
print_on_success_library(silent_mode, &pkg.name, warnings);
let bytecode = vec![];
let lib_namespace = typed_program.root.namespace.clone();
let compiled = Compiled {
json_abi,
bytecode,
tree_type,
};
Ok((compiled, Some(lib_namespace.into())))
}
// For all other program types, we'll compile the bytecode.
TreeType::Contract | TreeType::Predicate | TreeType::Script => {
let asm_res = sway_core::ast_to_asm(ast_res, &sway_build_config);
let bc_res = sway_core::asm_to_bytecode(asm_res, source_map);
match bc_res {
BytecodeCompilationResult::Success { bytes, warnings } => {
print_on_success(silent_mode, &pkg.name, &warnings, &tree_type);
let bytecode = bytes;
let compiled = Compiled {
json_abi,
bytecode,
tree_type,
};
Ok((compiled, None))
}
BytecodeCompilationResult::Library { .. } => {
unreachable!("compilation of library program types is handled above")
}
BytecodeCompilationResult::Failure { errors, warnings } => {
print_on_failure(silent_mode, &warnings, &errors);
bail!("Failed to compile {}", pkg.name);
}
}
}
}
}
}
}
/// Build an entire forc package and return the compiled output.
///
/// This compiles all packages (including dependencies) in the order specified by the `BuildPlan`.
///
/// Also returns the resulting `sway_core::SourceMap` which may be useful for debugging purposes.
pub fn build(
plan: &BuildPlan,
conf: &BuildConfig,
sway_git_tag: &str,
) -> anyhow::Result<(Compiled, SourceMap)> {
let mut namespace_map = Default::default();
let mut source_map = SourceMap::new();
let mut json_abi = vec![];
let mut bytecode = vec![];
let mut tree_type = None;
for &node in &plan.compilation_order {
let dep_namespace =
dependency_namespace(&namespace_map, &plan.graph, &plan.compilation_order, node);
let pkg = &plan.graph[node];
let path = &plan.path_map[&pkg.id()];
let manifest = ManifestFile::from_dir(path, sway_git_tag)?;
let res = compile(pkg, &manifest, conf, dep_namespace, &mut source_map)?;
let (compiled, maybe_namespace) = res;
if let Some(namespace) = maybe_namespace {
namespace_map.insert(node, namespace.into());
}
json_abi.extend(compiled.json_abi);
bytecode = compiled.bytecode;
tree_type = Some(compiled.tree_type);
source_map.insert_dependency(path.clone());
}
let tree_type =
tree_type.ok_or_else(|| anyhow!("build plan must contain at least one package"))?;
let compiled = Compiled {
bytecode,
json_abi,
tree_type,
};
Ok((compiled, source_map))
}
/// Attempt to find a `Forc.toml` with the given project name within the given directory.
///
/// Returns the path to the package on success, or `None` in the case it could not be found.
pub fn find_within(dir: &Path, pkg_name: &str, sway_git_tag: &str) -> Option<PathBuf> {
walkdir::WalkDir::new(dir)
.into_iter()
.filter_map(Result::ok)
.filter(|entry| entry.path().ends_with(constants::MANIFEST_FILE_NAME))
.find_map(|entry| {
let path = entry.path();
let manifest = Manifest::from_file(path, sway_git_tag).ok()?;
if manifest.project.name == pkg_name {
Some(path.to_path_buf())
} else {
None
}
})
}
/// The same as [find_within], but returns the package's project directory.
pub fn find_dir_within(dir: &Path, pkg_name: &str, sway_git_tag: &str) -> Option<PathBuf> {
find_within(dir, pkg_name, sway_git_tag).and_then(|path| path.parent().map(Path::to_path_buf))
}
#[test]
fn test_source_git_pinned_parsing() {
let strings = [
"git+https://github.com/foo/bar?branch=baz#64092602dd6158f3e41d775ed889389440a2cd86",
"git+https://github.com/fuellabs/sway-lib-std?tag=v0.1.0#0000000000000000000000000000000000000000",
"git+https://github.com/fuellabs/sway-lib-core?tag=v0.0.1#0000000000000000000000000000000000000000",
"git+https://some-git-host.com/owner/repo?rev#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
"git+https://some-git-host.com/owner/repo?default-branch#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
];
let expected = [
SourceGitPinned {
source: SourceGit {
repo: Url::parse("https://github.com/foo/bar").unwrap(),
reference: GitReference::Branch("baz".to_string()),
},
commit_hash: "64092602dd6158f3e41d775ed889389440a2cd86".to_string(),
},
SourceGitPinned {
source: SourceGit {
repo: Url::parse("https://github.com/fuellabs/sway-lib-std").unwrap(),
reference: GitReference::Tag("v0.1.0".to_string()),
},
commit_hash: "0000000000000000000000000000000000000000".to_string(),
},
SourceGitPinned {
source: SourceGit {
repo: Url::parse("https://github.com/fuellabs/sway-lib-core").unwrap(),
reference: GitReference::Tag("v0.0.1".to_string()),
},
commit_hash: "0000000000000000000000000000000000000000".to_string(),
},
SourceGitPinned {
source: SourceGit {
repo: Url::parse("https://some-git-host.com/owner/repo").unwrap(),
reference: GitReference::Rev(
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".to_string(),
),
},
commit_hash: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".to_string(),
},
SourceGitPinned {
source: SourceGit {
repo: Url::parse("https://some-git-host.com/owner/repo").unwrap(),
reference: GitReference::DefaultBranch,
},
commit_hash: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".to_string(),
},
];
for (&string, expected) in strings.iter().zip(&expected) {
let parsed = SourceGitPinned::from_str(string).unwrap();
assert_eq!(&parsed, expected);
let serialized = expected.to_string();
assert_eq!(&serialized, string);
}
}
/// Format an error message for an absent `Forc.toml`.
pub fn manifest_file_missing(dir: &Path) -> anyhow::Error {
let message = format!(
"could not find `{}` in `{}` or any parent directory",
constants::MANIFEST_FILE_NAME,
dir.display()
);
Error::msg(message)
}
/// Format an error message for failed parsing of a manifest.
pub fn parsing_failed(project_name: &str, errors: Vec<CompileError>) -> anyhow::Error {
let error = errors
.iter()
.map(|e| format!("{}", e))
.collect::<Vec<String>>()
.join("\n");
let message = format!("Parsing {} failed: \n{}", project_name, error);
Error::msg(message)
}
/// Format an error message if an incorrect program type is present.
pub fn wrong_program_type(
project_name: &str,
expected_types: Vec<TreeType>,
parse_type: TreeType,
) -> anyhow::Error {
let message = format!(
"{} is not a '{:?}' it is a '{:?}'",
project_name, expected_types, parse_type
);
Error::msg(message)
}
/// Format an error message if a given URL fails to produce a working node.
pub fn fuel_core_not_running(node_url: &str) -> anyhow::Error {
let message = format!("could not get a response from node at the URL {}. Start a node with `fuel-core`. See https://github.com/FuelLabs/fuel-core#running for more information", node_url);
Error::msg(message)
} | |
load.rs | use super::{export::Exports, helpers::Helpers, Bundler};
use crate::{
bundler::{export::RawExports, import::RawImports},
id::{Id, ModuleId},
load::ModuleData,
util,
util::IntoParallelIterator,
Load, Resolve,
};
use anyhow::{Context, Error};
use is_macro::Is;
#[cfg(feature = "rayon")]
use rayon::iter::ParallelIterator;
use swc_atoms::js_word;
use swc_common::{sync::Lrc, FileName, SourceFile, SyntaxContext, DUMMY_SP};
use swc_ecma_ast::{
CallExpr, Expr, ExprOrSuper, Ident, ImportDecl, ImportSpecifier, Invalid, MemberExpr, Module,
ModuleDecl, Str,
};
use swc_ecma_transforms::resolver_with_mark;
use swc_ecma_visit::{noop_visit_type, FoldWith, Node, Visit, VisitWith};
/// Module after applying transformations.
#[derive(Debug, Clone)]
pub(crate) struct TransformedModule {
pub id: ModuleId,
pub fm: Lrc<SourceFile>,
pub module: Lrc<Module>,
pub imports: Lrc<Imports>,
pub exports: Lrc<Exports>,
/// If false, the module will be wrapped with a small helper function.
pub is_es6: bool,
/// Used helpers
pub helpers: Lrc<Helpers>,
pub swc_helpers: Lrc<swc_ecma_transforms::helpers::Helpers>,
local_ctxt: SyntaxContext,
export_ctxt: SyntaxContext,
}
impl TransformedModule {
/// [SyntaxContext] for exported items.
pub fn export_ctxt(&self) -> SyntaxContext {
self.export_ctxt
}
/// Top level contexts.
pub fn local_ctxt(&self) -> SyntaxContext {
self.local_ctxt
}
}
impl<L, R> Bundler<'_, L, R>
where
L: Load,
R: Resolve,
{
/// Phase 1 (discovery)
///
/// We apply transforms at this phase to make caching efficient.
/// Because we cache at this phase, changing a dependency does not invalidate the cache.
pub(super) fn load_transformed(
&self,
file_name: &FileName,
) -> Result<Option<TransformedModule>, Error> {
self.run(|| {
tracing::trace!("load_transformed: ({})", file_name);
// In case of common module
if let Some(cached) = self.scope.get_module_by_path(&file_name) {
tracing::debug!("Cached: {}", file_name);
return Ok(Some(cached));
}
let (_, data) = self.load(&file_name).context("Bundler.load() failed")?;
let (v, mut files) = self
.analyze(&file_name, data)
.context("failed to analyze module")?;
files.dedup_by_key(|v| v.1.clone());
tracing::debug!(
"({:?}, {:?}, {:?}) Storing module: {}",
v.id,
v.local_ctxt(),
v.export_ctxt(),
file_name
);
self.scope.store_module(v.clone());
// Load dependencies and store them in the `Scope`
let results = files
.into_par_iter()
.map(|(_src, path)| {
tracing::trace!("loading dependency: {}", path);
self.load_transformed(&path)
})
.collect::<Vec<_>>();
// Do tasks in parallel, and then wait for result
for result in results {
result?;
}
Ok(Some(v))
})
}
fn load(&self, file_name: &FileName) -> Result<(ModuleId, ModuleData), Error> {
self.run(|| {
let (module_id, _, _) = self.scope.module_id_gen.gen(file_name);
let data = self
.loader
.load(&file_name)
.with_context(|| format!("Bundler.loader.load({}) failed", file_name))?;
self.scope.mark_as_loaded(module_id);
Ok((module_id, data))
})
}
/// This method returns [Source]s which should be loaded.
fn analyze(
&self,
file_name: &FileName,
data: ModuleData,
) -> Result<(TransformedModule, Vec<(Source, Lrc<FileName>)>), Error> {
self.run(|| {
tracing::trace!("transform_module({})", data.fm.name);
let (id, local_mark, export_mark) = self.scope.module_id_gen.gen(file_name);
let mut module = data.module.fold_with(&mut resolver_with_mark(local_mark));
// {
// let code = self
// .swc
// .print(
// &module.clone().fold_with(&mut HygieneVisualizer),
// SourceMapsConfig::Bool(false),
// None,
// false,
// )
// .unwrap()
// .code;
//
// println!("Resolved:\n{}\n\n", code);
// }
let imports = self.extract_import_info(file_name, &mut module, local_mark);
// {
// let code = self
// .swc
// .print(
// &module.clone().fold_with(&mut HygieneVisualizer),
// SourceMapsConfig::Bool(false),
// None,
// false,
// )
// .unwrap()
// .code;
//
// println!("After imports:\n{}\n", code,);
// }
let exports = self.extract_export_info(
file_name,
&mut module,
SyntaxContext::empty().apply_mark(export_mark),
);
let is_es6 = if !self.config.require {
true
} else {
let mut v = Es6ModuleDetector {
forced_es6: false,
found_other: false,
};
module.visit_with(&Invalid { span: DUMMY_SP } as _, &mut v);
v.forced_es6 || !v.found_other
};
let (imports, exports) = util::join(
|| self.resolve_imports(file_name, imports),
|| self.resolve_exports(file_name, exports),
);
let (imports, mut import_files) = imports?;
let (exports, reexport_files) = exports?;
import_files.extend(reexport_files);
let module = Lrc::new(module);
Ok((
TransformedModule {
id,
fm: data.fm,
module,
imports: Lrc::new(imports),
exports: Lrc::new(exports),
is_es6,
helpers: Default::default(),
swc_helpers: Lrc::new(data.helpers),
local_ctxt: SyntaxContext::empty().apply_mark(local_mark),
export_ctxt: SyntaxContext::empty().apply_mark(export_mark),
},
import_files,
))
})
}
/// Resolve re-exports.
fn resolve_exports(
&self,
base: &FileName,
raw: RawExports,
) -> Result<(Exports, Vec<(Source, Lrc<FileName>)>), Error> {
self.run(|| {
tracing::trace!("resolve_exports({})", base);
let mut files = vec![];
let mut exports = Exports::default();
let items = raw
.items
.into_par_iter()
.map(|(src, ss)| -> Result<_, Error> {
self.run(|| {
let info = match src {
Some(src) => {
let name = self.resolve(base, &src.value)?;
let (id, local_mark, export_mark) =
self.scope.module_id_gen.gen(&name);
Some((id, local_mark, export_mark, name, src))
}
None => None,
};
Ok((info, ss))
})
})
.collect::<Vec<_>>();
for res in items {
let (info, specifiers) = res?;
match info {
None => exports.items.extend(specifiers),
Some((id, local_mark, export_mark, name, src)) => {
//
let src = Source {
is_loaded_synchronously: true,
is_unconditional: false,
module_id: id,
local_ctxt: SyntaxContext::empty().apply_mark(local_mark),
export_ctxt: SyntaxContext::empty().apply_mark(export_mark),
src,
};
exports.reexports.push((src.clone(), specifiers));
files.push((src, name));
}
}
}
Ok((exports, files))
})
}
/// Resolve dependencies
fn resolve_imports(
&self,
base: &FileName,
info: RawImports,
) -> Result<(Imports, Vec<(Source, Lrc<FileName>)>), Error> {
self.run(|| {
tracing::trace!("resolve_imports({})", base);
let mut files = vec![];
let mut merged = Imports::default();
let RawImports {
imports,
lazy_imports,
dynamic_imports,
forced_ns,
} = info;
let loaded = imports
.into_par_iter()
.map(|v| (v, false, true))
.chain(lazy_imports.into_par_iter().map(|v| (v, false, false)))
.chain(dynamic_imports.into_par_iter().map(|src| {
(
ImportDecl {
span: src.span,
specifiers: vec![],
src,
type_only: false,
asserts: None,
},
true,
false,
)
}))
.map(|(decl, dynamic, unconditional)| -> Result<_, Error> {
self.run(|| {
//
let file_name = self.resolve(base, &decl.src.value)?;
let (id, local_mark, export_mark) =
self.scope.module_id_gen.gen(&file_name);
Ok((
id,
local_mark,
export_mark,
file_name,
decl,
dynamic,
unconditional,
))
})
})
.collect::<Vec<_>>();
for res in loaded {
// TODO: Report error and proceed instead of returning an error
let (id, local_mark, export_mark, file_name, decl, is_dynamic, is_unconditional) =
res?;
let src = Source {
is_loaded_synchronously: !is_dynamic,
is_unconditional,
module_id: id,
local_ctxt: SyntaxContext::empty().apply_mark(local_mark),
export_ctxt: SyntaxContext::empty().apply_mark(export_mark),
src: decl.src,
};
files.push((src.clone(), file_name));
// TODO: Handle rename
let mut specifiers = vec![];
for s in decl.specifiers {
match s {
ImportSpecifier::Named(s) => specifiers.push(Specifier::Specific {
local: s.local.into(),
alias: s.imported.map(From::from),
}),
ImportSpecifier::Default(s) => specifiers.push(Specifier::Specific {
local: s.local.into(),
alias: Some(Id::new(js_word!("default"), s.span.ctxt())),
}),
ImportSpecifier::Namespace(s) => {
specifiers.push(Specifier::Namespace {
local: s.local.into(),
all: forced_ns.contains(&src.src.value),
});
}
}
}
merged.specifiers.push((src, specifiers));
}
Ok((merged, files))
})
}
}
#[derive(Debug, Default)]
pub(crate) struct Imports {
/// If imported ids are empty, it is a side-effect import.
pub specifiers: Vec<(Source, Vec<Specifier>)>,
}
/// Clone is relatively cheap
#[derive(Debug, Clone, Is)]
pub(crate) enum Specifier {
Specific {
local: Id,
alias: Option<Id>,
},
Namespace {
local: Id,
/// True for `import * as foo from 'foo'; foo[computedKey()]`
all: bool,
},
}
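// Rough examples (illustrative only): `import { foo as bar } from "./m"` becomes
// `Specifier::Specific { local: bar, alias: Some(foo) }`, while `import * as ns from "./m"`
// becomes `Specifier::Namespace { local: ns, .. }`.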
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct Source {
pub is_loaded_synchronously: bool,
pub is_unconditional: bool,
pub module_id: ModuleId,
pub local_ctxt: SyntaxContext,
pub export_ctxt: SyntaxContext,
// Clone is relatively cheap, thanks to string_cache.
pub src: Str,
}
struct Es6ModuleDetector {
/// If an import or export statement is detected, it's an ES6 module regardless
/// of any other code.
forced_es6: bool,
/// True if other module system is detected.
found_other: bool,
}
impl Visit for Es6ModuleDetector {
noop_visit_type!();
fn visit_call_expr(&mut self, e: &CallExpr, _: &dyn Node) {
e.visit_children_with(self);
match &e.callee {
ExprOrSuper::Expr(e) => match &**e {
Expr::Ident(Ident {
sym: js_word!("require"),
..
}) => {
self.found_other = true;
}
_ => {}
},
ExprOrSuper::Super(_) => {}
}
}
fn visit_member_expr(&mut self, e: &MemberExpr, _: &dyn Node) {
e.obj.visit_with(e as _, self);
if e.computed {
e.prop.visit_with(e as _, self);
}
match &e.obj {
ExprOrSuper::Expr(e) => {
match &**e {
Expr::Ident(i) => {
// TODO: Check syntax context (Check if marker is the global mark)
if i.sym == *"module" {
self.found_other = true;
}
if i.sym == *"exports" {
self.found_other = true;
}
}
_ => {}
}
}
_ => {}
}
//
}
fn visit_module_decl(&mut self, decl: &ModuleDecl, _: &dyn Node) {
match decl {
ModuleDecl::Import(_)
| ModuleDecl::ExportDecl(_)
| ModuleDecl::ExportNamed(_)
| ModuleDecl::ExportDefaultDecl(_)
| ModuleDecl::ExportDefaultExpr(_)
| ModuleDecl::ExportAll(_) => {
self.forced_es6 = true;
}
ModuleDecl::TsImportEquals(_) => {}
ModuleDecl::TsExportAssignment(_) => {}
ModuleDecl::TsNamespaceExport(_) => {}
}
}
}
0001_initial.py | # Generated by Django 2.2.2 on 2019-06-13 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Description',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('description', models.TextField(blank=True)),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('image', models.ImageField(blank=True, upload_to='images/%y/%m/%d')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='images.Description')),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
]
GetTypeChart.js | var console = require('console');
var config = require('config');
var http = require('http');
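// The helpers below turn a type name and a damage multiplier into image paths;
// the /images/... assets are assumed to exist on the serving side.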
const buildUrl = (type)=>{
return '/images/types/'+type+'.png';
}
const buildMultiplierUrl = (multiplier)=>{
console.log(multiplier);
if(multiplier === 0){
return '/images/multiplier/0.png';
}
else if(multiplier === .25){
return '/images/multiplier/25.png';
}
else if(multiplier === .5){
return '/images/multiplier/5.png';
}
else if(multiplier === 1){
return '/images/multiplier/1.png';
}
else if(multiplier === 2){
return '/images/multiplier/2.png';
}
else if(multiplier === 4){
return '/images/multiplier/4.png';
}
}
module.exports.function = function GetEvolutions (pokemon, $vivContext) {
var rand = Math.floor(Math.random()*10000000000000)
var typeChart = http.getUrl(config.get('remote.newPokemonUrl') + '/pokemon/types/'+pokemon.name+'?userId='+$vivContext.userId+'+&rand='+rand+'', { format: 'json'});
typeChart.resistances.forEach(type=>{
type.type = buildUrl(type.type);
type.multiplier = buildMultiplierUrl(type.multiplier);
})
typeChart.neutral.forEach(type=>{
type.type = buildUrl(type.type);
type.multiplier = buildMultiplierUrl(type.multiplier);
})
typeChart.weaknesses.forEach(type=>{
type.type = buildUrl(type.type);
type.multiplier = buildMultiplierUrl(type.multiplier);
})
console.log(typeChart)
return typeChart;
}
main.go | // >>> print map(lambda x: x * 2, range(1,11))
// [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
package main
func time2(n int) int {
return n * 2
}
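// main mirrors the Python one-liner quoted above: fill an array with 1..10,
// double each element with time2, and print the results separated by spaces.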
func main() {
var s [10]int
for i := 0; i < len(s); i++ {
s[i] = i + 1
}
var t2s [10]int
for i, num := range s {
t2s[i] = time2(num)
}
for _, n := range t2s {
print(n)
print(" ")
}
}
base.rs | //! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the `Ty` type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
use super::ModuleLlvm;
use crate::attributes;
use crate::builder::Builder;
use crate::common;
use crate::context::CodegenCx;
use crate::llvm;
use crate::metadata;
use crate::value::Value;
use rustc::dep_graph;
use rustc::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc::middle::cstore::EncodedMetadata;
use rustc::middle::exported_symbols;
use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty::TyCtxt;
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_session::config::DebugInfo;
use rustc_span::symbol::Symbol;
use std::ffi::CString;
use std::time::Instant;
pub fn write_compressed_metadata<'tcx>(
tcx: TyCtxt<'tcx>,
metadata: &EncodedMetadata,
llvm_module: &mut ModuleLlvm,
) {
use flate2::write::DeflateEncoder;
use flate2::Compression;
use std::io::Write;
let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
let mut compressed = tcx.metadata_encoding_version();
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data)
.unwrap();
let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal =
unsafe { llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) };
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
// Also generate a .section directive to force no
// flags, at least for ELF outputs, so that the
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
let directive = CString::new(directive).unwrap();
llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
}
}
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn next(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
}
old
}
}
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } }
}
pub fn compile_codegen_unit(
tcx: TyCtxt<'tcx>,
cgu_name: Symbol,
) -> (ModuleCodegen<ModuleLlvm>, u64) {
let prof_timer = tcx.prof.generic_activity("codegen_module");
let start_time = Instant::now();
let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let (module, _) =
tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
let time_to_codegen = start_time.elapsed();
drop(prof_timer);
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
{
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
}
// ... and now that we have everything pre-defined, fill out those definitions.
for &(mono_item, _) in &mono_items {
mono_item.define::<Builder<'_, '_, '_>>(&cx);
}
// If this codegen unit contains the main function, also create the
// wrapper here
if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
attributes::sanitize(&cx, CodegenFnAttrFlags::empty(), entry);
}
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
}
// Create the llvm.used variable
// This variable has type [N x i8*] and is stored in the llvm.metadata section
if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
}
// Finalize debuginfo
if cx.sess().opts.debuginfo != DebugInfo::None {
cx.debuginfo_finalize();
}
// If we collected SIR in this codegen unit, then merge it with that of the other units.
if let Some(sir) = cx.sir {
cx.tcx.sir.update(sir);
}
}
ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: llvm_module,
kind: ModuleKind::Regular,
}
}
(module, cost)
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
let buf = SmallCStr::new(§.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
match linkage {
Linkage::External => llvm::Linkage::ExternalLinkage,
Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
Linkage::Appending => llvm::Linkage::AppendingLinkage,
Linkage::Internal => llvm::Linkage::InternalLinkage,
Linkage::Private => llvm::Linkage::PrivateLinkage,
Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
Linkage::Common => llvm::Linkage::CommonLinkage,
}
}
pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
match linkage {
Visibility::Default => llvm::Visibility::Default,
Visibility::Hidden => llvm::Visibility::Hidden,
Visibility::Protected => llvm::Visibility::Protected,
}
}
migrate_test.go | package keymigrate
import (
"context"
"errors"
"fmt"
"math"
"strings"
"testing"
"github.com/google/orderedcode"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
)
func makeKey(t *testing.T, elems ...interface{}) []byte {
t.Helper()
out, err := orderedcode.Append([]byte{}, elems...)
require.NoError(t, err)
return out
}
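// For example, makeKey(t, int64(0), int64(5)) yields the orderedcode encoding of the
// tuple (0, 5), which is the layout used for the new-format "Height" keys below.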
func getLegacyPrefixKeys(val int) map[string][]byte {
vstr := fmt.Sprintf("%02x", byte(val))
return map[string][]byte{
"Height": []byte(fmt.Sprintf("H:%d", val)),
"BlockPart": []byte(fmt.Sprintf("P:%d:%d", val, val)),
"BlockPartTwo": []byte(fmt.Sprintf("P:%d:%d", val+2, val+val)),
"BlockCommit": []byte(fmt.Sprintf("C:%d", val)),
"SeenCommit": []byte(fmt.Sprintf("SC:%d", val)),
"BlockHeight": []byte(fmt.Sprintf("BH:%x", val)),
"Validators": []byte(fmt.Sprintf("validatorsKey:%d", val)),
"ConsensusParams": []byte(fmt.Sprintf("consensusParamsKey:%d", val)),
"ABCIResponse": []byte(fmt.Sprintf("abciResponsesKey:%d", val)),
"State": []byte("stateKey"),
"CommittedEvidence": append([]byte{0x00}, []byte(fmt.Sprintf("%0.16X/%X", int64(val), []byte("committed")))...),
"PendingEvidence": append([]byte{0x01}, []byte(fmt.Sprintf("%0.16X/%X", int64(val), []byte("pending")))...),
"LightBLock": []byte(fmt.Sprintf("lb/foo/%020d", val)),
"Size": []byte("size"),
"UserKey0": []byte(fmt.Sprintf("foo/bar/%d/%d", val, val)), | []byte(strings.Repeat(vstr[:1], 16)),
[]byte(strings.Repeat(vstr[1:], 16))...,
),
// Transaction hashes that could be mistaken for evidence keys.
"TxHashMimic0": append([]byte{0}, []byte(strings.Repeat(vstr, 16)[:31])...),
"TxHashMimic1": append([]byte{1}, []byte(strings.Repeat(vstr, 16)[:31])...),
}
}
func getNewPrefixKeys(t *testing.T, val int) map[string][]byte {
t.Helper()
vstr := fmt.Sprintf("%02x", byte(val))
return map[string][]byte{
"Height": makeKey(t, int64(0), int64(val)),
"BlockPart": makeKey(t, int64(1), int64(val), int64(val)),
"BlockPartTwo": makeKey(t, int64(1), int64(val+2), int64(val+val)),
"BlockCommit": makeKey(t, int64(2), int64(val)),
"SeenCommit": makeKey(t, int64(3), int64(val)),
"BlockHeight": makeKey(t, int64(4), int64(val)),
"Validators": makeKey(t, int64(5), int64(val)),
"ConsensusParams": makeKey(t, int64(6), int64(val)),
"ABCIResponse": makeKey(t, int64(7), int64(val)),
"State": makeKey(t, int64(8)),
"CommittedEvidence": makeKey(t, int64(9), int64(val)),
"PendingEvidence": makeKey(t, int64(10), int64(val)),
"LightBLock": makeKey(t, int64(11), int64(val)),
"Size": makeKey(t, int64(12)),
"UserKey0": makeKey(t, "foo", "bar", int64(val), int64(val)),
"UserKey1": makeKey(t, "foo", "bar/baz", int64(val), int64(val)),
"TxHeight": makeKey(t, "tx.height", fmt.Sprint(val), int64(val), int64(val+2), int64(val+val)),
"TxHash": makeKey(t, "tx.hash", strings.Repeat(vstr, 16)),
"TxHashMimic0": makeKey(t, "tx.hash", "\x00"+strings.Repeat(vstr, 16)[:31]),
"TxHashMimic1": makeKey(t, "tx.hash", "\x01"+strings.Repeat(vstr, 16)[:31]),
}
}
func getLegacyDatabase(t *testing.T) (int, dbm.DB) {
db := dbm.NewMemDB()
batch := db.NewBatch()
ct := 0
generated := []map[string][]byte{
getLegacyPrefixKeys(8),
getLegacyPrefixKeys(9001),
getLegacyPrefixKeys(math.MaxInt32 << 1),
getLegacyPrefixKeys(math.MaxInt64 - 8),
}
// populate database
for _, km := range generated {
for _, key := range km {
ct++
require.NoError(t, batch.Set(key, []byte(fmt.Sprintf(`{"value": %d}`, ct))))
}
}
require.NoError(t, batch.WriteSync())
require.NoError(t, batch.Close())
return ct - (2 * len(generated)) + 2, db
}
func TestMigration(t *testing.T) {
t.Run("Idempotency", func(t *testing.T) {
// we want to make sure that the key space for new and
// legacy keys are entirely non-overlapping.
legacyPrefixes := getLegacyPrefixKeys(42)
newPrefixes := getNewPrefixKeys(t, 42)
require.Equal(t, len(legacyPrefixes), len(newPrefixes))
t.Run("Legacy", func(t *testing.T) {
for kind, le := range legacyPrefixes {
require.True(t, checkKeyType(le).isLegacy(), kind)
}
})
t.Run("New", func(t *testing.T) {
for kind, ne := range newPrefixes {
require.False(t, checkKeyType(ne).isLegacy(), kind)
}
})
t.Run("Conversion", func(t *testing.T) {
for kind, le := range legacyPrefixes {
nk, err := migrateKey(le)
require.NoError(t, err, kind)
require.False(t, checkKeyType(nk).isLegacy(), kind)
}
})
t.Run("Hashes", func(t *testing.T) {
t.Run("NewKeysAreNotHashes", func(t *testing.T) {
for _, key := range getNewPrefixKeys(t, 9001) {
require.True(t, len(key) != 32)
}
})
t.Run("ContrivedLegacyKeyDetection", func(t *testing.T) {
// length 32: should appear to be a hash
require.Equal(t, txHashKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")))
// length ≠ 32: should not appear to be a hash
require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx--")))
require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")))
})
})
})
t.Run("Migrations", func(t *testing.T) {
t.Run("Errors", func(t *testing.T) {
table := map[string][]byte{
"Height": []byte(fmt.Sprintf("H:%f", 4.22222)),
"BlockPart": []byte(fmt.Sprintf("P:%f", 4.22222)),
"BlockPartTwo": []byte(fmt.Sprintf("P:%d", 42)),
"BlockPartThree": []byte(fmt.Sprintf("P:%f:%f", 4.222, 8.444)),
"BlockPartFour": []byte(fmt.Sprintf("P:%d:%f", 4222, 8.444)),
"BlockCommit": []byte(fmt.Sprintf("C:%f", 4.22222)),
"SeenCommit": []byte(fmt.Sprintf("SC:%f", 4.22222)),
"BlockHeight": []byte(fmt.Sprintf("BH:%f", 4.22222)),
"Validators": []byte(fmt.Sprintf("validatorsKey:%f", 4.22222)),
"ConsensusParams": []byte(fmt.Sprintf("consensusParamsKey:%f", 4.22222)),
"ABCIResponse": []byte(fmt.Sprintf("abciResponsesKey:%f", 4.22222)),
"LightBlockShort": []byte(fmt.Sprintf("lb/foo/%010d", 42)),
"LightBlockLong": []byte("lb/foo/12345678910.1234567890"),
"Invalid": {0x03},
"BadTXHeight0": []byte(fmt.Sprintf("tx.height/%s/%f/%f", "boop", 4.4, 4.5)),
"BadTXHeight1": []byte(fmt.Sprintf("tx.height/%s/%f", "boop", 4.4)),
"UserKey0": []byte("foo/bar/1.3/3.4"),
"UserKey1": []byte("foo/bar/1/3.4"),
"UserKey2": []byte("foo/bar/baz/1/3.4"),
"UserKey3": []byte("foo/bar/baz/1.2/4"),
}
for kind, key := range table {
out, err := migrateKey(key)
require.Error(t, err, kind)
require.Nil(t, out, kind)
}
})
t.Run("Replacement", func(t *testing.T) {
t.Run("MissingKey", func(t *testing.T) {
db := dbm.NewMemDB()
require.NoError(t, replaceKey(db, keyID("hi"), nil))
})
t.Run("ReplacementFails", func(t *testing.T) {
db := dbm.NewMemDB()
key := keyID("hi")
require.NoError(t, db.Set(key, []byte("world")))
require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) {
return nil, errors.New("hi")
}))
})
t.Run("KeyDisappears", func(t *testing.T) {
db := dbm.NewMemDB()
key := keyID("hi")
require.NoError(t, db.Set(key, []byte("world")))
require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) {
require.NoError(t, db.Delete(key))
return keyID("wat"), nil
}))
exists, err := db.Has(key)
require.NoError(t, err)
require.False(t, exists)
exists, err = db.Has(keyID("wat"))
require.NoError(t, err)
require.False(t, exists)
})
})
})
t.Run("Integration", func(t *testing.T) {
t.Run("KeyDiscovery", func(t *testing.T) {
size, db := getLegacyDatabase(t)
keys, err := getAllLegacyKeys(db)
require.NoError(t, err)
require.Equal(t, size, len(keys))
legacyKeys := 0
for _, k := range keys {
if checkKeyType(k).isLegacy() {
legacyKeys++
}
}
require.Equal(t, size, legacyKeys)
})
t.Run("KeyIdempotency", func(t *testing.T) {
for _, key := range getNewPrefixKeys(t, 84) {
require.False(t, checkKeyType(key).isLegacy())
}
})
t.Run("Migrate", func(t *testing.T) {
_, db := getLegacyDatabase(t)
ctx := context.Background()
err := Migrate(ctx, db)
require.NoError(t, err)
keys, err := getAllLegacyKeys(db)
require.NoError(t, err)
require.Equal(t, 0, len(keys))
})
})
}
debugContentProvider.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import uri from 'vs/base/common/uri';
import { localize } from 'vs/nls';
import { TPromise } from 'vs/base/common/winjs.base';
import { guessMimeTypes, MIME_TEXT } from 'vs/base/common/mime';
import { ITextModel } from 'vs/editor/common/model';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { ITextModelService, ITextModelContentProvider } from 'vs/editor/common/services/resolverService';
import { IWorkbenchContribution } from 'vs/workbench/common/contributions';
import { DEBUG_SCHEME, IDebugService, ISession } from 'vs/workbench/parts/debug/common/debug';
import { Source } from 'vs/workbench/parts/debug/common/debugSource';
/**
* Debug URI format
*
* a debug URI represents a Source object and the debug session where the Source comes from.
*
* debug:arbitrary_path?session=123e4567-e89b-12d3-a456-426655440000&ref=1016
* \___/ \____________/ \__________________________________________/ \______/
* | | | |
* scheme source.path session id source.reference
*
* the arbitrary_path and the session id are encoded with 'encodeURIComponent'
*
*/
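// For instance (illustrative values only), a URI such as
// debug:%2Fsrv%2Fapp.js?session=123e4567-e89b-12d3-a456-426655440000&ref=1016
// carries the source path, the owning debug session id, and the source reference 1016.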
export class DebugContentProvider implements IWorkbenchContribution, ITextModelContentProvider {
constructor(
@ITextModelService textModelResolverService: ITextModelService,
@IDebugService private debugService: IDebugService,
@IModelService private modelService: IModelService,
@IModeService private modeService: IModeService
) {
textModelResolverService.registerTextModelContentProvider(DEBUG_SCHEME, this);
}
public provideTextContent(resource: uri): TPromise<ITextModel> {
let session: ISession;
let sourceRef: number;
if (resource.query) {
const data = Source.getEncodedDebugData(resource);
session = this.debugService.getModel().getSessions().filter(p => p.getId() === data.sessionId).pop();
sourceRef = data.sourceReference;
}
if (!session) {
// fallback: use focused session
session = this.debugService.getViewModel().focusedSession;
}
if (!session) {
return TPromise.wrapError<ITextModel>(new Error(localize('unable', "Unable to resolve the resource without a debug session")));
}
const source = session.getSourceForUri(resource);
let rawSource: DebugProtocol.Source;
if (source) {
rawSource = source.raw;
if (!sourceRef) {
sourceRef = source.reference;
}
} else {
// create a Source
rawSource = {
path: resource.with({ scheme: '', query: '' }).toString(true), // Remove debug: scheme
sourceReference: sourceRef
};
}
const createErrModel = (message: string) => {
this.debugService.sourceIsNotAvailable(resource);
const modePromise = this.modeService.getOrCreateMode(MIME_TEXT);
const model = this.modelService.createModel(message, modePromise, resource);
return model;
};
return session.raw.source({ sourceReference: sourceRef, source: rawSource }).then(response => {
if (!response) {
return createErrModel(localize('canNotResolveSource', "Could not resolve resource {0}, no response from debug extension.", resource.toString()));
}
const mime = response.body.mimeType || guessMimeTypes(resource.path)[0];
const modePromise = this.modeService.getOrCreateMode(mime);
const model = this.modelService.createModel(response.body.content, modePromise, resource);
return model;
}, (err: DebugProtocol.ErrorResponse) => createErrModel(err.message));
}
}
career-details-action-items.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { CareerDetailsActionItemsComponent } from './career-details-action-items.component';
describe('CareerDetailsActionItemsComponent', () => {
let component: CareerDetailsActionItemsComponent;
let fixture: ComponentFixture<CareerDetailsActionItemsComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ CareerDetailsActionItemsComponent ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(CareerDetailsActionItemsComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
}); | |
bplustree.py | # BPlusTree with Python https://github.com/Nero5023/bplustree/tree/main/bplus_tree
import pandas as pd
import bisect
import math
def flatten(l):
return [y for x in l for y in x]
class Leaf:
def __init__(self, previous_leaf, next_leaf, parent, b_factor):
self.previous = previous_leaf
self.next = next_leaf
self.parent = parent
self.b_factor = b_factor
self.a_factor = math.ceil(b_factor/2)
self.keys = []
self.children = []
@property
def is_root(self):
return self.parent is None
def insert(self, key, value):
index = bisect.bisect_left(self.keys, key)
if index < len(self.keys) and self.keys[index] == key:
self.children[index].append(value)
else:
self.keys.insert(index, key)
self.children.insert(index, [value])
if len(self.keys) > self.b_factor:
split_index = math.ceil(self.b_factor/2)
self.split(split_index)
def get(self, key):
index = bisect.bisect_left(self.keys, key)
if index < len(self.keys) and self.keys[index] == key:
return self.children[index]
else:
return None
def split(self, index):
new_leaf_node = Leaf(self, self.next, self.parent, self.b_factor)
new_leaf_node.keys = self.keys[index:]
new_leaf_node.children = self.children[index:]
self.keys = self.keys[:index]
self.children = self.children[:index]
if self.next is not None:
self.next.previous = new_leaf_node
self.next = new_leaf_node
if self.is_root:
self.parent = Node(None, None, [new_leaf_node.keys[0]], [self, self.next], b_factor=self.b_factor, parent=None)
self.next.parent = self.parent
else:
self.parent.add_child(self.next.keys[0], self.next)
def find_left(self, key, include_key=True):
items = []
index = bisect.bisect_right(self.keys, key) - 1
if index == -1:
items = []
else:
if include_key:
items = self.children[:index+1]
else:
if key == self.keys[index]:
index -= 1
items = self.children[:index+1]
return self.left_items() + flatten(items)
def find_right(self, key, include_key=True):
items = []
index = bisect.bisect_left(self.keys, key)
if index == len(self.keys):
items = []
else:
if include_key:
items = self.children[index:]
else:
if key == self.keys[index]:
index += 1
items = self.children[index:]
return flatten(items) + self.right_items()
def left_items(self):
items = []
node = self
while node.previous is not None:
node = node.previous
while node != self:
for elem in node.children:
if type(elem) == list:
items.extend(elem)
else:
items.append(elem)
node = node.next
return items
def right_items(self):
items = []
node = self.next
while node is not None:
for elem in node.children:
if type(elem) == list:
items.extend(elem)
else:
items.append(elem)
node = node.next
return items
def items(self):
return zip(self.keys, self.children)
# Node in BTree
class Node:
def __init__(self, previous_node, next_node, keys, children, b_factor, parent=None):
self.previous = previous_node
self.next = next_node
self.keys = keys
self.children = children
self.b_factor = b_factor
self.a_factor = math.ceil(b_factor / 2)
self.parent = parent
@property
def degree(self):
return len(self.children)
@property
def is_root(self):
return self.parent is None
def insert(self, key, value):
index = bisect.bisect_right(self.keys, key)
node = self.children[index]
node.insert(key, value)
def get(self, key):
index = bisect.bisect_right(self.keys, key)
return self.children[index].get(key)
def find_left(self, key, include_key=True):
index = bisect.bisect_right(self.keys, key)
return self.children[index].find_left(key, include_key)
def find_right(self, key, include_key=True):
index = bisect.bisect_right(self.keys, key)
return self.children[index].find_right(key, include_key)
def add_child(self, key, child):
index = bisect.bisect_right(self.keys, key)
self.keys.insert(index, key)
self.children.insert(index+1, child)
if self.degree > self.b_factor:
split_index = math.floor(self.b_factor / 2)
self.split(split_index)
def split(self, index):
split_key = self.keys[index]
new_node = Node(self, self.next, self.keys[index+1:], self.children[index+1:], self.b_factor, self.parent)
for node in self.children[index+1:]:
node.parent = new_node
self.keys = self.keys[:index]
self.children = self.children[:index+1]
if self.next is not None:
self.next.previous = new_node
self.next = new_node
if self.is_root:
self.parent = Node(None, None, [split_key], [self, self.next], b_factor=self.b_factor, parent=None)
self.next.parent = self.parent
else:
self.parent.add_child(split_key, self.next)
# BPlusTree Class
class BPlusTree:
def __init__(self, b_factor=32):
self.b_factor = b_factor
self.root = Leaf(None, None, None, b_factor)
self.size = 0
def get(self, key):
return self.root.get(key)
def __getitem__(self, key):
return self.get(key)
def __len__(self):
return self.size
def build(self, keys, values):
if len(keys) != len(values):
return
for ind in range(len(keys)):
# print(Item(keys[ind]))
# print(values[ind])
self.insert(keys[ind], values[ind])
def predict(self, key):
search_result = self.get(key)
return search_result
def insert(self, key, value):
self.root.insert(key, value)
self.size += 1
if self.root.parent is not None:
self.root = self.root.parent
def range_search(self, notation, cmp_key):
notation = notation.strip()
if notation not in [">", "<", ">=", "<="]:
raise Exception("Nonsupport notation: {}. Only '>' '<' '>=' '<=' are supported".format(notation))
if notation == '>':
return self.root.find_right(cmp_key, False)
if notation == '>=':
return self.root.find_right(cmp_key, True)
if notation == '<':
return self.root.find_left(cmp_key, False)
if notation == '<=':
return self.root.find_left(cmp_key, True)
def search(self, notation, cmp_key):
notation = notation.strip()
if notation not in [">", "<", ">=", "<=", "==", "!="]:
raise Exception("Nonsupport notation: {}. Only '>' '<' '>=' '<=' '==' '!=' are supported".format(notation))
if notation == '==':
res = self.get(cmp_key)
if res is None:
return []
else:
return res
if notation == '!=':
return self.root.find_left(cmp_key, False) + self.root.find_right(cmp_key, False)
return self.range_search(notation, cmp_key)
def show(self):
layer = 0
node = self.root
while node is not None:
print("Layer: {}".format(layer))
inner_node = node
while inner_node is not None:
print(inner_node.keys, end=' ')
inner_node = inner_node.next
print('')
node = node.children[0]
layer += 1
if type(node) != Leaf and type(node) != Node:
break
def leftmost_leaf(self):
leaf = self.root
while type(leaf) != Leaf:
leaf = leaf.children[0]
return leaf
def items(self):
leaf = self.leftmost_leaf()
items = []
while leaf is not None:
pairs = list(leaf.items())
items.extend(pairs)
leaf = leaf.next
return items
def keys(self):
leaf = self.leftmost_leaf()
ks = []
while leaf is not None:
ks.extend(leaf.keys)
leaf = leaf.next
return ks
def values(self):
leaf = self.leftmost_leaf()
vals = []
while leaf is not None:
for elem in leaf.children:
if type(elem) == list:
vals.extend(elem)
else:
vals.append(elem)
leaf = leaf.next
return vals
def height(self):
node = self.root
height = 0
while type(node) != Leaf:
height += 1
node = node.children[0]
return height
# Value in Node
class Item():
def __init__(self, k, v):
self.k = k
self.v = v
def __gt__(self, other):
if self.k > other.k:
return True
else:
return False
def __ge__(self, other):
if self.k >= other.k:
return True
else:
return False
def __eq__(self, other):
if self.k == other.k:
return True
else:
return False
def __le__(self, other):
if self.k <= other.k:
return True
else:
return False
def __lt__(self, other):
if self.k < other.k:
return True
else:
return False
# For Test
def b_plus_tree_main():
t = BPlusTree(32)
nums = [55,44,65,16,80,74,14,19,95,36,2,90,74,94,27,89,85]
for x in nums:
t.insert(x, x)
print(t.items())
for ni in t.items():
print(ni)
if ni is None:
continue
item = {"key": ni[0], "value": ni[1][0]}
print(item)
if __name__ == '__main__':
b_plus_tree_main()
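# Quick illustrative sketch (not part of the original demo): with a small tree,
#   t = BPlusTree(4)
#   for x in [5, 1, 9, 3]:
#       t.insert(x, x)
#   t.search('>=', 3)       # -> [3, 5, 9]
#   t.range_search('<', 9)  # -> [1, 3, 5]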
block.rs | extern crate string_cache;
extern crate parking_lot;
use self::string_cache::DefaultAtom as Atom;
use std::collections::HashMap;
use self::parking_lot::Mutex;
use voxel::voxelarray::VoxelArray;
pub type BlockID = u32;
pub type BlockName = Atom;
pub type Chunk = VoxelArray<BlockID, u8>;
pub struct BlockRegistry {
id_to_name : Vec<BlockName>,
name_to_id : HashMap<BlockName,BlockID>,
}
impl BlockRegistry {
pub fn id_for_name(&self, id : &BlockID) -> BlockName{
self.id_to_name.get(*id as usize).unwrap().clone()
}
pub fn name_for_id(&self, name : &BlockName) -> BlockID{ self.name_to_id.get(name).unwrap().clone() }
pub fn all_mappings(&self) -> HashMap<BlockName, BlockID> { self.name_to_id.clone()}
pub fn register_block(&mut self, name: &BlockName) -> BlockID {
{
assert!(self.name_to_id.contains_key(name) == false);
}
let new_id = self.id_to_name.len() as BlockID;
self.id_to_name.push(name.clone());
self.name_to_id.insert(name.clone(), new_id.clone());
return new_id;
}
}
lazy_static! {
pub static ref MASTER_BLOCK_REGISTRY : Mutex<BlockRegistry> = {
Mutex::new(BlockRegistry {
id_to_name : Vec::new(),
name_to_id : HashMap::new(),
})
};
}
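// Minimal illustrative test (a sketch, not from the original crate) showing how the
// registry behind MASTER_BLOCK_REGISTRY is expected to be used. Note the naming:
// `name_for_id` maps a name to an id, while `id_for_name` maps an id back to a name.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn register_and_look_up() {
        let mut registry = MASTER_BLOCK_REGISTRY.lock();
        let name = BlockName::from("stone");
        let id = registry.register_block(&name);
        assert_eq!(registry.name_for_id(&name), id);
        assert_eq!(registry.id_for_name(&id), name);
    }
}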
test.sqs.ts | import { expect, haveResource, ResourcePart } from '@aws-cdk/assert-internal';
import * as iam from '@aws-cdk/aws-iam';
import * as kms from '@aws-cdk/aws-kms';
import { CfnParameter, Duration, Stack, App } from '@aws-cdk/core';
import { Test } from 'nodeunit';
import * as sqs from '../lib';
/* eslint-disable quote-props */
export = {
'default properties'(test: Test) {
const stack = new Stack();
const q = new sqs.Queue(stack, 'Queue');
test.deepEqual(q.fifo, false);
expect(stack).toMatch({
'Resources': {
'Queue4A7E3555': {
'Type': 'AWS::SQS::Queue',
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
},
});
expect(stack).to(haveResource('AWS::SQS::Queue', {
DeletionPolicy: 'Delete',
}, ResourcePart.CompleteDefinition));
test.done();
},
'with a dead letter queue'(test: Test) {
const stack = new Stack();
const dlq = new sqs.Queue(stack, 'DLQ');
new sqs.Queue(stack, 'Queue', { deadLetterQueue: { queue: dlq, maxReceiveCount: 3 } });
expect(stack).toMatch({
'Resources': {
'DLQ581697C4': {
'Type': 'AWS::SQS::Queue',
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
'Queue4A7E3555': {
'Type': 'AWS::SQS::Queue',
'Properties': {
'RedrivePolicy': {
'deadLetterTargetArn': {
'Fn::GetAtt': [
'DLQ581697C4',
'Arn',
],
},
'maxReceiveCount': 3,
},
},
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
},
});
test.done();
},
'message retention period must be between 1 minute to 14 days'(test: Test) {
// GIVEN
const stack = new Stack();
// THEN
test.throws(() => new sqs.Queue(stack, 'MyQueue', {
retentionPeriod: Duration.seconds(30),
}), /message retention period must be 60 seconds or more/);
test.throws(() => new sqs.Queue(stack, 'AnotherQueue', {
retentionPeriod: Duration.days(15),
}), /message retention period must be 1209600 seconds or less/);
test.done();
},
'message retention period can be provided as a parameter'(test: Test) {
// GIVEN
const stack = new Stack();
const parameter = new CfnParameter(stack, 'my-retention-period', {
type: 'Number',
default: 30,
});
// WHEN
new sqs.Queue(stack, 'MyQueue', {
retentionPeriod: Duration.seconds(parameter.valueAsNumber),
});
// THEN
expect(stack).toMatch({
'Parameters': {
'myretentionperiod': {
'Type': 'Number',
'Default': 30,
},
},
'Resources': {
'MyQueueE6CA6235': {
'Type': 'AWS::SQS::Queue',
'Properties': {
'MessageRetentionPeriod': {
'Ref': 'myretentionperiod',
},
},
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
},
});
test.done();
},
'addToPolicy will automatically create a policy for this queue'(test: Test) {
const stack = new Stack();
const queue = new sqs.Queue(stack, 'MyQueue');
queue.addToResourcePolicy(new iam.PolicyStatement({
resources: ['*'],
actions: ['sqs:*'],
principals: [new iam.ArnPrincipal('arn')],
}));
expect(stack).toMatch({
'Resources': {
'MyQueueE6CA6235': {
'Type': 'AWS::SQS::Queue',
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
'MyQueuePolicy6BBEDDAC': {
'Type': 'AWS::SQS::QueuePolicy',
'Properties': {
'PolicyDocument': {
'Statement': [
{
'Action': 'sqs:*',
'Effect': 'Allow',
'Principal': {
'AWS': 'arn',
},
'Resource': '*',
},
],
'Version': '2012-10-17',
},
'Queues': [
{
'Ref': 'MyQueueE6CA6235',
},
],
},
},
},
});
test.done();
},
'export and import': {
'importing works correctly'(test: Test) {
// GIVEN
const stack = new Stack();
// WHEN
const imports = sqs.Queue.fromQueueArn(stack, 'Imported', 'arn:aws:sqs:us-east-1:123456789012:queue1');
// THEN
// "import" returns an IQueue bound to `Fn::ImportValue`s.
test.deepEqual(stack.resolve(imports.queueArn), 'arn:aws:sqs:us-east-1:123456789012:queue1');
test.deepEqual(stack.resolve(imports.queueUrl), {
'Fn::Join':
['', ['https://sqs.us-east-1.', { Ref: 'AWS::URLSuffix' }, '/123456789012/queue1']],
});
test.deepEqual(stack.resolve(imports.queueName), 'queue1');
test.done();
},
'importing fifo and standard queues are detected correctly'(test: Test) {
const stack = new Stack();
const stdQueue = sqs.Queue.fromQueueArn(stack, 'StdQueue', 'arn:aws:sqs:us-east-1:123456789012:queue1');
const fifoQueue = sqs.Queue.fromQueueArn(stack, 'FifoQueue', 'arn:aws:sqs:us-east-1:123456789012:queue2.fifo');
test.deepEqual(stdQueue.fifo, false);
test.deepEqual(fifoQueue.fifo, true);
test.done();
},
'importing works correctly for cross region queue'(test: Test) {
// GIVEN
const stack = new Stack(undefined, 'Stack', { env: { region: 'us-east-1' } });
// WHEN
const imports = sqs.Queue.fromQueueArn(stack, 'Imported', 'arn:aws:sqs:us-west-2:123456789012:queue1');
// THEN
// "import" returns an IQueue bound to `Fn::ImportValue`s.
test.deepEqual(stack.resolve(imports.queueArn), 'arn:aws:sqs:us-west-2:123456789012:queue1');
test.deepEqual(stack.resolve(imports.queueUrl), {
'Fn::Join':
['', ['https://sqs.us-west-2.', { Ref: 'AWS::URLSuffix' }, '/123456789012/queue1']],
});
test.deepEqual(stack.resolve(imports.queueName), 'queue1');
test.done();
},
},
'grants': {
'grantConsumeMessages'(test: Test) {
testGrant((q, p) => q.grantConsumeMessages(p),
'sqs:ReceiveMessage',
'sqs:ChangeMessageVisibility',
'sqs:GetQueueUrl',
'sqs:DeleteMessage',
'sqs:GetQueueAttributes',
);
test.done();
},
'grantSendMessages'(test: Test) {
testGrant((q, p) => q.grantSendMessages(p),
'sqs:SendMessage',
'sqs:GetQueueAttributes',
'sqs:GetQueueUrl',
);
test.done();
},
'grantPurge'(test: Test) {
testGrant((q, p) => q.grantPurge(p),
'sqs:PurgeQueue',
'sqs:GetQueueAttributes',
'sqs:GetQueueUrl',
);
test.done();
},
'grant() is general purpose'(test: Test) {
testGrant((q, p) => q.grant(p, 'service:hello', 'service:world'),
'service:hello',
'service:world',
);
test.done();
},
'grants also work on imported queues'(test: Test) {
const stack = new Stack();
const queue = sqs.Queue.fromQueueAttributes(stack, 'Import', {
queueArn: 'arn:aws:sqs:us-east-1:123456789012:queue1',
queueUrl: 'https://queue-url',
});
const user = new iam.User(stack, 'User');
queue.grantPurge(user);
expect(stack).to(haveResource('AWS::IAM::Policy', {
'PolicyDocument': {
'Statement': [
{
'Action': [
'sqs:PurgeQueue',
'sqs:GetQueueAttributes',
'sqs:GetQueueUrl',
],
'Effect': 'Allow',
'Resource': 'arn:aws:sqs:us-east-1:123456789012:queue1',
},
],
'Version': '2012-10-17',
},
}));
test.done();
},
},
'queue encryption': {
'encryptionMasterKey can be set to a custom KMS key'(test: Test) {
const stack = new Stack();
const key = new kms.Key(stack, 'CustomKey');
const queue = new sqs.Queue(stack, 'Queue', { encryptionMasterKey: key });
test.same(queue.encryptionMasterKey, key);
expect(stack).to(haveResource('AWS::SQS::Queue', {
'KmsMasterKeyId': { 'Fn::GetAtt': ['CustomKey1E6D0D07', 'Arn'] },
}));
test.done();
},
'a kms key will be allocated if encryption = kms but a master key is not specified'(test: Test) {
const stack = new Stack();
new sqs.Queue(stack, 'Queue', { encryption: sqs.QueueEncryption.KMS });
expect(stack).to(haveResource('AWS::KMS::Key'));
expect(stack).to(haveResource('AWS::SQS::Queue', {
'KmsMasterKeyId': {
'Fn::GetAtt': [
'QueueKey39FCBAE6',
'Arn',
],
},
}));
test.done();
},
'it is possible to use a managed kms key'(test: Test) {
const stack = new Stack();
new sqs.Queue(stack, 'Queue', { encryption: sqs.QueueEncryption.KMS_MANAGED });
expect(stack).toMatch({
'Resources': {
'Queue4A7E3555': {
'Type': 'AWS::SQS::Queue',
'Properties': {
'KmsMasterKeyId': 'alias/aws/sqs',
},
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
},
});
test.done();
},
'grant also affects key on encrypted queue'(test: Test) {
// GIVEN
const stack = new Stack();
const queue = new sqs.Queue(stack, 'Queue', {
encryption: sqs.QueueEncryption.KMS,
});
const role = new iam.Role(stack, 'Role', {
assumedBy: new iam.ServicePrincipal('someone'),
});
// WHEN
queue.grantSendMessages(role);
// THEN
expect(stack).to(haveResource('AWS::IAM::Policy', {
'PolicyDocument': {
'Statement': [
{
'Action': [
'sqs:SendMessage',
'sqs:GetQueueAttributes',
'sqs:GetQueueUrl',
],
'Effect': 'Allow',
'Resource': { 'Fn::GetAtt': ['Queue4A7E3555', 'Arn'] },
},
{
'Action': [
'kms:Decrypt',
'kms:Encrypt',
'kms:ReEncrypt*',
'kms:GenerateDataKey*',
],
'Effect': 'Allow',
'Resource': { 'Fn::GetAtt': ['QueueKey39FCBAE6', 'Arn'] },
},
],
'Version': '2012-10-17',
},
}));
test.done();
},
},
'test ".fifo" suffixed queues register as fifo'(test: Test) {
const stack = new Stack();
const queue = new sqs.Queue(stack, 'Queue', {
queueName: 'MyQueue.fifo',
});
test.deepEqual(queue.fifo, true);
expect(stack).toMatch({
'Resources': {
'Queue4A7E3555': {
'Type': 'AWS::SQS::Queue',
'Properties': {
'QueueName': 'MyQueue.fifo',
'FifoQueue': true,
},
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
},
});
test.done();
},
'test a fifo queue is observed when the "fifo" property is specified'(test: Test) {
const stack = new Stack();
const queue = new sqs.Queue(stack, 'Queue', {
fifo: true,
});
test.deepEqual(queue.fifo, true);
expect(stack).toMatch({
'Resources': {
'Queue4A7E3555': {
'Type': 'AWS::SQS::Queue',
'Properties': {
'FifoQueue': true,
},
'UpdateReplacePolicy': 'Delete',
'DeletionPolicy': 'Delete',
},
},
});
test.done();
},
'test metrics'(test: Test) {
// GIVEN
const stack = new Stack();
const queue = new sqs.Queue(stack, 'Queue');
// THEN
test.deepEqual(stack.resolve(queue.metricNumberOfMessagesSent()), {
dimensions: { QueueName: { 'Fn::GetAtt': ['Queue4A7E3555', 'QueueName'] } },
namespace: 'AWS/SQS',
metricName: 'NumberOfMessagesSent',
period: Duration.minutes(5),
statistic: 'Sum',
});
test.deepEqual(stack.resolve(queue.metricSentMessageSize()), {
dimensions: { QueueName: { 'Fn::GetAtt': ['Queue4A7E3555', 'QueueName'] } },
namespace: 'AWS/SQS',
metricName: 'SentMessageSize',
period: Duration.minutes(5),
statistic: 'Average',
});
test.done();
},
'fails if queue policy has no actions'(test: Test) {
// GIVEN
const app = new App();
const stack = new Stack(app, 'my-stack');
const queue = new sqs.Queue(stack, 'Queue');
// WHEN
queue.addToResourcePolicy(new iam.PolicyStatement({
resources: ['*'],
principals: [new iam.ArnPrincipal('arn')],
}));
// THEN
test.throws(() => app.synth(), /A PolicyStatement must specify at least one \'action\' or \'notAction\'/);
test.done();
},
'fails if queue policy has no IAM principals'(test: Test) {
// GIVEN
const app = new App();
const stack = new Stack(app, 'my-stack');
const queue = new sqs.Queue(stack, 'Queue');
// WHEN
queue.addToResourcePolicy(new iam.PolicyStatement({
resources: ['*'],
actions: ['sqs:*'],
}));
// THEN
test.throws(() => app.synth(), /A PolicyStatement used in a resource-based policy must specify at least one IAM principal/);
test.done();
},
};
function testGrant(action: (q: sqs.Queue, principal: iam.IPrincipal) => void, ...expectedActions: string[]) {
const stack = new Stack();
const queue = new sqs.Queue(stack, 'MyQueue');
const principal = new iam.User(stack, 'User');
action(queue, principal);
expect(stack).to(haveResource('AWS::IAM::Policy', {
'PolicyDocument': {
'Statement': [
{
'Action': expectedActions,
'Effect': 'Allow',
'Resource': {
'Fn::GetAtt': [
'MyQueueE6CA6235',
'Arn',
],
},
},
],
'Version': '2012-10-17',
},
}));
}
leetCode22_test.go
package leetcode22
import (
	"reflect"
	"testing"
)
func TestGenerateParenthesis(t *testing.T) {
	in := 3
	want := []string{"((()))", "(()())", "(())()", "()(())", "()()()"}
	got := generateParenthesis(in)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("expected:%v, got:%v", want, got)
	}
}
func benchmarkDFS(b *testing.B, n int) {
	for i := 0; i < b.N; i++ {
		generateParenthesis(n)
	}
}
func benchmarkBFS(b *testing.B, n int) {
	for i := 0; i < b.N; i++ {
		generateParenthesisBFS(n)
	}
}
func BenchmarkDFS(b *testing.B) {
	benchmarkDFS(b, 8)
}
func BenchmarkBFS(b *testing.B) {
	benchmarkBFS(b, 8)
}
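// The implementations under test (generateParenthesis, generateParenthesisBFS) are not
// part of this test file. A minimal sketch of the DFS variant, assuming the usual
// backtracking approach (always try '(' before ')'), which matches the order the test
// above expects; the real implementation may differ:
func generateParenthesisSketch(n int) []string {
	var res []string
	var dfs func(cur string, opened, closed int)
	dfs = func(cur string, opened, closed int) {
		if len(cur) == 2*n {
			res = append(res, cur)
			return
		}
		if opened < n {
			dfs(cur+"(", opened+1, closed)
		}
		if closed < opened {
			dfs(cur+")", opened, closed+1)
		}
	}
	dfs("", 0, 0)
	return res
}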
|
aboutmeeting-jitsi.js
define(['jquery', 'oae.core'], function($, oae) {
return function (uid) {
// The widget container
var $rootel = $('#' + uid);
/**
* Render the metadata for the current meeting item
*
* @param {Meeting} meetingProfile Meeting for which the metadata should be rendered
*/
var renderMetadata = function (meetingProfile) {
oae.api.util.template().render($('#aboutmeeting-jitsi-template', $rootel), {
'meetingProfile': meetingProfile,
'displayOptions': {
'linkTarget': '_blank'
}
}, $('#aboutmeeting-jitsi-container'), $rootel);
};
/**
* Initialize the aboutmeeting-jitsi modal dialog
*/
var setUpAboutMeeting = function () {
$(document).on('click', '.oae-trigger-aboutmeeting-jitsi', function (e, data) {
// Request the context profile information
$(document).trigger('oae.context.get', 'aboutmeeting-jitsi');
});
// Receive the context's profile information and set up the aboutmeeting-jitsi modal
$(document).on('oae.context.send.aboutmeeting-jitsi', function (e, meetingProfile) {
// Show the aboutmeeting-jitsi modal
$('#aboutmeeting-jitsi-modal', $rootel).modal();
// Render the metadata for the current meeting item
renderMetadata(meetingProfile);
});
};
setUpAboutMeeting();
};
});
|
transaction.py
from uuid import UUID
import pytest
from flexlate.transactions.transaction import FlexlateTransaction, TransactionType
ADD_SOURCE_ID = UUID("93f984ca-6e8f-45e9-b9b0-aebebfe798c1")
ADD_OUTPUT_ID = UUID("86465f4d-9752-4ae5-aaa7-791b4c814e8d")
ADD_SOURCE_AND_OUTPUT_ID = UUID("bf4cd42c-10b1-4bf9-a15f-294f5be738b0")
REMOVE_SOURCE_ID = UUID("c034ec63-d2b5-4d8c-aef1-f96e29a6f5d1")
REMOVE_OUTPUT_ID = UUID("79715a11-a3c4-40b1-a49b-9d8388e5c28d")
UPDATE_TRANSACTION_ID = UUID("347711b7-3bf9-484e-be52-df488f3cf598")
SYNC_TRANSACTION_ID = UUID("4825ce35-1a03-43de-ad8a-1ecc0ed68b62")
BOOTSTRAP_TRANSACTION_ID = UUID("37c61224-2b8d-4ee5-8846-49d5474a40bd")
UPDATE_TARGET_VERSION_ID = UUID("a5632854-48b4-4f82-904b-bff81dc40b02")
@pytest.fixture
def add_source_transaction() -> FlexlateTransaction:
    yield FlexlateTransaction(type=TransactionType.ADD_SOURCE, id=ADD_SOURCE_ID)
@pytest.fixture
def add_output_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.ADD_OUTPUT, id=ADD_OUTPUT_ID)
@pytest.fixture
def add_source_and_output_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(
type=TransactionType.ADD_SOURCE_AND_OUTPUT, id=ADD_SOURCE_AND_OUTPUT_ID
)
@pytest.fixture
def remove_source_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.REMOVE_SOURCE, id=REMOVE_SOURCE_ID)
@pytest.fixture
def remove_output_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.REMOVE_OUTPUT, id=REMOVE_OUTPUT_ID)
@pytest.fixture
def update_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.UPDATE, id=UPDATE_TRANSACTION_ID)
@pytest.fixture
def sync_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.SYNC, id=SYNC_TRANSACTION_ID)
@pytest.fixture
def bootstrap_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(
type=TransactionType.BOOTSTRAP, id=BOOTSTRAP_TRANSACTION_ID
)
@pytest.fixture
def update_target_version_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(
type=TransactionType.UPDATE_TARGET_VERSION, id=UPDATE_TARGET_VERSION_ID
)
main.rs
#![feature(termination_trait)]
extern crate regex;
extern crate clap;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::fs::OpenOptions;
use std::io;
use regex::bytes::Regex;
use clap::{Arg, App};
fn out_file_name(base: &str, ix: usize, extension: Option<&str>) -> String {
    match extension {
        None => format!("{}.{:03}", base, ix),
        Some(ext) => format!("{}.{:03}.{}", base, ix, ext)
    }
}
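// Sketch with hypothetical inputs: out_file_name("LOG00042", 3, Some("BBL")) -> "LOG00042.003.BBL";
// out_file_name("LOG00042", 3, None) -> "LOG00042.003".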
fn main() -> Result<(), io::Error> {
let matches = App::new("Betaflight blackbox log splitter")
.version("1.0")
.author("Ilya Epifanov <[email protected]>")
.arg(Arg::with_name("INPUT")
.help("Input .bbl file possibly containing multiple logs")
.required(true)
.index(1))
.get_matches();
let filename = Path::new(matches.value_of("INPUT").unwrap());
let filename_base = filename.file_stem().unwrap().to_string_lossy();
let filename_extension_string = filename.extension()
.map(|e| String::from(e.to_string_lossy()));
let filename_extension = filename_extension_string
.as_ref()
.map(String::as_ref);
let mut input_file = OpenOptions::new()
.read(true)
.open(filename)?;
let mut bytes = Vec::new();
input_file.read_to_end(&mut bytes)?;
drop(input_file);
let re = Regex::new(r"(?-u)\xffEnd of log\x00\xff*").unwrap();
for (ix, mut chunk) in re.split(&mut bytes).enumerate() {
let mut out_file = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename.with_file_name(out_file_name(&filename_base, ix, filename_extension)))?;
out_file.write_all(chunk)?;
out_file.write_all(b"\xffEnd of log\x00")?;
}
Ok(())
}
handler.go
package roles
import (
sdk "github.com/cosmos/cosmos-sdk"
"github.com/cosmos/cosmos-sdk/errors"
"github.com/cosmos/cosmos-sdk/state"
)
const (
//NameRole - name space of the roles module
NameRole = "role"
// CostCreate is the cost to create a new role
CostCreate = uint64(40)
// CostAssume is the cost to assume a role as part of a tx
CostAssume = uint64(5)
)
// Handler allows us to create new roles
type Handler struct {
sdk.NopInitState
sdk.NopInitValidate
}
var _ sdk.Handler = Handler{}
// NewHandler makes a role handler to create roles
func NewHandler() Handler {
return Handler{}
}
// Name - return name space
func (Handler) Name() string {
return NameRole
}
// CheckTx verifies if the transaction is properly formatted
func (h Handler) CheckTx(ctx sdk.Context, store state.SimpleDB, tx sdk.Tx) (res sdk.CheckResult, err error) {
var cr CreateRoleTx
cr, err = checkTx(ctx, tx)
if err != nil {
return
}
res = sdk.NewCheck(CostCreate, "")
err = checkNoRole(store, cr.Role)
return
}
// DeliverTx tries to create a new role.
//
// Returns an error if the role already exists
func (h Handler) DeliverTx(ctx sdk.Context, store state.SimpleDB, tx sdk.Tx) (res sdk.DeliverResult, err error) {
create, err := checkTx(ctx, tx)
if err != nil {
return res, err
}
// lets try...
role := NewRole(create.MinSigs, create.Signers)
err = createRole(store, create.Role, role)
return res, err
}
func checkTx(ctx sdk.Context, tx sdk.Tx) (create CreateRoleTx, err error) {
// check if the tx is proper type and valid
create, ok := tx.Unwrap().(CreateRoleTx)
if !ok {
return create, errors.ErrInvalidFormat(TypeCreateRoleTx, tx)
}
err = create.ValidateBasic()
return create, err
}
parameters.ts
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
import { RequestParameters } from "@azure-rest/core-client";
import {
DataPlaneAccountUpdateParameters,
AccessKeyOptions,
Collection,
ResourceSetRuleConfig,
} from "./models";
export type AccountsGetAccountPropertiesParameters = RequestParameters;
export interface AccountsUpdateAccountPropertiesBodyParam {
body: DataPlaneAccountUpdateParameters;
}
export type AccountsUpdateAccountPropertiesParameters = AccountsUpdateAccountPropertiesBodyParam &
RequestParameters;
export type AccountsGetAccessKeysParameters = RequestParameters;
export interface AccountsRegenerateAccessKeyBodyParam {
body: AccessKeyOptions;
}
export type AccountsRegenerateAccessKeyParameters = AccountsRegenerateAccessKeyBodyParam &
RequestParameters;
export type CollectionsGetCollectionParameters = RequestParameters;
export interface CollectionsCreateOrUpdateCollectionBodyParam {
body: Collection;
}
export type CollectionsCreateOrUpdateCollectionParameters = CollectionsCreateOrUpdateCollectionBodyParam &
RequestParameters;
export type CollectionsDeleteCollectionParameters = RequestParameters;
export interface CollectionsListCollectionsQueryParamProperties {
$skipToken?: string;
}
export interface CollectionsListCollectionsQueryParam {
queryParameters?: CollectionsListCollectionsQueryParamProperties;
}
export type CollectionsListCollectionsParameters = CollectionsListCollectionsQueryParam &
RequestParameters;
export interface CollectionsListChildCollectionNamesQueryParamProperties {
$skipToken?: string;
}
export interface CollectionsListChildCollectionNamesQueryParam {
queryParameters?: CollectionsListChildCollectionNamesQueryParamProperties;
}
export type CollectionsListChildCollectionNamesParameters = CollectionsListChildCollectionNamesQueryParam &
RequestParameters;
export type CollectionsGetCollectionPathParameters = RequestParameters;
export type ResourceSetRulesGetResourceSetRuleParameters = RequestParameters;
export interface ResourceSetRulesCreateOrUpdateResourceSetRuleBodyParam {
body: ResourceSetRuleConfig;
}
export type ResourceSetRulesCreateOrUpdateResourceSetRuleParameters = ResourceSetRulesCreateOrUpdateResourceSetRuleBodyParam &
RequestParameters;
export type ResourceSetRulesDeleteResourceSetRuleParameters = RequestParameters;
export interface ResourceSetRulesListResourceSetRulesQueryParamProperties {
$skipToken?: string;
}
export interface ResourceSetRulesListResourceSetRulesQueryParam {
queryParameters?: ResourceSetRulesListResourceSetRulesQueryParamProperties;
}
export type ResourceSetRulesListResourceSetRulesParameters = ResourceSetRulesListResourceSetRulesQueryParam & | RequestParameters; |
|
0.35d867ee0a4bc3c0087d.hot-update.js
webpackHotUpdate(0,{
/***/ "./src/Views/Posters.js":
/*!******************************!*\
!*** ./src/Views/Posters.js ***!
\******************************/
/*! exports provided: default */
/*! exports used: default */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_react__ = __webpack_require__(/*! react */ "./node_modules/react/index.js");
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_react__);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1_react_router_dom__ = __webpack_require__(/*! react-router-dom */ "./node_modules/react-router-dom/es/index.js");
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2_axios__ = __webpack_require__(/*! axios */ "./node_modules/axios/index.js");
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2_axios___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_axios__);
var _jsxFileName = 'C:\\StarterProject\\MoviePosterArena\\StarterProject\\MovieArtArena.Web\\app\\public\\src\\Views\\Posters.js';
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var Posters = function (_React$Component) {
_inherits(Posters, _React$Component);
function Posters(props) {
_classCallCheck(this, Posters);
var _this = _possibleConstructorReturn(this, (Posters.__proto__ || Object.getPrototypeOf(Posters)).call(this, props));
_this.componentWillMount = function () {
_this.getMovies();
};
_this.getMovies = function () {
__WEBPACK_IMPORTED_MODULE_2_axios___default.a.get("http://localhost:65332/api/poster/getall").then(function (resp) {
var movies = resp.data.items;
_this.setState({ movies: movies });
console.log(resp);
console.log(movies);
});
};
_this.state = {
movies: []
};
return _this;
}
_createClass(Posters, [{
key: 'render',
value: function render() {
var _this2 = this;
return __WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{ className: 'container', __source: {
fileName: _jsxFileName,
lineNumber: 33
},
__self: this
},
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{ className: 'container', __source: {
fileName: _jsxFileName,
lineNumber: 34
},
__self: this
},
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'h2',
{ className: 'movietitlefont', __source: {
fileName: _jsxFileName,
lineNumber: 35
},
__self: this
},
'MoviePosters'
)
),
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{ className: 'row', __source: {
fileName: _jsxFileName,
lineNumber: 37
},
__self: this
},
this.state.movies.map(function (movie) {
return __WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{ key: movie.id, __source: {
fileName: _jsxFileName,
lineNumber: 39
},
__self: _this2
},
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{ className: 'row col', __source: {
fileName: _jsxFileName,
lineNumber: 41
},
__self: _this2
},
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement('img', { className: 'row ibox md-3 movies', src: movie.imageUrl, href: '', __source: {
fileName: _jsxFileName,
lineNumber: 42
},
__self: _this2
}),
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{
__source: {
fileName: _jsxFileName,
lineNumber: 43
},
__self: _this2
},
movie.title
),
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{
__source: {
fileName: _jsxFileName,
lineNumber: 44
},
__self: _this2
},
movie.description
),
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{
__source: {
fileName: _jsxFileName,
lineNumber: 45
},
__self: _this2
},
movie.type
),
__WEBPACK_IMPORTED_MODULE_0_react___default.a.createElement(
'div',
{
__source: {
fileName: _jsxFileName,
lineNumber: 46
},
__self: _this2
},
movie.price
)
)
);
})
)
);
}
}]);
return Posters;
}(__WEBPACK_IMPORTED_MODULE_0_react___default.a.Component);
/* harmony default export */ __webpack_exports__["a"] = (Posters);
/***/ })
})
//# sourceMappingURL=0.35d867ee0a4bc3c0087d.hot-update.js.map
objects.py
"""
This module defines the basic `DefaultObject` and its children
`DefaultCharacter`, `DefaultAccount`, `DefaultRoom` and `DefaultExit`.
These are the (default) starting points for all in-game visible
entities.
"""
import time
import inflect
from builtins import object
from future.utils import with_metaclass
from collections import defaultdict
from django.conf import settings
from evennia.typeclasses.models import TypeclassBase
from evennia.typeclasses.attributes import NickHandler
from evennia.objects.manager import ObjectManager
from evennia.objects.models import ObjectDB
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands import cmdset, command
from evennia.commands.cmdsethandler import CmdSetHandler
from evennia.commands import cmdhandler
from evennia.utils import search
from evennia.utils import logger
from evennia.utils import ansi
from evennia.utils.utils import (variable_from_module, lazy_property,
make_iter, to_unicode, is_iter, list_to_string,
to_str)
from django.utils.translation import ugettext as _
_INFLECT = inflect.engine()
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_ScriptDB = None
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
# the sessid_max is based on the length of the db_sessid csv field (excluding commas)
_SESSID_MAX = 16 if _MULTISESSION_MODE in (1, 3) else 1
class ObjectSessionHandler(object):
|
#
# Base class to inherit from.
class DefaultObject(with_metaclass(TypeclassBase, ObjectDB)):
"""
This is the root typeclass object, representing all entities that
have an actual presence in-game. DefaultObjects generally have a
location. They can also be manipulated and looked at. Game
entities you define should inherit from DefaultObject at some distance.
It is recommended to create children of this class using the
`evennia.create_object()` function rather than to initialize the class
directly - this will both set things up and efficiently save the object
without `obj.save()` having to be called explicitly.
"""
objects = ObjectManager()
# on-object properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
@lazy_property
def sessions(self):
return ObjectSessionHandler(self)
@property
def is_connected(self):
# we get an error for objects subscribed to channels without this
if self.account: # seems sane to pass on the account
return self.account.is_connected
else:
return False
@property
def has_account(self):
"""
Convenience property for checking if an active account is
currently connected to this object.
"""
return self.sessions.count()
@property
def is_superuser(self):
"""
Check if user has an account, and if so, if it is a superuser.
"""
return self.db_account and self.db_account.is_superuser \
and not self.db_account.attributes.get("_quell")
def contents_get(self, exclude=None):
"""
Returns the contents of this object, i.e. all
        objects that have this object set as their location.
        This should be publicly available.
Args:
exclude (Object): Object to exclude from returned
contents list
Returns:
contents (list): List of contents of this Object.
Notes:
Also available as the `contents` property.
"""
con = self.contents_cache.get(exclude=exclude)
# print "contents_get:", self, con, id(self), calledby() # DEBUG
return con
contents = property(contents_get)
@property
def exits(self):
"""
Returns all exits from this object, i.e. all objects at this
location having the property destination != `None`.
"""
return [exi for exi in self.contents if exi.destination]
# main methods
def get_display_name(self, looker, **kwargs):
"""
Displays the name of the object in a viewer-aware manner.
Args:
looker (TypedObject): The object or account that is looking
                at/getting information for this object.
Returns:
name (str): A string containing the name of the object,
including the DBREF if this user is privileged to control
said object.
Notes:
This function could be extended to change how object names
appear to users in character, but be wary. This function
does not change an object's keys or aliases when
searching, and is expected to produce something useful for
builders.
"""
if self.locks.check_lockstring(looker, "perm(Builder)"):
return "{}(#{})".format(self.name, self.id)
return self.name
def get_numbered_name(self, count, looker, **kwargs):
"""
Return the numbered (singular, plural) forms of this object's key. This is by default called
by return_appearance and is used for grouping multiple same-named of this object. Note that
this will be called on *every* member of a group even though the plural name will be only
shown once. Also the singular display version, such as 'an apple', 'a tree' is determined
from this method.
Args:
count (int): Number of objects of this type
looker (Object): Onlooker. Not used by default.
Kwargs:
key (str): Optional key to pluralize, if given, use this instead of the object's key.
Returns:
singular (str): The singular form to display.
plural (str): The determined plural form of the key, including the count.
"""
key = kwargs.get("key", self.key)
key = ansi.ANSIString(key) # this is needed to allow inflection of colored names
plural = _INFLECT.plural(key, 2)
plural = "%s %s" % (_INFLECT.number_to_words(count, threshold=12), plural)
singular = _INFLECT.an(key)
if not self.aliases.get(plural, category="plural_key"):
            # we need to wipe any old plurals/an/a in case key changed in the interim
self.aliases.clear(category="plural_key")
self.aliases.add(plural, category="plural_key")
# save the singular form as an alias here too so we can display "an egg" and also
# look at 'an egg'.
self.aliases.add(singular, category="plural_key")
return singular, plural
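    # Illustrative sketch, assuming the default inflect behaviour described above:
    #
    #     singular, plural = apple_obj.get_numbered_name(3, looker)
    #     # singular -> "an apple", plural -> "three apples"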
def search(self, searchdata,
global_search=False,
use_nicks=True,
typeclass=None,
location=None,
attribute_name=None,
quiet=False,
exact=False,
candidates=None,
nofound_string=None,
multimatch_string=None,
use_dbref=None):
"""
Returns an Object matching a search string/condition
Perform a standard object search in the database, handling
multiple results and lack thereof gracefully. By default, only
objects in the current `location` of `self` or its inventory are searched for.
Args:
searchdata (str or obj): Primary search criterion. Will be matched
against `object.key` (with `object.aliases` second) unless
the keyword attribute_name specifies otherwise.
**Special strings:**
- `#<num>`: search by unique dbref. This is always
a global search.
- `me,self`: self-reference to this object
- `<num>-<string>` - can be used to differentiate
between multiple same-named matches
global_search (bool): Search all objects globally. This is overruled
by `location` keyword.
use_nicks (bool): Use nickname-replace (nicktype "object") on `searchdata`.
typeclass (str or Typeclass, or list of either): Limit search only
to `Objects` with this typeclass. May be a list of typeclasses
for a broader search.
location (Object or list): Specify a location or multiple locations
to search. Note that this is used to query the *contents* of a
location and will not match for the location itself -
if you want that, don't set this or use `candidates` to specify
exactly which objects should be searched.
attribute_name (str): Define which property to search. If set, no
key+alias search will be performed. This can be used
to search database fields (db_ will be automatically
prepended), and if that fails, it will try to return
objects having Attributes with this name and value
equal to searchdata. A special use is to search for
"key" here if you want to do a key-search without
including aliases.
quiet (bool): don't display default error messages - this tells the
search method that the user wants to handle all errors
themselves. It also changes the return value type, see
below.
exact (bool): if unset (default) - prefers to match to beginning of
string rather than not matching at all. If set, requires
exact matching of entire string.
candidates (list of objects): this is an optional custom list of objects
to search (filter) between. It is ignored if `global_search`
is given. If not set, this list will automatically be defined
to include the location, the contents of location and the
caller's contents (inventory).
nofound_string (str): optional custom string for not-found error message.
multimatch_string (str): optional custom string for multimatch error header.
use_dbref (bool or None, optional): If `True`, allow to enter e.g. a query "#123"
to find an object (globally) by its database-id 123. If `False`, the string "#123"
will be treated like a normal string. If `None` (default), the ability to query by
#dbref is turned on if `self` has the permission 'Builder' and is turned off
otherwise.
Returns:
match (Object, None or list): will return an Object/None if `quiet=False`,
otherwise it will return a list of 0, 1 or more matches.
Notes:
To find Accounts, use eg. `evennia.account_search`. If
`quiet=False`, error messages will be handled by
`settings.SEARCH_AT_RESULT` and echoed automatically (on
error, return will be `None`). If `quiet=True`, the error
messaging is assumed to be handled by the caller.
"""
is_string = isinstance(searchdata, basestring)
if is_string:
# searchdata is a string; wrap some common self-references
if searchdata.lower() in ("here", ):
return [self.location] if quiet else self.location
if searchdata.lower() in ("me", "self",):
return [self] if quiet else self
if use_dbref is None:
use_dbref = self.locks.check_lockstring(self, "_dummy:perm(Builder)")
if use_nicks:
# do nick-replacement on search
searchdata = self.nicks.nickreplace(searchdata, categories=("object", "account"), include_account=True)
if (global_search or (is_string and searchdata.startswith("#") and
len(searchdata) > 1 and searchdata[1:].isdigit())):
# only allow exact matching if searching the entire database
# or unique #dbrefs
exact = True
candidates = None
elif candidates is None:
# no custom candidates given - get them automatically
if location:
# location(s) were given
candidates = []
for obj in make_iter(location):
candidates.extend(obj.contents)
else:
# local search. Candidates are taken from
# self.contents, self.location and
# self.location.contents
location = self.location
candidates = self.contents
if location:
candidates = candidates + [location] + location.contents
else:
# normally we don't need this since we are
# included in location.contents
candidates.append(self)
results = ObjectDB.objects.object_search(searchdata,
attribute_name=attribute_name,
typeclass=typeclass,
candidates=candidates,
exact=exact,
use_dbref=use_dbref)
if quiet:
return results
return _AT_SEARCH_RESULT(results, self, query=searchdata,
nofound_string=nofound_string, multimatch_string=multimatch_string)
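    # Illustrative sketch of typical use from command code, assuming `caller` is a
    # DefaultObject and "sword" names something in its location or inventory:
    #
    #     target = caller.search("sword")
    #     if not target:
    #         return  # with quiet=False an error has already been echoed to caller
    #
    #     matches = caller.search("sword", quiet=True)  # caller handles 0/1/many matches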
def search_account(self, searchdata, quiet=False):
"""
Simple shortcut wrapper to search for accounts, not characters.
Args:
searchdata (str): Search criterion - the key or dbref of the account
to search for. If this is "here" or "me", search
for the account connected to this object.
quiet (bool): Returns the results as a list rather than
echo eventual standard error messages. Default `False`.
Returns:
result (Account, None or list): Just what is returned depends on
the `quiet` setting:
                - `quiet=False`: No match or multimatch auto-echoes errors
                  to self.msg, then returns `None`. The results are passed
through `settings.SEARCH_AT_RESULT` and
`settings.SEARCH_AT_MULTIMATCH_INPUT`. If there is a
unique match, this will be returned.
- `quiet=True`: No automatic error messaging is done, and
what is returned is always a list with 0, 1 or more
matching Accounts.
"""
if isinstance(searchdata, basestring):
# searchdata is a string; wrap some common self-references
if searchdata.lower() in ("me", "self",):
return [self.account] if quiet else self.account
results = search.search_account(searchdata)
if quiet:
return results
return _AT_SEARCH_RESULT(results, self, query=searchdata)
def execute_cmd(self, raw_string, session=None, **kwargs):
"""
Do something as this object. This is never called normally,
it's only used when wanting specifically to let an object be
the caller of a command. It makes use of nicks of eventual
connected accounts as well.
Args:
raw_string (string): Raw command input
session (Session, optional): Session to
return results to
Kwargs:
Other keyword arguments will be added to the found command
            object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
            change operating parameters for commands at run-time.
Returns:
defer (Deferred): This is an asynchronous Twisted object that
will not fire until the command has actually finished
executing. To overload this one needs to attach
callback functions to it, with addCallback(function).
This function will be called with an eventual return
value from the command execution. This return is not
used at all by Evennia by default, but might be useful
for coders intending to implement some sort of nested
command structure.
"""
# nick replacement - we require full-word matching.
# do text encoding conversion
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string, categories=("inputline", "channel"), include_account=True)
return cmdhandler.cmdhandler(self, raw_string, callertype="object", session=session, **kwargs)
def msg(self, text=None, from_obj=None, session=None, options=None, **kwargs):
"""
Emits something to a session attached to the object.
Args:
text (str or tuple, optional): The message to send. This
is treated internally like any send-command, so its
value can be a tuple if sending multiple arguments to
the `text` oob command.
from_obj (obj or list, optional): object that is sending. If
given, at_msg_send will be called. This value will be
passed on to the protocol. If iterable, will execute hook
on all entities in it.
session (Session or list, optional): Session or list of
Sessions to relay data to, if any. If set, will force send
to these sessions. If unset, who receives the message
depends on the MULTISESSION_MODE.
options (dict, optional): Message-specific option-value
pairs. These will be applied at the protocol level.
Kwargs:
any (string or tuples): All kwarg keys not listed above
will be treated as send-command names and their arguments
(which can be a string or a tuple).
Notes:
`at_msg_receive` will be called on this Object.
All extra kwargs will be passed on to the protocol.
"""
# try send hooks
if from_obj:
for obj in make_iter(from_obj):
try:
obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
logger.log_trace()
kwargs["options"] = options
try:
if not self.at_msg_receive(text=text, **kwargs):
# if at_msg_receive returns false, we abort message to this object
return
except Exception:
logger.log_trace()
if text is not None:
if not (isinstance(text, basestring) or isinstance(text, tuple)):
# sanitize text before sending across the wire
try:
text = to_str(text, force_string=True)
except Exception:
text = repr(text)
kwargs['text'] = text
# relay to session(s)
sessions = make_iter(session) if session else self.sessions.all()
for session in sessions:
session.data_out(**kwargs)
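    # Illustrative sketch of typical calls, following the argument forms described above:
    #
    #     obj.msg("You hear a distant rumble.")                              # plain text
    #     obj.msg(text=("You hear a distant rumble.", {}), from_obj=caller)  # tuple (send-command) form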
def for_contents(self, func, exclude=None, **kwargs):
"""
Runs a function on every object contained within this one.
Args:
func (callable): Function to call. This must have the
formal call sign func(obj, **kwargs), where obj is the
object currently being processed and `**kwargs` are
passed on from the call to `for_contents`.
exclude (list, optional): A list of object not to call the
function on.
Kwargs:
Keyword arguments will be passed to the function for all objects.
"""
contents = self.contents
if exclude:
exclude = make_iter(exclude)
contents = [obj for obj in contents if obj not in exclude]
for obj in contents:
func(obj, **kwargs)
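    # Illustrative sketch, matching the call signature func(obj, **kwargs) described above:
    #
    #     def _drench(obj, **kwargs):
    #         obj.msg("You are soaked by %s." % kwargs.get("liquid", "water"))
    #
    #     room.for_contents(_drench, exclude=[caller], liquid="rain")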
def msg_contents(self, text=None, exclude=None, from_obj=None, mapping=None, **kwargs):
"""
Emits a message to all objects inside this object.
Args:
text (str or tuple): Message to send. If a tuple, this should be
on the valid OOB outmessage form `(message, {kwargs})`,
where kwargs are optional data passed to the `text`
outputfunc.
exclude (list, optional): A list of objects not to send to.
from_obj (Object, optional): An object designated as the
"sender" of the message. See `DefaultObject.msg()` for
more info.
mapping (dict, optional): A mapping of formatting keys
`{"key":<object>, "key2":<object2>,...}. The keys
must match `{key}` markers in the `text` if this is a string or
in the internal `message` if `text` is a tuple. These
formatting statements will be
replaced by the return of `<object>.get_display_name(looker)`
for every looker in contents that receives the
message. This allows for every object to potentially
get its own customized string.
Kwargs:
Keyword arguments will be passed on to `obj.msg()` for all
messaged objects.
Notes:
The `mapping` argument is required if `message` contains
{}-style format syntax. The keys of `mapping` should match
named format tokens, and its values will have their
`get_display_name()` function called for each object in
the room before substitution. If an item in the mapping does
not have `get_display_name()`, its string value will be used.
Example:
Say Char is a Character object and Npc is an NPC object:
char.location.msg_contents(
"{attacker} kicks {defender}",
mapping=dict(attacker=char, defender=npc), exclude=(char, npc))
This will result in everyone in the room seeing 'Char kicks NPC'
where everyone may potentially see different results for Char and Npc
depending on the results of `char.get_display_name(looker)` and
`npc.get_display_name(looker)` for each particular onlooker
"""
# we also accept an outcommand on the form (message, {kwargs})
is_outcmd = text and is_iter(text)
inmessage = text[0] if is_outcmd else text
outkwargs = text[1] if is_outcmd and len(text) > 1 else {}
contents = self.contents
if exclude:
exclude = make_iter(exclude)
contents = [obj for obj in contents if obj not in exclude]
for obj in contents:
if mapping:
substitutions = {t: sub.get_display_name(obj)
if hasattr(sub, 'get_display_name')
else str(sub) for t, sub in mapping.items()}
outmessage = inmessage.format(**substitutions)
else:
outmessage = inmessage
obj.msg(text=(outmessage, outkwargs), from_obj=from_obj, **kwargs)
def move_to(self, destination, quiet=False,
emit_to_obj=None, use_destination=True, to_none=False, move_hooks=True,
**kwargs):
"""
Moves this object to a new location.
Args:
destination (Object): Reference to the object to move to. This
can also be an exit object, in which case the
destination property is used as destination.
quiet (bool): If true, turn off the calling of the emit hooks
(announce_move_to/from etc)
emit_to_obj (Object): object to receive error messages
use_destination (bool): Default is for objects to use the "destination"
property of destinations as the target to move to. Turning off this
keyword allows objects to move "inside" exit objects.
to_none (bool): Allow destination to be None. Note that no hooks are run when
moving to a None location. If you want to run hooks, run them manually
(and make sure they can manage None locations).
move_hooks (bool): If False, turn off the calling of move-related hooks
(at_before/after_move etc) with quiet=True, this is as quiet a move
as can be done.
Kwargs:
Passed on to announce_move_to and announce_move_from hooks.
Returns:
result (bool): True/False depending on if there were problems with the move.
This method may also return various error messages to the
`emit_to_obj`.
Notes:
No access checks are done in this method, these should be handled before
calling `move_to`.
The `DefaultObject` hooks called (if `move_hooks=True`) are, in order:
1. `self.at_before_move(destination)` (if this returns False, move is aborted)
2. `source_location.at_object_leave(self, destination)`
3. `self.announce_move_from(destination)`
4. (move happens here)
5. `self.announce_move_to(source_location)`
6. `destination.at_object_receive(self, source_location)`
7. `self.at_after_move(source_location)`
"""
def logerr(string="", err=None):
"""Simple log helper method"""
logger.log_trace()
self.msg("%s%s" % (string, "" if err is None else " (%s)" % err))
return
errtxt = _("Couldn't perform move ('%s'). Contact an admin.")
if not emit_to_obj:
emit_to_obj = self
if not destination:
if to_none:
# immediately move to None. There can be no hooks called since
# there is no destination to call them with.
self.location = None
return True
emit_to_obj.msg(_("The destination doesn't exist."))
return False
if destination.destination and use_destination:
# traverse exits
destination = destination.destination
# Before the move, call eventual pre-commands.
if move_hooks:
try:
if not self.at_before_move(destination):
return False
except Exception as err:
logerr(errtxt % "at_before_move()", err)
return False
# Save the old location
source_location = self.location
# Call hook on source location
if move_hooks and source_location:
try:
source_location.at_object_leave(self, destination)
except Exception as err:
logerr(errtxt % "at_object_leave()", err)
return False
if not quiet:
# tell the old room we are leaving
try:
self.announce_move_from(destination, **kwargs)
except Exception as err:
logerr(errtxt % "at_announce_move()", err)
return False
# Perform move
try:
self.location = destination
except Exception as err:
logerr(errtxt % "location change", err)
return False
if not quiet:
# Tell the new room we are there.
try:
self.announce_move_to(source_location, **kwargs)
except Exception as err:
logerr(errtxt % "announce_move_to()", err)
return False
if move_hooks:
# Perform eventual extra commands on the receiving location
# (the object has already arrived at this point)
try:
destination.at_object_receive(self, source_location)
except Exception as err:
logerr(errtxt % "at_object_receive()", err)
return False
# Execute eventual extra commands on this object after moving it
# (usually calling 'look')
if move_hooks:
try:
self.at_after_move(source_location)
except Exception as err:
logerr(errtxt % "at_after_move", err)
return False
return True
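    # Illustrative sketch of a typical call, assuming `chest` and `vault` are existing objects:
    #
    #     if not chest.move_to(vault, quiet=True):
    #         chest.msg_contents("The move was blocked.")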
def clear_exits(self):
"""
Destroys all of the exits and any exits pointing to this
object as a destination.
"""
for out_exit in [exi for exi in ObjectDB.objects.get_contents(self) if exi.db_destination]:
out_exit.delete()
for in_exit in ObjectDB.objects.filter(db_destination=self):
in_exit.delete()
def clear_contents(self):
"""
Moves all objects (accounts/things) to their home location or
to default home.
"""
# Gather up everything that thinks this is its location.
default_home_id = int(settings.DEFAULT_HOME.lstrip("#"))
try:
default_home = ObjectDB.objects.get(id=default_home_id)
if default_home.dbid == self.dbid:
# we are deleting default home!
default_home = None
except Exception:
string = _("Could not find default home '(#%d)'.")
logger.log_err(string % default_home_id)
default_home = None
for obj in self.contents:
home = obj.home
# Obviously, we can't send it back to here.
if not home or (home and home.dbid == self.dbid):
obj.home = default_home
home = default_home
# If for some reason it's still None...
if not home:
string = "Missing default home, '%s(#%d)' "
string += "now has a null location."
obj.location = None
obj.msg(_("Something went wrong! You are dumped into nowhere. Contact an admin."))
logger.log_err(string % (obj.name, obj.dbid))
return
if obj.has_account:
if home:
string = "Your current location has ceased to exist,"
string += " moving you to %s(#%d)."
obj.msg(_(string) % (home.name, home.dbid))
else:
# Famous last words: The account should never see this.
string = "This place should not exist ... contact an admin."
obj.msg(_(string))
obj.move_to(home)
def copy(self, new_key=None):
"""
Makes an identical copy of this object, identical except for a
new dbref in the database. If you want to customize the copy
by changing some settings, use ObjectDB.object.copy_object()
directly.
Args:
new_key (string): New key/name of copied object. If new_key is not
specified, the copy will be named <old_key>_copy by default.
Returns:
copy (Object): A copy of this object.
"""
def find_clone_key():
"""
Append 01, 02 etc to obj.key. Checks next higher number in the
same location, then adds the next number available
returns the new clone name on the form keyXX
"""
key = self.key
num = sum(1 for obj in self.location.contents
if obj.key.startswith(key) and obj.key.lstrip(key).isdigit())
return "%s%03i" % (key, num)
new_key = new_key or find_clone_key()
return ObjectDB.objects.copy_object(self, new_key=new_key)
def delete(self):
"""
Deletes this object. Before deletion, this method makes sure
to move all contained objects to their respective home
locations, as well as clean up all exits to/from the object.
Returns:
noerror (bool): Returns whether or not the delete completed
successfully or not.
"""
global _ScriptDB
if not _ScriptDB:
from evennia.scripts.models import ScriptDB as _ScriptDB
if not self.pk or not self.at_object_delete():
# This object has already been deleted,
# or the pre-delete check return False
return False
# See if we need to kick the account off.
for session in self.sessions.all():
session.msg(_("Your character %s has been destroyed.") % self.key)
# no need to disconnect, Account just jumps to OOC mode.
# sever the connection (important!)
if self.account:
for session in self.sessions.all():
self.account.unpuppet_object(session)
self.account = None
for script in _ScriptDB.objects.get_all_scripts_on_obj(self):
script.stop()
# Destroy any exits to and from this room, if any
self.clear_exits()
# Clear out any non-exit objects located within the object
self.clear_contents()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
self.location = None # this updates contents_cache for our location
# Perform the deletion of the object
super(DefaultObject, self).delete()
return True
def access(self, accessing_obj, access_type='read', default=False, no_superuser_bypass=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one.
access_type (str, optional): Type of access sought.
default (bool, optional): What to return if no lock of access_type was found.
no_superuser_bypass (bool, optional): If `True`, don't skip
lock check for superuser (be careful with this one).
Kwargs:
Passed on to the at_access hook along with the result of the access check.
"""
result = super(DefaultObject, self).access(accessing_obj, access_type=access_type,
default=default, no_superuser_bypass=no_superuser_bypass)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
#
# Hook methods
#
def at_first_save(self):
"""
This is called by the typeclass system whenever an instance of
this class is saved for the first time. It is a generic hook
for calling the startup hooks for the various game entities.
When overloading you generally don't overload this but
overload the hooks called by this method.
"""
self.basetype_setup()
self.at_object_creation()
if hasattr(self, "_createdict"):
# this will only be set if the utils.create function
# was used to create the object. We want the create
# call's kwargs to override the values set by hooks.
cdict = self._createdict
updates = []
if not cdict.get("key"):
if not self.db_key:
self.db_key = "#%i" % self.dbid
updates.append("db_key")
elif self.key != cdict.get("key"):
updates.append("db_key")
self.db_key = cdict["key"]
if cdict.get("location") and self.location != cdict["location"]:
self.db_location = cdict["location"]
updates.append("db_location")
if cdict.get("home") and self.home != cdict["home"]:
self.home = cdict["home"]
updates.append("db_home")
if cdict.get("destination") and self.destination != cdict["destination"]:
self.destination = cdict["destination"]
updates.append("db_destination")
if updates:
self.save(update_fields=updates)
if cdict.get("permissions"):
self.permissions.batch_add(*cdict["permissions"])
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("aliases"):
self.aliases.batch_add(*cdict["aliases"])
if cdict.get("location"):
cdict["location"].at_object_receive(self, None)
self.at_after_move(None)
if cdict.get("tags"):
# this should be a list of tags, tuples (key, category) or (key, category, data)
self.tags.batch_add(*cdict["tags"])
if cdict.get("attributes"):
# this should be tuples (key, val, ...)
self.attributes.batch_add(*cdict["attributes"])
if cdict.get("nattributes"):
# this should be a dict of nattrname:value
for key, value in cdict["nattributes"]:
self.nattributes.add(key, value)
del self._createdict
self.basetype_posthook_setup()
# hooks called by the game engine #
def basetype_setup(self):
"""
This sets up the default properties of an Object, just before
the more general at_object_creation.
You normally don't need to change this unless you change some
fundamental things like names of permission groups.
"""
# the default security setup fallback for a generic
# object. Overload in child for a custom setup. Also creation
# commands may set this (create an item and you should be its
# controller, for example)
self.locks.add(";".join([
"control:perm(Developer)", # edit locks/permissions, delete
"examine:perm(Builder)", # examine properties
"view:all()", # look at object (visibility)
"edit:perm(Admin)", # edit properties/attributes
"delete:perm(Admin)", # delete object
"get:all()", # pick up object
"call:true()", # allow to call commands on this object
"tell:perm(Admin)", # allow emits to this object
"puppet:pperm(Developer)"])) # lock down puppeting only to staff by default
def basetype_posthook_setup(self):
"""
Called once, after basetype_setup and at_object_creation. This
should generally not be overloaded unless you are redefining
how a room/exit/object works. It allows for basetype-like
setup after the object is created. An example of this is
EXITs, who need to know keys, aliases, locks etc to set up
their exit-cmdsets.
"""
pass
def at_object_creation(self):
"""
Called once, when this object is first created. This is the
normal hook to overload for most object types.
"""
pass
def at_object_delete(self):
"""
Called just before the database object is permanently
delete()d from the database. If this method returns False,
deletion is aborted.
"""
return True
def at_init(self):
"""
This is always called whenever this object is initiated --
that is, whenever it its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this object are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the object currently
have no cmdsets.
Kwargs:
caller (Session, Object or Account): The caller requesting
this cmdset.
"""
pass
def at_pre_puppet(self, account, session=None, **kwargs):
"""
Called just before an Account connects to this object to puppet
it.
Args:
account (Account): This is the connecting account.
session (Session): Session controlling the connection.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_post_puppet(self, **kwargs):
"""
Called just after puppeting has been completed and all
Account<->Object links have been established.
Args:
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Note:
You can use `self.account` and `self.sessions.get()` to get
account and sessions at this point; the last entry in the
list from `self.sessions.get()` is the latest Session
puppeting this Object.
"""
self.account.db._last_puppet = self
def at_pre_unpuppet(self, **kwargs):
"""
Called just before beginning to un-connect a puppeting from
this Account.
Args:
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Note:
You can use `self.account` and `self.sessions.get()` to get
account and sessions at this point; the last entry in the
list from `self.sessions.get()` is the latest Session
puppeting this Object.
"""
pass
def at_post_unpuppet(self, account, session=None, **kwargs):
"""
Called just after the Account successfully disconnected from
this object, severing all connections.
Args:
account (Account): The account object that just disconnected
from this object.
session (Session): Session id controlling the connection that
just disconnected.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
Args:
result (bool): The outcome of the access call.
accessing_obj (Object or Account): The entity trying to gain access.
access_type (str): The type of access that was requested.
Kwargs:
Not used by default, added for possible expandability in a
game.
"""
pass
# hooks called when moving the object
def at_before_move(self, destination, **kwargs):
"""
Called just before starting to move this object to
destination.
Args:
destination (Object): The object we are moving to
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shouldmove (bool): If we should move or not.
Notes:
If this method returns False/None, the move is cancelled
before it is even started.
"""
# return has_perm(self, destination, "can_move")
return True
def announce_move_from(self, destination, msg=None, mapping=None, **kwargs):
"""
Called if the move is to be announced. This is
called while we are still standing in the old
location.
Args:
destination (Object): The place we are going to.
msg (str, optional): a replacement message.
mapping (dict, optional): additional mapping objects.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
You can override this method and call its parent with a
message to simply change the default message. In the string,
you can use the following as mappings (between braces):
object: the object which is moving.
exit: the exit from which the object is moving (if found).
origin: the location of the object before the move.
destination: the location of the object after moving.
"""
if not self.location:
return
if msg:
string = msg
else:
string = "{object} is leaving {origin}, heading for {destination}."
location = self.location
exits = [o for o in location.contents if o.location is location and o.destination is destination]
if not mapping:
mapping = {}
mapping.update({
"object": self,
"exit": exits[0] if exits else "somewhere",
"origin": location or "nowhere",
"destination": destination or "nowhere",
})
location.msg_contents(string, exclude=(self, ), mapping=mapping)
def announce_move_to(self, source_location, msg=None, mapping=None, **kwargs):
"""
Called after the move if the move was not quiet. At this point
we are standing in the new location.
Args:
source_location (Object): The place we came from
msg (str, optional): the replacement message if location.
mapping (dict, optional): additional mapping objects.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
You can override this method and call its parent with a
message to simply change the default message. In the string,
you can use the following as mappings (between braces):
object: the object which is moving.
exit: the exit from which the object is moving (if found).
origin: the location of the object before the move.
destination: the location of the object after moving.
"""
if not source_location and self.location.has_account:
# This was created from nowhere and added to an account's
# inventory; it's probably the result of a create command.
string = "You now have %s in your possession." % self.get_display_name(self.location)
self.location.msg(string)
return
if source_location:
if msg:
string = msg
else:
string = "{object} arrives to {destination} from {origin}."
else:
string = "{object} arrives to {destination}."
origin = source_location
destination = self.location
exits = []
if origin:
exits = [o for o in destination.contents if o.location is destination and o.destination is origin]
if not mapping:
mapping = {}
mapping.update({
"object": self,
"exit": exits[0] if exits else "somewhere",
"origin": origin or "nowhere",
"destination": destination or "nowhere",
})
destination.msg_contents(string, exclude=(self, ), mapping=mapping)
def at_after_move(self, source_location, **kwargs):
"""
Called after move has completed, regardless of quiet mode or
not. Allows changes to the object due to the location it is
now in.
Args:
            source_location (Object): Where we came from. This may be `None`.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_object_leave(self, moved_obj, target_location, **kwargs):
"""
Called just before an object leaves from inside this object
Args:
moved_obj (Object): The object leaving
target_location (Object): Where `moved_obj` is going.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_object_receive(self, moved_obj, source_location, **kwargs):
"""
Called after an object has been moved into this object.
Args:
moved_obj (Object): The object moved into this one
source_location (Object): Where `moved_object` came from.
Note that this could be `None`.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_traverse(self, traversing_object, target_location, **kwargs):
"""
This hook is responsible for handling the actual traversal,
normally by calling
`traversing_object.move_to(target_location)`. It is normally
only implemented by Exit objects. If it returns False (usually
because `move_to` returned False), `at_after_traverse` below
should not be called and instead `at_failed_traverse` should be
called.
Args:
traversing_object (Object): Object traversing us.
target_location (Object): Where target is going.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_after_traverse(self, traversing_object, source_location, **kwargs):
"""
Called just after an object successfully used this object to
traverse to another object (i.e. this object is a type of
Exit)
Args:
traversing_object (Object): The object traversing us.
source_location (Object): Where `traversing_object` came from.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
The target location should normally be available as `self.destination`.
"""
pass
def at_failed_traverse(self, traversing_object, **kwargs):
"""
This is called if an object fails to traverse this object for
some reason.
Args:
traversing_object (Object): The object that failed traversing us.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
Using the default exits, this hook will not be called if an
Attribute `err_traverse` is defined - this will in that case be
read for an error string instead.
"""
pass
def at_msg_receive(self, text=None, from_obj=None, **kwargs):
"""
This hook is called whenever someone sends a message to this
object using the `msg` method.
Note that from_obj may be None if the sender did not include
itself as an argument to the obj.msg() call - so you have to
        check for this.
Consider this a pre-processing method before msg is passed on
to the user session. If this method returns False, the msg
will not be passed on.
Args:
text (str, optional): The message received.
from_obj (any, optional): The object sending the message.
Kwargs:
This includes any keywords sent to the `msg` method.
Returns:
receive (bool): If this message should be received.
Notes:
If this method returns False, the `msg` operation
will abort without sending the message.
"""
return True
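    # Illustrative sketch (not in the original): because returning False from
    # at_msg_receive silently drops the incoming message, an override can act
    # as a simple filter. The `ignore_list` Attribute below is hypothetical.
    #
    #     def at_msg_receive(self, text=None, from_obj=None, **kwargs):
    #         if from_obj and from_obj in (self.db.ignore_list or []):
    #             return False
    #         return True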
def at_msg_send(self, text=None, to_obj=None, **kwargs):
"""
This is a hook that is called when *this* object sends a
message to another object with `obj.msg(text, to_obj=obj)`.
Args:
text (str, optional): Text to send.
to_obj (any, optional): The object to send to.
Kwargs:
Keywords passed from msg()
Notes:
Since this method is executed by `from_obj`, if no `from_obj`
was passed to `DefaultCharacter.msg` this hook will never
get called.
"""
pass
# hooks called by the default cmdset.
def return_appearance(self, looker, **kwargs):
"""
This formats a description. It is the hook a 'look' command
should call.
Args:
looker (Object): Object doing the looking.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if not looker:
return ""
# get and identify all objects
visible = (con for con in self.contents if con != looker and
con.access(looker, "view"))
exits, users, things = [], [], defaultdict(list)
for con in visible:
key = con.get_display_name(looker)
if con.destination:
exits.append(key)
elif con.has_account:
users.append("|c%s|n" % key)
else:
# things can be pluralized
things[key].append(con)
# get description, build string
string = "|c%s|n\n" % self.get_display_name(looker)
desc = self.db.desc
if desc:
string += "%s" % desc
if exits:
string += "\n|wExits:|n " + list_to_string(exits)
if users or things:
# handle pluralization of things (never pluralize users)
thing_strings = []
            for key, itemlist in sorted(things.items()):
nitem = len(itemlist)
if nitem == 1:
key, _ = itemlist[0].get_numbered_name(nitem, looker, key=key)
else:
key = [item.get_numbered_name(nitem, looker, key=key)[1] for item in itemlist][0]
thing_strings.append(key)
string += "\n|wYou see:|n " + list_to_string(users + thing_strings)
return string
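    # For orientation only (not in the original): with the defaults above, the
    # string built by return_appearance looks roughly like this, names invented:
    #
    #     |cA Dusty Library|n
    #     A small room lined with shelves.
    #     |wExits:|n north and out
    #     |wYou see:|n |cRandaline|n, a lantern and 2 old books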
def at_look(self, target, **kwargs):
"""
Called when this object performs a look. It allows to
customize just what this means. It will not itself
send any data.
Args:
target (Object): The target being looked at. This is
commonly an object or the current location. It will
be checked for the "view" type access.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
lookstring (str): A ready-processed look string
potentially ready to return to the looker.
"""
if not target.access(self, "view"):
try:
return "Could not view '%s'." % target.get_display_name(self)
except AttributeError:
return "Could not view '%s'." % target.key
description = target.return_appearance(self)
# the target's at_desc() method.
# this must be the last reference to target so it may delete itself when acted on.
target.at_desc(looker=self)
return description
def at_desc(self, looker=None, **kwargs):
"""
This is called whenever someone looks at this object.
Args:
looker (Object, optional): The object requesting the description.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
pass
def at_before_get(self, getter, **kwargs):
"""
Called by the default `get` command before this object has been
picked up.
Args:
getter (Object): The object about to get this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shouldget (bool): If the object should be gotten or not.
Notes:
If this method returns False/None, the getting is cancelled
before it is even started.
"""
return True
def at_get(self, getter, **kwargs):
"""
Called by the default `get` command when this object has been
picked up.
Args:
getter (Object): The object getting this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
This hook cannot stop the pickup from happening. Use
permissions or the at_before_get() hook for that.
"""
pass
def at_before_give(self, giver, getter, **kwargs):
"""
Called by the default `give` command before this object has been
given.
Args:
giver (Object): The object about to give this object.
getter (Object): The object about to get this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shouldgive (bool): If the object should be given or not.
Notes:
If this method returns False/None, the giving is cancelled
before it is even started.
"""
return True
def at_give(self, giver, getter, **kwargs):
"""
Called by the default `give` command when this object has been
given.
Args:
giver (Object): The object giving this object.
getter (Object): The object getting this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
This hook cannot stop the give from happening. Use
permissions or the at_before_give() hook for that.
"""
pass
def at_before_drop(self, dropper, **kwargs):
"""
Called by the default `drop` command before this object has been
dropped.
Args:
dropper (Object): The object which will drop this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
shoulddrop (bool): If the object should be dropped or not.
Notes:
If this method returns False/None, the dropping is cancelled
before it is even started.
"""
return True
def at_drop(self, dropper, **kwargs):
"""
Called by the default `drop` command when this object has been
dropped.
Args:
dropper (Object): The object which just dropped this object.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
This hook cannot stop the drop from happening. Use
permissions or the at_before_drop() hook for that.
"""
pass
def at_before_say(self, message, **kwargs):
"""
Before the object says something.
This hook is by default used by the 'say' and 'whisper'
commands as used by this command it is called before the text
is said/whispered and can be used to customize the outgoing
text from the object. Returning `None` aborts the command.
Args:
message (str): The suggested say/whisper text spoken by self.
Kwargs:
whisper (bool): If True, this is a whisper rather than
a say. This is sent by the whisper command by default.
Other verbal commands could use this hook in similar
ways.
receivers (Object or iterable): If set, this is the target or targets for the say/whisper.
Returns:
message (str): The (possibly modified) text to be spoken.
"""
return message
def at_say(self, message, msg_self=None, msg_location=None,
receivers=None, msg_receivers=None, **kwargs):
"""
Display the actual say (or whisper) of self.
This hook should display the actual say/whisper of the object in its
location. It should both alert the object (self) and its
location that some text is spoken. The overriding of messages or
`mapping` allows for simple customization of the hook without
re-writing it completely.
Args:
message (str): The message to convey.
msg_self (bool or str, optional): If boolean True, echo `message` to self. If a string,
return that message. If False or unset, don't echo to self.
msg_location (str, optional): The message to echo to self's location.
receivers (Object or iterable, optional): An eventual receiver or receivers of the message
(by default only used by whispers).
msg_receivers(str): Specific message to pass to the receiver(s). This will parsed
with the {receiver} placeholder replaced with the given receiver.
Kwargs:
whisper (bool): If this is a whisper rather than a say. Kwargs
can be used by other verbal commands in a similar way.
mapping (dict): Pass an additional mapping to the message.
Notes:
Messages can contain {} markers. These are substituted against the values
passed in the `mapping` argument.
msg_self = 'You say: "{speech}"'
msg_location = '{object} says: "{speech}"'
msg_receivers = '{object} whispers: "{speech}"'
Supported markers by default:
{self}: text to self-reference with (default 'You')
{speech}: the text spoken/whispered by self.
{object}: the object speaking.
{receiver}: replaced with a single receiver only for strings meant for a specific
receiver (otherwise 'None').
{all_receivers}: comma-separated list of all receivers,
if more than one, otherwise same as receiver
{location}: the location where object is.
"""
msg_type = 'say'
if kwargs.get("whisper", False):
# whisper mode
msg_type = 'whisper'
msg_self = '{self} whisper to {all_receivers}, "{speech}"' if msg_self is True else msg_self
            msg_receivers = msg_receivers or '{object} whispers: "{speech}"'
msg_location = None
else:
msg_self = '{self} say, "{speech}"' if msg_self is True else msg_self
msg_location = msg_location or '{object} says, "{speech}"'
msg_receivers = msg_receivers or message
custom_mapping = kwargs.get('mapping', {})
receivers = make_iter(receivers) if receivers else None
location = self.location
if msg_self:
self_mapping = {"self": "You",
"object": self.get_display_name(self),
"location": location.get_display_name(self) if location else None,
"receiver": None,
"all_receivers": ", ".join(
recv.get_display_name(self)
for recv in receivers) if receivers else None,
"speech": message}
self_mapping.update(custom_mapping)
self.msg(text=(msg_self.format(**self_mapping), {"type": msg_type}), from_obj=self)
if receivers and msg_receivers:
receiver_mapping = {"self": "You",
"object": None,
"location": None,
"receiver": None,
"all_receivers": None,
"speech": message}
for receiver in make_iter(receivers):
individual_mapping = {"object": self.get_display_name(receiver),
"location": location.get_display_name(receiver),
"receiver": receiver.get_display_name(receiver),
"all_receivers": ", ".join(
recv.get_display_name(recv)
for recv in receivers) if receivers else None}
receiver_mapping.update(individual_mapping)
receiver_mapping.update(custom_mapping)
receiver.msg(text=(msg_receivers.format(**receiver_mapping),
{"type": msg_type}), from_obj=self)
if self.location and msg_location:
location_mapping = {"self": "You",
"object": self,
"location": location,
"all_receivers": ", ".join(str(recv) for recv in receivers) if receivers else None,
"receiver": None,
"speech": message}
location_mapping.update(custom_mapping)
exclude = []
if msg_self:
exclude.append(self)
if receivers:
exclude.extend(receivers)
self.location.msg_contents(text=(msg_location, {"type": msg_type}),
from_obj=self,
exclude=exclude,
mapping=location_mapping)
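    # Illustrative example (not part of the file): with the default templates
    # above, `char.at_say("Hello")` echoes '<char> says, "Hello"' to the room,
    # while `char.at_say("psst", whisper=True, receivers=[target], msg_self=True)`
    # sends 'You whisper to <target>, "psst"' to char itself and
    # '<char> whispers: "psst"' to the receiver; the angle brackets stand in
    # for get_display_name() output.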
#
# Base Character object
#
class DefaultCharacter(DefaultObject):
"""
This implements an Object puppeted by a Session - that is,
a character avatar controlled by an account.
"""
def basetype_setup(self):
"""
Setup character-specific security.
You should normally not need to overload this, but if you do,
make sure to reproduce at least the two last commands in this
method (unless you want to fundamentally change how a
Character object works).
"""
super(DefaultCharacter, self).basetype_setup()
self.locks.add(";".join(["get:false()", # noone can pick up the character
"call:false()"])) # no commands can be called on character from outside
# add the default cmdset
self.cmdset.add_default(settings.CMDSET_CHARACTER, permanent=True)
def at_after_move(self, source_location, **kwargs):
"""
We make sure to look around after a move.
"""
if self.location.access(self, "view"):
self.msg(self.at_look(self.location))
def at_pre_puppet(self, account, session=None, **kwargs):
"""
Return the character from storage in None location in `at_post_unpuppet`.
Args:
account (Account): This is the connecting account.
session (Session): Session controlling the connection.
"""
if self.location is None: # Make sure character's location is never None before being puppeted.
# Return to last location (or home, which should always exist),
self.location = self.db.prelogout_location if self.db.prelogout_location else self.home
self.location.at_object_receive(self, None) # and trigger the location's reception hook.
if self.location: # If the character is verified to be somewhere,
self.db.prelogout_location = self.location # save location again to be sure.
else:
account.msg("|r%s has no location and no home is set.|n" % self, session=session) # Note to set home.
def at_post_puppet(self, **kwargs):
"""
Called just after puppeting has been completed and all
Account<->Object links have been established.
Args:
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Note:
You can use `self.account` and `self.sessions.get()` to get
account and sessions at this point; the last entry in the
list from `self.sessions.get()` is the latest Session
puppeting this Object.
"""
# NOTE: commenting out extraneous info
#self.msg("\nYou become |c%s|n.\n" % self.name)
self.msg((self.at_look(self.location), {'type':'look'}), options = None)
def message(obj, from_obj):
obj.msg("%s has entered the game." % self.get_display_name(obj), from_obj=from_obj)
self.location.for_contents(message, exclude=[self], from_obj=self)
def at_post_unpuppet(self, account, session=None, **kwargs):
"""
        We stow away the character when the account goes ooc/logs off,
otherwise the character object will remain in the room also
after the account logged off ("headless", so to say).
Args:
account (Account): The account object that just disconnected
from this object.
session (Session): Session controlling the connection that
just disconnected.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if not self.sessions.count():
# only remove this char from grid if no sessions control it anymore.
if self.location:
def message(obj, from_obj):
obj.msg("%s has left the game." % self.get_display_name(obj), from_obj=from_obj)
self.location.for_contents(message, exclude=[self], from_obj=self)
self.db.prelogout_location = self.location
self.location = None
@property
def idle_time(self):
"""
Returns the idle time of the least idle session in seconds. If
no sessions are connected it returns nothing.
"""
idle = [session.cmd_last_visible for session in self.sessions.all()]
if idle:
return time.time() - float(max(idle))
return None
@property
def connection_time(self):
"""
Returns the maximum connection time of all connected sessions
in seconds. Returns nothing if there are no sessions.
"""
conn = [session.conn_time for session in self.sessions.all()]
if conn:
return time.time() - float(min(conn))
return None
#
# Base Room object
class DefaultRoom(DefaultObject):
"""
This is the base room object. It's just like any Object except its
location is always `None`.
"""
def basetype_setup(self):
"""
Simple room setup setting locks to make sure the room
cannot be picked up.
"""
super(DefaultRoom, self).basetype_setup()
self.locks.add(";".join(["get:false()",
"puppet:false()"])) # would be weird to puppet a room ...
self.location = None
#
# Default Exit command, used by the base exit object
#
class ExitCommand(command.Command):
"""
This is a command that simply cause the caller to traverse
the object it is attached to.
"""
obj = None
def func(self):
"""
Default exit traverse if no syscommand is defined.
"""
if self.obj.access(self.caller, 'traverse'):
# we may traverse the exit.
self.obj.at_traverse(self.caller, self.obj.destination)
else:
# exit is locked
if self.obj.db.err_traverse:
# if exit has a better error message, let's use it.
self.caller.msg(self.obj.db.err_traverse)
else:
# No shorthand error message. Call hook.
self.obj.at_failed_traverse(self.caller)
def get_extra_info(self, caller, **kwargs):
"""
Shows a bit of information on where the exit leads.
Args:
caller (Object): The object (usually a character) that entered an ambiguous command.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Returns:
A string with identifying information to disambiguate the command, conventionally with a preceding space.
"""
if self.obj.destination:
return " (exit to %s)" % self.obj.destination.get_display_name(caller)
else:
return " (%s)" % self.obj.get_display_name(caller)
#
# Base Exit object
class DefaultExit(DefaultObject):
"""
This is the base exit object - it connects a location to another.
This is done by the exit assigning a "command" on itself with the
same name as the exit object (to do this we need to remember to
re-create the command when the object is cached since it must be
created dynamically depending on what the exit is called). This
command (which has a high priority) will thus allow us to traverse
exits simply by giving the exit-object's name on its own.
"""
exit_command = ExitCommand
priority = 101
# Helper classes and methods to implement the Exit. These need not
    # be overloaded unless one wants to change the foundation for how
# Exits work. See the end of the class for hook methods to overload.
def create_exit_cmdset(self, exidbobj):
"""
Helper function for creating an exit command set + command.
The command of this cmdset has the same name as the Exit
        object and allows the exit to react when the account enters the
exit's name, triggering the movement between rooms.
Args:
exidbobj (Object): The DefaultExit object to base the command on.
"""
# create an exit command. We give the properties here,
# to always trigger metaclass preparations
cmd = self.exit_command(key=exidbobj.db_key.strip().lower(),
aliases=exidbobj.aliases.all(),
locks=str(exidbobj.locks),
auto_help=False,
destination=exidbobj.db_destination,
arg_regex=r"^$",
is_exit=True,
obj=exidbobj)
# create a cmdset
exit_cmdset = cmdset.CmdSet(None)
exit_cmdset.key = 'ExitCmdSet'
exit_cmdset.priority = self.priority
exit_cmdset.duplicates = True
# add command to cmdset
exit_cmdset.add(cmd)
return exit_cmdset
# Command hooks
def basetype_setup(self):
"""
Setup exit-security
You should normally not need to overload this - if you do make
sure you include all the functionality in this method.
"""
super(DefaultExit, self).basetype_setup()
# setting default locks (overload these in at_object_creation()
self.locks.add(";".join(["puppet:false()", # would be weird to puppet an exit ...
"traverse:all()", # who can pass through exit by default
"get:false()"])) # noone can pick up the exit
# an exit should have a destination (this is replaced at creation time)
if self.location:
self.destination = self.location
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this object are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the object currently
has no cmdsets.
Kwargs:
force_init (bool): If `True`, force a re-build of the cmdset
(for example to update aliases).
"""
if "force_init" in kwargs or not self.cmdset.has_cmdset("ExitCmdSet", must_be_default=True):
# we are resetting, or no exit-cmdset was set. Create one dynamically.
self.cmdset.add_default(self.create_exit_cmdset(self), permanent=False)
def at_init(self):
"""
        This is called when this object is re-loaded from cache. When
that happens, we make sure to remove any old ExitCmdSet cmdset
(this most commonly occurs when renaming an existing exit)
"""
self.cmdset.remove_default()
def at_traverse(self, traversing_object, target_location, **kwargs):
"""
This implements the actual traversal. The traverse lock has
already been checked (in the Exit command) at this point.
Args:
traversing_object (Object): Object traversing us.
target_location (Object): Where target is going.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
source_location = traversing_object.location
if traversing_object.move_to(target_location):
self.at_after_traverse(traversing_object, source_location)
else:
if self.db.err_traverse:
# if exit has a better error message, let's use it.
                traversing_object.msg(self.db.err_traverse)
else:
# No shorthand error message. Call hook.
self.at_failed_traverse(traversing_object)
def at_failed_traverse(self, traversing_object, **kwargs):
"""
Overloads the default hook to implement a simple default error message.
Args:
traversing_object (Object): The object that failed traversing us.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
Notes:
Using the default exits, this hook will not be called if an
Attribute `err_traverse` is defined - this will in that case be
read for an error string instead.
"""
traversing_object.msg("You cannot go there.")
| """
Handles the get/setting of the sessid
comma-separated integer field
"""
def __init__(self, obj):
"""
Initializes the handler.
Args:
obj (Object): The object on which the handler is defined.
"""
self.obj = obj
self._sessid_cache = []
self._recache()
def _recache(self):
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
self._sessid_cache = list(set(int(val) for val in (self.obj.db_sessid or "").split(",") if val))
if any(sessid for sessid in self._sessid_cache if sessid not in _SESSIONS):
# cache is out of sync with sessionhandler! Only retain the ones in the handler.
self._sessid_cache = [sessid for sessid in self._sessid_cache if sessid in _SESSIONS]
self.obj.db_sessid = ",".join(str(val) for val in self._sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def get(self, sessid=None):
"""
Get the sessions linked to this Object.
Args:
sessid (int, optional): A specific session id.
Returns:
sessions (list): The sessions connected to this object. If `sessid` is given,
this is a list of one (or zero) elements.
Notes:
Aliased to `self.all()`.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
if sessid:
sessions = [_SESSIONS[sessid] if sessid in _SESSIONS else None] if sessid in self._sessid_cache else []
else:
sessions = [_SESSIONS[ssid] if ssid in _SESSIONS else None for ssid in self._sessid_cache]
if None in sessions:
# this happens only if our cache has gone out of sync with the SessionHandler.
self._recache()
return self.get(sessid=sessid)
return sessions
def all(self):
"""
Alias to get(), returning all sessions.
Returns:
sessions (list): All sessions.
"""
return self.get()
def add(self, session):
"""
Add session to handler.
Args:
session (Session or int): Session or session id to add.
Notes:
We will only add a session/sessid if this actually also exists
            in the core sessionhandler.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
try:
sessid = session.sessid
except AttributeError:
sessid = session
sessid_cache = self._sessid_cache
if sessid in _SESSIONS and sessid not in sessid_cache:
if len(sessid_cache) >= _SESSID_MAX:
return
sessid_cache.append(sessid)
self.obj.db_sessid = ",".join(str(val) for val in sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def remove(self, session):
"""
Remove session from handler.
Args:
session (Session or int): Session or session id to remove.
"""
try:
sessid = session.sessid
except AttributeError:
sessid = session
sessid_cache = self._sessid_cache
if sessid in sessid_cache:
sessid_cache.remove(sessid)
self.obj.db_sessid = ",".join(str(val) for val in sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def clear(self):
"""
Clear all handled sessids.
"""
self._sessid_cache = []
self.obj.db_sessid = None
self.obj.save(update_fields=["db_sessid"])
def count(self):
"""
Get amount of sessions connected.
Returns:
sesslen (int): Number of sessions handled.
"""
return len(self._sessid_cache) |
fr_ML.go | package fr_ML
import (
"math"
"strconv"
"time"
"github.com/DeineAgenturUG/locales"
"github.com/DeineAgenturUG/locales/currency"
)
type fr_ML struct {
locale string
pluralsCardinal []locales.PluralRule
pluralsOrdinal []locales.PluralRule
pluralsRange []locales.PluralRule
decimal string
group string
minus string
percent string
percentSuffix string
perMille string
timeSeparator string
inifinity string
currencies []string // idx = enum of currency code
currencyPositiveSuffix string
currencyNegativePrefix string
currencyNegativeSuffix string
monthsAbbreviated []string
monthsNarrow []string
monthsWide []string
daysAbbreviated []string
daysNarrow []string
daysShort []string
daysWide []string
periodsAbbreviated []string
periodsNarrow []string
periodsShort []string
periodsWide []string
erasAbbreviated []string
erasNarrow []string
erasWide []string
timezones map[string]string
}
// New returns a new instance of translator for the 'fr_ML' locale
func New() locales.Translator {
return &fr_ML{
locale: "fr_ML",
pluralsCardinal: []locales.PluralRule{2, 6},
pluralsOrdinal: []locales.PluralRule{2, 6},
pluralsRange: []locales.PluralRule{2, 6},
decimal: ",",
group: " ",
minus: "-",
percent: "%",
perMille: "‰",
timeSeparator: ":",
inifinity: "∞",
currencies: []string{"ADP", "AED", "AFA", "AFN", "ALK", "ALL", "AMD", "ANG", "AOA", "AOK", "AON", "AOR", "ARA", "ARL", "ARM", "ARP", "ARS", "ATS", "AUD", "AWG", "AZM", "AZN", "BAD", "BAM", "BAN", "BBD", "BDT", "BEC", "BEF", "BEL", "BGL", "BGM", "BGN", "BGO", "BHD", "BIF", "BMD", "BND", "BOB", "BOL", "BOP", "BOV", "BRB", "BRC", "BRE", "BRL", "BRN", "BRR", "BRZ", "BSD", "BTN", "BUK", "BWP", "BYB", "BYN", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF", "CHW", "CLE", "CLF", "CLP", "CNH", "CNX", "CNY", "COP", "COU", "CRC", "CSD", "CSK", "CUC", "CUP", "CVE", "CYP", "CZK", "DDM", "DEM", "DJF", "DKK", "DOP", "DZD", "ECS", "ECV", "EEK", "EGP", "ERN", "ESA", "ESB", "ESP", "ETB", "EUR", "FIM", "FJD", "FKP", "FRF", "GBP", "GEK", "GEL", "GHC", "GHS", "GIP", "GMD", "GNF", "GNS", "GQE", "GRD", "GTQ", "GWE", "GWP", "GYD", "HKD", "HNL", "HRD", "HRK", "HTG", "HUF", "IDR", "IEP", "ILP", "ILR", "ILS", "INR", "IQD", "IRR", "ISJ", "ISK", "ITL", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRH", "KRO", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LTT", "LUC", "LUF", "LUL", "LVL", "LVR", "LYD", "MAD", "MAF", "MCF", "MDC", "MDL", "MGA", "MGF", "MKD", "MKN", "MLF", "MMK", "MNT", "MOP", "MRO", "MRU", "MTL", "MTP", "MUR", "MVP", "MVR", "MWK", "MXN", "MXP", "MXV", "MYR", "MZE", "MZM", "MZN", "NAD", "NGN", "NIC", "NIO", "NLG", "NOK", "NPR", "NZD", "OMR", "PAB", "PEI", "PEN", "PES", "PGK", "PHP", "PKR", "PLN", "PLZ", "PTE", "PYG", "QAR", "RHD", "ROL", "RON", "RSD", "RUB", "RUR", "RWF", "SAR", "SBD", "SCR", "SDD", "SDG", "SDP", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "SRG", "SSP", "STD", "STN", "SUR", "SVC", "SYP", "SZL", "THB", "TJR", "TJS", "TMM", "TMT", "TND", "TOP", "TPE", "TRL", "TRY", "TTD", "TWD", "TZS", "UAH", "UAK", "UGS", "UGX", "USD", "USN", "USS", "UYI", "UYP", "UYU", "UYW", "UZS", "VEB", "VEF", "VES", "VND", "VNN", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XEU", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XRE", "XSU", "XTS", "XUA", "XXX", "YDD", "YER", "YUD", "YUM", "YUN", "YUR", "ZAL", "ZAR", "ZMK", "ZMW", "ZRN", "ZRZ", "ZWD", "ZWL", "ZWR"},
percentSuffix: " ",
currencyPositiveSuffix: " ",
currencyNegativePrefix: "(",
currencyNegativeSuffix: " )",
monthsAbbreviated: []string{"", "janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."},
monthsNarrow: []string{"", "J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"},
monthsWide: []string{"", "janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"},
daysAbbreviated: []string{"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."},
daysNarrow: []string{"D", "L", "M", "M", "J", "V", "S"},
daysShort: []string{"di", "lu", "ma", "me", "je", "ve", "sa"},
daysWide: []string{"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"},
periodsAbbreviated: []string{"AM", "PM"},
periodsNarrow: []string{"AM", "PM"},
periodsWide: []string{"AM", "PM"},
erasAbbreviated: []string{"av. J.-C.", "ap. J.-C."},
erasNarrow: []string{"av. J.-C.", "ap. J.-C."},
erasWide: []string{"avant Jésus-Christ", "après Jésus-Christ"},
timezones: map[string]string{"CHADT": "heure d’été des îles Chatham", "∅∅∅": "heure d’été de Brasilia", "MST": "heure normale de Macao", "EAT": "heure normale d’Afrique de l’Est", "OEZ": "heure normale d’Europe de l’Est", "BT": "heure du Bhoutan", "EDT": "heure d’été de l’Est", "WAT": "heure normale d’Afrique de l’Ouest", "HKST": "heure d’été de Hong Kong", "MEZ": "heure normale d’Europe centrale", "HNCU": "heure normale de Cuba", "PDT": "heure d’été du Pacifique", "SAST": "heure normale d’Afrique méridionale", "CST": "heure normale du centre nord-américain", "MYT": "heure de la Malaisie", "LHST": "heure normale de Lord Howe", "ACWDT": "heure d’été du centre-ouest de l’Australie", "GMT": "heure moyenne de Greenwich", "MDT": "heure d’été de Macao", "AEDT": "heure d’été de l’Est de l’Australie", "NZDT": "heure d’été de la Nouvelle-Zélande", "HEEG": "heure d’été de l’Est du Groenland", "MESZ": "heure d’été d’Europe centrale", "WESZ": "heure d’été d’Europe de l’Ouest", "BOT": "heure de Bolivie", "HNEG": "heure normale de l’Est du Groenland", "EST": "heure normale de l’Est nord-américain", "UYST": "heure d’été de l’Uruguay", "AST": "heure normale de l’Atlantique", "ARST": "heure d’été de l’Argentine", "AKDT": "heure d’été de l’Alaska", "COT": "heure normale de Colombie", "HNOG": "heure normale de l’Ouest du Groenland", "ADT": "heure d’été de l’Atlantique", "HKT": "heure normale de Hong Kong", "GFT": "heure de la Guyane française", "ECT": "heure de l’Équateur", "JST": "heure normale du Japon", "WART": "heure normale de l’Ouest argentin", "WIB": "heure de l’Ouest indonésien", "HNPMX": "heure normale du Pacifique mexicain", "ACWST": "heure normale du centre-ouest de l’Australie", "SRT": "heure du Suriname", "SGT": "heure de Singapour", "NZST": "heure normale de la Nouvelle-Zélande", "ACST": "heure normale du centre de l’Australie", "HNPM": "heure normale de Saint-Pierre-et-Miquelon", "UYT": "heure normale de l’Uruguay", "COST": "heure d’été de Colombie", "WIT": "heure de l’Est indonésien", "VET": "heure du Venezuela", "HNNOMX": "heure normale du Nord-Ouest du Mexique", "HNT": "heure normale de Terre-Neuve", "ACDT": "heure d’été du centre de l’Australie", "WEZ": "heure normale d’Europe de l’Ouest", "CLT": "heure normale du Chili", "TMT": "heure normale du Turkménistan", "OESZ": "heure d’été d’Europe de l’Est", "WAST": "heure d’été d’Afrique de l’Ouest", "WARST": "heure d’été de l’Ouest argentin", "HECU": "heure d’été de Cuba", "HEPM": "heure d’été de Saint-Pierre-et-Miquelon", "CAT": "heure normale d’Afrique centrale", "CDT": "heure d’été du Centre", "LHDT": "heure d’été de Lord Howe", "HADT": "heure d’été d’Hawaii - Aléoutiennes", "ART": "heure normale d’Argentine", "HAT": "heure d’été de Terre-Neuve", "IST": "heure de l’Inde", "GYT": "heure du Guyana", "WITA": "heure du Centre indonésien", "HENOMX": "heure d’été du Nord-Ouest du Mexique", "PST": "heure normale du Pacifique nord-américain", "HEOG": "heure d’été de l’Ouest du Groenland", "AWST": "heure normale de l’Ouest de l’Australie", "JDT": "heure d’été du Japon", "TMST": "heure d’été du Turkménistan", "HAST": "heure normale d’Hawaii - Aléoutiennes", "CLST": "heure d’été du Chili", "HEPMX": "heure d’été du Pacifique mexicain", "AEST": "heure normale de l’Est de l’Australie", "CHAST": "heure normale des îles Chatham", "ChST": "heure des Chamorro", "AWDT": "heure d’été de l’Ouest de l’Australie", "AKST": "heure normale de l’Alaska"},
}
}
// Locale returns the current translators string locale
func (fr *fr_ML) Locale() string {
return fr.locale
}
// PluralsCardinal returns the list of cardinal plural rules associated with 'fr_ML'
func (fr *fr_ML) PluralsCardinal() []locales.PluralRule {
return fr.pluralsCardinal
}
| }
// PluralsRange returns the list of range plural rules associated with 'fr_ML'
func (fr *fr_ML) PluralsRange() []locales.PluralRule {
return fr.pluralsRange
}
// CardinalPluralRule returns the cardinal PluralRule given 'num' and digits/precision of 'v' for 'fr_ML'
func (fr *fr_ML) CardinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
i := int64(n)
if i == 0 || i == 1 {
return locales.PluralRuleOne
}
return locales.PluralRuleOther
}
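// Illustrative note (not part of the generated file): with the rule above,
// CardinalPluralRule(0, 0) and CardinalPluralRule(1, 0) both return
// locales.PluralRuleOne (French treats 0 and 1 as singular), while
// CardinalPluralRule(2, 0) returns locales.PluralRuleOther.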
// OrdinalPluralRule returns the ordinal PluralRule given 'num' and digits/precision of 'v' for 'fr_ML'
func (fr *fr_ML) OrdinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
if n == 1 {
return locales.PluralRuleOne
}
return locales.PluralRuleOther
}
// RangePluralRule returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for 'fr_ML'
func (fr *fr_ML) RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) locales.PluralRule {
start := fr.CardinalPluralRule(num1, v1)
end := fr.CardinalPluralRule(num2, v2)
if start == locales.PluralRuleOne && end == locales.PluralRuleOne {
return locales.PluralRuleOne
} else if start == locales.PluralRuleOne && end == locales.PluralRuleOther {
return locales.PluralRuleOther
}
return locales.PluralRuleOther
}
// MonthAbbreviated returns the locales abbreviated month given the 'month' provided
func (fr *fr_ML) MonthAbbreviated(month time.Month) string {
return fr.monthsAbbreviated[month]
}
// MonthsAbbreviated returns the locales abbreviated months
func (fr *fr_ML) MonthsAbbreviated() []string {
return fr.monthsAbbreviated[1:]
}
// MonthNarrow returns the locales narrow month given the 'month' provided
func (fr *fr_ML) MonthNarrow(month time.Month) string {
return fr.monthsNarrow[month]
}
// MonthsNarrow returns the locales narrow months
func (fr *fr_ML) MonthsNarrow() []string {
return fr.monthsNarrow[1:]
}
// MonthWide returns the locales wide month given the 'month' provided
func (fr *fr_ML) MonthWide(month time.Month) string {
return fr.monthsWide[month]
}
// MonthsWide returns the locales wide months
func (fr *fr_ML) MonthsWide() []string {
return fr.monthsWide[1:]
}
// WeekdayAbbreviated returns the locales abbreviated weekday given the 'weekday' provided
func (fr *fr_ML) WeekdayAbbreviated(weekday time.Weekday) string {
return fr.daysAbbreviated[weekday]
}
// WeekdaysAbbreviated returns the locales abbreviated weekdays
func (fr *fr_ML) WeekdaysAbbreviated() []string {
return fr.daysAbbreviated
}
// WeekdayNarrow returns the locales narrow weekday given the 'weekday' provided
func (fr *fr_ML) WeekdayNarrow(weekday time.Weekday) string {
return fr.daysNarrow[weekday]
}
// WeekdaysNarrow returns the locales narrow weekdays
func (fr *fr_ML) WeekdaysNarrow() []string {
return fr.daysNarrow
}
// WeekdayShort returns the locales short weekday given the 'weekday' provided
func (fr *fr_ML) WeekdayShort(weekday time.Weekday) string {
return fr.daysShort[weekday]
}
// WeekdaysShort returns the locales short weekdays
func (fr *fr_ML) WeekdaysShort() []string {
return fr.daysShort
}
// WeekdayWide returns the locales wide weekday given the 'weekday' provided
func (fr *fr_ML) WeekdayWide(weekday time.Weekday) string {
return fr.daysWide[weekday]
}
// WeekdaysWide returns the locales wide weekdays
func (fr *fr_ML) WeekdaysWide() []string {
return fr.daysWide
}
// Decimal returns the decimal point of number
func (fr *fr_ML) Decimal() string {
return fr.decimal
}
// Group returns the group of number
func (fr *fr_ML) Group() string {
return fr.group
}
// Group returns the minus sign of number
func (fr *fr_ML) Minus() string {
return fr.minus
}
// FmtNumber returns 'num' with digits/precision of 'v' for 'fr_ML' and handles both Whole and Real numbers based on 'v'
func (fr *fr_ML) FmtNumber(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 2 + 3*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, fr.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
for j := len(fr.group) - 1; j >= 0; j-- {
b = append(b, fr.group[j])
}
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, fr.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
return string(b)
}
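// Illustrative usage (not part of the generated file); the group separator is
// whatever byte sequence the `group` field above holds (a narrow no-break
// space for this locale):
//
//	n := New()
//	s := n.FmtNumber(1234567.25, 2) // expected: "1 234 567,25"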
// FmtPercent returns 'num' with digits/precision of 'v' for 'fr_ML' and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
func (fr *fr_ML) FmtPercent(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 5
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, fr.decimal[0])
continue
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, fr.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
b = append(b, fr.percentSuffix...)
b = append(b, fr.percent...)
return string(b)
}
// FmtCurrency returns the currency representation of 'num' with digits/precision of 'v' for 'fr_ML'
func (fr *fr_ML) FmtCurrency(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := fr.currencies[currency]
l := len(s) + len(symbol) + 4 + 3*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, fr.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
for j := len(fr.group) - 1; j >= 0; j-- {
b = append(b, fr.group[j])
}
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, fr.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, fr.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
b = append(b, fr.currencyPositiveSuffix...)
b = append(b, symbol...)
return string(b)
}
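// Illustrative usage (not part of the generated file): assuming currency.EUR
// indexes the "EUR" entry of the currencies table above,
// FmtCurrency(1234.5, 2, currency.EUR) is expected to yield the grouped
// number "1 234,50" followed by currencyPositiveSuffix and the symbol,
// i.e. roughly "1 234,50 EUR".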
// FmtAccounting returns the currency representation of 'num' with digits/precision of 'v' for 'fr_ML'
// in accounting notation.
func (fr *fr_ML) FmtAccounting(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := fr.currencies[currency]
l := len(s) + len(symbol) + 6 + 3*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, fr.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
for j := len(fr.group) - 1; j >= 0; j-- {
b = append(b, fr.group[j])
}
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, fr.currencyNegativePrefix[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, fr.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
if num < 0 {
b = append(b, fr.currencyNegativeSuffix...)
b = append(b, symbol...)
} else {
b = append(b, fr.currencyPositiveSuffix...)
b = append(b, symbol...)
}
return string(b)
}
// FmtDateShort returns the short date representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtDateShort(t time.Time) string {
b := make([]byte, 0, 32)
if t.Day() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2f}...)
if t.Month() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Month()), 10)
b = append(b, []byte{0x2f}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
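// Illustrative usage (not part of the generated file): for a time.Time of
// 5 March 2021, FmtDateShort is expected to produce "05/03/2021"
// (zero-padded day and month separated by '/', then the year).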
// FmtDateMedium returns the medium date representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtDateMedium(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, fr.monthsAbbreviated[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateLong returns the long date representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtDateLong(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, fr.monthsWide[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateFull returns the full date representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtDateFull(t time.Time) string {
b := make([]byte, 0, 32)
b = append(b, fr.daysWide[t.Weekday()]...)
b = append(b, []byte{0x20}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, fr.monthsWide[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtTimeShort returns the short time representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtTimeShort(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, fr.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
return string(b)
}
// FmtTimeMedium returns the medium time representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtTimeMedium(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, fr.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, fr.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
return string(b)
}
// FmtTimeLong returns the long time representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtTimeLong(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, fr.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, fr.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
b = append(b, tz...)
return string(b)
}
// FmtTimeFull returns the full time representation of 't' for 'fr_ML'
func (fr *fr_ML) FmtTimeFull(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, fr.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, fr.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
if btz, ok := fr.timezones[tz]; ok {
b = append(b, btz...)
} else {
b = append(b, tz...)
}
return string(b)
} | // PluralsOrdinal returns the list of ordinal plural rules associated with 'fr_ML'
func (fr *fr_ML) PluralsOrdinal() []locales.PluralRule {
return fr.pluralsOrdinal |
utils.py | from functools import partial, wraps
from logging import getLogger
from time import sleep
from cornice.resource import resource
from couchdb import ResourceConflict
from dateorro import calc_datetime
from jsonpointer import resolve_pointer
from pyramid.compat import decode_path_info
from pyramid.exceptions import URLDecodeError
from openprocurement.api.constants import WORKING_DAYS
from openprocurement.api.utils import (
error_handler,
update_logging_context,
set_modetest_titles,
get_revision_changes,
get_now,
handle_store_exceptions,
context_unpack,
apply_data_patch,
append_revision,
get_doc_by_id,
ACCELERATOR_RE,
generate_id,
)
from openprocurement.framework.core.models import IAgreement
from openprocurement.framework.core.traversal import (
framework_factory,
submission_factory,
qualification_factory,
agreement_factory,
)
LOGGER = getLogger("openprocurement.framework.core")
ENQUIRY_PERIOD_DURATION = 10
SUBMISSION_STAND_STILL_DURATION = 30
frameworksresource = partial(resource, error_handler=error_handler, factory=framework_factory)
submissionsresource = partial(resource, error_handler=error_handler, factory=submission_factory)
qualificationsresource = partial(resource, error_handler=error_handler, factory=qualification_factory)
agreementsresource = partial(resource, error_handler=error_handler, factory=agreement_factory)
class isFramework(object):
"""Framework Route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "frameworkType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.framework is not None:
return getattr(request.framework, "frameworkType", None) == self.val
return False
class isSubmission(object):
"""Submission Route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "submissionType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.submission is not None:
return getattr(request.submission, "submissionType", None) == self.val
return False
class isQualification(object):
"""Qualification Route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "qualificationType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.qualification is not None:
return getattr(request.qualification, "qualificationType", None) == self.val
return False
class IsAgreement(object):
""" Agreement route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "agreementType = %s" % (self.val,)
phash = text
def | (self, context, request):
if request.agreement is not None:
return getattr(request.agreement, "agreementType", None) == self.val
return False
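# Note (added for clarity, not in the original module): the four predicate
# classes above follow Pyramid's custom route/view predicate protocol (a
# factory taking (val, config) plus text/phash and a call hook), so they are
# meant to be registered during configuration, e.g. something like
# config.add_route_predicate("frameworkType", isFramework); the exact
# registration call is an assumption here, not taken from this file.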
def register_framework_frameworkType(config, model):
"""Register a framework frameworkType.
:param config:
The pyramid configuration object that will be populated.
:param model:
The framework model class
"""
config.registry.framework_frameworkTypes[model.frameworkType.default] = model
def register_submission_submissionType(config, model):
submission_type = model.submissionType.default
config.registry.submission_submissionTypes[submission_type] = model
def register_qualification_qualificationType(config, model):
qualification_type = model.qualificationType.default
config.registry.qualification_qualificationTypes[qualification_type] = model
def register_agreement_agreementType(config, model):
agreement_type = model.agreementType.default
config.registry.agreement_agreementTypes[agreement_type] = model
def object_from_data(request, data, obj_name, raise_error=True, create=True):
objType = data.get("%sType" % obj_name, "electronicCatalogue")
model_types = getattr(request.registry, "%s_%sTypes" % (obj_name, obj_name))
model = model_types.get(objType)
if model is None and raise_error:
request.errors.add("body", "%sType" % obj_name, "Not implemented")
request.errors.status = 415
raise error_handler(request)
update_logging_context(request, {"%s_type" % obj_name: objType})
if model is not None and create:
model = model(data)
return model
def framework_from_data(request, data, raise_error=True, create=True):
return object_from_data(request, data, "framework", raise_error=raise_error, create=create)
def submission_from_data(request, data, raise_error=True, create=True):
return object_from_data(request, data, "submission", raise_error=raise_error, create=create)
def qualification_from_data(request, data, raise_error=True, create=True):
return object_from_data(request, data, "qualification", raise_error=raise_error, create=create)
def agreement_from_data(request, data, raise_error=True, create=True):
if request.authenticated_role == "agreements":
data["agreementType"] = "cfaua"
if not data.get("agreementType") and raise_error:
request.errors.add("data", "agreementType", "This field is required")
request.errors.status = 422
raise error_handler(request)
return object_from_data(request, data, "agreement", raise_error=raise_error, create=create)
def extract_doc_adapter(request, doc_type, doc_id):
doc_type_singular = doc_type[:-1] # lower, without last symbol "frameworks" --> "framework"
doc = get_doc_by_id(request.registry.databases[doc_type], doc_type_singular.capitalize(), doc_id)
if doc is None:
request.errors.add("url", "%s_id" % doc_type_singular, "Not Found")
request.errors.status = 404
raise error_handler(request)
# obsolete lowercase doc_type in agreements
if doc is not None and doc.get("doc_type") == "agreement":
request.errors.add("url", "agreement_id", "Archived")
request.errors.status = 410
raise error_handler(request)
method = getattr(request, "%s_from_data" % doc_type_singular)
return method(doc)
def extract_doc(request):
try:
# empty if mounted under a path in mod_wsgi, for example
path = decode_path_info(request.environ["PATH_INFO"] or "/")
except KeyError:
path = "/"
except UnicodeDecodeError as e:
raise URLDecodeError(e.encoding, e.object, e.start, e.end, e.reason)
# obj_id = ""
# extract object id
parts = path.split("/")
if len(parts) < 4 or parts[3] not in ("frameworks", "submissions", "qualifications", "agreements"):
return
# obj_type = parts[3][0].upper() + parts[3][1:-1]
obj_type = parts[3]
obj_id = parts[4]
return extract_doc_adapter(request, obj_type, obj_id)
def generate_framework_pretty_id(ctime, db, server_id=""):
key = ctime.date().isoformat()
prettyIDdoc = "frameworkPrettyID_" + server_id if server_id else "frameworkPrettyID"
while True:
try:
prettyID = db.get(prettyIDdoc, {"_id": prettyIDdoc})
index = prettyID.get(key, 1)
prettyID[key] = index + 1
db.save(prettyID)
except ResourceConflict: # pragma: no cover
pass
except Exception: # pragma: no cover
sleep(1)
else:
break
return "UA-F-{:04}-{:02}-{:02}-{:06}{}".format(
ctime.year, ctime.month, ctime.day, index, server_id and "-" + server_id
)
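# Example (illustrative, not in the original): the third framework created on
# 2023-05-12 with an empty server_id would get the id "UA-F-2023-05-12-000003";
# a non-empty server_id is appended as "-<server_id>".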
def generate_agreementID(ctime, db, server_id=""):
key = ctime.date().isoformat()
prettyIDdoc = "agreementID_" + server_id if server_id else "agreementID"
while True:
try:
agreementID = db.get(prettyIDdoc, {"_id": prettyIDdoc})
index = agreementID.get(key, 1)
agreementID[key] = index + 1
db.save(agreementID)
except ResourceConflict: # pragma: no cover
pass
except Exception: # pragma: no cover
sleep(1)
else:
break
return "UA-{:04}-{:02}-{:02}-{:06}{}".format(
ctime.year, ctime.month, ctime.day, index, server_id and "-" + server_id
)
def save_object(request, obj_name, with_test_mode=True, additional_obj_names=""):
obj = request.validated[obj_name]
if with_test_mode and obj.mode == "test":
set_modetest_titles(obj)
patch = get_revision_changes(obj.serialize("plain"), request.validated["%s_src" % obj_name])
if patch:
now = get_now()
append_obj_revision(request, obj, patch, now)
old_date_modified = obj.dateModified
if getattr(obj, "modified", True):
obj.dateModified = now
for i in additional_obj_names:
if i in request.validated:
request.validated[i].dateModified = now
with handle_store_exceptions(request):
obj.store(request.registry.databases[f"{obj_name}s"]) # TODO a better way to specify db name?
LOGGER.info(
"Saved {} {}: dateModified {} -> {}".format(
obj_name,
obj.id,
old_date_modified and old_date_modified.isoformat(),
obj.dateModified.isoformat()
),
extra=context_unpack(request, {"MESSAGE_ID": "save_{}".format(obj_name)}, {"RESULT": obj.rev}),
)
return True
def save_framework(request, additional_obj_names=""):
return save_object(request, "framework", additional_obj_names=additional_obj_names)
def save_submission(request, additional_obj_names=""):
return save_object(request, "submission", with_test_mode=False, additional_obj_names=additional_obj_names)
def save_qualification(request, additional_obj_names=""):
return save_object(request, "qualification", with_test_mode=False, additional_obj_names=additional_obj_names)
def save_agreement(request, additional_obj_names=""):
return save_object(request, "agreement", with_test_mode=False, additional_obj_names=additional_obj_names)
def get_framework_accelerator(context):
if context and "frameworkDetails" in context and context["frameworkDetails"]:
re_obj = ACCELERATOR_RE.search(context["frameworkDetails"])
if re_obj and "accelerator" in re_obj.groupdict():
return int(re_obj.groupdict()["accelerator"])
return None
def acceleratable(wrapped):
@wraps(wrapped)
def wrapper(date_obj, timedelta_obj, framework=None, working_days=False, calendar=WORKING_DAYS, **kwargs):
accelerator = get_framework_accelerator(framework)
if accelerator:
return calc_datetime(date_obj, timedelta_obj, accelerator=accelerator)
return wrapped(
date_obj, timedelta_obj, framework=framework, working_days=working_days, calendar=calendar, **kwargs
)
return wrapper
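# Illustrative sketch (not in the original): `acceleratable` is meant to wrap
# period-calculation helpers so that frameworks whose frameworkDetails match
# ACCELERATOR_RE bypass the working-day calendar, e.g. a hypothetical helper:
#
#     @acceleratable
#     def calculate_framework_date(date_obj, timedelta_obj, framework=None,
#                                  working_days=False, calendar=WORKING_DAYS):
#         return calc_datetime(date_obj, timedelta_obj)
#
# would have its period compressed by the parsed accelerator when present.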
def apply_patch(request, obj_name, data=None, save=True, src=None, additional_obj_names=""):
save_map = {
"framework": save_framework,
"submission": save_submission,
"qualification": save_qualification,
"agreement": save_agreement,
}
data = request.validated["data"] if data is None else data
patch = data and apply_data_patch(src or request.context.serialize(), data)
if patch:
        # Can't be replaced with "obj_name in save_map" because obj_name for a child patch is the same as for the parent
if request.context.__class__.__name__.lower() in save_map:
request.validated[obj_name].import_data(patch)
else:
request.context.import_data(patch)
if save:
save_func = save_map.get(obj_name)
return save_func(request, additional_obj_names=additional_obj_names)
def append_obj_revision(request, obj, patch, date):
status_changes = [p for p in patch if all([
p["path"].endswith("/status"),
p["op"] == "replace"
])]
changed_obj = obj
for change in status_changes:
changed_obj = resolve_pointer(obj, change["path"].replace("/status", ""))
if changed_obj and hasattr(changed_obj, "date") and hasattr(changed_obj, "revisions"):
date_path = change["path"].replace("/status", "/date")
if changed_obj.date and not any([p for p in patch if date_path == p["path"]]):
patch.append({"op": "replace", "path": date_path, "value": changed_obj.date.isoformat()})
elif not changed_obj.date:
patch.append({"op": "remove", "path": date_path})
changed_obj.date = date
else:
changed_obj = obj
return append_revision(request, changed_obj, patch)
def obj_serialize(request, framework_data, fields):
obj = request.framework_from_data(framework_data, raise_error=False)
obj.__parent__ = request.context
return dict([(i, j) for i, j in obj.serialize("view").items() if i in fields])
def agreement_serialize(request, agreement_data, fields):
agreement = request.agreement_from_data(agreement_data, raise_error=False)
agreement.__parent__ = request.context
return {i: j for i, j in agreement.serialize("view").items() if i in fields}
def get_submission_by_id(request, submission_id):
if submission_id:
return request.registry.databases.submissions.get(submission_id)
def get_framework_by_id(request, framework_id):
if framework_id:
return request.registry.databases.frameworks.get(framework_id)
def get_agreement_by_id(request, agreement_id):
if agreement_id:
return request.registry.databases.agreements.get(agreement_id)
def set_agreement_ownership(item, request):
item.owner_token = generate_id()
def get_agreement(model):
while not IAgreement.providedBy(model):
model = model.__parent__
return model
| __call__ |
cmd.go | package util
import (
"bytes"
"fmt"
"os"
"os/exec"
)
// RunCmd will open the sub process
func | (args []string, cwd string, env []string, background bool, stdout, stderr *string) (int, error) {
var stdoutBuf, stderrBuf bytes.Buffer
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf
if len(env) > 0 {
cmd.Env = append(os.Environ(), env...)
}
cmd.Dir = cwd
err := cmd.Start()
if err != nil {
return -1, fmt.Errorf(`Start command "%v" error: %s`, args, err)
}
if background {
return 0, nil
}
var exitCode int
err = cmd.Wait()
if err != nil {
exitError, ok := err.(*exec.ExitError)
if ok {
exitCode = exitError.ExitCode()
} else {
return -1, fmt.Errorf(`Run command "%v" error: %s`, args, err)
}
}
if stdout != nil {
*stdout = stdoutBuf.String()
}
if stderr != nil {
*stderr = stderrBuf.String()
}
return exitCode, nil
}
| RunCmd |
gen_CustomElementRegistry.rs | #![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
# [ wasm_bindgen ( extends = :: js_sys :: Object , js_name = CustomElementRegistry , typescript_type = "CustomElementRegistry" ) ]
#[derive(Debug, Clone, PartialEq, Eq)]
#[doc = "The `CustomElementRegistry` class."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `CustomElementRegistry`*"] | #[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/define)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `CustomElementRegistry`*"]
pub fn define(
this: &CustomElementRegistry,
name: &str,
function_constructor: &::js_sys::Function,
) -> Result<(), JsValue>;
#[cfg(feature = "ElementDefinitionOptions")]
# [ wasm_bindgen ( catch , method , structural , js_class = "CustomElementRegistry" , js_name = define ) ]
#[doc = "The `define()` method."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/define)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `CustomElementRegistry`, `ElementDefinitionOptions`*"]
pub fn define_with_options(
this: &CustomElementRegistry,
name: &str,
function_constructor: &::js_sys::Function,
options: &ElementDefinitionOptions,
) -> Result<(), JsValue>;
# [ wasm_bindgen ( method , structural , js_class = "CustomElementRegistry" , js_name = get ) ]
#[doc = "The `get()` method."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/get)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `CustomElementRegistry`*"]
pub fn get(this: &CustomElementRegistry, name: &str) -> ::wasm_bindgen::JsValue;
#[cfg(feature = "Node")]
# [ wasm_bindgen ( method , structural , js_class = "CustomElementRegistry" , js_name = upgrade ) ]
#[doc = "The `upgrade()` method."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/upgrade)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `CustomElementRegistry`, `Node`*"]
pub fn upgrade(this: &CustomElementRegistry, root: &Node);
# [ wasm_bindgen ( catch , method , structural , js_class = "CustomElementRegistry" , js_name = whenDefined ) ]
#[doc = "The `whenDefined()` method."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/whenDefined)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `CustomElementRegistry`*"]
pub fn when_defined(
this: &CustomElementRegistry,
name: &str,
) -> Result<::js_sys::Promise, JsValue>;
} | pub type CustomElementRegistry;
# [ wasm_bindgen ( catch , method , structural , js_class = "CustomElementRegistry" , js_name = define ) ]
#[doc = "The `define()` method."] |
data.py | import random
import logging
import torch
from stanza.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanza.models.common.vocab import PAD_ID, VOCAB_PREFIX, ROOT_ID, CompositeVocab, CharVocab
from stanza.models.pos.vocab import WordVocab, XPOSVocab, FeatureVocab, MultiVocab
from stanza.models.pos.xpos_vocab_factory import xpos_vocab_factory
from stanza.models.common.doc import *
logger = logging.getLogger('stanza')
def data_to_batches(data, batch_size, eval_mode, sort_during_eval, min_length_to_batch_separately):
"""
Given a list of lists, where the first element of each sublist
represents the sentence, group the sentences into batches.
During training mode (not eval_mode) the sentences are sorted by
length with a bit of random shuffling. During eval mode, the
sentences are sorted by length if sort_during_eval is true.
Refactored from the data structure in case other models could use
it and for ease of testing.
Returns (batches, original_order), where original_order is None
when in train mode or when unsorted and represents the original
location of each sentence in the sort
"""
res = []
if not eval_mode:
# sort sentences (roughly) by length for better memory utilization
data = sorted(data, key = lambda x: len(x[0]), reverse=random.random() > .5)
data_orig_idx = None
elif sort_during_eval:
(data, ), data_orig_idx = sort_all([data], [len(x[0]) for x in data])
else:
data_orig_idx = None
current = []
currentlen = 0
for x in data:
if min_length_to_batch_separately is not None and len(x[0]) > min_length_to_batch_separately:
if currentlen > 0:
res.append(current)
current = []
currentlen = 0
res.append([x])
else:
if len(x[0]) + currentlen > batch_size and currentlen > 0:
res.append(current)
current = []
currentlen = 0
current.append(x)
currentlen += len(x[0])
if currentlen > 0:
res.append(current)
return res, data_orig_idx
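# Illustrative trace (hypothetical items, eval_mode=True, sort_during_eval=False):
# with batch_size=5 and sentence lengths [3, 2, 4], the first two sentences fit
# the length budget (3 + 2 <= 5), the third would exceed it, so the result is
# two batches: [sent_3, sent_2] and [sent_4].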
class DataLoader:
def __init__(self, doc, batch_size, args, pretrain, vocab=None, evaluation=False, sort_during_eval=False, min_length_to_batch_separately=None):
self.batch_size = batch_size
self.min_length_to_batch_separately=min_length_to_batch_separately
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
self.sort_during_eval = sort_during_eval
self.doc = doc
data = self.load_doc(doc)
# handle vocab
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
# handle pretrain; pretrain vocab is used when args['pretrain'] == True and pretrain is not None
self.pretrain_vocab = None
if pretrain is not None and args['pretrain']:
self.pretrain_vocab = pretrain.vocab
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
logger.debug("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab, self.pretrain_vocab, args)
# shuffle for training
if self.shuffled:
random.shuffle(data)
self.num_examples = len(data)
# chunk into batches
self.data = self.chunk_batches(data)
logger.debug("{} batches created.".format(len(self.data)))
def init_vocab(self, data):
assert self.eval == False # for eval vocab must exist
charvocab = CharVocab(data, self.args['shorthand'])
wordvocab = WordVocab(data, self.args['shorthand'], cutoff=7, lower=True)
uposvocab = WordVocab(data, self.args['shorthand'], idx=1)
xposvocab = xpos_vocab_factory(data, self.args['shorthand'])
featsvocab = FeatureVocab(data, self.args['shorthand'], idx=3)
lemmavocab = WordVocab(data, self.args['shorthand'], cutoff=7, idx=4, lower=True)
deprelvocab = WordVocab(data, self.args['shorthand'], idx=6)
vocab = MultiVocab({'char': charvocab,
'word': wordvocab,
'upos': uposvocab,
'xpos': xposvocab,
'feats': featsvocab,
'lemma': lemmavocab,
'deprel': deprelvocab})
return vocab
def preprocess(self, data, vocab, pretrain_vocab, args):
processed = []
xpos_replacement = [[ROOT_ID] * len(vocab['xpos'])] if isinstance(vocab['xpos'], CompositeVocab) else [ROOT_ID]
feats_replacement = [[ROOT_ID] * len(vocab['feats'])]
for sent in data:
processed_sent = [[ROOT_ID] + vocab['word'].map([w[0] for w in sent])]
processed_sent += [[[ROOT_ID]] + [vocab['char'].map([x for x in w[0]]) for w in sent]]
processed_sent += [[ROOT_ID] + vocab['upos'].map([w[1] for w in sent])]
processed_sent += [xpos_replacement + vocab['xpos'].map([w[2] for w in sent])]
processed_sent += [feats_replacement + vocab['feats'].map([w[3] for w in sent])]
if pretrain_vocab is not None:
# always use lowercase lookup in pretrained vocab
processed_sent += [[ROOT_ID] + pretrain_vocab.map([w[0].lower() for w in sent])]
else:
processed_sent += [[ROOT_ID] + [PAD_ID] * len(sent)]
processed_sent += [[ROOT_ID] + vocab['lemma'].map([w[4] for w in sent])]
processed_sent += [[to_int(w[5], ignore_error=self.eval) for w in sent]]
processed_sent += [vocab['deprel'].map([w[6] for w in sent])]
processed.append(processed_sent)
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 9
# sort sentences by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# sort words by lens for easy char-RNN operations
batch_words = [w for sent in batch[1] for w in sent]
word_lens = [len(x) for x in batch_words]
batch_words, word_orig_idx = sort_all([batch_words], word_lens)
batch_words = batch_words[0]
word_lens = [len(x) for x in batch_words]
# convert to tensors
words = batch[0]
words = get_long_tensor(words, batch_size)
words_mask = torch.eq(words, PAD_ID)
wordchars = get_long_tensor(batch_words, len(word_lens))
wordchars_mask = torch.eq(wordchars, PAD_ID)
upos = get_long_tensor(batch[2], batch_size)
xpos = get_long_tensor(batch[3], batch_size)
ufeats = get_long_tensor(batch[4], batch_size)
pretrained = get_long_tensor(batch[5], batch_size)
sentlens = [len(x) for x in batch[0]]
lemma = get_long_tensor(batch[6], batch_size)
head = get_long_tensor(batch[7], batch_size)
deprel = get_long_tensor(batch[8], batch_size)
return words, words_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, orig_idx, word_orig_idx, sentlens, word_lens
def load_doc(self, doc):
data = doc.get([TEXT, UPOS, XPOS, FEATS, LEMMA, HEAD, DEPREL], as_sentences=True)
data = self.resolve_none(data)
return data
def resolve_none(self, data):
# replace None to '_'
for sent_idx in range(len(data)):
for tok_idx in range(len(data[sent_idx])):
for feat_idx in range(len(data[sent_idx][tok_idx])):
if data[sent_idx][tok_idx][feat_idx] is None:
data[sent_idx][tok_idx][feat_idx] = '_'
return data
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def reshuffle(self):
data = [y for x in self.data for y in x]
self.data = self.chunk_batches(data)
random.shuffle(self.data)
def chunk_batches(self, data):
batches, data_orig_idx = data_to_batches(data=data, batch_size=self.batch_size,
eval_mode=self.eval, sort_during_eval=self.sort_during_eval,
min_length_to_batch_separately=self.min_length_to_batch_separately)
# data_orig_idx might be None at train time, since we don't anticipate unsorting
self.data_orig_idx = data_orig_idx
return batches
def to_int(string, ignore_error=False):
| try:
res = int(string)
except ValueError as err:
if ignore_error:
return 0
else:
raise err
return res |
|
util.js | import Vue from 'vue';
import { isString, isObject } from './types';
const hasOwnProperty = Object.prototype.hasOwnProperty;
export function noop() {}
export function hasOwn(obj, key) {
return hasOwnProperty.call(obj, key);
}
function extend(to, _from) {
for (let key in _from) {
to[key] = _from[key];
}
return to;
}
export function toObject(arr) {
var res = {};
for (let i = 0; i < arr.length; i++) {
if (arr[i]) {
extend(res, arr[i]);
}
}
return res;
}
export const getValueByPath = function(object, prop) {
prop = prop || '';
const paths = prop.split('.');
let current = object;
let result = null;
for (let i = 0, j = paths.length; i < j; i++) {
const path = paths[i];
if (!current) break;
if (i === j - 1) {
result = current[path];
break;
}
current = current[path];
}
return result;
};
export function getPropByPath(obj, path, strict) {
let tempObj = obj;
path = path.replace(/\[(\w+)\]/g, '.$1');
path = path.replace(/^\./, '');
let keyArr = path.split('.');
let i = 0;
for (let len = keyArr.length; i < len - 1; ++i) {
if (!tempObj && !strict) break;
let key = keyArr[i];
if (key in tempObj) {
tempObj = tempObj[key];
} else {
if (strict) {
throw new Error('please transfer a valid prop path to form item!');
}
break;
}
}
return {
o: tempObj,
k: keyArr[i],
v: tempObj ? tempObj[keyArr[i]] : null
};
}
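// Illustrative example (hypothetical object, not from the original source):
//   getPropByPath({ a: { b: [{ c: 1 }] } }, 'a.b[0].c')
//   // => { o: { c: 1 }, k: 'c', v: 1 }
// The bracket segment is normalized to a dot path before traversal.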
export const generateId = function() {
return Math.floor(Math.random() * 10000);
};
export const valueEquals = (a, b) => {
// see: https://stackoverflow.com/questions/3115982/how-to-check-if-two-arrays-are-equal-with-javascript
if (a === b) return true;
if (!(a instanceof Array)) return false;
if (!(b instanceof Array)) return false;
if (a.length !== b.length) return false;
for (let i = 0; i !== a.length; ++i) {
if (a[i] !== b[i]) return false;
}
return true;
};
export const escapeRegexpString = (value = '') => String(value).replace(/[|\\{}()[\]^$+*?.]/g, '\\$&');
// TODO: use native Array.find, Array.findIndex when IE support is dropped
export const arrayFindIndex = function(arr, pred) {
for (let i = 0; i !== arr.length; ++i) {
if (pred(arr[i])) {
return i;
}
}
return -1;
};
export const arrayFind = function(arr, pred) {
const idx = arrayFindIndex(arr, pred);
return idx !== -1 ? arr[idx] : undefined;
};
// coerce truthy value to array
export const coerceTruthyValueToArray = function(val) {
if (Array.isArray(val)) {
return val;
} else if (val) {
return [val];
} else {
return [];
}
};
export const isIE = function() {
return !Vue.prototype.$isServer && !isNaN(Number(document.documentMode));
};
export const isEdge = function() {
return !Vue.prototype.$isServer && navigator.userAgent.indexOf('Edge') > -1;
};
export const isFirefox = function() {
return !Vue.prototype.$isServer && !!window.navigator.userAgent.match(/firefox/i);
};
export const autoprefixer = function(style) {
if (typeof style !== 'object') return style;
const rules = ['transform', 'transition', 'animation'];
const prefixes = ['ms-', 'webkit-'];
rules.forEach(rule => {
const value = style[rule];
if (rule && value) {
prefixes.forEach(prefix => {
style[prefix + rule] = value;
});
}
});
return style;
};
export const kebabCase = function(str) {
const hyphenateRE = /([^-])([A-Z])/g;
return str
.replace(hyphenateRE, '$1-$2')
.replace(hyphenateRE, '$1-$2')
.toLowerCase();
};
export const capitalize = function(str) {
if (!isString(str)) return str;
return str.charAt(0).toUpperCase() + str.slice(1);
};
export const looseEqual = function(a, b) {
const isObjectA = isObject(a);
const isObjectB = isObject(b);
if (isObjectA && isObjectB) {
return JSON.stringify(a) === JSON.stringify(b);
} else if (!isObjectA && !isObjectB) {
return String(a) === String(b);
} else {
return false;
}
};
export const arrayEquals = function(arrayA, arrayB) {
arrayA = arrayA || [];
arrayB = arrayB || [];
if (arrayA.length !== arrayB.length) {
return false;
}
for (let i = 0; i < arrayA.length; i++) {
if (!looseEqual(arrayA[i], arrayB[i])) {
return false;
}
}
return true;
};
export const isEqual = function(value1, value2) {
if (Array.isArray(value1) && Array.isArray(value2)) {
return arrayEquals(value1, value2);
}
return looseEqual(value1, value2);
};
export const isEmpty = function(val) {
// null or undefined
if (val == null) return true;
if (typeof val === 'boolean') return false;
if (typeof val === 'number') return !val;
if (val instanceof Error) return val.message === '';
switch (Object.prototype.toString.call(val)) {
// String or Array
case '[object String]':
case '[object Array]':
return !val.length;
// Map or Set or File
case '[object File]':
case '[object Map]':
case '[object Set]':
{
return !val.size;
}
// Plain Object
case '[object Object]':
{
return !Object.keys(val).length;
}
}
return false;
};
export function | (fn) {
let locked = false;
return function(...args) {
if (locked) return;
locked = true;
// eslint-disable-next-line no-unused-vars
window.requestAnimationFrame(_ => {
fn.apply(this, args);
locked = false;
});
};
}
export function objToArray(obj) {
if (Array.isArray(obj)) {
return obj;
}
return isEmpty(obj) ? [] : [obj];
} | rafThrottle |
useless_learner.py | '''
Training a trivial parametric monomial function "wx" (with no bias parameter)
to approximate the true hypothesis f(x) = 2x given 3 observed datapoints (1, 2), (2, 4), (3, 6)
This learner has no practical usage (hence, its name).
We are using non-stochastic gradient descent and running weight updates for 30 epochs.
A rudimentary squared difference is used for the loss function:
From data, we get:
L(w) = (2-w)(2-w) + (4-2w)(4-2w) + (6-3w)(6-3w)
= (4 - 4w + w^2) + (16 - 16w + 4w^2) + (36 - 36w + 9w^2)
L(w) = 56 - 56w + 14w^2
L'(w) = -56 + 28w
Solving this analytically gives us w = 2
But for the sake of this exercise, we apply gradient descent with w starting at 0, i.e. w_t = 0 for t = 0, where t is the epoch index
w_t+1 = w_t - learning_rate * L'(w_t)
Training effectively overfits the data as the setup is completely hypothetical (e.g. there is no test data)
The point of the exercise is solely to get familiar with operating the Google TensorFlow framework
'''
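# Sanity check of the update rule above: with learning_rate = 0.01 and
# L'(w) = -56 + 28w, starting from w_0 = 0,
#   w_1 = 0    - 0.01 * (-56)    = 0.56
#   w_2 = 0.56 - 0.01 * (-40.32) = 0.9632
# i.e. each step computes w <- 0.72*w + 0.56, which converges monotonically to
# the analytic optimum w = 2 (roughly 1.9999 after the 30 epochs used here).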
import tensorflow as tf
# Parameters
n_epoch = 30
n_features = 1
n_examples = 3
n_outputs = 1
learning_rate = .01
# Fetch the data
def fetch():
return {xx:[[1],[2],[3]], yy:[[2],[4],[6]]}
# Define the model
# Model inputs & outputs definitions
xx = tf.placeholder(tf.float32, shape=(n_examples, n_features), name = "MyInputs")
yy = tf.placeholder(tf.float32, shape=(n_examples, n_outputs), name = "MyLabels")
# Model hypothesis
ww = tf.Variable(tf.zeros(dtype=tf.float32, shape=(n_features, 1)), name = "MyWeights", trainable=True)
predict_yy = tf.matmul(xx, ww)
# Evaluate the loss
loss = tf.reduce_sum(tf.squared_difference(predict_yy, yy), name = "MyLoss")
| # Calculate gradient of the loss for each weight
# + Update each weight
opt = tf.train.GradientDescentOptimizer(learning_rate= learning_rate)
minimizer = opt.minimize(loss, var_list=[ww])
# Evaluate the model against the test data. Test the model
def eval(inputs):
return tf.matmul(inputs, ww)
# Init variables
init = tf.initialize_all_variables()
tf.scalar_summary("Loss", tf.reduce_mean(loss))
tf.scalar_summary("Weight", tf.reduce_mean(ww))
merged = tf.merge_all_summaries()
def main():
print "Running %s" % __file__
#tf.is_variable_initialized(ww)
with tf.Session() as sess:
# Create a summary writer, add the 'graph' to the event file.
writer = tf.train.SummaryWriter(".", sess.graph)
init.run()
for epoch in range(n_epoch):
summaries, _, loss_value,_ =sess.run([merged, minimizer, loss, ww], feed_dict = fetch())
print 'epoch {:d}: loss is {:f}'.format(epoch, loss_value)
writer.add_summary(summaries, epoch)
# eval(test_data)
if __name__ == '__main__': main() | # Train the model / Apply gradient updates (One Step) |
take_images.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Records video read from the Arducam.
# It must be given the video file to record and
# the capture duration in seconds.
# SYNTAX: python capturar_video.py VIDEO TIME
# 1- Path of the video file
# 2- Recording time in seconds
from ctypes import *
import ctypes
import sys
import os
import time
from PIL import Image
import numpy as np
import thread as thread
import math
from select import select
from evdev import InputDevice
from evdev import ecodes
from astropy.io import fits
import ArducamSDK
# Argument parsing
if (len(sys.argv)==3):
NOMBREIMG = sys.argv[1];
NUMIMG = int(sys.argv[2]);
else:
print ("Se requieren 2 argumentos: NOMBRE_IMAGENES NUMERO_IMAGENES")
exit()
#### ARDUCAMSDK CONFIGURATION ################
COLOR_BYTE2RGB = 47 # Not modified from the original
CAMERA_MT9M001 = 0x4D091031 # Not modified from the original
SensorShipAddr = 186
I2C_MODE_8_16 = 1
usbVid = 0x52CB # Not modified from the original
Width = 1280 #1280
Height = 1024 #1024
cfg ={"u32CameraType":CAMERA_MT9M001,
"u32Width":Width,"u32Height":Height,
"u32UsbVersion":1,
"u8PixelBytes":1,
"u16Vid":0x52cb,
"u8PixelBits":8,
"u32SensorShipAddr":SensorShipAddr,
"emI2cMode":I2C_MODE_8_16 }
# FLAGS
global saveFlag,downFlag,flag,H_value,V_value,lx,ly,mx,my,dx,dy,W_zoom,H_zooM,handle,openFlag,initTime,storeFlag,bufferData,globalGain
global testPatternFlag
global integrationTime
global shutterWidth
openFlag = False
handle = {}
downFlag = False
flag = True
saveFlag = False
storeFlag = False
saveNum=0
H_value = 0
V_value = 0
W_zoom = 0
H_zoom = 0
lx = 0
ly = 0
mx = 0
my = 0
dx = 0
dy = 0
testPatternFlag = False;
regArr=[[0x01, 0x000C], # Row Start
[0x02, 0x0014], # Column Start
[0x03, Height - 1], # Window Height 0x03FF
[0x04, Width - 1], # Window Width 0x04FF
[0x05, 0x0009], # Horizontal Blanking
[0x06, 0x0019], # Vertical Blanking
[0x07, 0x0002], # Output Control
[0x09, 0x0419], # Shutter Width 0x0419 (max: 0x3FFF)
[0x0B, 0x0000], # Frame Restart
[0x0C, 0x0000],#0x0100],
[0x0D, 0x0000],
[0x1E, 0x8000], # Read Mode 1 0x8000
[0x20, 0x1104],
[0x2B, 0x0008],
[0x2C, 0x0008],
[0x2D, 0x0008],
[0x2E, 0x0008],
[0x32, 0x0FFC], # Test Data Register
[0x35, 0x0067], # Global Gain 0x0008 (max: 0x0067)
[0x5F, 0x0904],
#[0x60, 0x0000], # BLC offset: Even row, even column
#[0x61, 0x0000], # BLC offset: Odd row, odd column
#[0x62, 0x049F], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)
#[0x63, 0x0000], # BLC offset: Even row, odd column
#[0x64, 0x0000], # BLC offset: Odd row, Even column
[0x60, 0x002F], # BLC offset: Even row, even column
[0x61, 0x002F], # BLC offset: Odd row, odd column
[0x62, 0x0499], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)
[0x63, 0x000F], # BLC offset: Even row, odd column
[0x64, 0x000F], # BLC offset: Odd row, Even column
[0xF1, 0x0001],
[0xFFFF, 0xFFFF]
]
globalGain = regArr[18][1];
# Initial integration time calculation (page 16 of the datasheet)
rowTime = regArr[3][1] + 1 + 244 + regArr[4][1] - 19; #[pixel clock periods] default: 1514
resetDelay = 4*regArr[9][1] #[pixel clock periods] default: 0
overheadTime = 180; #[pixel clock periods]
shutterWidth = regArr[7][1]
integrationPeriods = shutterWidth*rowTime - overheadTime - resetDelay;
clockPeriod = 1000.0/24e6; #[ms]
integrationTime = integrationPeriods * clockPeriod; #[ms]
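# With the register defaults above (shutter width 0x0419 = 1049 rows, row time
# 1514 pixel clocks, 24 MHz pixel clock) this evaluates to roughly 66 ms.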
with open('integrationtime.txt','w') as it:
it.write(str(integrationTime)+"\n")
print ("Initial integration time: %.3fms"%(integrationTime));
print ("Initial gain: 0x%02x"%(globalGain));
a_lock = thread.allocate_lock();
def readThread(threadName,read_Flag):
global flag,handle,storeFlag,bufferData,openFlag
global a_lock
count = 0
time0 = time.time()
time1 = time.time()
data = {}
# Wait for the arducam object to be ready
while openFlag == False:
time1 = time.time();
if time1 - time0 > 20:
#timeout
exit;
while flag:
res = ArducamSDK.Py_ArduCam_available(handle)
#~ print "Available frames %d"%(res)
if res > 0:
res,data = ArducamSDK.Py_ArduCam_read(handle,Width * Height)
if res == 0:
count += 1
time1 = time.time()
ArducamSDK.Py_ArduCam_del(handle)
else:
print ("read data fail!")
else:
#print "No data availiable"
time.sleep(.01);
if len(data) >= Width * Height:
if time1 - time0 >= 5:
print ("%s %f %s\n"%("fps:",count*1.0/(time1-time0),"/s"))
count = 0
time0 = time1
a_lock.acquire();
bufferData = data;
data = [];
storeFlag = True;
a_lock.release();
#show(data)
#else:
# print "data length is not enough!"
if flag == False:
break
thread.start_new_thread( readThread,("Thread-2", flag,))
pass
def showAndSave(threadName,algoquenoseusa):
glo | read.start_new_thread( showAndSave,("Thread-3",flag))
pass
def init_and_read_arducam():
global flag,regArr,handle,openFlag
regNum = 0
res,handle = ArducamSDK.Py_ArduCam_autoopen(cfg)
if res == 0:
openFlag = True
print ("device open success!")
while (regArr[regNum][0] != 0xFFFF):
ArducamSDK.Py_ArduCam_writeSensorReg(handle,regArr[regNum][0],regArr[regNum][1])
regNum = regNum + 1
res = ArducamSDK.Py_ArduCam_beginCapture(handle)
if res == 0:
print ("transfer task create success!")
while flag :
res = ArducamSDK.Py_ArduCam_capture(handle)
if res != 0:
print ("capture failed!")
flag = False;
break;
time.sleep(0.1)
if flag == False:
break
else:
print ("transfer task create fail!")
time.sleep(2);
res = ArducamSDK.Py_ArduCam_close(handle)
if res == 0:
openFlag = False
print ("device close success!")
else:
print ("device close fail!")
else:
print ("device open fail!")
if __name__ == "__main__":
initTime = time.time();
init_and_read_arducam();
| bal flag,W_zoom,H_zoom,V_value,H_value,lx,ly,downFlag,saveFlag,saveNum,bufferData,storeFlag
global a_lock
global hist_ax
global NOMBREIMG
img = np.zeros((Height, Width), dtype=np.uint8);
while flag:
a_lock.acquire();
if storeFlag == True:
storeFlag = False;
img = np.frombuffer(bufferData, np.uint8)
img = np.reshape(img, (Height, Width));
saveNum += 1
#name = NOMBREIMG + str(saveNum) + ".fits"
#name = NOMBREIMG + "_" + str(saveNum) + ".jpeg"
name = NOMBREIMG + ".fits"
hdu=fits.PrimaryHDU()
hdu.data=img
hdu.writeto(name,overwrite=True)
print ("Frame saved to %s"%(name))
a_lock.release();
if saveNum == NUMIMG:
flag=False;
print ("Total number of adq images = %d"%(saveNum))
if flag == False:
break
th |
generate_comment.go | package generation
import (
"fmt"
"strings"
"github.com/dave/jennifer/jen"
"github.com/mitchellh/go-wordwrap"
)
var (
maxAllowance = 80
minAllowance = maxAllowance - indent*maxLevels
indent = 4
maxLevels = 3
)
func generateComment(level int, format string, args ...interface{}) *jen.Statement | {
allowance := maxAllowance - indent*level - 3
if allowance < minAllowance {
allowance = minAllowance
}
commentText := fmt.Sprintf(format, args...)
wrapped := wordwrap.WrapString(commentText, uint(allowance))
lines := strings.Split(wrapped, "\n")
commentBlock := jen.Comment(lines[0]).Line()
for i := 1; i < len(lines); i++ {
commentBlock = commentBlock.Comment(lines[i]).Line()
}
return commentBlock
} |
|
funcs.go | package mongoutils
import (
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Array generate primitive.A
func Array(args ...interface{}) primitive.A {
return args
}
// Map generate primitive.M
//
// Args count must be even
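// Example (illustrative): Map("name", "John", "age", 30) => primitive.M{"name": "John", "age": 30}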
func Map(args ...interface{}) primitive.M {
if len(args)%2 == 0 {
res := make(primitive.M, len(args)/2)
for i := 0; i < len(args); i++ {
if i%2 == 0 {
if k, ok := args[i].(string); ok {
res[k] = args[i+1]
}
}
}
return res
}
return primitive.M{}
}
// Maps generate []primitive.M
//
// Args count must even | func Maps(args ...interface{}) []primitive.M {
res := make([]primitive.M, 0)
if len(args)%2 == 0 {
for i := 0; i < len(args); i++ {
if i%2 == 0 {
if k, ok := args[i].(string); ok {
res = append(res, primitive.M{k: args[i+1]})
}
}
}
}
return res
}
// Doc generate primitive.D from args
//
// Args count must even
// Example: Doc("_id", 1, "name", "John")
func Doc(args ...interface{}) primitive.D {
res := make([]primitive.E, 0)
if len(args)%2 == 0 {
for i := 0; i < len(args); i++ {
if i%2 == 0 {
if k, ok := args[i].(string); ok {
res = append(res, primitive.E{Key: k, Value: args[i+1]})
}
}
}
}
return res
}
// Regex generate Regex
//
// { pattern: "John.*", options: "i" }
func Regex(pattern string, opt string) primitive.Regex {
return primitive.Regex{Pattern: pattern, Options: opt}
}
// RegexFor generate map with regex parameter
//
// { "name": { pattern: "John.*", options: "i" } }
func RegexFor(k string, pattern string, opt string) primitive.M {
return primitive.M{k: Regex(pattern, opt)}
}
// In generate $in map
//
// {k: {$in: v}}
func In(k string, v ...interface{}) primitive.M {
return primitive.M{k: primitive.M{"$in": v}}
}
// Set generate simple set map
//
// {$set: v}
func Set(v interface{}) primitive.M {
return primitive.M{"$set": v}
}
// SetNested generate nested set map
//
// {$set: {k: v}}
func SetNested(k string, v interface{}) primitive.M {
return primitive.M{"$set": primitive.M{k: v}}
}
// Match generate nested set map
//
// {$match: v}
func Match(v interface{}) primitive.M {
return primitive.M{"$match": v}
} | |
virtualMachineScaleSet.ts | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import { input as inputs, output as outputs, enums } from "../../types";
import * as utilities from "../../utilities";
/**
* Describes a Virtual Machine Scale Set.
*/
export class VirtualMachineScaleSet extends pulumi.CustomResource {
/**
* Get an existing VirtualMachineScaleSet resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
*/
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): VirtualMachineScaleSet {
return new VirtualMachineScaleSet(name, undefined as any, { ...opts, id: id });
}
/** @internal */
public static readonly __pulumiType = 'azure-native:compute/v20200601:VirtualMachineScaleSet';
/**
* Returns true if the given object is an instance of VirtualMachineScaleSet. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
*/
public static isInstance(obj: any): obj is VirtualMachineScaleSet {
if (obj === undefined || obj === null) {
return false;
}
return obj['__pulumiType'] === VirtualMachineScaleSet.__pulumiType;
}
/**
* Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
*/
public readonly additionalCapabilities!: pulumi.Output<outputs.compute.v20200601.AdditionalCapabilitiesResponse | undefined>;
/**
* Policy for automatic repairs.
*/
public readonly automaticRepairsPolicy!: pulumi.Output<outputs.compute.v20200601.AutomaticRepairsPolicyResponse | undefined>;
/**
* When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
*/
public readonly doNotRunExtensionsOnOverprovisionedVMs!: pulumi.Output<boolean | undefined>;
/**
* Specifies information about the dedicated host group that the virtual machine scale set resides in. <br><br>Minimum api-version: 2020-06-01.
*/
public readonly hostGroup!: pulumi.Output<outputs.compute.v20200601.SubResourceResponse | undefined>;
/**
* The identity of the virtual machine scale set, if configured.
*/
public readonly identity!: pulumi.Output<outputs.compute.v20200601.VirtualMachineScaleSetIdentityResponse | undefined>;
/**
* Resource location
*/
public readonly location!: pulumi.Output<string>;
/**
* Resource name
*/
public /*out*/ readonly name!: pulumi.Output<string>;
/**
* Specifies whether the Virtual Machine Scale Set should be overprovisioned.
*/
public readonly overprovision!: pulumi.Output<boolean | undefined>;
/**
* Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
*/
public readonly plan!: pulumi.Output<outputs.compute.v20200601.PlanResponse | undefined>;
/**
* Fault Domain count for each placement group.
*/
public readonly platformFaultDomainCount!: pulumi.Output<number | undefined>;
/**
* The provisioning state, which only appears in the response.
*/
public /*out*/ readonly provisioningState!: pulumi.Output<string>;
/**
* Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
*/
public readonly proximityPlacementGroup!: pulumi.Output<outputs.compute.v20200601.SubResourceResponse | undefined>;
/**
* Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.
*/
public readonly scaleInPolicy!: pulumi.Output<outputs.compute.v20200601.ScaleInPolicyResponse | undefined>;
/**
* When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
*/
public readonly singlePlacementGroup!: pulumi.Output<boolean | undefined>;
/**
* The virtual machine scale set sku.
*/
public readonly sku!: pulumi.Output<outputs.compute.v20200601.SkuResponse | undefined>;
/**
* Resource tags
*/
public readonly tags!: pulumi.Output<{[key: string]: string} | undefined>;
/**
* Resource type
*/
public /*out*/ readonly type!: pulumi.Output<string>;
/** | */
public /*out*/ readonly uniqueId!: pulumi.Output<string>;
/**
* The upgrade policy.
*/
public readonly upgradePolicy!: pulumi.Output<outputs.compute.v20200601.UpgradePolicyResponse | undefined>;
/**
* The virtual machine profile.
*/
public readonly virtualMachineProfile!: pulumi.Output<outputs.compute.v20200601.VirtualMachineScaleSetVMProfileResponse | undefined>;
/**
* Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage.
*/
public readonly zoneBalance!: pulumi.Output<boolean | undefined>;
/**
* The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
*/
public readonly zones!: pulumi.Output<string[] | undefined>;
/**
* Create a VirtualMachineScaleSet resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: VirtualMachineScaleSetArgs, opts?: pulumi.CustomResourceOptions) {
let inputs: pulumi.Inputs = {};
opts = opts || {};
if (!opts.id) {
if ((!args || args.resourceGroupName === undefined) && !opts.urn) {
throw new Error("Missing required property 'resourceGroupName'");
}
inputs["additionalCapabilities"] = args ? args.additionalCapabilities : undefined;
inputs["automaticRepairsPolicy"] = args ? args.automaticRepairsPolicy : undefined;
inputs["doNotRunExtensionsOnOverprovisionedVMs"] = args ? args.doNotRunExtensionsOnOverprovisionedVMs : undefined;
inputs["hostGroup"] = args ? args.hostGroup : undefined;
inputs["identity"] = args ? args.identity : undefined;
inputs["location"] = args ? args.location : undefined;
inputs["overprovision"] = args ? args.overprovision : undefined;
inputs["plan"] = args ? args.plan : undefined;
inputs["platformFaultDomainCount"] = args ? args.platformFaultDomainCount : undefined;
inputs["proximityPlacementGroup"] = args ? args.proximityPlacementGroup : undefined;
inputs["resourceGroupName"] = args ? args.resourceGroupName : undefined;
inputs["scaleInPolicy"] = args ? args.scaleInPolicy : undefined;
inputs["singlePlacementGroup"] = args ? args.singlePlacementGroup : undefined;
inputs["sku"] = args ? args.sku : undefined;
inputs["tags"] = args ? args.tags : undefined;
inputs["upgradePolicy"] = args ? args.upgradePolicy : undefined;
inputs["virtualMachineProfile"] = args ? args.virtualMachineProfile : undefined;
inputs["vmScaleSetName"] = args ? args.vmScaleSetName : undefined;
inputs["zoneBalance"] = args ? args.zoneBalance : undefined;
inputs["zones"] = args ? args.zones : undefined;
inputs["name"] = undefined /*out*/;
inputs["provisioningState"] = undefined /*out*/;
inputs["type"] = undefined /*out*/;
inputs["uniqueId"] = undefined /*out*/;
} else {
inputs["additionalCapabilities"] = undefined /*out*/;
inputs["automaticRepairsPolicy"] = undefined /*out*/;
inputs["doNotRunExtensionsOnOverprovisionedVMs"] = undefined /*out*/;
inputs["hostGroup"] = undefined /*out*/;
inputs["identity"] = undefined /*out*/;
inputs["location"] = undefined /*out*/;
inputs["name"] = undefined /*out*/;
inputs["overprovision"] = undefined /*out*/;
inputs["plan"] = undefined /*out*/;
inputs["platformFaultDomainCount"] = undefined /*out*/;
inputs["provisioningState"] = undefined /*out*/;
inputs["proximityPlacementGroup"] = undefined /*out*/;
inputs["scaleInPolicy"] = undefined /*out*/;
inputs["singlePlacementGroup"] = undefined /*out*/;
inputs["sku"] = undefined /*out*/;
inputs["tags"] = undefined /*out*/;
inputs["type"] = undefined /*out*/;
inputs["uniqueId"] = undefined /*out*/;
inputs["upgradePolicy"] = undefined /*out*/;
inputs["virtualMachineProfile"] = undefined /*out*/;
inputs["zoneBalance"] = undefined /*out*/;
inputs["zones"] = undefined /*out*/;
}
if (!opts.version) {
opts = pulumi.mergeOptions(opts, { version: utilities.getVersion()});
}
const aliasOpts = { aliases: [{ type: "azure-nextgen:compute/v20200601:VirtualMachineScaleSet" }, { type: "azure-native:compute:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20150615:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20150615:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20160330:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20160330:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20160430preview:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20160430preview:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20170330:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20170330:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20171201:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20171201:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20180401:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20180401:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20180601:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20180601:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20181001:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20181001:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20190301:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20190301:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20190701:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20190701:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20191201:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20191201:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20201201:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20201201:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20210301:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20210301:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20210401:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20210401:VirtualMachineScaleSet" }, { type: "azure-native:compute/v20210701:VirtualMachineScaleSet" }, { type: "azure-nextgen:compute/v20210701:VirtualMachineScaleSet" }] };
opts = pulumi.mergeOptions(opts, aliasOpts);
super(VirtualMachineScaleSet.__pulumiType, name, inputs, opts);
}
}
/**
* The set of arguments for constructing a VirtualMachineScaleSet resource.
*/
export interface VirtualMachineScaleSetArgs {
/**
* Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
*/
additionalCapabilities?: pulumi.Input<inputs.compute.v20200601.AdditionalCapabilitiesArgs>;
/**
* Policy for automatic repairs.
*/
automaticRepairsPolicy?: pulumi.Input<inputs.compute.v20200601.AutomaticRepairsPolicyArgs>;
/**
* When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
*/
doNotRunExtensionsOnOverprovisionedVMs?: pulumi.Input<boolean>;
/**
* Specifies information about the dedicated host group that the virtual machine scale set resides in. <br><br>Minimum api-version: 2020-06-01.
*/
hostGroup?: pulumi.Input<inputs.compute.v20200601.SubResourceArgs>;
/**
* The identity of the virtual machine scale set, if configured.
*/
identity?: pulumi.Input<inputs.compute.v20200601.VirtualMachineScaleSetIdentityArgs>;
/**
* Resource location
*/
location?: pulumi.Input<string>;
/**
* Specifies whether the Virtual Machine Scale Set should be overprovisioned.
*/
overprovision?: pulumi.Input<boolean>;
/**
* Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
*/
plan?: pulumi.Input<inputs.compute.v20200601.PlanArgs>;
/**
* Fault Domain count for each placement group.
*/
platformFaultDomainCount?: pulumi.Input<number>;
/**
* Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
*/
proximityPlacementGroup?: pulumi.Input<inputs.compute.v20200601.SubResourceArgs>;
/**
* The name of the resource group.
*/
resourceGroupName: pulumi.Input<string>;
/**
* Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.
*/
scaleInPolicy?: pulumi.Input<inputs.compute.v20200601.ScaleInPolicyArgs>;
/**
* When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
*/
singlePlacementGroup?: pulumi.Input<boolean>;
/**
* The virtual machine scale set sku.
*/
sku?: pulumi.Input<inputs.compute.v20200601.SkuArgs>;
/**
* Resource tags
*/
tags?: pulumi.Input<{[key: string]: pulumi.Input<string>}>;
/**
* The upgrade policy.
*/
upgradePolicy?: pulumi.Input<inputs.compute.v20200601.UpgradePolicyArgs>;
/**
* The virtual machine profile.
*/
virtualMachineProfile?: pulumi.Input<inputs.compute.v20200601.VirtualMachineScaleSetVMProfileArgs>;
/**
* The name of the VM scale set to create or update.
*/
vmScaleSetName?: pulumi.Input<string>;
/**
* Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage.
*/
zoneBalance?: pulumi.Input<boolean>;
/**
* The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
*/
zones?: pulumi.Input<pulumi.Input<string>[]>;
} | * Specifies the ID which uniquely identifies a Virtual Machine Scale Set. |
staticSitesMappers.ts | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is regenerated.
*/
export {
AbnormalTimePeriod,
Address,
AddressResponse,
AnalysisData,
AnalysisDefinition,
ApiDefinitionInfo,
ApiKVReference,
ApiManagementConfig,
ApplicationLogsConfig,
ApplicationStack,
ApplicationStackResource,
AppServiceCertificate,
AppServiceCertificateOrder,
AppServiceCertificateOrderPatchResource,
AppServiceCertificatePatchResource,
AppServiceCertificateResource,
AppServiceEnvironmentPatchResource,
AppServiceEnvironmentResource,
AppServicePlan,
AppServicePlanPatchResource,
AutoHealActions,
AutoHealCustomAction,
AutoHealRules,
AutoHealTriggers,
AzureBlobStorageApplicationLogsConfig,
AzureBlobStorageHttpLogsConfig,
AzureStorageInfoValue,
AzureStoragePropertyDictionaryResource,
AzureTableStorageApplicationLogsConfig,
BackupItem,
BackupRequest,
BackupSchedule,
BaseResource,
BillingMeter,
Capability,
Certificate,
CertificateDetails,
CertificateEmail,
CertificateOrderAction,
CertificatePatchResource,
CloningInfo,
ConnectionStringDictionary,
ConnStringInfo,
ConnStringValueTypePair,
Contact,
ContainerCpuStatistics,
ContainerCpuUsage,
ContainerInfo,
ContainerMemoryStatistics,
ContainerNetworkInterfaceStatistics,
ContainerThrottlingData,
ContinuousWebJob,
CorsSettings,
CustomHostnameAnalysisResult,
DatabaseBackupSetting,
DataSource,
DataTableResponseColumn,
DataTableResponseObject,
DefaultErrorResponse,
DefaultErrorResponseError,
DefaultErrorResponseErrorDetailsItem,
DeletedAppRestoreRequest,
DeletedSite,
Deployment,
DetectorAbnormalTimePeriod,
DetectorDefinition,
DetectorInfo,
DetectorResponse,
DiagnosticAnalysis,
DiagnosticCategory,
DiagnosticData,
DiagnosticDetectorResponse,
DiagnosticMetricSample,
DiagnosticMetricSet,
Domain,
DomainOwnershipIdentifier,
DomainPatchResource,
DomainPurchaseConsent,
EnabledConfig,
ErrorEntity,
Experiments,
FileSystemApplicationLogsConfig,
FileSystemHttpLogsConfig,
FunctionEnvelope,
FunctionSecrets,
GeoRegion,
HandlerMapping,
HostingEnvironmentProfile,
HostName,
HostNameBinding,
HostNameSslState,
HttpLogsConfig,
HybridConnection,
HybridConnectionKey,
HybridConnectionLimits,
Identifier,
IpSecurityRestriction,
KeyVaultReferenceCollection,
KeyVaultReferenceResource,
ManagedServiceIdentity,
ManagedServiceIdentityUserAssignedIdentitiesValue,
MigrateMySqlRequest,
MigrateMySqlStatus,
MSDeploy,
MSDeployLog,
MSDeployLogEntry,
MSDeployStatus,
NameValuePair,
NetworkAccessControlEntry,
NetworkFeatures,
PremierAddOn,
PremierAddOnOffer,
PremierAddOnPatchResource,
PrivateAccess,
PrivateAccessSubnet,
PrivateAccessVirtualNetwork,
ProcessInfo,
ProcessModuleInfo,
ProcessThreadInfo,
ProxyOnlyResource,
PublicCertificate,
PushSettings,
RampUpRule,
Recommendation,
RecommendationRule,
ReissueCertificateOrderRequest,
RelayServiceConnectionEntity,
Rendering,
RenewCertificateOrderRequest,
RequestsBasedTrigger,
Resource,
ResourceHealthMetadata,
ResourceMetricAvailability,
ResourceMetricDefinition,
ResponseMetaData,
RestoreRequest,
Site,
SiteAuthSettings,
SiteConfig,
SiteConfigResource,
SiteConfigurationSnapshotInfo,
SiteExtensionInfo,
SiteInstance,
SiteLimits,
SiteLogsConfig,
SiteMachineKey,
SitePatchResource,
SitePhpErrorLogFlag,
SiteSourceControl,
SkuCapacity,
SkuDescription,
SlotConfigNamesResource,
SlotDifference,
SlotSwapStatus,
SlowRequestsBasedTrigger,
Snapshot,
SnapshotRecoverySource,
SnapshotRestoreRequest, | StampCapacity,
StaticSiteARMResource,
StaticSiteBuildARMResource,
StaticSiteBuildCollection,
StaticSiteBuildProperties,
StaticSiteCollection,
StaticSiteCustomDomainOverviewARMResource,
StaticSiteCustomDomainOverviewCollection,
StaticSiteFunctionOverviewARMResource,
StaticSiteFunctionOverviewCollection,
StaticSitePatchResource,
StaticSiteResetPropertiesARMResource,
StaticSiteUserARMResource,
StaticSiteUserCollection,
StaticSiteUserInvitationRequestResource,
StaticSiteUserInvitationResponseResource,
StatusCodesBasedTrigger,
StorageMigrationOptions,
StorageMigrationResponse,
StringDictionary,
SwiftVirtualNetwork,
TopLevelDomain,
TriggeredJobHistory,
TriggeredJobRun,
TriggeredWebJob,
Usage,
User,
VirtualApplication,
VirtualDirectory,
VirtualIPMapping,
VirtualNetworkProfile,
VnetGateway,
VnetInfo,
VnetParameters,
VnetRoute,
VnetValidationFailureDetails,
VnetValidationTestFailure,
WebJob,
WebSiteInstanceStatus,
WorkerPool,
WorkerPoolResource
} from "../models/mappers"; | Solution,
SourceControl,
StackMajorVersion,
StackMinorVersion, |
rpc_cli.py | #!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line msgpack-rpc client
#
# a usage example:
# % PYTHONPATH=. ./bin/rpc-cli \
# --peers=echo-server=localhost:9999,hoge=localhost:9998
# (Cmd) request echo-server echo ["hoge"]
# RESULT hoge
# (Cmd) request echo-server notify ["notify-method", ["param1","param2"]]
# RESULT notify-method
# (Cmd)
# NOTIFICATION from echo-server ['notify-method', ['param1', 'param2']]
# (Cmd)
import ryu.contrib
ryu.contrib.update_module_path()
from ryu import cfg
import cmd
import signal
import socket
import sys
import termios
from ryu.lib import rpc
CONF = cfg.CONF
CONF.register_cli_opts([
# eg. rpc-cli --peers=hoge=localhost:9998,fuga=localhost:9999
cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(object):
def __init__(self, name, addr):
self._name = name
self._addr = addr
self.client = None
try:
self.connect()
except:
pass
def connect(self):
self.client = None
s = socket.create_connection(self._addr)
self.client = rpc.Client(s, notification_callback=self.notification)
def try_to_connect(self, verbose=False):
if self.client:
return
try:
self.connect()
assert self.client
except Exception as e:
if verbose:
print("connection failure %s" % e)
raise EOFError
def notification(self, n):
print("NOTIFICATION from %s %s" % (self._name, n))
def call(self, method, params):
return self._do(lambda: self.client.call(method, params))
def send_notification(self, method, params):
self._do(lambda: self.client.send_notification(method, params))
def _do(self, f):
def g():
try:
return f()
except EOFError:
self.client = None
raise
self.try_to_connect(verbose=True)
try:
return g()
except EOFError:
print("disconnected. trying to connect...")
self.try_to_connect(verbose=True)
print("connected. retrying the request...")
return g()
peers = {}
def add_peer(name, host, port):
peers[name] = Peer(name, (host, port))
class Cmd(cmd.Cmd):
def __init__(self, *args, **kwargs):
self._in_onecmd = False
self._notification_check_interval = 1 # worth to be configurable?
self._saved_termios = None
cmd.Cmd.__init__(self, *args, **kwargs)
def _request(self, line, f):
args = line.split(None, 2)
try:
peer = args[0]
method = args[1]
params = eval(args[2])
except:
print("argument error")
return
try:
p = peers[peer]
except KeyError:
print("unknown peer %s" % peer)
return
try:
f(p, method, params)
except rpc.RPCError as e:
print("RPC ERROR %s" % e)
except EOFError:
print("disconnected")
def _complete_peer(self, text, line, _begidx, _endidx):
if len((line + 'x').split()) >= 3:
return []
return [name for name in peers if name.startswith(text)]
def do_request(self, line):
"""request <peer> <method> <params>
send a msgpack-rpc request and print a response.
<params> is a python code snippet, it should be eval'ed to a list.
"""
def f(p, method, params):
result = p.call(method, params)
print("RESULT %s" % result)
self._request(line, f)
def do_notify(self, line):
"""notify <peer> <method> <params>
send a msgpack-rpc notification.
<params> is a python code snippet, it should be eval'ed to a list.
"""
def f(p, method, params):
p.send_notification(method, params)
self._request(line, f)
def complete_request(self, text, line, begidx, endidx):
return self._complete_peer(text, line, begidx, endidx)
def complete_notify(self, text, line, begidx, endidx):
return self._complete_peer(text, line, begidx, endidx)
def do_EOF(self, _line):
sys.exit(0)
def emptyline(self):
self._peek_notification()
def postcmd(self, _stop, _line):
self._peek_notification()
def _peek_notification(self):
for k, p in peers.iteritems():
if p.client:
try:
p.client.peek_notification()
except EOFError:
p.client = None
print("disconnected %s" % k)
@staticmethod
def _save_termios():
return termios.tcgetattr(sys.stdin.fileno())
@staticmethod
def _restore_termios(t):
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, t)
def preloop(self):
self._saved_termios = self._save_termios()
signal.signal(signal.SIGALRM, self._timeout)
signal.alarm(1)
def onecmd(self, string): | return cmd.Cmd.onecmd(self, string)
finally:
self._in_onecmd = False
def _timeout(self, _sig, _frame):
if not self._in_onecmd:
# restore terminal settings. (cooked/raw, ...)
# required for pypy at least.
# this doesn't seem to be needed for cpython readline
# module but i'm not sure if it's by spec or luck.
o = self._save_termios()
self._restore_termios(self._saved_termios)
self._peek_notification()
self._restore_termios(o)
signal.alarm(self._notification_check_interval)
def main(args=None, prog=None):
CONF(args=args, prog=prog, project='rpc-cli', version='rpc-cli')
for p_str in CONF.peers:
name, addr = p_str.split('=')
host, port = addr.rsplit(':', 1)
add_peer(name, host, port)
Cmd().cmdloop()
if __name__ == "__main__":
main() | self._in_onecmd = True
try: |
base_classes_resource_registry.py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A list of resources and their canonical format. This is deprecated."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core.resource import resource_info
RESOURCE_REGISTRY = {
'compute.addresses':
resource_info.ResourceInfo(
cache_command='compute addresses list',
list_format="""
table(
name,
region.basename(),
address,
status
)
""",),
'compute.autoscalers':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute autoscaler list',
list_format="""
table(
name,
target.basename(),
autoscalingPolicy.policy():label=POLICY
)
""",),
'compute.backendBuckets':
resource_info.ResourceInfo(
list_format="""
table(
name,
bucketName:label=GCS_BUCKET_NAME,
enableCdn
)
""",),
'compute.backendServiceGroupHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.backendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol
)
""",),
'compute.backendServices.alpha':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.regionBackendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.commitments':
resource_info.ResourceInfo(
cache_command='compute commitments list',
list_format="""
table(name,
region.basename(),
endTimestamp,
status)
""",),
'compute.disks':
resource_info.ResourceInfo(
cache_command='compute disks list',
list_format="""
table(
name,
zone.basename(),
sizeGb,
type.basename(),
status
)
""",),
'compute.diskTypes':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
zone.basename(),
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.diskTypes.alpha':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.firewalls':
resource_info.ResourceInfo(
cache_command='compute firewall-rules list',
list_format="""
table(
name,
network.basename(),
direction,
priority,
allowed[].map().firewall_rule().list():label=ALLOW,
denied[].map().firewall_rule().list():label=DENY
)
""",),
'compute.forwardingRules':
resource_info.ResourceInfo(
cache_command='compute forwarding-rules list',
list_format="""
table(
name,
region.basename(),
IPAddress,
IPProtocol,
firstof(
target,
backendService).scope():label=TARGET
)
""",),
'compute.groups':
resource_info.ResourceInfo(
cache_command='compute groups list',
list_format="""
table(
name,
members.len():label=NUM_MEMBERS,
description
)
""",),
'compute.healthChecks':
resource_info.ResourceInfo(
cache_command='compute health-checks list',
list_format="""
table(
name,
type:label=PROTOCOL
)
""",),
'compute.httpHealthChecks':
resource_info.ResourceInfo(
cache_command='compute http-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.httpsHealthChecks':
resource_info.ResourceInfo(
cache_command='compute https-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.images':
resource_info.ResourceInfo(
cache_command='compute images list',
list_format="""
table(
name,
selfLink.map().scope(projects).segment(0):label=PROJECT,
family,
deprecated.state:label=DEPRECATED,
status
)
""",),
'compute.instanceGroups':
resource_info.ResourceInfo(
cache_command='compute instance-groups list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
network.basename(),
isManaged:label=MANAGED,
size:label=INSTANCES
)
""",),
'compute.instanceGroupManagers':
resource_info.ResourceInfo(
cache_command='compute instance-groups managed list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
baseInstanceName,
size,
targetSize,
instanceTemplate.basename(),
autoscaled
)
""",),
'compute.instances':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute instances list',
list_format="""
table(
name,
zone.basename(),
machineType.machine_type().basename(),
scheduling.preemptible.yesno(yes=true, no=''),
networkInterfaces[].networkIP.notnull().list():label=INTERNAL_IP,
networkInterfaces[].accessConfigs[0].natIP.notnull().list()\
:label=EXTERNAL_IP,
status
)
""",),
'compute.instanceTemplates':
resource_info.ResourceInfo(
cache_command='compute instance-templates list',
list_format="""
table(
name,
properties.machineType.machine_type(),
properties.scheduling.preemptible.yesno(yes=true, no=''),
creationTimestamp
)
""",),
'compute.invalidations':
resource_info.ResourceInfo(
cache_command='beta compute url-maps list-cdn-cache-invalidations',
list_format="""
table(
description,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.machineTypes':
resource_info.ResourceInfo(
cache_command='compute machine-types list',
list_format="""
table(
name,
zone.basename(),
guestCpus:label=CPUS,
memoryMb.size(units_in=MiB, units_out=GiB, precision=2):label=MEMORY_GB,
deprecated.state:label=DEPRECATED
)
""",),
'compute.networks':
resource_info.ResourceInfo(
cache_command='compute networks list',
list_format="""
table(
name,
x_gcloud_mode:label=MODE,
IPv4Range:label=IPV4_RANGE,
gatewayIPv4
)
""",),
'compute.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
targetLink.scope():label=TARGET,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
) | 'compute.peerings':
resource_info.ResourceInfo(
cache_command='compute networks peerings list',
list_format="""
table(
name,
source_network.basename():label=NETWORK,
network.map().scope(projects).segment(0):label=PEER_PROJECT,
network.basename():label=PEER_NETWORK,
autoCreateRoutes,
state,
stateDetails
)
""",),
'compute.projects':
resource_info.ResourceInfo(
list_format="""
value(
format("There is no API support yet.")
)
""",),
'compute.xpnProjects':
resource_info.ResourceInfo(
list_format="""
table(
name,
creationTimestamp,
xpnProjectStatus
)
""",),
'compute.xpnResourceId':
resource_info.ResourceInfo(
list_format="""
table(
id:label=RESOURCE_ID,
type:label=RESOURCE_TYPE)
""",),
'compute.regions':
resource_info.ResourceInfo(
cache_command='compute regions list',
list_format="""
table(
name,
quotas.metric.CPUS.quota():label=CPUS,
quotas.metric.DISKS_TOTAL_GB.quota():label=DISKS_GB,
quotas.metric.IN_USE_ADDRESSES.quota():label=ADDRESSES,
quotas.metric.STATIC_ADDRESSES.quota():label=RESERVED_ADDRESSES,
status():label=STATUS,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
'compute.routers':
resource_info.ResourceInfo(
cache_command='compute routers list',
list_format="""
table(
name,
region.basename(),
network.basename()
)
""",),
'compute.routes':
resource_info.ResourceInfo(
cache_command='compute routes list',
list_format="""
table(
name,
network.basename(),
destRange,
firstof(
nextHopInstance,
nextHopGateway,
nextHopIp,
nextHopVpnTunnel,
nextHopPeering).scope()
:label=NEXT_HOP,
priority
)
""",),
'compute.snapshots':
resource_info.ResourceInfo(
cache_command='compute snapshots list',
list_format="""
table(
name,
diskSizeGb,
sourceDisk.scope():label=SRC_DISK,
status
)
""",),
'compute.sslCertificates':
resource_info.ResourceInfo(
cache_command='compute ssl-certificates list',
list_format="""
table(
name,
creationTimestamp
)
""",),
'compute.subnetworks':
resource_info.ResourceInfo(
cache_command='compute networks subnets list',
list_format="""
table(
name,
region.basename(),
network.basename(),
ipCidrRange:label=RANGE
)
""",),
'compute.targetHttpProxies':
resource_info.ResourceInfo(
cache_command='compute target-http-proxies list',
list_format="""
table(
name,
urlMap.basename()
)
""",),
'compute.targetHttpsProxies':
resource_info.ResourceInfo(
cache_command='compute target-https-proxies list',
list_format="""
table(
name,
sslCertificates.map().basename().list():label=SSL_CERTIFICATES,
urlMap.basename()
)
""",),
'compute.targetInstances':
resource_info.ResourceInfo(
cache_command='compute target-instances list',
list_format="""
table(
name,
zone.basename(),
instance.basename(),
natPolicy
)
""",),
'compute.targetPoolInstanceHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.targetPools':
resource_info.ResourceInfo(
cache_command='compute target-pools list',
list_format="""
table(
name,
region.basename(),
sessionAffinity,
backupPool.basename():label=BACKUP,
healthChecks[].map().basename().list():label=HEALTH_CHECKS
)
""",),
'compute.targetSslProxies':
resource_info.ResourceInfo(
cache_command='compute target-ssl-proxies list',),
'compute.targetTcpProxies':
resource_info.ResourceInfo(
cache_command='compute target-tcp-proxies list',),
'compute.targetVpnGateways':
resource_info.ResourceInfo(
cache_command='compute target-vpn-gateways list',
list_format="""
table(
name,
network.basename(),
region.basename()
)
""",),
'compute.urlMaps':
resource_info.ResourceInfo(
cache_command='compute url-maps list',
list_format="""
table(
name,
defaultService
)
""",),
'compute.users':
resource_info.ResourceInfo(
cache_command='compute users list',
list_format="""
table(
name,
owner,
description
)
""",),
'compute.vpnTunnels':
resource_info.ResourceInfo(
cache_command='compute vpn-tunnels list',
list_format="""
table(
name,
region.basename(),
targetVpnGateway.basename():label=GATEWAY,
peerIp:label=PEER_ADDRESS
)
""",),
'compute.zones':
resource_info.ResourceInfo(
cache_command='compute zones list',
list_format="""
table(
name,
region.basename(),
status():label=STATUS,
maintenanceWindows.next_maintenance():label=NEXT_MAINTENANCE,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
} | """,), |
zz_categorymapper_types.go | /*
Copyright 2021 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by terrajet. DO NOT EDIT.
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)
type CategoryMapperAncestorsObservation struct {
}
type CategoryMapperAncestorsParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// The Moid of the referenced REST resource.
// +kubebuilder:validation:Optional
Moid *string `json:"moid,omitempty" tf:"moid,omitempty"`
// The fully-qualified name of the remote type referred by this relationship.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// An OData $filter expression which describes the REST resource to be referenced. This field may
// be set instead of 'moid' by clients.
// 1. If 'moid' is set this field is ignored.
// 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the
// resource matching the filter expression and populates it in the MoRef that is part of the object
// instance being inserted/updated to fulfill the REST request.
// An error is returned if the filter matches zero or more than one REST resource.
// An example filter string is: Serial eq '3AA8B7T11'.
// +kubebuilder:validation:Optional
Selector *string `json:"selector,omitempty" tf:"selector,omitempty"`
}
type CategoryMapperObservation struct {
}
type CategoryMapperParameters struct {
// The Account ID for this managed object.
// +kubebuilder:validation:Optional
AccountMoid *string `json:"accountMoid,omitempty" tf:"account_moid,omitempty"`
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// An array of relationships to moBaseMo resources.
// +kubebuilder:validation:Optional
Ancestors []CategoryMapperAncestorsParameters `json:"ancestors,omitempty" tf:"ancestors,omitempty"`
// The category of the model series.
// +kubebuilder:validation:Optional
Category *string `json:"category,omitempty" tf:"category,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// The time when this managed object was created.
// +kubebuilder:validation:Optional
CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"`
// The DomainGroup ID for this managed object.
// +kubebuilder:validation:Optional
DomainGroupMoid *string `json:"domainGroupMoid,omitempty" tf:"domain_group_moid,omitempty"`
// The type of distributable image, example huu, scu, driver, os.
// * `Distributable` - Stores firmware host utility images and fabric images.
// * `DriverDistributable` - Stores driver distributable images.
// * `ServerConfigurationUtilityDistributable` - Stores server configuration utility images.
// * `OperatingSystemFile` - Stores operating system iso images.
// * `HyperflexDistributable` - It stores HyperFlex images.
// +kubebuilder:validation:Optional
FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"`
// The type of image based on the endpoint it can upgrade. For example, ucs-c420m5-huu-3.2.1a.iso can upgrade standalone servers, so the image type is Standalone Server.
// +kubebuilder:validation:Optional
ImageType *string `json:"imageType,omitempty" tf:"image_type,omitempty"`
// Cisco software repository image category identifier.
// +kubebuilder:validation:Optional
MdfID *string `json:"mdfId,omitempty" tf:"mdf_id,omitempty"`
// The time when this managed object was last modified.
// +kubebuilder:validation:Optional
ModTime *string `json:"modTime,omitempty" tf:"mod_time,omitempty"`
// The unique identifier of this Managed Object instance.
// +kubebuilder:validation:Optional
Moid *string `json:"moid,omitempty" tf:"moid,omitempty"`
// The image can be downloaded from cisco.com or external cloud store.
// * `Cisco` - External repository hosted on cisco.com.
// * `IntersightCloud` - Repository hosted by the Intersight Cloud.
// * `LocalMachine` - The file is available on the local client machine. Used as an upload source type.
// * `NetworkShare` - External repository in the customer datacenter. This will typically be a file server.
// +kubebuilder:validation:Optional
NrSource *string `json:"nrSource,omitempty" tf:"nr_source,omitempty"`
// The version from which user can download images from amazon store, if source is external cloud store.
// +kubebuilder:validation:Optional
NrVersion *string `json:"nrVersion,omitempty" tf:"nr_version,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// The value should be the same as the 'ClassId' property.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// +kubebuilder:validation:Optional
Owners []*string `json:"owners,omitempty" tf:"owners,omitempty"`
// A reference to a moBaseMo resource.
// When the $expand query parameter is specified, the referenced resource is returned inline.
// +kubebuilder:validation:Optional
Parent []CategoryMapperParentParameters `json:"parent,omitempty" tf:"parent,omitempty"`
// An array of relationships to moBaseMo resources.
// +kubebuilder:validation:Optional
PermissionResources []CategoryMapperPermissionResourcesParameters `json:"permissionResources,omitempty" tf:"permission_resources,omitempty"`
// The regex that all images of this category follow.
// +kubebuilder:validation:Optional
RegexPattern *string `json:"regexPattern,omitempty" tf:"regex_pattern,omitempty"`
// Intersight provides pre-built workflows, tasks and policies to end users through global catalogs.
// Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
// +kubebuilder:validation:Optional
SharedScope *string `json:"sharedScope,omitempty" tf:"shared_scope,omitempty"`
// +kubebuilder:validation:Optional
SupportedModels []*string `json:"supportedModels,omitempty" tf:"supported_models,omitempty"`
// The software type id provided by cisco.com.
// +kubebuilder:validation:Optional
SwID *string `json:"swId,omitempty" tf:"sw_id,omitempty"`
// +kubebuilder:validation:Optional
TagTypes []*string `json:"tagTypes,omitempty" tf:"tag_types,omitempty"`
// +kubebuilder:validation:Optional
Tags []CategoryMapperTagsParameters `json:"tags,omitempty" tf:"tags,omitempty"`
// The versioning info for this managed object.
// +kubebuilder:validation:Optional
VersionContext []CategoryMapperVersionContextParameters `json:"versionContext,omitempty" tf:"version_context,omitempty"`
}
type CategoryMapperParentObservation struct {
}
type CategoryMapperParentParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// The Moid of the referenced REST resource.
// +kubebuilder:validation:Optional
Moid *string `json:"moid,omitempty" tf:"moid,omitempty"`
// The fully-qualified name of the remote type referred by this relationship.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// An OData $filter expression which describes the REST resource to be referenced. This field may
// be set instead of 'moid' by clients.
// 1. If 'moid' is set this field is ignored.
// 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the
// resource matching the filter expression and populates it in the MoRef that is part of the object
// instance being inserted/updated to fulfill the REST request.
// An error is returned if the filter matches zero or more than one REST resource.
// An example filter string is: Serial eq '3AA8B7T11'.
// +kubebuilder:validation:Optional
Selector *string `json:"selector,omitempty" tf:"selector,omitempty"`
}
type CategoryMapperPermissionResourcesObservation struct {
}
type CategoryMapperPermissionResourcesParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// The Moid of the referenced REST resource.
// +kubebuilder:validation:Optional
Moid *string `json:"moid,omitempty" tf:"moid,omitempty"`
// The fully-qualified name of the remote type referred by this relationship.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// An OData $filter expression which describes the REST resource to be referenced. This field may
// be set instead of 'moid' by clients.
// 1. If 'moid' is set this field is ignored.
// 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the
// resource matching the filter expression and populates it in the MoRef that is part of the object
// instance being inserted/updated to fulfill the REST request.
// An error is returned if the filter matches zero or more than one REST resource.
// An example filter string is: Serial eq '3AA8B7T11'.
// +kubebuilder:validation:Optional
Selector *string `json:"selector,omitempty" tf:"selector,omitempty"`
}
type CategoryMapperTagsObservation struct {
}
type CategoryMapperTagsParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The string representation of a tag key.
// +kubebuilder:validation:Optional
Key *string `json:"key,omitempty" tf:"key,omitempty"`
// The string representation of a tag value.
// +kubebuilder:validation:Optional
Value *string `json:"value,omitempty" tf:"value,omitempty"`
}
type CategoryMapperVersionContextObservation struct {
}
type CategoryMapperVersionContextParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// +kubebuilder:validation:Optional
InterestedMos []VersionContextInterestedMosParameters `json:"interestedMos,omitempty" tf:"interested_mos,omitempty"`
// The version of the Managed Object, e.g. an incrementing number or a hash id.
// +kubebuilder:validation:Optional
NrVersion *string `json:"nrVersion,omitempty" tf:"nr_version,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// The value should be the same as the 'ClassId' property.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// A reference to the original Managed Object.
// +kubebuilder:validation:Optional
RefMo []VersionContextRefMoParameters `json:"refMo,omitempty" tf:"ref_mo,omitempty"`
// The time this versioned Managed Object was created.
// +kubebuilder:validation:Optional
Timestamp *string `json:"timestamp,omitempty" tf:"timestamp,omitempty"`
// Specifies type of version. Currently the only supported value is "Configured"
// that is used to keep track of snapshots of policies and profiles that are intended
// to be configured to target endpoints.
// * `Modified` - Version created every time an object is modified.
// * `Configured` - Version created every time an object is configured to the service profile.
// * `Deployed` - Version created for objects related to a service profile when it is deployed.
// +kubebuilder:validation:Optional
VersionType *string `json:"versionType,omitempty" tf:"version_type,omitempty"`
}
type VersionContextInterestedMosObservation struct {
}
type VersionContextInterestedMosParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// The Moid of the referenced REST resource.
// +kubebuilder:validation:Optional
Moid *string `json:"moid,omitempty" tf:"moid,omitempty"`
// The fully-qualified name of the remote type referred by this relationship.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// An OData $filter expression which describes the REST resource to be referenced. This field may
// be set instead of 'moid' by clients.
// 1. If 'moid' is set this field is ignored.
// 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the
// resource matching the filter expression and populates it in the MoRef that is part of the object
// instance being inserted/updated to fulfill the REST request.
// An error is returned if the filter matches zero or more than one REST resource.
// An example filter string is: Serial eq '3AA8B7T11'.
// +kubebuilder:validation:Optional
Selector *string `json:"selector,omitempty" tf:"selector,omitempty"`
}
type VersionContextRefMoObservation struct {
}
type VersionContextRefMoParameters struct {
// +kubebuilder:validation:Optional
AdditionalProperties *string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"`
// The fully-qualified name of the instantiated, concrete type.
// This property is used as a discriminator to identify the type of the payload
// when marshaling and unmarshaling data.
// +kubebuilder:validation:Optional
ClassID *string `json:"classId,omitempty" tf:"class_id,omitempty"`
// The Moid of the referenced REST resource.
// +kubebuilder:validation:Optional
Moid *string `json:"moid,omitempty" tf:"moid,omitempty"`
// The fully-qualified name of the remote type referred by this relationship.
// +kubebuilder:validation:Optional
ObjectType *string `json:"objectType,omitempty" tf:"object_type,omitempty"`
// An OData $filter expression which describes the REST resource to be referenced. This field may
// be set instead of 'moid' by clients.
// 1. If 'moid' is set this field is ignored.
// 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the
// resource matching the filter expression and populates it in the MoRef that is part of the object
// instance being inserted/updated to fulfill the REST request.
// An error is returned if the filter matches zero or more than one REST resource.
// An example filter string is: Serial eq '3AA8B7T11'.
// +kubebuilder:validation:Optional
Selector *string `json:"selector,omitempty" tf:"selector,omitempty"`
}
// CategoryMapperSpec defines the desired state of CategoryMapper
type CategoryMapperSpec struct {
v1.ResourceSpec `json:",inline"`
ForProvider CategoryMapperParameters `json:"forProvider"`
}
// CategoryMapperStatus defines the observed state of CategoryMapper.
type CategoryMapperStatus struct {
v1.ResourceStatus `json:",inline"`
AtProvider CategoryMapperObservation `json:"atProvider,omitempty"`
}
// +kubebuilder:object:root=true
// CategoryMapper is the Schema for the CategoryMappers API
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,intersightjet}
type CategoryMapper struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CategoryMapperSpec `json:"spec"`
Status CategoryMapperStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// CategoryMapperList contains a list of CategoryMappers
type CategoryMapperList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CategoryMapper `json:"items"`
}
// Repository type metadata.
var (
CategoryMapper_Kind = "CategoryMapper"
CategoryMapper_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CategoryMapper_Kind}.String()
CategoryMapper_KindAPIVersion = CategoryMapper_Kind + "." + CRDGroupVersion.String()
CategoryMapper_GroupVersionKind = CRDGroupVersion.WithKind(CategoryMapper_Kind)
)
func init() | {
SchemeBuilder.Register(&CategoryMapper{}, &CategoryMapperList{})
} |
|
running_solution.rs | use std::thread;
use crossbeam::channel;
use super::{base::*, show_results::ShowResultsState};
use crate::{
interpreter::ConstantProvider, levels::{get_result, Level, TestRunResults}, math::*, prelude::*, save_system::SaveProfile
};
#[derive(Debug)]
pub struct RunningSolutionState {
level: Level,
save_profile: Arc<SaveProfile>,
handle: thread::JoinHandle<()>,
receiver: channel::Receiver<TestRunResults>,
}
impl RunningSolutionState {
pub fn new(level: Level, code: String, save_profile: Arc<SaveProfile>) -> Self {
let (sender, receiver) = channel::bounded(0);
let provider = ConstantProvider::new(level.clone(), Some(save_profile.clone()));
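// Run the level tests on a worker thread; the result comes back over the
// zero-capacity channel and is picked up later in `tick`.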
let handle = std::thread::spawn({
let level = level.clone();
move || {
sender
.send(level.test(code.chars(), provider))
.debug_unwrap()
}
});
Self {
level,
save_profile,
handle,
receiver,
}
}
}
const WAIT_TEXT: &str = "Running solution, please wait";
impl GameState for RunningSolutionState {
fn name(&self) -> &'static str |
fn tick(&mut self, mut data: TickData) -> GameStateEvent {
if let Ok(results) = self.receiver.try_recv() {
if get_result(&results).is_success() {
#[cfg(feature = "steam")]
if let Some(client) = data.steam_client.clone() {
use crate::utils::steam::*;
if matches!(self.level, Level::GameLevel(..)) {
update_section_achievements(client, self.save_profile.clone());
} else {
get_single_achievement(client, ManualAchievements::PlayWorkshop);
}
}
SFX::Win.play();
} else {
SFX::Wrong.play();
}
GameStateEvent::Switch(box ShowResultsState::new(
self.level.clone(),
results,
self.save_profile.clone(),
))
} else if data.pressed_key == Some(bl::VirtualKeyCode::Escape) {
SFX::Back.play();
GameStateEvent::Pop(1)
} else {
// Draw only after checking the receiver: in the reasonably common case where the
// solution runs very fast, this avoids printing the waiting text for a single frame,
// which looks a bit weird.
let dots = (data.time.as_millis() / 500) % 4;
let mut txt = String::with_capacity(WAIT_TEXT.len() + 4);
txt.push_str(WAIT_TEXT);
(0..dots).for_each(|_| txt.push('.'));
data.print(Pos::new(H / 2, W / 2 - WAIT_TEXT.len() as i32 / 2), &txt);
data.instructions(&["Press ESC to cancel run"]);
GameStateEvent::None
}
}
}
| { "RunningSolution" } |
db_init.rs | use std::path::Path;
use rusqlite::{Connection, OpenFlags};
use crate::common::error::{Error, get_path};
use crate::db::sqlite_statements;
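/// Open the database at `db_path` read-write; if opening fails (e.g. the file does
/// not exist yet), create it, enable foreign-key enforcement and create the tables.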
pub fn open(db_path: &Path) -> Result<Connection, Error> {
let conn = match Connection::open_with_flags(db_path, OpenFlags::SQLITE_OPEN_READ_WRITE) {
Ok(conn) => conn,
Err(_) => {
eprintln!("Database {} does not exist, creating new...", get_path(db_path));
let conn = Connection::open_with_flags(db_path, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE)?;
conn.execute(sqlite_statements::PRAGMA_FOREIGN_KEY_ENFORCE, [])?;
create_tables(&conn)?;
conn
} | };
Ok(conn)
}
fn create_tables(conn: &Connection) -> Result<(), Error> {
conn.execute(sqlite_statements::CREATE_FILES_TABLE, [])?;
conn.execute(sqlite_statements::CREATE_TAGS_TABLE, [])?;
conn.execute(sqlite_statements::CREATE_FILE_TAGS_TABLE, [])?;
Ok(())
} | |
mapper.py | #!/usr/bin/env python3
#
# Copyright 2015 Dovetail Genomics LLC
#
#
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import range
from builtins import object
import pickle as pickle
debug=False
class node(object):
def __init__(self,s,x):
self.s = s
self.x = x
# self.forward_link = False
# self.reverse_link = False
self.links = [False,False]
self.strand = 1
self.mapped_coord = 0
self.root = self
self.leaf = self
def set_forward_link(self,o):
self.links[0] = o
def set_reverse_link(self,o):
self.links[1] = o
def get_forward_link(self):
return self.links[0]
def get_reverse_link(self):
return self.links[1]
def linkable(self):
return ((not self.links[0]) or (not self.links[1]))
def __repr__(self):
return str((self.s,self.x))#,self.strand,self.mapped_coord))
def __hash__(self):
return hash(tuple((self.s,self.x)))
def __cmp__(self,other):
if self.__hash__() < other.__hash__():
return -1
elif self.__hash__() == other.__hash__():
return 0
else:
return 1
def __eq__(self,other):
if self.__hash__() == other.__hash__():
return True
else:
return False
class map_graph(object):
def __init__(self,scaffold_name_list,scaffold_len_list,minlength=0):
self.originals_index={} #[]
self.originals_names={} #list(scaffold_name_list)
self.debug=False
# print "names:",self.originals_names
self.name_to_index = {}
self.obsoleted={}
self.roots = {}
self.length = {}
self.node={}
self.alias={}
self.alias_to_node={}
self.dont_update_coords=False
for i in range(len(scaffold_len_list)):
if scaffold_len_list[i] >= minlength:
self.originals_names[i]=scaffold_name_list[i]
self.name_to_index[scaffold_name_list[i]] = i
ll = [ node(i,1),node(i,scaffold_len_list[i]) ]
self.length[ (ll[0],ll[1]) ] = scaffold_len_list[i]-1
self.length[ (ll[1],ll[0]) ] = scaffold_len_list[i]-1
self.length[ (ll[0].s,ll[0].x) ] = scaffold_len_list[i]
self.node[ (ll[0].s,ll[0].x) ] = ll[0]
self.node[ (ll[1].s,ll[1].x) ] = ll[1]
ll[0].set_forward_link(ll[1])
ll[1].set_reverse_link(ll[0])
ll[1].root = ll[0]
ll[1].leaf = ll[1]
ll[0].root = ll[0]
ll[0].leaf = ll[1]
# ll[0].mapped_coord = 1
ll[1].mapped_coord = scaffold_len_list[i]-1
self.roots[ll[0]]=1
self.originals_index[i]=ll
#print self.length
def set_alias(self,node,alias):
self.alias[node]=alias
self.alias_to_node[alias]=node
def find_node_by_name(self,name):
if name in self.alias_to_node:
print("#alias",name,self.alias_to_node[name])
return self.alias_to_node[name]
end=False
if name[-2:]==".5" or name[-2:]==".3":
end = name[-1]
name=name[:-2]
c = name.split("_")
base = int(c[-1])
n = "_".join(c[:-1])
no = self.node[(self.name_to_index[n],base)]
if end=="3":
no = no.get_forward_link()
#print self.print_name(no),self.print_name(ni)
return no
def find_node_by_pair(self,id,base):
return self.node[(id,base)]
def write_map_to_file(self,filename):
f=open(filename,"wb")
pickle.dump( self, f)
f.close()
# f.write(
# f.write(str([ i.serialize() for i in self.originals_index ] ))
# f.write("\n")
# f.close()
def add_oriented_break(self,s,x,strand):
a,b = self.add_break_in_original(s,x)
if strand=="+": return a,b
else: return b,a
def flip(self,a):
r,l= a.root,a.leaf
if r in self.roots : del self.roots[r]
self.roots[l]=1
self.reroot(l)
def | (self,s,x):
ll = self.originals_index.get(s,False)
# Check before the debug print: len(ll) would raise a TypeError when ll is False.
if not ll:
print("warning: tried to add break to unknown scaffold")
return
print("add break in",ll,s,x,len(ll))
i=len(ll)-1
while ll[i].x > x:
print(len(ll),i,x,ll[i])
i-=1
a = node(s,x)
b = node(s,x+1)
# a.root = ll[i].root
self.node[ (s,x ) ] = a
self.node[ (s,x+1) ] = b
ll.insert(i+1,b)
ll.insert(i+1,a)
if ll[i].root in self.roots: del self.roots[ll[i].root]
a.root = a
self.roots[a]=1
if ll[i+3].root in self.roots: del self.roots[ll[i+3].root]
self.roots[b]=1
b.root = b
b.mapped_coord = 0
a.mapped_coord = 0
# ll[i+3].root = b
if not ll[i].get_forward_link()==ll[i+3]:
print("wtf?",ll[i],ll[i+1],ll[i].get_forward_link(),ll[i+3])
raise Exception
ll[i].set_forward_link(a)
a.set_reverse_link(ll[i])
self.length[(ll[i],a)]=abs(x-ll[i].x)
self.length[(a,ll[i])]=abs(x-ll[i].x)
self.length[(b,a)]=1
self.length[(a,b)]=1
b.set_forward_link(ll[i+3])
ll[i+3].set_reverse_link(b)
self.length[(ll[i+3],b)]=abs(x+1-ll[i+3].x)
self.length[(b,ll[i+3])]=abs(x+1-ll[i+3].x)
self.roots[b]=1
self.reroot(a.root)
self.reroot(b.root)
return(a,b)
# def remap_coord(self,name,x):
# n = self.find_node_by_name(name)
#
# queue=[n]
# seen={}
# while len(queue)>0:
# s = queue.pop()
# if s.mapped_coord
def map_coord_by_name(self,name,x):
n = self.find_node_by_name(name)
i=0
ll = self.originals_index.get(n.s,False)
if not ll:
print("wtf?", name,x,n)
exit(0)
print("##", name,x,n,ll)
while (i < len(ll)-1) and ll[i+1].x<x:
# print i, len(ll), ll[i],x,ll[i+1]
i+=1
if i%2==1:
i+=1
# print i
xx = ll[i].strand * ( x - ll[i].x ) + ll[i].mapped_coord + 1
# print "####",xx,i,ll[i].root,ll[i],ll[i].get_reverse_link(),ll[i].get_forward_link()
# return((ll[i].s,xx,ll[i].root))
a,b = ll[i].root.s, ll[i].root.x
return((a,b),xx)
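# Map position x on original scaffold s to its coordinate in the current layout,
# returned as ((root.s, root.x), mapped_x) where the root node identifies the scaffold.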
def map_coord(self,s,x):
i=0
ll = self.originals_index.get(s,False)
if not ll:
return((s,1),x)
# print s,x,ll
while (i < len(ll)-1) and ll[i+1].x-1<=x:
# print i, len(ll), ll[i],x,ll[i+1]
i+=1
if self.debug: print(s,x,i)
#XXX why was this ever done?
# if i%2==1:
# if self.debug: print "XXXX"
# i+=1
# print i
if self.debug: print(i, len(ll))
xx = ll[i].strand * ( x - (ll[i].x -1) ) + ll[i].mapped_coord
if xx<0 :
print("wtf?", ll[i].strand * ( x - (ll[i].x-1) ) + ll[i].mapped_coord, "=", ll[i].strand ,"* (", x," -", (ll[i].x-1)," ) + ", ll[i].mapped_coord, "ll[i].strand * ( x - (ll[i].x-1) ) + ll[i].mapped_coord")
print(self.print_name(ll[i]))
print("x=",x)
print("i",i)
print("s=",s)
print("ll[i]=",ll[i])
print("ll[i].x=",ll[i].x)
print("ll=",ll)
print("ll[i].mapped_coord=",ll[i].mapped_coord)
#raise Exception
# print "####",xx,i,ll[i].root,ll[i],ll[i].get_reverse_link(),ll[i].get_forward_link()
# return((ll[i].s,xx,ll[i].root))
a,b = ll[i].root.s, ll[i].root.x
return((a,b),xx)
def find_original_coord(self,node,y,gapslop=0):
#ll = self.originals_index(node.s,False)
#if not ll: print "wtf?",s,x,"find_originals_coord"
onode=node
start = node.mapped_coord
queue = [node]
last=False
visited={}
b1=False
b2=False
ox=-1
while len(queue)>0:
n = queue.pop(0)
if not visited.get(n,False):
if last:
print("visit:",last,n,last.mapped_coord,n.mapped_coord,y)
if ( last.mapped_coord <= y and y < n.mapped_coord):
if (last.s == n.s):
if (y-last.mapped_coord)<gapslop:
print("bump back:",y,last2,last)
return(last2,last,"gap")
elif (n.mapped_coord - y)<gapslop:
print("bump ahead:",y,n.mapped_coord+1)
y=n.mapped_coord+1
else:
strand="+"
if last.x>n.x : strand="-"
s = last.s
if strand=="+":
ox=last.x+y-last.mapped_coord
else:
ox=last.x-(y-last.mapped_coord)
print("forward",last, n, last.mapped_coord,"<=",y,"<", n.mapped_coord, self.print_name(last), self.print_name(n),ox)
# return(self.originals_names[last.s],ox, strand )
return(last.s,ox, strand )
else:
# print "gap forward", n, last, n.mapped_coord, last.mapped_coord, self.print_name(n), self.print_name(last)
print("gap forward", last, n, last.mapped_coord,"<=",y,"<", n.mapped_coord, self.print_name(last), self.print_name(n))
# print "gap",last,n
return(last,n,"gap")
# return((last.s,last.x),(n.s,n.x),"gap"+strand)
visited[n]=True
if n.get_forward_link(): queue.append(n.get_forward_link())
if n.get_reverse_link(): queue.append(n.get_reverse_link())
# if n.get_reverse_link() == last:
# #n.strand = 1
# pass
# else:
# pass
#n.strand = -1
last2 = last
last = n
# if b1 and b2:
# print node,node.mapped_coord ,node.leaf,node.leaf.mapped_coord, y,b1,b2,b1.mapped_coord,b2.mapped_coord, b1.s,b1.x, b2.s,b2.x
# else:
# print node,node.mapped_coord ,node.leaf,node.leaf.mapped_coord, y,b1,b2 #,b1.mapped_coord,b2.mapped_coord
print("mapper wtf?",self.print_name(onode),self.originals_names[onode.s],y,self.print_name(n),n.mapped_coord,ox,visited)
return(n,ox)
#(137628, 1) (137628, 3088) 3087 3088 False False
def originals(self,a):
oris = {}
queue=[a]
visited={}
mapped_coord = 0
a.mapped_coord = 0
visited[a]=True
if a.get_forward_link():
queue.append(a.get_forward_link())
a.strand = 1
if a.get_reverse_link():
queue.append(a.get_reverse_link())
a.strand = -1
last = a
root = a
a.root = a
while 0<len(queue):
n = queue.pop(0)
oris[n.s]=1
if not visited.get(n,False):
visited[n]=True
if n.get_forward_link(): queue.append(n.get_forward_link())
if n.get_reverse_link(): queue.append(n.get_reverse_link())
if n.get_reverse_link() == last:
n.strand = 1
else:
n.strand = -1
last = n
return(list(oris.keys()));
def print_name(self,n):
# print self.originals_names
# print n,self.originals_names[n[0]],"_",str(n[1])
if n in self.alias: return self.alias[n]
if type(n)==type((1,1)):
return self.originals_names[n[0]]+"_"+str(n[1])
else:
return self.originals_names[n.s]+"_"+str(n.x)
def nodepair_list(self,n):
lasti=-1
lastx=-1
lastn=False
for nn in self.nodes_dfs(n):
if lasti==nn.s:
yield( ( lastn , nn ) )
lastn=nn
lasti=nn.s
def region_list(self,n):
lasti=0
lastx=-1
for nn in self.nodes_dfs(n):
# print nn
if lasti==nn.s:
if lastx<nn.x:
strand=1
else:
strand=-1
range_start = min(lastx,nn.x)
range_end = max(lastx,nn.x)
if range_start>0 and range_end>0:
yield (self.originals_names[nn.s]+":"+str(range_start)+"-"+str(range_end),strand,self.originals_names[nn.s],range_start,range_end,min(lastm,nn.mapped_coord),max(lastm,nn.mapped_coord))
lasti=nn.s
lastx=nn.x
lastm=nn.mapped_coord
def nodes_dfs(self,n,colors={}):
# yield(n)
colors={}
colors[n]=1
queue = [n]
while len(queue)>0:
n = queue.pop(0)
# print "x#",self.print_name(n)
yield(n)
c1 = n.get_forward_link()
# print c1
if c1 and c1 not in colors:
queue.append(c1)
colors[c1]=1
# self.nodes_dfs(c1,colors)
c2 = n.get_reverse_link()
# print c2
if c2 and c2 not in colors:
colors[c2]=1
queue.append(c2)
# self.nodes_dfs(c2,colors)
def write_dot(self,filename):
f=open(filename,"wt")
for r in list(self.roots.keys()):
f.write(" -- ".join( map(str,self.nodes_dfs(r)) ))
# for n in self.nodes_dfs(r):
# f.write( str(n))
# f.write(" -- ")
# f.write(str(r))
f.write("\n")
f.close()
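# Make `a` the root of its chain: breadth-first walk every linked node, updating
# root/leaf pointers, strand orientation and cumulative mapped_coord offsets.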
def reroot(self,a):
debug=False
if debug: print("reroot")
total_len = 0
queue=[a]
visited={}
if not a == a.root:
if a.root in self.roots:
del self.roots[a.root]
self.roots[a]=1
mapped_coord = 0
a.mapped_coord = 0
visited[a]=True
if a.get_forward_link():
queue.append(a.get_forward_link())
a.strand = 1
if a.get_reverse_link():
queue.append(a.get_reverse_link())
a.strand = -1
last = a
root = a
a.root = a
if debug: print("root",a)
if debug: print(">>>>",a)
while 0<len(queue):
# print queue,visited
n = queue.pop(0)
if not visited.get(n,False):
visited[n]=True
n.root = root
if n.get_forward_link(): queue.append(n.get_forward_link())
if n.get_reverse_link(): queue.append(n.get_reverse_link())
if n.get_reverse_link() == last:
n.strand = 1
else:
n.strand = -1
mapped_coord += self.length[(last,n)]
n.mapped_coord = mapped_coord
if debug: print(">>>>",n,self.length[(last,n)],mapped_coord,root)
total_len += self.length[(last,n)]
last = n
for k in list(visited.keys()):
k.leaf = last
self.length[a] = total_len
if debug: print("done rerooting")
def update_roots(self):
# import set
rr=set(self.roots.keys())
ob=set(self.obsoleted.keys())
self.roots={}
for rrr in rr.difference(ob):
self.roots[rrr]=1
# self.roots = list(rr.difference(ob))
# self.roots = [ r for r in self.roots if not self.obsoleted.has_key(r) ]
self.obsoleted = {}
def unjoin(self,a,b):
if a.get_forward_link()==b:
a.set_forward_link(False)
#print "unlink a forward"
elif a.get_reverse_link()==b:
a.set_reverse_link(False)
#print "unlink a reverse"
if b.get_forward_link()==a:
b.set_forward_link(False)
#print "unlink b forward"
elif b.get_reverse_link()==a:
b.set_reverse_link(False)
#print "unlink b reverse"
self.reroot(a)
self.reroot(b)
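# Join the free ends of nodes a and b with a gap of size g, covering the four
# possible orientations, then re-root the merged chain unless coordinate updates
# are deferred via dont_update_coords.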
def add_join(self,a,b,g=1):
debug=False
if debug: print("add join",a,b,g)
self.length[(a,b)] = g+1
self.length[(b,a)] = g+1
if debug: print(a,b,a.root,b.root)
if a.root == b.root:
print("Tried to join two nodes already having the same root.", a,b,a.root,b.root,a.leaf,b.leaf)
return
newroot = a.root
if newroot == a:
# print "##root clash"
newroot = b.root
if newroot == b:
newroot = a.leaf
if debug: print("length",a,b,g)
if debug: print("length",b,a,g)
if not newroot == a.root:
# self.obsoleted[a.root]=True
if a.root in self.roots: del self.roots[a.root]
if not newroot == b.root:
# self.obsoleted[b.root]=True
# del self.roots[b.root]
if b.root in self.roots: del self.roots[b.root]
# --a--> --b-->
if (not a.get_forward_link()) and (not b.get_reverse_link()):
if debug: print(" --a--> --b--> ")
a.set_forward_link(b)
b.set_reverse_link(a)
# self.roots.append(a.root)
# self.reroot(a.root)
# --a--> <--b--
elif (not a.get_forward_link()) and (not b.get_forward_link()):
if debug: print(" --a--> <--b-- ")
a.set_forward_link(b)
b.set_forward_link(a)
# self.roots.append(a.root)
# self.reroot(a.root)
# <--a-- --b-->
elif (not a.get_reverse_link()) and (not b.get_reverse_link()):
if debug: print(" <--a-- --b--> ")
a.set_reverse_link(b)
b.set_reverse_link(a)
# self.roots.append(a.leaf)
# self.reroot(a.leaf)
# <--a-- <--b--
elif (not a.get_reverse_link()) and (not b.get_forward_link()):
if debug: print(" <--a-- <--b-- ")
a.set_reverse_link(b)
b.set_forward_link(a)
# self.roots
# self.roots.append(b.root)
# self.reroot(b.root)
else:
print("Tried to join to edges that are already joined",a,b)
exit(0)
self.roots[newroot]=1
if not self.dont_update_coords:
print("#rerooting")
self.reroot(newroot)
debug=False
if __name__=="__main__":
m = map_graph( ["a","b","c"],[10000,10000,10000] )
m.add_break_in_original(1,5000)
for r in list(m.roots.keys()):
print("scaffold:",r, "--" ,r.leaf)
print("#",m.map_coord(1,4001),4001)
print("#",m.map_coord(1,6001),1001)
m.add_join( m.originals_index[1][1],m.originals_index[1][2] )
#m.add_join( m.originals_index[0][1],m.originals_index[2][0] )
for r in list(m.roots.keys()):
print("scaffold:",r, "--" ,r.leaf)
print("#",m.map_coord(1,4001),4001)
print("#",m.map_coord(1,6001),6001)
print("#",m.map_coord(0,1001),1001)
print("#",m.map_coord(2,1001),1001)
m.add_join( m.originals_index[1][3],m.originals_index[2][0] )
for r in list(m.roots.keys()):
print("scaffold:",r, "--" ,r.leaf)
print("#",m.map_coord(1,4001),4001)
print("#",m.map_coord(1,6001),6001)
print("#",m.map_coord(0,1001),1001)
print("#",m.map_coord(2,1001),11001)
m.write_map_to_file("test_map.txt")
m.add_join( m.originals_index[1][0],m.originals_index[0][0] )
for r in list(m.roots.keys()):
print("scaffold:",r, "--" ,r.leaf)
print("#",m.map_coord(1,4001),4001)
print("#",m.map_coord(1,6001),6001)
print("#",m.map_coord(0,100) ,29900)
print("#",m.map_coord(2,1001),11001)
f = open("test_map.txt")
m2 = pickle.load(f)
print(m2)
for r in list(m2.roots.keys()):
print("scaffold:",r, "--" ,r.leaf)
| add_break_in_original |
AddressDialog.js | import React from "react";
import { withStyles } from "@material-ui/core/styles"
import { t } from "../utils/t";
import Button from "@material-ui/core/Button";
import Dialog from "@material-ui/core/Dialog";
import DialogActions from "@material-ui/core/DialogActions";
import DialogContent from "@material-ui/core/DialogContent";
import DialogContentText from "@material-ui/core/DialogContentText";
import DialogTitle from "@material-ui/core/DialogTitle";
import TableBody from "@material-ui/core/TableBody";
import TableRow from "@material-ui/core/TableRow";
import TableCell from "@material-ui/core/TableCell";
import Table from "@material-ui/core/Table";
import { HISTORY } from "../utils/constants";
import api from "../utils/api";
const styles = theme => ({
dialog: {
[theme.breakpoints.down("xs")]: {
"& .MuiDialog-container .MuiDialog-paper": {
margin: "0px 0px",
maxHeight: "100%",
borderRadius: 0
},
}
},
dialogBody: {
overflowY: "auto",
display: "flex",
flexDirection: "column"
},
tableCellBold: {
fontWeight: "bold"
},
underline: {
textDecoration: "underline"
} | class AddressDialog extends React.Component {
constructor(props) {
super(props);
this.state = {
classes: props.classes,
coin_id: props.coin_id,
logged_account: props.logged_account,
open: props.open,
_address: null,
_public_key: null,
_private_key: null,
_history: HISTORY
};
};
componentWillReceiveProps(new_props) {
const { coin_id, logged_account } = this.state;
this.setState({...new_props}, () => {
if(logged_account) {
if( coin_id !== new_props.coin_id || logged_account.name !== new_props.logged_account.name ) {
this._get_address_and_keys_data();
}
}
});
};
componentDidMount() {
this._get_address_and_keys_data();
}
_reset_state = () => {
setTimeout(() => {
this.setState({
_address: null,
_public_key: null,
_private_key: null,
});
}, 500);
};
_on_close = (event, account) => {
this._reset_state();
this.props.onClose(event, account);
};
_on_cancel = (event, account) => {
this._reset_state();
this.props.cancel(event, account);
};
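// Derive the address, public key and private key for the selected coin from the
// logged account's seed and Hive credentials, then cache them in component state.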
_get_address_and_keys_data() {
const { coin_id, logged_account } = this.state;
if(coin_id && logged_account) {
const _address = api.get_address_by_seed(coin_id, logged_account.seed, logged_account.hive_username);
const _public_key = api.get_public_key_by_seed(coin_id, logged_account.seed, logged_account.hive_username, logged_account.hive_password);
const _private_key = api.get_private_key_by_seed(coin_id, logged_account.seed, logged_account.hive_username, logged_account.hive_password);
this.setState({_address, _public_key, _private_key});
}
}
_open_link = (event, link) => {
const { _history } = this.state;
_history.push(link);
};
render() {
const { classes, _address, _public_key, _private_key, coin_id, open } = this.state;
return (
<Dialog
open={open}
onClose={(event) => {this._on_close(event, coin_id)}}
className={classes.dialog}
aria-labelledby="show-address-and-keys-dialog-title"
aria-describedby="show-address-and-keys-dialog-description"
>
{
(_address && _public_key && _private_key) ?
<div className={classes.dialogBody}>
<DialogTitle id="show-address-and-keys-dialog-title" className={classes.breakWord}>{t( "components.address_dialog.title", {coin_id})}</DialogTitle>
<DialogContent className={classes.dialogBody} >
<DialogContentText id="show-address-and-keys-dialog-description">
<Table aria-label="main-info-table">
<TableBody>
<TableRow>
<TableCell align="left" className={classes.tableCellBold}>{t( "words.address", {FLC: true})}</TableCell>
<TableCell align="right">{_address}</TableCell>
</TableRow>
<TableRow>
<TableCell align="left" className={classes.tableCellBold}>{t( "words.public key", {FLC: true})}</TableCell>
<TableCell align="right">{_public_key}</TableCell>
</TableRow>
<TableRow>
<TableCell align="left" className={classes.tableCellBold}>{t( "words.private key", {FLC: true})}</TableCell>
<TableCell align="right">{_private_key}</TableCell>
</TableRow>
<TableRow>
<TableCell align="left" className={classes.tableCellBold}>{t( "words.crypto id", {FLC: true})}</TableCell>
<TableCell align="right" className={classes.underline} onClick={(event) => {this._open_link(event, "/coins/" + coin_id + "/transactions")}}>{coin_id}</TableCell>
</TableRow>
</TableBody>
</Table>
</DialogContentText>
</DialogContent>
</div>: null
}
<DialogActions>
<Button onClick={(event) => {this._on_cancel(event, coin_id)}} color="primary" autoFocus>
{t( "words.close")}
</Button>
</DialogActions>
</Dialog>
);
}
}
export default withStyles(styles)(AddressDialog); | });
|
noop-interceptor.service.spec.ts | import { Test, TestingModule } from '@nestjs/testing';
import { suite } from 'uvu';
import { equal } from 'uvu/assert';
import { NoopInterceptorService } from '../src/interceptor/providers/noop-interceptor.service';
const NoopInterceptorServiceSuite = suite<{ service: NoopInterceptorService }>(
'NoopInterceptorService',
{ service: undefined },
);
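// Compile a Nest testing module once and share the resolved service instance with
// every test through the uvu suite context.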
NoopInterceptorServiceSuite.before(async (context) => {
const module: TestingModule = await Test.createTestingModule({
providers: [NoopInterceptorService],
}).compile();
context.service = module.get<NoopInterceptorService>(NoopInterceptorService); | NoopInterceptorServiceSuite('It should return a success log object', ({ service }) => {
Date.now = () => 133;
equal(service.getSuccessContext(151, {} as any, 50, {} as any), {
callerAddress: 'caller ip',
method: 'method',
callPoint: 'call point',
responseTime: 83,
contentLength: 151,
protocol: 'protocol',
status: 'status',
meta: undefined,
});
});
NoopInterceptorServiceSuite.run(); | }); |
test_model.py | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2013 Bartosz Zaczynski
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
import numpy
from random import random
from microanalyst.model import Model
from microanalyst.model.genes import Gene
class TestFilenames(unittest.TestCase):
def test_show_full_path_by_default(self):
# given
model = TestModel.with_filenames(
['C:\\windows style\\path\\foo.xls', 'relative\\path\\bar.xls'],
['/unix style/path/baz.xls', 'relative/path/blah'])
# when
actual = model.filenames()
# then
self.assertListEqual(
['C:\\windows style\\path\\foo.xls',
'relative\\path\\bar.xls',
'/unix style/path/baz.xls',
'relative/path/blah'],
actual)
def test_drop_windows_style_path(self):
# given
model = TestModel.with_filenames(
['C:\\windows\style.xls', 'foo\\bar.xls'],
['blah\\blah.txt'])
# when
actual = model.filenames(with_path=False)
# then
self.assertListEqual(['style.xls', 'bar.xls', 'blah.txt'], actual)
def test_drop_unix_style_path(self):
# given
model = TestModel.with_filenames(
['/unix/style.xls', 'foo/bar.xls'],
['blah/blah.txt'])
# when
actual = model.filenames(with_path=False)
# then
self.assertListEqual(['style.xls', 'bar.xls', 'blah.txt'], actual)
def test_drop_unix_and_windows_style_path(self):
# given
model = TestModel.with_filenames(
['C:\\windows\\style.xls', 'foo/bar.xls'],
['blah\\blah.txt'])
# when
actual = model.filenames(with_path=False)
# then
self.assertListEqual(['style.xls', 'bar.xls', 'blah.txt'], actual)
def test_show_only_filenames_for_the_given_iteration(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
actual = model.filenames(iteration=1)
# then
self.assertIn('i2/f1', actual)
self.assertIn('i2/f2', actual)
self.assertIn('i2/f3', actual)
def test_retain_filename_order_for_the_given_iteration(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
actual = model.filenames(iteration=1)
# then
self.assertListEqual(['i2/f1', 'i2/f2', 'i2/f3'], actual)
def test_retain_filename_order_across_all_iterations(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
actual = model.filenames()
# then
self.assertListEqual(
['i1/f1', 'i1/f2', 'i2/f1', 'i2/f2', 'i2/f3', 'i3/f1'],
actual)
def test_retain_duplicate_filenames_from_different_folders(self):
# given
model = TestModel.with_filenames(['folder1/filename',
'folder2/filename'])
# when
actual = model.filenames(with_path=False)
# then
self.assertListEqual(['filename', 'filename'], actual)
def test_do_not_confuse_iterations_zero_index_for_none(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
actual = model.filenames(iteration=0)
# then
self.assertListEqual(['i1/f1', 'i1/f2'], actual)
def test_each_invocation_should_return_a_copy(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
copy1 = model.filenames()
copy2 = model.filenames()
# then
self.assertNotEqual(id(copy1), id(copy2))
def test_each_invocation_should_return_the_same_result(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
copy1 = model.filenames()
copy2 = model.filenames()
# then
self.assertListEqual(copy1, copy2)
def test_return_flat_list_if_no_iteration_specified(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# when
actual = model.filenames()
# then
for item in actual:
self.assertIsInstance(item, str)
def test_allow_variable_number_of_filenames_across_iterations(self):
# given
model = TestModel.with_filenames(
['i1/f1', 'i1/f2'],
['i2/f1', 'i2/f2', 'i2/f3'],
['i3/f1'])
# then
for i, length in enumerate((2, 3, 1)):
self.assertEqual(length, len(model.filenames(iteration=i)))
def test_raise_error_if_spreadsheet_has_no_filename_defined(self):
# given
json_data = {
'iterations': [
{
'spreadsheets': [
{
'filename': 'ok',
'microplates': {}
},
{
'microplates': {}
}
]
}
]
}
# then
with self.assertRaises(KeyError):
# when
model = Model(json_data)
class TestMicroplateNames(unittest.TestCase):
def test_return_empty_list_for_an_empty_model(self):
# given
model = TestModel.empty()
# when
actual = model.microplate_names()
# then
self.assertListEqual([], actual)
def test_return_unique_microplate_names(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['001', '002', '003'], ['007']])
# when
actual = model.microplate_names()
# then
self.assertItemsEqual(set(actual), actual)
def test_return_sorted_microplate_names(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names()
# then
self.assertListEqual(sorted(actual), actual)
def test_return_flat_list_if_no_spreadsheet_nor_iteration_defined(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names()
# then
for item in actual:
self.assertIsInstance(item, str)
def test_return_flat_list_if_only_iteration_defined(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names(iteration=1)
        # then
for item in actual:
self.assertIsInstance(item, str)
def test_return_flat_list_if_only_spreadsheet_defined(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names(spreadsheet=1)
        # then
for item in actual:
self.assertIsInstance(item, str)
def test_return_flat_list_if_both_spreadsheet_and_iteration_defined(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names(iteration=1, spreadsheet=1)
        # then
for item in actual:
self.assertIsInstance(item, str)
def test_do_not_confuse_iterations_zero_index_for_none(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names(iteration=0)
        # then
self.assertListEqual(['001', '002', '003'], actual)
def test_do_not_confuse_spreadsheets_zero_index_for_none(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# when
actual = model.microplate_names(spreadsheet=0)
        # then
self.assertListEqual(['001', '002', '011', '012', '013'], actual)
def test_pick_specified_iteration(self):
# given
model = TestModel.with_microplates(
[['001', '002']],
[['011', '012', '013'], ['017']],
[['021', '022', '023'], ['027'], ['029']])
# when
actual = model.microplate_names(iteration=1)
# then
self.assertListEqual(['011', '012', '013', '017'], actual)
def test_raise_error_when_specified_iteration_doesnt_exist(self):
# given
model = TestModel.with_microplates([['001']])
# then
with self.assertRaises(IndexError):
# when
model.microplate_names(iteration=1)
def test_pick_both_specified_iteration_and_spreadsheet(self):
# given
model = TestModel.with_microplates(
[['001', '002']],
[['011', '012', '013'], ['017']],
[['021', '022', '023'], ['025', '027'], ['029']])
# when
actual = model.microplate_names(iteration=2, spreadsheet=1)
# then
self.assertListEqual(['025', '027'], actual)
def test_raise_error_when_specified_iteration_doesnt_exist_but_spreadsheet_does(
self):
# given
model = TestModel.with_microplates([['001', '002'], ['011', '012']])
# then
with self.assertRaises(IndexError):
# when
model.microplate_names(iteration=1, spreadsheet=1)
def test_raise_error_when_specified_spreadsheet_doesnt_exist_but_iteration_does(self):
# given
model = TestModel.with_microplates([['001', '002'], ['011', '012']])
# then
with self.assertRaises(IndexError):
# when
model.microplate_names(iteration=0, spreadsheet=2)
def test_pick_specified_spreadsheet(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']],
[['021', '022', '023'], ['025', '027'], ['029']])
# when
actual = model.microplate_names(spreadsheet=1)
# then
self.assertListEqual(['001', '002', '003', '017', '025', '027'], actual)
def test_raise_error_if_spreadsheet_doesnt_exist(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']])
# then
with self.assertRaises(IndexError):
# when
actual = model.microplate_names(spreadsheet=5)
def test_omit_missing_spreadsheet(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']],
[['021', '022', '023'], ['025', '027'], ['028', '029']])
# when
actual = model.microplate_names(spreadsheet=2)
# then
self.assertListEqual(['028', '029'], actual)
def test_pick_all_microplates(self):
# given
model = TestModel.with_microplates(
[['001', '002'], ['001', '002', '003']],
[['011', '012', '013'], ['017']],
[['021', '022', '023'], ['025', '027'], ['028', '029']])
# when
actual = model.microplate_names()
# then
self.assertListEqual(
['001', '002', '003', '011', '012',
'013', '017', '021', '022', '023',
'025', '027', '028', '029'],
actual)
def test_allow_variable_number_of_microplates_per_iteration(self):
# given
model = TestModel.with_microplates(
[['001'], ['001', '002'], ['001', '002', '003']],
[['011'], ['011', '012'], ['011', '012', '013']])
# when
actual = model.microplate_names(iteration=1)
# then
self.assertListEqual(['011', '012', '013'], actual)
def test_allow_variable_number_of_microplates_per_spreadsheet(self):
# given
model = TestModel.with_microplates(
[['001'], ['001', '002'], ['001', '002', '003']],
[['001', '002'], ['001', '002', '003'], ['011', '012', '013']])
# when
actual = model.microplate_names(spreadsheet=1)
# then
self.assertListEqual(['001', '002', '003'], actual)
def test_each_invocation_should_return_a_copy(self):
# given
model = TestModel.with_microplates(
[['001'], ['001', '002'], ['001', '002', '003']],
[['011'], ['011', '012'], ['011', '012', '013']])
# when
copy1 = model.microplate_names()
copy2 = model.microplate_names()
# then
self.assertNotEqual(id(copy1), id(copy2))
def test_each_invocation_should_return_the_same_result(self):
# given
model = TestModel.with_microplates(
[['001'], ['001', '002'], ['001', '002', '003']],
[['011'], ['011', '012'], ['011', '012', '013']])
# when
copy1 = model.microplate_names()
copy2 = model.microplate_names()
# then
self.assertListEqual(copy1, copy2)
class TestGenes(unittest.TestCase):
def test_return_microplate_well_tuple_for_gene(self):
# given
model = TestModel.with_genes({'001': {'A1': 'foobar'}})
genes = model.genes()
# when
gene = genes[0]
microplate, well = gene()
# then
self.assertEqual('foobar', str(gene))
self.assertEqual('001', microplate)
self.assertEqual('A1', well)
def test_return_empty_list_if_no_genes_defined_in_json(self):
# given
model = TestModel.empty()
# when/then
self.assertListEqual([], model.genes())
self.assertListEqual([], model.genes(well='A1'))
self.assertListEqual([], model.genes(microplate='001'))
self.assertListEqual([], model.genes(well='A1', microplate='001'))
def test_each_invocation_should_return_a_copy(self):
# given
model = TestModel.with_genes({'001': {'A1': 'foobar'}})
# when
copy1 = model.genes()
copy2 = model.genes()
# then
self.assertNotEqual(id(copy1), id(copy2))
def test_each_invocation_should_return_the_same_result(self):
# given
model = TestModel.with_genes({'001': {'A1': 'foobar'}})
# when
copy1 = model.genes()
copy2 = model.genes()
# then
self.assertEqual(copy1, copy2)
def test_return_sorted_gene_names(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes()
# then
self.assertItemsEqual(sorted(actual), actual)
def test_return_unique_gene_names(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes()
# then
self.assertItemsEqual(set(actual), actual)
def test_return_flat_list_if_no_microplate_nor_well_specified(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes()
# then
for item in actual:
self.assertIsInstance(item, Gene)
def test_return_flat_list_if_only_well_specified(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A1': 'foo',
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes(well='A1')
# then
for item in actual:
self.assertIsInstance(item, Gene)
def test_return_flat_list_if_only_microplate_specified(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A1': 'foo',
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes(microplate='001')
# then
for item in actual:
self.assertIsInstance(item, Gene)
def test_remove_none_values_from_the_resulting_list(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A1': 'foo',
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes(microplate='001')
# then
self.assertNotIn(None, actual)
def test_remove_none_value_from_the_resulting_one_element_list(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A1': 'foo',
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes(well='A4', microplate='001')
# then
self.assertNotIn(None, actual)
def test_return_one_element_list_if_both_well_and_microplate_specified(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'def',
'A2': 'abc',
'A3': '123'
},
'002': {
'A1': 'foo',
'A7': 'abc',
'H12': 'ghij'
}
})
# when
actual = model.genes(well='A3', microplate='001')
# then
self.assertIsInstance(actual, list)
self.assertEqual(1, len(actual))
def test_return_scalar_for_unambiguous_gene(self):
# given
model = TestModel.with_genes({'001':{'A1':'foobar'}})
# when
actual = model.gene_at('A1', '001')
# then
self.assertIsInstance(actual, Gene)
def test_return_none_for_missing_unambiguous_gene(self):
# given
model = TestModel.with_genes({'001':{'A1':'foobar'}})
# when
actual = model.gene_at('A2', '001')
# then
self.assertIsNone(actual)
def test_show_genes_for_a_microplate_missing_from_the_iterations(self):
# given
model = Model({
'genes': {
'666': {
'A1': 'blah',
'A2': 'halb'
}
},
'iterations': [
{
'spreadsheets': [
{
'filename': 'foo.xls',
'microplates': {
'001': {
'values': []
}
}
}
]
}
]
})
# when
actual = model.genes(microplate='666')
# then
self.assertListEqual(['blah', 'halb'], sorted([str(x) for x in actual]))
def test_return_empty_list_for_a_microplate_available_in_iterations_but_not_in_genes(self):
# given
model = Model({
'genes': {
'666': {
'A1': 'blah',
'A2': 'halb'
}
},
'iterations': [
{
'spreadsheets': [
{
'filename': 'foo.xls',
'microplates': {
'001': {
'values': []
}
}
}
]
}
]
})
# when
actual = model.genes(microplate='001')
# then
self.assertListEqual([], actual)
def test_raise_error_for_a_microplate_not_available_in_iterations_nor_genes(self):
# given
model = Model({
'genes': {
'666': {
'A1': 'blah',
'A2': 'halb'
}
},
'iterations': [
{
'spreadsheets': [
{
'filename': 'foo.xls',
'microplates': {
'001': {
'values': []
}
}
}
]
}
]
})
# then
with self.assertRaises(KeyError):
# when
actual = model.genes(microplate='777')
def test_well_addressing(self):
# given
model = TestModel.with_genes({'001':{'C10':'foobar'}})
# when
copy1 = model.genes(well='C10')
copy2 = model.genes(well=33)
# then
self.assertListEqual(copy1, copy2)
def test_return_empty_list_if_well_not_present_in_genes(self):
# given
model = TestModel.with_genes({'001':{'C10':'foobar'}})
# when
actual = model.genes(well='A1')
# then
self.assertListEqual([], actual)
def test_return_empty_list_for_genes_used_if_no_genes_defined(self):
# given
model = TestModel.with_microplates(['001', '002'], ['001', '002', '003'])
# when
actual = model.genes_used()
# then
self.assertListEqual([], actual)
def test_return_empty_list_for_genes_used_if_no_microplates_defined(self):
# given
model = TestModel.with_genes({'001': {'A1': 'foobar'}})
# when
actual = model.genes_used()
# then
self.assertListEqual([], actual)
def test_return_subset_for_genes_used(self):
# given
model = Model({
'genes': {
'001': {
'A1': 'blah'
},
'B002': {
'A1': 'dummy'
}
},
'iterations': [
{
'spreadsheets': [
{
'filename': 'foo.xls',
'microplates': {
'001': {
'values': []
}
}
}
]
}
]
})
# when
num_used = len(model.genes_used())
num_all = len(model.genes())
# then
self.assertTrue(num_used < num_all)
def test_do_not_confuse_iteration_zero_index_for_none(self):
# given
genes = {'001': {'A5': 'foo'}, '002': {'G11': 'bar'}}
iterations = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']]
).json_data['iterations']
model = Model({'genes': genes, 'iterations': iterations})
# when
copy1 = model.genes()[0].values(iteration=0, spreadsheet=0)
copy2 = model.genes()[0].values(iteration=None, spreadsheet=0)
# then
self.assertFalse((copy1 == copy2).all())
def test_do_not_confuse_spreadsheet_zero_index_for_none(self):
# given
genes = {'001': {'A5': 'foo'}, '002': {'G11': 'bar'}}
iterations = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']]
).json_data['iterations']
model = Model({'genes': genes, 'iterations': iterations})
# when
copy1 = model.genes()[0].values(spreadsheet=0, iteration=0)
copy2 = model.genes()[0].values(spreadsheet=None, iteration=0)
# then
self.assertFalse((copy1 == copy2).all())
def test_return_same_values_as_with_model_values(self):
# given
genes = {'001': {'A5': 'foo'}, '002': {'G11': 'bar'}}
iterations = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']]
).json_data['iterations']
model = Model({'genes': genes, 'iterations': iterations})
# when
for gene in model.genes():
actual = gene.values()
expected = model.values(microplate=gene.microplate_name, well=gene.well_name)
# then
self.assertTrue((actual == expected).all())
def test_return_scalar_if_both_parameters_are_specified(self):
# given
genes = {'001': {'A5': 'foo'}, '002': {'G11': 'bar'}}
iterations = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']]
).json_data['iterations']
model = Model({'genes': genes, 'iterations': iterations})
# when
actual = model.genes()[0].values(iteration=0, spreadsheet=0)
# then
self.assertIsInstance(actual, float)
def test_representative_values_for_an_iteration_filter(self):
# given
genes = {'001': {'A5': 'foo'}, '002': {'G11': 'bar'}}
iterations = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']]
).json_data['iterations']
model = Model({'genes': genes, 'iterations': iterations})
# when
for gene in model.genes():
actual = gene.values(iteration=1)
expected = model.values(iteration=1,
microplate=gene.microplate_name,
well=gene.well_name)
# then
self.assertTrue((actual == expected).all())
def test_representative_values_for_a_spreadsheet_filter(self):
# given
genes = {'001': {'A5': 'foo'}, '002': {'G11': 'bar'}}
iterations = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']]
).json_data['iterations']
model = Model({'genes': genes, 'iterations': iterations})
# when
for gene in model.genes():
actual = gene.values(spreadsheet=1)
expected = model.values(spreadsheet=1,
microplate=gene.microplate_name,
well=gene.well_name)
# then
self.assertTrue((actual == expected).all())
def test_get_gene_by_name_case_insensitive(self):
# given
model = TestModel.with_genes({'001': {'A1': 'tORF9', 'A2': 'foo'}})
# when
actual = model.gene('torf9')
# then
self.assertEqual('tORF9', actual.name)
def test_return_none_if_gene_name_not_found(self):
# given
model = TestModel.with_genes({'001': {'A1': 'foo', 'A2': 'bar'}})
# when
actual = model.gene('baz')
# then
self.assertIsNone(actual)
def test_return_first_gene_if_duplicates_found(self):
# given
model = TestModel.with_genes({
'001': {
'A1': 'foo',
'A2': 'bar',
'H12': 'foo'
},
'002': {
'A4': 'baz'
},
'003': {
'E4': 'foo'
}
})
# when
actual = model.gene('foo')
# then
self.assertEqual('001', actual.microplate_name)
self.assertEqual('A1', actual.well_name)
class TestValues(unittest.TestCase):
def test_return_scalar_if_all_parameters_are_specified(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
actual = model.values(iteration=0,
spreadsheet=0,
microplate='001',
well='A1')
# then
self.assertIsInstance(actual, float)
def test_return_numpy_ndarray_if_any_parameter_is_missing(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when/then
self.assertIsInstance(model.values(), numpy.ndarray)
self.assertIsInstance(model.values(iteration=0), numpy.ndarray)
self.assertIsInstance(model.values(spreadsheet=0), numpy.ndarray)
self.assertIsInstance(model.values(microplate='001'), numpy.ndarray)
self.assertIsInstance(model.values(iteration=0, spreadsheet=0), numpy.ndarray)
self.assertIsInstance(model.values(iteration=0, microplate='001'), numpy.ndarray)
self.assertIsInstance(model.values(spreadsheet=0, microplate='001'), numpy.ndarray)
self.assertIsInstance(model.values(iteration=0, spreadsheet=0, microplate='001'), numpy.ndarray)
def test_return_array4d(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
actual = model.values()
# then
self.assertEqual(4, actual.ndim)
def test_return_array3d(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001']], [['001']])
# when/then
self.assertEqual(3, model.values(iteration=1).ndim)
self.assertEqual(3, model.values(spreadsheet=1).ndim)
self.assertEqual(3, model.values(microplate='001').ndim)
def test_return_array2d(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001']], [['001']])
# when/then
self.assertEqual(2, model.values(iteration=1, spreadsheet=1).ndim)
self.assertEqual(2, model.values(iteration=1, microplate='001').ndim)
self.assertEqual(2, model.values(spreadsheet=1, microplate='001').ndim)
def test_return_array1d(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001']], [['001']])
# when
actual = model.values(iteration=1, spreadsheet=1, microplate='001')
# then
self.assertEqual(1, actual.ndim)
def test_return_original_array_if_no_parameter_is_specified(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
actual = model.values()
# then
self.assertTrue((actual == model.array4d).all())
def test_retain_none_values(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
actual = model.values(spreadsheet=1)
# then
self.assertIn(None, list(actual[0][1]))
def test_each_invocation_should_return_a_copy(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
copy1 = model.values(well='A1')
copy2 = model.values(well='A1')
# then
self.assertNotEqual(id(copy1), id(copy2))
def test_each_invocation_should_return_the_same_result(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
copy1 = model.values(well='A1')
copy2 = model.values(well='A1')
# then
self.assertTrue((copy1 == copy2).all())
def test_well_addressing(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
copy1 = model.values(well='C10')
copy2 = model.values(well=33)
# then
self.assertTrue((copy1 == copy2).all())
def test_microplate_addressing(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
copy1 = model.values(microplate='002')
copy2 = model.values(microplate=1)
# then
self.assertTrue((copy1 == copy2).all())
def test_do_not_confuse_iteration_zero_index_for_none(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['003'], ['001', '004']])
# when
copy1 = model.values(iteration=0, microplate='001', well='A1')
copy2 = model.values(iteration=None, microplate='001', well='A1')
# then
self.assertFalse((copy1 == copy2).all())
def test_do_not_confuse_spreadsheet_zero_index_for_none(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
copy1 = model.values(spreadsheet=0, well='A1', iteration=0, microplate='001')
copy2 = model.values(spreadsheet=None, well='A1', iteration=0, microplate='001')
# then
self.assertFalse((copy1 == copy2).all())
def test_do_not_confuse_well_zero_index_for_none(self):
# given
model = TestModel.with_random_values([['001', '002'], ['001']])
# when
copy1 = model.values(well=0, iteration=0, spreadsheet=0, microplate='001')
copy2 = model.values(well=None, iteration=0, spreadsheet=0, microplate='001')
# then
self.assertFalse((copy1 == copy2).all())
def test_variable_number_of_microplates_across_spreadsheets(self):
# given
model = TestModel.with_random_values(
[['001', '002', '003'],
['001', '003'],
['001', '002', '003', '004']])
# when
actual = model.values(microplate='002', spreadsheet=1, well='A1')
# then
self.assertListEqual([None], list(actual))
def test_variable_number_of_spreadsheets_across_iterations(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002']])
# when
actual = model.values(microplate='002', spreadsheet=1, well='A1')
# then
x = model.values(iteration=0, microplate='002', spreadsheet=1, well='A1')
self.assertListEqual([x, None], list(actual))
def test_pad_missing_spreadsheets_with_empty_stubs(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002']])
# when
iterations = model.json_data[u'iterations']
num_spreadsheets = [len(x[u'spreadsheets']) for x in iterations]
max_spreadsheets = max(num_spreadsheets)
min_spreadsheets = min(num_spreadsheets)
def get_microplates(i, j):
return iterations[i][u'spreadsheets'][j][u'microplates']
# then
self.assertTrue(min_spreadsheets == max_spreadsheets == 2)
self.assertEqual({}, get_microplates(1, 1))
def test_dont_pad_spreadsheets_if_not_missing(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['003', '004']],
[['011', '012'], ['013', '014']])
# when
iterations = model.json_data[u'iterations']
num_spreadsheets = [len(x[u'spreadsheets']) for x in iterations]
max_spreadsheets = max(num_spreadsheets)
min_spreadsheets = min(num_spreadsheets)
def get_microplates(i, j):
return iterations[i][u'spreadsheets'][j][u'microplates']
# then
self.assertTrue(min_spreadsheets == max_spreadsheets == 2)
self.assertNotEqual({}, get_microplates(0, 0))
self.assertNotEqual({}, get_microplates(0, 1))
self.assertNotEqual({}, get_microplates(1, 0))
self.assertNotEqual({}, get_microplates(1, 1))
def test_return_none_if_no_iterations_available_in_JSON(self):
# given
model = TestModel.with_genes({})
# when
actual = model.values()
# then
self.assertEqual(None, actual)
def test_representative_value_for_an_iteration_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
iteration = model.values(iteration=1)
spreadsheet = iteration[1]
microplate = spreadsheet[1]
well = microplate[48]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], well)
def | (self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
spreadsheet = model.values(spreadsheet=1)
spreadsheet_within_iteration = spreadsheet[1]
microplate = spreadsheet_within_iteration[1]
well = microplate[48]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], well)
def test_representative_value_for_a_microplate_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
microplate = model.values(microplate='002')
microplate_within_iteration = microplate[1]
microplate_within_iteration_and_spreadsheet = microplate_within_iteration[1]
well = microplate_within_iteration_and_spreadsheet[48]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], well)
def test_representative_value_for_a_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
well = model.values(well='E1')
well_within_iteration = well[1]
well_within_iteration_and_spreadsheet = well_within_iteration[1]
well_within_iteration_spreadsheet_and_microplate = well_within_iteration_and_spreadsheet[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], well_within_iteration_spreadsheet_and_microplate)
def test_representative_value_for_microplate_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
microplates = model.values(microplate='002', well='E1')
wells = microplates[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], wells[1])
def test_representative_value_for_spreadsheet_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
spreadsheet = model.values(spreadsheet=1, well='E1')
microplate = spreadsheet[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], microplate[1])
def test_representative_value_for_spreadsheet_microplate_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
microplate_in_spreadsheets = model.values(spreadsheet=1, microplate='002')
microplate = microplate_in_spreadsheets[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], microplate[48])
def test_representative_value_for_iteration_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
wells = model.values(iteration=1, well='E1')
wells_within_iteration = wells[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], wells_within_iteration[1])
def test_representative_value_for_iteration_microplate_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
microplates = model.values(iteration=1, microplate='002')
microplate_within_iteration = microplates[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], microplate_within_iteration[48])
def test_representative_value_for_iteration_spreadsheet_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
spreadsheet = model.values(iteration=1, spreadsheet=1)
microplate = spreadsheet[1]
# then
self.assertEqual(model.array4d[1, 1, 1, 48], microplate[48])
def test_representative_value_for_spreadsheet_microplate_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
wells = model.values(spreadsheet=1, microplate='002', well='E1')
# then
self.assertEqual(model.array4d[1, 1, 1, 48], wells[1])
def test_representative_value_for_iteration_microplate_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
wells = model.values(iteration=1, microplate='002', well='E1')
# then
self.assertEqual(model.array4d[1, 1, 1, 48], wells[1])
def test_representative_value_for_iteration_spreadsheet_well_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
wells = model.values(iteration=1, spreadsheet=1, well='E1')
# then
self.assertEqual(model.array4d[1, 1, 1, 48], wells[1])
def test_representative_value_for_iteration_spreadsheet_microplate_filter(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
microplate = model.values(iteration=1, spreadsheet=1, microplate='002')
# then
self.assertEqual(model.array4d[1, 1, 1, 48], microplate[48])
def test_representative_value_for_all_filters(self):
# given
model = TestModel.with_random_values(
[['001', '002'], ['001', '002']],
[['001', '002'], ['001', '002']])
# when
actual = model.values(iteration=1, spreadsheet=1, microplate='002', well='E1')
# then
self.assertEqual(model.array4d[1, 1, 1, 48], actual)
class TestControl(unittest.TestCase):
def test_microplate_addressing(self):
# given
model = TestModel.with_control_wells(
{'001': ['A1', 'A2'], '002': ['A4']},
{'001': ['C1', 'C2'], '002': ['C4']})
# when
copy1 = model.control_mask(microplate='002')
copy2 = model.control_mask(microplate=1)
# then
self.assertTrue((copy1 == copy2).all())
def test_none_mask_if_no_control_wells(self):
# given
model = TestModel.empty()
# when
actual = model.control_mask
# then
self.assertIsNotNone(actual)
self.assertIsNone(actual.values)
def test_global_control_representative_values(self):
# given
model = TestModel.with_control_wells(
{'001': ['A1', 'A2'], '002': ['A4']},
None,
{'001': ['C1', 'C2'], '002': ['C4']})
# when
actual = model.control_mask
# then
self.assertListEqual(self.get_mask(0, 1), list(actual[0, 0, 0]))
self.assertListEqual(self.get_mask(3), list(actual[0, 0, 1]))
self.assertListEqual(self.get_mask(), list(actual[1, 0, 0]))
self.assertListEqual(self.get_mask(), list(actual[1, 0, 1]))
self.assertListEqual(self.get_mask(24, 25), list(actual[2, 0, 0]))
self.assertListEqual(self.get_mask(27), list(actual[2, 0, 1]))
def test_local_control_representative_values(self):
# given
model = Model({
'iterations': [
{
'control': {
'001': ['A1', 'A2'],
'002': ['A4']
},
'spreadsheets': [
{
'filename': '',
'microplates': {
'001': {
'values': []
},
'002': {
'values': []
}
}
}
]
},
{
'spreadsheets': [
{
'filename': '',
'microplates': {}
}
]
},
{
'control': {
'001': ['C1','C2'],
'002': ['C4']
},
'spreadsheets': [
{
'filename': '',
'microplates': {
'001': {
'values': []
},
'002': {
'values': []
}
}
},
{
'filename': '',
'microplates': {
'001': {
'values': []
},
'002': {
'values': []
}
},
'control': {
'001': ['D12', 'H5'],
'002': ['F12', 'G6']
}
}
]
}
]
})
# when
actual = model.control_mask
# then
self.assertListEqual(self.get_mask(0, 1), list(actual[0, 0, 0]))
self.assertListEqual(self.get_mask(3), list(actual[0, 0, 1]))
self.assertListEqual(self.get_mask(), list(actual[1, 0, 0]))
self.assertListEqual(self.get_mask(), list(actual[1, 0, 1]))
self.assertListEqual(self.get_mask(24, 25), list(actual[2, 0, 0]))
self.assertListEqual(self.get_mask(27), list(actual[2, 0, 1]))
self.assertListEqual(self.get_mask(24, 25, 47, 88), list(actual[2, 1, 0]))
self.assertListEqual(self.get_mask(27, 71, 77), list(actual[2, 1, 1]))
def test_api_equivalence(self):
# given
model = TestModel.with_control_wells(
{'001': ['A1', 'A2'], '002': ['A4']},
None,
{'001': ['C1', 'C2'], '002': ['C4']})
# when
copy1 = model.control_mask(iteration=2, spreadsheet=0, microplate='001')
copy2 = model.control_mask[2, 0, model.microplate_names().index('001')]
# then
self.assertListEqual(list(copy1), list(copy2))
def get_mask(self, *args):
mask = [False] * 96
for i in args:
mask[i] = True
return mask
class TestSideEffect(unittest.TestCase):
def test_no_side_effect_when_padding_missing_spreadsheets(self):
# given
json_data = {
'iterations': [
{
'spreadsheets': [
{
'filename': 'iteration1/spreadsheet1.xls',
'microplates': {
'001': {
'values': [random() for i in xrange(96)]
},
'002': {
'values': [random() for i in xrange(96)]
}
}
},
{
'filename': 'iteration1/spreadsheet2.xls',
'microplates': {
'001': {
'values': [random() for i in xrange(96)]
},
'002': {
'values': [random() for i in xrange(96)]
}
}
}
]
},
{
'spreadsheets': [
{
'filename': 'iteration2/spreadsheet1.xls',
'microplates': {
'001': {
'values': [random() for i in xrange(96)]
},
'002': {
'values': [random() for i in xrange(96)]
}
}
}
]
}
]
}
# when
model = Model(json_data)
# then
self.assertFalse(json_data is model.json_data)
def num_spreadsheets(data, i):
return len(data['iterations'][i]['spreadsheets'])
self.assertEqual(num_spreadsheets(json_data, 0),
num_spreadsheets(model.json_data, 0))
self.assertNotEqual(num_spreadsheets(json_data, 1),
num_spreadsheets(model.json_data, 1))
class TestModel(object):
"""Factory for microanalyst.model.Model instances."""
@staticmethod
def empty():
return Model({'iterations': []})
@staticmethod
def with_filenames(*args):
"""Model with two iterations (2 spreadsheets and 1 spreadsheet):
>>> TestModel.with_filenames(['foo', 'bar'], ['baz'])
"""
iterations = []
for filenames in args:
iterations.append({
'spreadsheets': [
{'filename': x, 'microplates': {}} for x in filenames
]
})
return Model({'iterations': iterations})
@staticmethod
def with_microplates(*args):
"""Model with two iterations and two spreadsheets each:
>>> TestModel.with_microplates(
>>> [['001', '002'], ['001', '002']],
>>> [['003'], ['004']])
"""
iterations = []
for microplates in args:
iterations.append({
'spreadsheets': [
{
'filename': '',
'microplates': {y: {'values': []} for y in x}
} for x in microplates
]
})
return Model({'iterations': iterations})
@staticmethod
def with_genes(genes):
"""Model with one gene on well A1 of microplate 001:
>>> TestModel.with_genes({'001':{'A1':'foobar'}})
"""
return Model({'genes': genes, 'iterations': []})
@staticmethod
def with_random_values(*args):
"""Model with two iterations and two spreadsheets each:
>>> TestModel.with_random_values(
>>> [['001', '002'], ['001', '002']],
>>> [['003'], ['004']])
"""
def random_values():
return {'values': [random() for i in xrange(96)]}
iterations = []
for i, microplates in enumerate(args):
iterations.append({
'spreadsheets': [
{
'filename': 'iteration%d/spreadsheet%d.xls' % (i+1, j+1),
'microplates': {y: random_values() for y in x}
} for j, x in enumerate(microplates)
]
})
return Model({'iterations': iterations})
@staticmethod
def with_control_wells(*args):
"""Model with three iterations (one spreadsheet each):
>>> TestModel.with_control_wells(
{'001': ['A1', 'A2'], '002': ['A4']},
None,
{'001': ['C1', 'C2'], '002': ['C4']})
"""
iterations = []
for control_wells in args:
if not control_wells:
iterations.append({'spreadsheets': []})
else:
spreadsheets = [{
'filename': '',
'microplates': {
microplate: {
'values': []
} for microplate in control_wells
}
}]
iterations.append({
'control': control_wells,
'spreadsheets': spreadsheets
})
return Model({'iterations': iterations})
| test_representative_value_for_a_spreadsheet_filter |
httputils.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httputils
import (
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/fatih/color"
"github.com/moul/http2curl"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/gotypes"
"yunion.io/x/pkg/trace"
"yunion.io/x/onecloud/pkg/appctx"
)
type THttpMethod string
const (
USER_AGENT = "yunioncloud-go/201708"
GET = THttpMethod("GET")
HEAD = THttpMethod("HEAD")
POST = THttpMethod("POST")
PUT = THttpMethod("PUT")
PATCH = THttpMethod("PATCH")
DELETE = THttpMethod("DELETE")
OPTION = THttpMethod("OPTION")
)
var (
red = color.New(color.FgRed, color.Bold).PrintlnFunc()
green = color.New(color.FgGreen, color.Bold).PrintlnFunc()
yellow = color.New(color.FgYellow, color.Bold).PrintlnFunc()
cyan = color.New(color.FgHiCyan, color.Bold).PrintlnFunc()
)
type Error struct {
Id string
Fields []string
}
type JSONClientError struct {
Code int
Class string
Details string
Data Error
}
type JSONClientErrorMsg struct {
Error *JSONClientError
}
func (e *JSONClientError) Error() string {
errMsg := JSONClientErrorMsg{Error: e}
return jsonutils.Marshal(errMsg).String()
}
func ErrorCode(err error) int {
if err == nil {
return 0
}
switch je := err.(type) {
case *JSONClientError:
return je.Code
}
return -1
}
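// headerExists reports whether key is present in header, comparing header names case-insensitively.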
func headerExists(header *http.Header, key string) bool {
keyu := strings.ToUpper(key)
for k := range *header {
if strings.ToUpper(k) == keyu {
return true
}
}
return false
}
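// GetAddrPort parses urlStr and returns its host and port, defaulting to 80 for http and 443 for https when no port is given.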
func GetAddrPort(urlStr string) (string, int, error) {
parts, err := url.Parse(urlStr)
if err != nil {
return "", 0, err
}
host := parts.Host
commaPos := strings.IndexByte(host, ':')
if commaPos > 0 {
port, err := strconv.ParseInt(host[commaPos+1:], 10, 32)
if err != nil {
return "", 0, err
} else {
return host[:commaPos], int(port), nil
}
} else {
switch parts.Scheme {
case "http":
return parts.Host, 80, nil
case "https":
return parts.Host, 443, nil
default:
return "", 0, fmt.Errorf("Unknown schema %s", parts.Scheme)
}
}
}
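// GetTransport builds an *http.Transport with the given dial/keep-alive timeout; insecure skips TLS certificate verification, and compression is disabled.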
func GetTransport(insecure bool, timeout time.Duration) *http.Transport {
return &http.Transport{
DialContext: (&net.Dialer{
Timeout: timeout,
KeepAlive: timeout,
}).DialContext,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
DisableCompression: true,
}
}
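// GetClient wraps GetTransport in an *http.Client that also applies timeout as the overall request timeout.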
func GetClient(insecure bool, timeout time.Duration) *http.Client {
tr := GetTransport(insecure, timeout)
return &http.Client{
Transport: tr,
Timeout: timeout,
}
}
func GetTimeoutClient(timeout time.Duration) *http.Client {
return GetClient(true, timeout)
}
var defaultHttpClient *http.Client
func init() {
defaultHttpClient = GetClient(true, time.Second*15)
}
func GetDefaultClient() *http.Client {
return defaultHttpClient
}
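// Request sends an HTTP request. A nil client falls back to the package-level default client;
// trace headers and the X-Request-Id carried by ctx are propagated, and when debug is set the
// request (plus a curl equivalent) is printed.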
func Request(client *http.Client, ctx context.Context, method THttpMethod, urlStr string, header http.Header, body io.Reader, debug bool) (*http.Response, error) {
if client == nil {
client = defaultHttpClient
}
if header == nil {
header = http.Header{}
}
ctxData := appctx.FetchAppContextData(ctx)
var clientTrace *trace.STrace
if !ctxData.Trace.IsZero() {
addr, port, err := GetAddrPort(urlStr)
if err != nil {
return nil, err
}
clientTrace = trace.StartClientTrace(&ctxData.Trace, addr, port, ctxData.ServiceName)
clientTrace.AddClientRequestHeader(header)
}
if len(ctxData.RequestId) > 0 {
header.Set("X-Request-Id", ctxData.RequestId)
}
req, err := http.NewRequest(string(method), urlStr, body)
if err != nil {
return nil, err
}
req.Header.Add("User-Agent", USER_AGENT)
if body == nil {
req.Header.Set("Content-Length", "0")
} else {
if headerExists(&header, "Content-Length") {
clen := header.Get("Content-Length")
req.ContentLength, _ = strconv.ParseInt(clen, 10, 64)
}
}
if header != nil {
for k, v := range header {
req.Header[k] = v
}
}
if debug {
yellow("Request", method, urlStr, req.Header, body)
		// Skip the curl dump for file-upload (octet-stream) requests to avoid flooding the logs
if header.Get("Content-Type") != "application/octet-stream" {
curlCmd, _ := http2curl.GetCurlCommand(req)
cyan("CURL:", curlCmd)
}
}
resp, err := client.Do(req)
if err == nil && clientTrace != nil {
clientTrace.EndClientTraceHeader(resp.Header)
}
return resp, err
}
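// JSONRequest serializes body as JSON, sets the Content-Type header and returns the parsed
// JSON response via ParseJSONResponse.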
func JSONRequest(client *http.Client, ctx context.Context, method THttpMethod, urlStr string, header http.Header, body jsonutils.JSONObject, debug bool) (http.Header, jsonutils.JSONObject, error) {
bodystr := ""
if !gotypes.IsNil(body) {
bodystr = body.String()
}
jbody := strings.NewReader(bodystr)
if header == nil {
header = http.Header{}
}
header.Add("Content-Type", "application/json")
resp, err := Request(client, ctx, method, urlStr, header, jbody, debug)
return ParseJSONResponse(resp, err, debug)
}
// CloseResponse closes a non-nil response and drains any data remaining in its Body.
// It is a convenient wrapper so callers do not leave unread data on the response body.
//
// Subsequently this allows the golang http RoundTripper
// to re-use the same connection for future requests.
func CloseResponse(resp *http.Response) {
// Callers should close resp.Body when done reading from it.
// If resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
if resp != nil && resp.Body != nil {
// Drain any remaining Body and then close the connection.
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
}
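// ParseJSONResponse drains and closes the response, parses the body as JSON and maps
// non-2xx statuses (including redirects) to *JSONClientError values.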
func ParseJSONResponse(resp *http.Response, err error, debug bool) (http.Header, jsonutils.JSONObject, error) {
if err != nil {
ce := JSONClientError{}
ce.Code = 499
ce.Details = err.Error()
return nil, nil, &ce
}
defer CloseResponse(resp)
if debug {
if resp.StatusCode < 300 {
green("Status:", resp.StatusCode)
green(resp.Header)
} else if resp.StatusCode < 400 { | red(resp.Header)
}
}
rbody, err := ioutil.ReadAll(resp.Body)
if debug {
fmt.Fprintf(os.Stderr, "Response body: %s\n", string(rbody))
}
if err != nil {
return nil, nil, fmt.Errorf("Fail to read body: %s", err)
}
var jrbody jsonutils.JSONObject = nil
if len(rbody) > 0 {
var err error
jrbody, err = jsonutils.Parse(rbody)
if err != nil && debug {
fmt.Fprintf(os.Stderr, "parsing json failed: %s", err)
}
}
if resp.StatusCode < 300 {
return resp.Header, jrbody, nil
} else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
ce := JSONClientError{}
ce.Code = resp.StatusCode
ce.Details = resp.Header.Get("Location")
ce.Class = "redirect"
return nil, nil, &ce
} else {
ce := JSONClientError{}
if jrbody == nil {
ce.Code = resp.StatusCode
ce.Details = resp.Status
return nil, nil, &ce
}
err = jrbody.Unmarshal(&ce)
if len(ce.Class) > 0 && ce.Code >= 400 && len(ce.Details) > 0 {
return nil, nil, &ce
}
jrbody1, err := jrbody.GetMap()
if err != nil {
err = jrbody.Unmarshal(&ce)
if err != nil {
ce.Details = err.Error()
}
return nil, nil, &ce
}
var jrbody2 jsonutils.JSONObject
if len(jrbody1) > 1 {
jrbody2 = jsonutils.Marshal(jrbody1)
} else {
for _, v := range jrbody1 {
jrbody2 = v
}
}
if ecode, _ := jrbody2.GetString("code"); len(ecode) > 0 {
code, err := strconv.Atoi(ecode)
if err != nil {
ce.Class = ecode
} else {
ce.Code = code
}
}
if ce.Code == 0 {
ce.Code = resp.StatusCode
}
if edetail := jsonutils.GetAnyString(jrbody2, []string{"message", "detail", "details", "error_msg"}); len(edetail) > 0 {
ce.Details = edetail
}
if eclass := jsonutils.GetAnyString(jrbody2, []string{"title", "type", "error_code"}); len(eclass) > 0 {
ce.Class = eclass
}
return nil, nil, &ce
}
} | yellow("Status:", resp.StatusCode)
yellow(resp.Header)
} else {
red("Status:", resp.StatusCode) |
run_omprace_small.py | #!/usr/bin/python
import sys, string, os, popen2, shutil, platform, subprocess, pprint, time
import util, commands, csv
from math import sqrt
#clean up the src
do_clean = True
#build the src
do_build = True
#clean, build, and run the benchmarks
do_run = True
#collect data to plot
#do_collect_data = True
if do_clean and not do_build:
print "Clean - true and build - false not allowed"
exit(0)
configs = []
entry = { "NAME" : "RUN_ALL_BENCHMARKS",
"NUM_RUNS" : 1,
"CLEAN_LINE" : " make clean ",
"BUILD_LINE" : " make ",
"BUILD_ARCH" : "x86_64",
"RUN_ARCH" : "x86_64",
"RUN_LINE" : '/usr/bin/time -f "%E" ./',
#"RUN_LINE" : 'time ./',
"ARGS" : "",
}
configs.append(entry)
ref_cwd = os.getcwd()
arch = platform.machine()
full_hostname = platform.node()
hostname=full_hostname
bench_name="MIS"
benchmarks=[
"ndMIS"
]
inner_data_folder=[
"graphData/data"
]
input_file=[
"randLocalGraph_J_5_2500000"
]
executable=[
"MIS.openmp.dynamic",
"MIS.omprn",
"MIS.ompp.dynamic",
]
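# Timings are appended in the order of the executable list above, so they line up with the
# 'baseline openmp(s)', 'omprace no_inst(s)' and 'omprace(s)' columns written to omprace.csv below.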
inputs=[
"-r 1 -o /tmp/ofile470293_748866 ../graphData/data/randLocalGraph_J_5_2500000"
] |
if __name__ == "__main__":
with open('omprace.csv', 'wb') as csvfile:
res_writer = csv.writer(csvfile, delimiter=',')
res_writer.writerow(['test name', 'baseline openmp(s)', 'omprace no_inst(s)', 'omprace(s)', 'overhead ospg', 'overhead datarace', 'num violations'])
for config in configs:
util.log_heading(config["NAME"], character="-")
row = []
row.append(bench_name[0])
num_violations = -1
print('input file folder: ' + inner_data_folder[0])
data_input = inner_data_folder[0]+'/'+input_file[0]
print('checking if input data exists at:' + data_input)
if not os.path.exists(data_input):
print("input data doesn't exist. building input data")
util.chdir(ref_cwd + "/" + inner_data_folder[0])
build_data = config["BUILD_LINE"] + " " + input_file[0]
util.run_command(build_data, verbose=True)
util.chdir(ref_cwd)
else:
print("input data exists")
for b_index in range(len(executable)):
util.chdir(ref_cwd)
for i in range(0, config["NUM_RUNS"]):
try:
util.chdir(ref_cwd + "/" + benchmarks[0] )
util.log_heading(benchmarks[0], character="=")
try:
clean_string = config["CLEAN_LINE"]
util.run_command(clean_string, verbose=True)
except:
print "Clean failed"
build_bench_string = config["BUILD_LINE"]
util.run_command(build_bench_string, verbose=True)
util.log_heading("running: " + benchmarks[0], character="=")
run_string = config["RUN_LINE"] + executable[b_index] + " " + inputs[0]
#running applications
if b_index == 0:#warm up openmp run
util.run_command(run_string, verbose=True)
output_string = util.run_command(run_string, verbose=True)
output_lines = output_string.split('\n')
if b_index == len(executable)-1:
for output_line in output_lines:
if output_line.startswith("Number of violations ="):
num_violations=int(output_line[output_line.index('=')+1:])
time_line = output_lines[-2] #format is hh:mm:sec
time_line = time_line.split(':')
tot_secs = 0.0
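                        # %E from /usr/bin/time prints [hh:]mm:ss, so fold the fields left-to-right,
                        # scaling the running total by 60 for each colon-separated field.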
for t in time_line:
tot_secs = (tot_secs*60) + float(t)
row.append(tot_secs)
print ('total secs= ' + str(tot_secs))
except util.ExperimentError, e:
print "Error: %s" % e
print "-----------"
print "%s" % e.output
continue
#finalize row
row.append("{0:.2f}".format(row[2]/row[1]))#ospg ov
row.append("{0:.2f}".format(row[3]/row[1]))#omprace ov
row.append(num_violations)
res_writer.writerow(row)
util.chdir(ref_cwd)
print("done") | |
channel_support_registrar.go | // Code generated by counterfeiter. DO NOT EDIT.
package mock
import (
sync "sync"
broadcast "github.com/tw-bc-group/fabric-gm/orderer/common/broadcast"
common "github.com/tw-bc-group/fabric-gm/protos/common"
)
type ChannelSupportRegistrar struct {
BroadcastChannelSupportStub func(*common.Envelope) (*common.ChannelHeader, bool, broadcast.ChannelSupport, error)
broadcastChannelSupportMutex sync.RWMutex
broadcastChannelSupportArgsForCall []struct {
arg1 *common.Envelope
}
broadcastChannelSupportReturns struct {
result1 *common.ChannelHeader
result2 bool
result3 broadcast.ChannelSupport
result4 error
}
broadcastChannelSupportReturnsOnCall map[int]struct {
result1 *common.ChannelHeader
result2 bool
result3 broadcast.ChannelSupport
result4 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *ChannelSupportRegistrar) BroadcastChannelSupport(arg1 *common.Envelope) (*common.ChannelHeader, bool, broadcast.ChannelSupport, error) {
fake.broadcastChannelSupportMutex.Lock()
ret, specificReturn := fake.broadcastChannelSupportReturnsOnCall[len(fake.broadcastChannelSupportArgsForCall)]
fake.broadcastChannelSupportArgsForCall = append(fake.broadcastChannelSupportArgsForCall, struct {
arg1 *common.Envelope
}{arg1})
fake.recordInvocation("BroadcastChannelSupport", []interface{}{arg1})
fake.broadcastChannelSupportMutex.Unlock()
if fake.BroadcastChannelSupportStub != nil {
return fake.BroadcastChannelSupportStub(arg1)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3, ret.result4
}
fakeReturns := fake.broadcastChannelSupportReturns
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3, fakeReturns.result4
}
func (fake *ChannelSupportRegistrar) BroadcastChannelSupportCallCount() int {
fake.broadcastChannelSupportMutex.RLock()
defer fake.broadcastChannelSupportMutex.RUnlock()
return len(fake.broadcastChannelSupportArgsForCall)
}
func (fake *ChannelSupportRegistrar) BroadcastChannelSupportCalls(stub func(*common.Envelope) (*common.ChannelHeader, bool, broadcast.ChannelSupport, error)) {
fake.broadcastChannelSupportMutex.Lock()
defer fake.broadcastChannelSupportMutex.Unlock()
fake.BroadcastChannelSupportStub = stub
}
func (fake *ChannelSupportRegistrar) BroadcastChannelSupportArgsForCall(i int) *common.Envelope {
fake.broadcastChannelSupportMutex.RLock()
defer fake.broadcastChannelSupportMutex.RUnlock()
argsForCall := fake.broadcastChannelSupportArgsForCall[i]
return argsForCall.arg1
}
func (fake *ChannelSupportRegistrar) BroadcastChannelSupportReturns(result1 *common.ChannelHeader, result2 bool, result3 broadcast.ChannelSupport, result4 error) {
fake.broadcastChannelSupportMutex.Lock()
defer fake.broadcastChannelSupportMutex.Unlock()
fake.BroadcastChannelSupportStub = nil
fake.broadcastChannelSupportReturns = struct {
result1 *common.ChannelHeader
result2 bool
result3 broadcast.ChannelSupport
result4 error
}{result1, result2, result3, result4}
}
func (fake *ChannelSupportRegistrar) BroadcastChannelSupportReturnsOnCall(i int, result1 *common.ChannelHeader, result2 bool, result3 broadcast.ChannelSupport, result4 error) {
fake.broadcastChannelSupportMutex.Lock()
defer fake.broadcastChannelSupportMutex.Unlock()
fake.BroadcastChannelSupportStub = nil
if fake.broadcastChannelSupportReturnsOnCall == nil {
fake.broadcastChannelSupportReturnsOnCall = make(map[int]struct {
result1 *common.ChannelHeader
result2 bool
result3 broadcast.ChannelSupport
result4 error
})
}
fake.broadcastChannelSupportReturnsOnCall[i] = struct {
result1 *common.ChannelHeader
result2 bool
result3 broadcast.ChannelSupport
result4 error
}{result1, result2, result3, result4}
}
func (fake *ChannelSupportRegistrar) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.broadcastChannelSupportMutex.RLock()
defer fake.broadcastChannelSupportMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *ChannelSupportRegistrar) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil |
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ broadcast.ChannelSupportRegistrar = new(ChannelSupportRegistrar)
| {
fake.invocations = map[string][][]interface{}{}
} |
types.rs | use byte_unit::Byte;
use std::time::Duration;
pub struct QueryResult {
pub query_execution_id: String,
pub data: Vec<Vec<String>>,
pub data_scanned_bytes: i64,
// query_execution_time_ms: i64,
// query_planning_time_ms: i64,
pub query_queue_time_ms: i64,
pub rows: i64,
pub columns: Vec<String>,
pub total_execution_time_ms: i64
}
impl QueryResult {
pub fn append_row(&mut self, row: Vec<String>) {
self.data.push(row);
self.rows += 1;
}
pub fn | (&self) -> String {
let scanned = Byte::from_bytes(self.data_scanned_bytes as u128);
let adjusted_byte = scanned.get_appropriate_unit(false);
adjusted_byte.to_string()
}
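    /// Total execution time rendered as a human-readable duration (via `humantime`).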
pub fn total_time(&self) -> String {
let time = Duration::from_millis(self.total_execution_time_ms as u64);
humantime::format_duration(time).to_string()
}
} | data_scanned |
user.py | import hashlib
import base64
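# SHA-256 hash and base64-encode an email address so it can be logged without exposing the raw value.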
def hash_email(email):
m = hashlib.sha256()
m.update(email.encode('utf-8'))
return base64.urlsafe_b64encode(m.digest())
def user_logging_string(user):
if user.is_anonymous:
return 'User(anonymous)'
return 'User(id={}, role={}, hashed_email={})'.format(user.id, user.role, hash_email(user.email_address))
def user_has_role(user, role):
try:
return user['users']['role'] == role
except (KeyError, TypeError):
return False
class User():
def __init__(self, user_id, email_address, supplier_code, supplier_name,
locked, active, name, role):
self.id = user_id
self.email_address = email_address
self.name = name
self.role = role
self.supplier_code = supplier_code
self.supplier_name = supplier_name
self.locked = locked
self.active = active
@property
def is_authenticated(self):
|
@property
def is_active(self):
return self.active and not self.locked
@property
def is_locked(self):
return self.locked
@property
def is_anonymous(self):
return False
def has_role(self, role):
return self.role == role
def has_any_role(self, *roles):
return any(self.has_role(role) for role in roles)
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def serialize(self):
return {
'id': self.id,
'name': self.name,
'emailAddress': self.email_address,
'supplierCode': self.supplier_code,
'supplierName': self.supplier_name,
'locked': self.locked,
}
@staticmethod
def from_json(user_json):
user = user_json["users"]
supplier_code = None
supplier_name = None
if "supplier" in user:
supplier_code = user["supplier"]["supplierCode"]
supplier_name = user["supplier"]["name"]
return User(
user_id=user["id"],
email_address=user['emailAddress'],
supplier_code=supplier_code,
supplier_name=supplier_name,
locked=user.get('locked', False),
active=user.get('active', True),
name=user['name'],
role=user['role']
)
@staticmethod
def load_user(data_api_client, user_id):
"""Load a user from the API and hydrate the User model"""
user_json = data_api_client.get_user(user_id=int(user_id))
if user_json:
user = User.from_json(user_json)
if user.is_active:
return user
| return self.is_active |
metrics.py | import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
def evaluate(clf, x_train, x_test, y_train, y_test, name, training_data_name, embedding, params=None):
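    # Score the fitted classifier on both the training and test splits; weighted F1 keeps the
    # score meaningful for multi-class labels.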
predictions = clf.predict(x_train)
# train_tn, train_fp, train_fn, train_tp = confusion_matrix(y_train, predictions).ravel()
train_accuracy = accuracy_score(y_train, predictions)
# train_precision = precision_score(y_train, predictions)
# train_recall = recall_score(y_train, predictions)
train_f1_score = f1_score(y_train, predictions, average='weighted')
predictions = clf.predict(x_test)
# test_tn, test_fp, test_fn, test_tp = confusion_matrix(y_test, predictions).ravel()
test_accuracy = accuracy_score(y_test, predictions)
# test_precision = precision_score(y_test, predictions)
# test_recall = recall_score(y_test, predictions)
test_f1_score = f1_score(y_test, predictions, average='weighted')
result_dict = {
'name': [name],
'embedding': [embedding],
'params': [params],
'training_data_name': [training_data_name],
# 'train_true_negative': [train_tn],
# 'train_false_positive': [train_fp],
# 'train_false_negative': [train_fn],
# 'train_true_positive': [train_tp],
'train_accuracy': [train_accuracy],
# 'train_precision': [train_precision],
# 'train_recall': [train_recall],
'train_f1_score': [train_f1_score],
# 'test_true_negative': [test_tn],
# 'test_false_positive': [test_fp],
# 'test_false_negative': [test_fn],
# 'test_true_positive': [test_tp],
'test_accuracy': [test_accuracy],
# 'test_precision': [test_precision],
# 'test_recall': [test_recall], | 'test_f1_score': [test_f1_score],
}
return pd.DataFrame(result_dict) | |
extension.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configmodels
// Extension is the configuration of a service extension. Specific extensions
// must implement this interface and will typically embed ExtensionSettings
// struct or a struct that extends it.
type Extension interface {
NamedEntity
}
// Extensions is a map of names to extensions.
type Extensions map[string]Extension
// ExtensionSettings defines common settings for a service extension configuration.
// Specific extensions can embed this struct and extend it with more fields if needed.
type ExtensionSettings struct {
TypeVal Type `mapstructure:"-"`
NameVal string `mapstructure:"-"`
}
var _ Extension = (*ExtensionSettings)(nil)
// Name gets the extension name.
func (ext *ExtensionSettings) Name() string {
return ext.NameVal
}
// SetName sets the extension name. | }
// Type gets the extension type.
func (ext *ExtensionSettings) Type() Type {
return ext.TypeVal
} | func (ext *ExtensionSettings) SetName(name string) {
ext.NameVal = name |
mod.rs | use std::error;
use std::fmt;
use std::io::{Error, Write};
use std::path;
use std::process::{Command, Stdio};
use std::str;
use std::string::*;
use chrono::Local;
use self::parser::*;
use super::notify::notify;
#[macro_use]
pub mod parser;
#[derive(Debug)]
pub enum GifsyError {
NoRepoitory,
IoError(Error),
ParserError(String),
CmdFail(i32, String),
}
impl fmt::Display for GifsyError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self
{
GifsyError::CmdFail(code, ref out) =>
{
write!(f, "git command failed {} ({})", out, code)
}
GifsyError::NoRepoitory => write!(f, "the path is not a git repository"),
GifsyError::IoError(ref e) => write!(f, "io error {}", e),
GifsyError::ParserError(..) => write!(f, "parser error"),
}
}
}
impl error::Error for GifsyError {
fn description(&self) -> &str {
match *self
{
GifsyError::CmdFail(..) => "the git command couldn't be executed",
GifsyError::NoRepoitory => "The path used is not a git repository with a working tree",
GifsyError::IoError(ref e) => e.description(),
GifsyError::ParserError(..) => "parser error",
}
}
}
pub struct Repository {
path: String,
name: String,
}
impl Repository {
pub fn from(path: &str, name: &str) -> Result<Repository, GifsyError> {
let repository_path = path::PathBuf::from(path);
if repository_path.as_path().is_dir()
{
Ok(Repository {
path: path.to_owned(),
name: name.to_owned(),
})
}
else
{
Err(GifsyError::NoRepoitory)
}
}
pub fn name(&self) -> String {
return self.name.clone();
}
pub fn status<'a>(&self) -> Result<Vec<Box<Status>>, GifsyError> {
match Command::new("git")
.current_dir(&self.path)
.arg("status")
.arg("--porcelain")
.arg("-z")
.output()
{
Err(e) => Err(GifsyError::IoError(e)),
Ok(output) =>
{
if output.status.success()
{
let rest = String::from_utf8_lossy(&output.stdout);
let p = parsers![parse_index, parse_tree, parse_from, parse_to];
match parse::<Vec<&Status>>(&rest, p)
{
Err(e) => Err(GifsyError::ParserError(e.to_string())),
Ok(status) => Ok(status),
}
}
else
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
String::from_utf8_lossy(&output.stderr).to_string(),
))
}
}
}
}
pub fn add<'a>(&self, status: Vec<Box<Status>>) -> Result<Vec<Box<Status>>, GifsyError> {
let mut rc = Vec::new();
for s in &status
{
if s.is_unmerged()
{
warn!("unmerged file {}", s);
let msg = format!("File {}", s.file());
notify("GIt FileSYncronization needs attension", &msg).unwrap_or(());
continue;
}
let to_file = s.file();
debug!("Status: {:?}", s);
if s.index == 'D'
{
continue;
};
//let msg = format!("{} modified", to_file);
//notify("gifsy sync", &msg).unwrap_or(());
let output = Command::new("git")
.current_dir(&self.path)
.arg("add")
.arg(&to_file)
.output()
.expect("can't execute git status");
if !output.status.success()
{
return Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't add {} ({})",
&to_file,
String::from_utf8_lossy(&output.stderr)
),
));
}
rc.push(s.clone());
}
Ok(rc)
}
pub fn commit<'a>(&self, status: Vec<Box<Status>>) -> Result<(), GifsyError> {
let process = match Command::new("git")
.current_dir(&self.path)
.arg("commit")
.arg("--file")
.arg("-")
.stdin(Stdio::piped())
.spawn()
{
Err(e) => return Err(GifsyError::IoError(e)),
Ok(process) => process,
};
let msg = create_commit_message(&status, &self.name).unwrap();
match process.stdin.unwrap().write_all(msg.as_bytes())
{
Err(e) => return Err(GifsyError::IoError(e)),
Ok(_) =>
{
let mut msg = String::from("the following files have been changed:\n\n");
if status.len() > 0
{
for s in &status
{
let f = format!(" {}\n", &s.file());
msg += &f;
}
notify("GIt FileSYncronization Files Modified", &msg).unwrap_or(());
}
Ok({})
}
}
}
pub fn pull<'a>(&self) -> Result<(), GifsyError> {
let output = Command::new("git")
.current_dir(&self.path)
.arg("pull")
.arg("origin")
.output()
.expect("can't execute git status");
debug!(
"pull output stdout: {}",
String::from_utf8_lossy(&output.stdout)
);
debug!(
"pull output stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
debug!("pull status: {}", output.status);
if output.status.success()
{
match output.status.code()
{
Some(rc) =>
{
if rc != 0
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't push: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
else
{
Ok(())
}
}
None => Ok(()),
}
}
else
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't pull: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
}
pub fn push<'a>(&self) -> Result<(), GifsyError> {
let output = Command::new("git")
.current_dir(&self.path)
.arg("push")
.arg("origin")
.output()
.expect("can't execute git status");
debug!(
"push output stdout: {}",
String::from_utf8_lossy(&output.stdout)
);
debug!(
"push output stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
debug!("push status: {}", output.status);
if output.status.success()
{
match output.status.code()
{
Some(rc) =>
{
if rc != 0
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't push: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
else
{
Ok(())
}
}
None => Ok(()),
}
}
else
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't push: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
}
pub fn submodules_init<'a>(&self) -> Result<(), GifsyError> {
let output = Command::new("git")
.current_dir(&self.path)
.arg("submodule")
.arg("init")
.output()
.expect("can't execute git status");
if output.status.success()
{
match output.status.code()
{
Some(rc) =>
{
if rc != 0
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't init submodules: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
else
{
Ok(())
}
}
None => Ok(()),
}
}
else
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't init submodules: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
}
pub fn submodules_update<'a>(&self) -> Result<(), GifsyError> {
let output = Command::new("git")
.current_dir(&self.path)
.arg("submodule")
.arg("update")
.output()
.expect("can't execute git status");
if output.status.success()
{
match output.status.code()
{
Some(rc) =>
{
if rc != 0
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't update submodules: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
else
{
Ok(())
}
}
None => Ok(()),
}
}
else
{
Err(GifsyError::CmdFail(
output.status.code().unwrap(),
format!(
"can't update submodules: {} err: {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
),
))
}
}
}
#[derive(Clone)]
pub struct Status {
index: char,
tree: char,
from_file: String,
to_file: String,
}
impl Status {
pub fn is_unmerged(&self) -> bool {
self.index == 'U' || self.tree == 'U'
}
pub fn file(&self) -> String {
if self.to_file == ""
{
debug!("form file ({})", self.to_file.len());
self.from_file.clone()
}
else
{
debug!("from file");
self.to_file.clone()
}
}
}
impl fmt::Debug for Status {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.to_file == ""
{
write!(f, "{}{} {}", self.index, self.tree, self.from_file)
}
else
{
write!(
f,
"{}{} {} -> {}",
self.index, self.tree, self.from_file, self.to_file
)
}
}
}
impl fmt::Display for Status {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.to_file == ""
{
write!(f, " {} {}", encode_status_flag(self.index), self.from_file)
}
else
{
write!(
f,
" {} {} -> {}",
encode_status_flag(self.index),
self.from_file,
self.to_file
)
} | status: &Vec<Box<Status>>,
name: &str,
) -> Result<String, FromUtf8Error> {
let mut commitmsg = Vec::new();
writeln!(
&mut commitmsg,
"changes on {} at {}\n",
name,
Local::now().to_rfc2822()
)
.unwrap();
for s in status
{
writeln!(&mut commitmsg, "{}", s).unwrap();
}
String::from_utf8(commitmsg)
}
fn encode_status_flag(flag: char) -> char {
match flag
{
'M' => '~',
'A' => '+',
'D' => '-',
'R' => '>',
'U' => '!',
'?' => '?',
' ' => ' ',
_ => '•',
}
} | }
}
pub fn create_commit_message( |
pathways-stylesheet.js | const cytoscape = require('cytoscape');
const sbgnStyleSheet = require('cytoscape-sbgn-stylesheet');
const { MATCHED_SEARCH_CLASS } = require('./actions');
module.exports = sbgnStyleSheet(cytoscape)
.selector('node')
.css({
'background-opacity': '0.4'
})
.selector('node:active')
.css({
'background-opacity': '0.7',
})
.selector('node[class!="compartment"]')
.css({
'font-size': 20,
'color': 'black',
'text-outline-color': 'white',
'text-outline-width': 2,
'text-outline-opacity': 0.5,
'text-wrap': 'wrap',
'text-max-width': 175,
'label': node => {
const label = node.data('label')
.split('(').join('').split(')').join('')
.split(':').join(' ');
return label;
}
})
.selector('node[class="complex"]')
.css({
'width': 45,
'height': 45, | })
.selector('.compoundcollapse-collapsed-node')
.css({
'font-size': 20,
'text-max-width': 175
})
.selector('edge')
.css({
'opacity': 0.3
})
.selector('node[class="and"],node[class="or"],node[class="not"]')
.css({
'label':node=>node.data('class')
})
.selector('.hidden')
.css({
'display':'none',
})
.selector(`.${MATCHED_SEARCH_CLASS}`)
.css({
'border-width': 8,
'border-color': 'red'
}); | 'label': node => node.isParent() ? '' : node.data('label') |
mod.rs | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2022 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code for the ESFTree, the tree used for the ESF Views.
It's similar to the PackTree, but modified for the requirements of the ESF files.
!*/
use qt_widgets::QTreeView;
use qt_widgets::q_header_view::ResizeMode;
use qt_gui::QStandardItem;
use qt_gui::QStandardItemModel;
use qt_gui::QListOfQStandardItem;
use qt_core::QModelIndex;
use qt_core::QVariant;
use qt_core::QBox;
use qt_core::ItemFlag;
use qt_core::QFlags;
use qt_core::QSortFilterProxyModel;
use qt_core::QString;
use qt_core::QPtr;
use cpp_core::CppBox;
use cpp_core::Ptr;
use cpp_core::Ref;
use rpfm_lib::packedfile::esf::RecordNodeFlags;
use rpfm_lib::packedfile::esf::{ESF, NodeType};
const ESF_DATA: i32 = 40;
const CHILDLESS_NODE: i32 = 41;
const CHILD_NODES: i32 = 42;
const RECORD_NODE_NAME: i32 = 43;
//-------------------------------------------------------------------------------//
// Enums & Structs (and trait)
//-------------------------------------------------------------------------------//
/// This trait adds multiple util functions to the `TreeView` you implement it for.
///
/// Keep in mind that this trait has been created with `ESF TreeViews` in mind, so its methods
/// may not be suitable for all purposes.
pub(crate) trait ESFTree {
/// This function gives you the items selected in the provided `TreeView`.
unsafe fn get_items_from_selection(&self, has_filter: bool) -> Vec<Ptr<QStandardItem>>;
/// This function generates an ESF file from the contents of the `TreeView`.
unsafe fn get_esf_from_view(&self, has_filter: bool) -> ESF;
/// This function gives you the data contained within a CHILD_NODES variant of the provided item.
unsafe fn get_child_nodes_from_item(item: &QStandardItem) -> String;
/// This function is used to get the path of a specific Item in a StandardItemModel.
unsafe fn get_path_from_item(item: Ptr<QStandardItem>, model: &QPtr<QStandardItemModel>) -> Vec<String>;
/// This function is used to get the path of a specific ModelIndex in a StandardItemModel.
unsafe fn get_path_from_index(index: Ref<QModelIndex>, model: &QPtr<QStandardItemModel>) -> Vec<String>;
/// This function gives you the item corresponding to an specific path.
unsafe fn get_item_from_path(path: &[String], model: &QPtr<QStandardItemModel>) -> Ptr<QStandardItem>;
/// This function takes care of EVERY operation that manipulates the provided TreeView.
/// It does one thing or another, depending on the operation we provide it.
unsafe fn update_treeview(&self, has_filter: bool, operation: ESFTreeViewOperation);
}
/// This enum has the different possible operations we can do in a `TreeView`.
#[derive(Clone, Debug)]
pub enum ESFTreeViewOperation {
/// Build the entire `TreeView` from the provided ESF data.
Build(ESF),
}
//-------------------------------------------------------------------------------//
// Implementations of `ESFTree`
//-------------------------------------------------------------------------------//
/// Implementation of `ESFTree` for `QPtr<QTreeView>.
impl ESFTree for QBox<QTreeView> {
unsafe fn ge | self, has_filter: bool) -> Vec<Ptr<QStandardItem>> {
let filter: Option<QPtr<QSortFilterProxyModel>> = if has_filter { Some(self.model().static_downcast()) } else { None };
let model: QPtr<QStandardItemModel> = if let Some(ref filter) = filter { filter.source_model().static_downcast() } else { self.model().static_downcast()};
let indexes_visual = self.selection_model().selection().indexes();
let mut indexes_visual = (0..indexes_visual.count_0a()).rev().map(|x| indexes_visual.take_at(x)).collect::<Vec<CppBox<QModelIndex>>>();
indexes_visual.reverse();
let indexes_real = if let Some(filter) = filter {
indexes_visual.iter().map(|x| filter.map_to_source(x.as_ref())).collect::<Vec<CppBox<QModelIndex>>>()
} else {
indexes_visual
};
indexes_real.iter().map(|x| model.item_from_index(x.as_ref())).collect()
}
unsafe fn get_child_nodes_from_item(item: &QStandardItem) -> String {
item.data_1a(CHILD_NODES).to_string().to_std_string()
}
unsafe fn get_path_from_item(item: Ptr<QStandardItem>, model: &QPtr<QStandardItemModel>) -> Vec<String> {
let index = item.index();
Self::get_path_from_index(index.as_ref(), model)
}
unsafe fn get_path_from_index(index: Ref<QModelIndex>, model: &QPtr<QStandardItemModel>) -> Vec<String> {
// The logic is simple: we loop from item to parent until we reach the top.
let mut path = vec![];
let mut index = index;
let mut parent;
// Loop until we reach the root index.
loop {
let text = model.data_2a(index, RECORD_NODE_NAME).to_string().to_std_string();
parent = index.parent();
// If the parent is valid, it's the new item. Otherwise, we stop without adding it (we don't want the PackFile's name in).
if parent.is_valid() {
path.push(text);
index = parent.as_ref();
} else { break; }
}
// Reverse it, as we want it from parent to children.
path.reverse();
path
}
unsafe fn get_item_from_path(path: &[String], model: &QPtr<QStandardItemModel>) -> Ptr<QStandardItem> {
// Get it another time, this time to use it to hold the current item.
let mut item = model.item_1a(0);
let mut index = 0;
let path_deep = path.len();
loop {
// If we reached the folder of the item...
let children_count = item.row_count();
if index == (path_deep - 1) {
for row in 0..children_count {
let child = item.child_1a(row);
let text = child.text().to_std_string();
if text == path[index] {
item = child;
break;
}
}
break;
}
// If we are not still in the folder of the file...
else {
// Get the amount of children of the current item and go through them until we find our folder.
let mut not_found = true;
for row in 0..children_count {
let child = item.child_1a(row);
let text = child.text().to_std_string();
if text == path[index] {
item = child;
index += 1;
not_found = false;
break;
}
}
// If the child was not found, stop and return the parent.
if not_found { break; }
}
}
item
}
unsafe fn update_treeview(&self, has_filter: bool, operation: ESFTreeViewOperation) {
let filter: Option<QPtr<QSortFilterProxyModel>> = if has_filter { Some(self.model().static_downcast()) } else { None };
let model: QPtr<QStandardItemModel> = if let Some(ref filter) = filter { filter.source_model().static_downcast() } else { self.model().static_downcast() };
// We act depending on the operation requested.
match operation {
// If we want to build a new TreeView...
ESFTreeViewOperation::Build(esf_data) => {
// First, we clean the TreeStore and whatever was created in the TreeView.
model.clear();
// Second, we set as the big_parent, the base for the folders of the TreeView, a fake folder
// with the name of the PackFile. All big things start with a lie.
let root_node = esf_data.get_ref_root_node();
if let NodeType::Record(node) = root_node {
let big_parent = QStandardItem::from_q_string(&QString::from_std_str(&node.get_ref_name()));
let state_item = QStandardItem::new();
big_parent.set_editable(false);
state_item.set_editable(false);
state_item.set_selectable(false);
let esf_data_no_node: ESF = esf_data.clone_without_root_node();
big_parent.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&serde_json::to_string_pretty(&esf_data_no_node).unwrap())), ESF_DATA);
big_parent.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&serde_json::to_string_pretty(&root_node.clone_without_children()).unwrap())), CHILDLESS_NODE);
big_parent.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&serde_json::to_string_pretty(&node.get_ref_children()[0].iter().map(|x| x.clone_without_children()).collect::<Vec<NodeType>>()).unwrap())), CHILD_NODES);
big_parent.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&node.get_ref_name())), RECORD_NODE_NAME);
let flags = ItemFlag::from(state_item.flags().to_int() & ItemFlag::ItemIsSelectable.to_int());
state_item.set_flags(QFlags::from(flags));
for node_group in node.get_ref_children() {
for node in node_group {
load_node_to_view(&big_parent, node, None);
}
}
// Delay adding the big parent as much as we can, as otherwise the signals triggered when adding a PackedFile can slow this down to a crawl.
let qlist = QListOfQStandardItem::new();
qlist.append_q_standard_item(&big_parent.into_ptr().as_mut_raw_ptr());
qlist.append_q_standard_item(&state_item.into_ptr().as_mut_raw_ptr());
model.append_row_q_list_of_q_standard_item(qlist.as_ref());
self.header().set_section_resize_mode_2a(0, ResizeMode::Stretch);
self.header().set_section_resize_mode_2a(1, ResizeMode::Interactive);
self.header().set_minimum_section_size(4);
self.header().resize_section(1, 4);
}
},
}
}
unsafe fn get_esf_from_view(&self, has_filter: bool) -> ESF {
let filter: Option<QPtr<QSortFilterProxyModel>> = if has_filter { Some(self.model().static_downcast()) } else { None };
let model: QPtr<QStandardItemModel> = if let Some(ref filter) = filter { filter.source_model().static_downcast() } else { self.model().static_downcast() };
let mut new_esf: ESF = serde_json::from_str(&model.item_1a(0).data_1a(ESF_DATA).to_string().to_std_string()).unwrap();
new_esf.set_root_node(get_node_type_from_tree_node(None, &model));
// Return the created ESF.
// TODO: check this returns the exact same ESF if there are no changes.
new_esf
}
}
/// This function takes care of recursively loading all the nodes into the `TreeView`.
unsafe fn load_node_to_view(parent: &CppBox<QStandardItem>, child: &NodeType, block_key: Option<&str>) {
if let NodeType::Record(node) = child {
// Create the node for the record.
let child_item = QStandardItem::from_q_string(&QString::from_std_str(node.get_ref_name()));
let state_item = QStandardItem::new();
child_item.set_editable(false);
state_item.set_editable(false);
state_item.set_selectable(false);
// If it has a name (it should have it), name it.
if let Some(block_key) = block_key {
child_item.set_text(&QString::from_std_str(block_key));
}
// Prepare the data in a way or another, depending if we have nested blocks or not.
if node.get_ref_record_flags().contains(RecordNodeFlags::HAS_NESTED_BLOCKS) {
for (index, node_group) in node.get_ref_children().iter().enumerate() {
let node_group_name = if node_group.len() == 2 {
if let NodeType::Ascii(ref key) = node_group[0] {
key.to_owned()
} else { format!("{}_{}", node.get_ref_name(), index) }
} else { format!("{}_{}", node.get_ref_name(), index) };
let node_group_item = QStandardItem::from_q_string(&QString::from_std_str(&node_group_name));
let node_group_state_item = QStandardItem::new();
node_group_item.set_editable(false);
node_group_state_item.set_editable(false);
node_group_state_item.set_selectable(false);
// Put all record nodes under the "Group Node".
for grandchild_node in node_group {
if let NodeType::Record(_) = grandchild_node {
load_node_to_view(&node_group_item, grandchild_node, None);
}
}
// Store the group's data.
node_group_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(serde_json::to_string_pretty(&node_group.iter().map(|x| x.clone_without_children()).collect::<Vec<NodeType>>()).unwrap())), CHILD_NODES);
node_group_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&node_group_name)), RECORD_NODE_NAME);
let qlist = QListOfQStandardItem::new();
qlist.append_q_standard_item(&node_group_item.into_ptr().as_mut_raw_ptr());
qlist.append_q_standard_item(&node_group_state_item.into_ptr().as_mut_raw_ptr());
child_item.append_row_q_list_of_q_standard_item(qlist.as_ref());
}
// Set the child's data, and add the child to the TreeView.
child_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(serde_json::to_string_pretty(&child.clone_without_children()).unwrap())), CHILDLESS_NODE);
child_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&node.get_ref_name())), RECORD_NODE_NAME);
}
// If it doesn't have nested blocks, just grab the first block's pack.
else {
// First, load record nodes into the view.
for child_node in &node.get_ref_children()[0] {
if let NodeType::Record(_) = child_node {
load_node_to_view(&child_item, child_node, None);
}
}
// Once done, store its data and it's values.
child_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(serde_json::to_string_pretty(&child.clone_without_children()).unwrap())), CHILDLESS_NODE);
child_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(serde_json::to_string_pretty(&node.get_ref_children()[0].iter().map(|x| x.clone_without_children()).collect::<Vec<NodeType>>()).unwrap())), CHILD_NODES);
child_item.set_data_2a(&QVariant::from_q_string(&QString::from_std_str(&node.get_ref_name())), RECORD_NODE_NAME);
}
let qlist = QListOfQStandardItem::new();
qlist.append_q_standard_item(&child_item.into_ptr().as_mut_raw_ptr());
qlist.append_q_standard_item(&state_item.into_ptr().as_mut_raw_ptr());
parent.append_row_q_list_of_q_standard_item(qlist.as_ref());
}
}
/// This function reads the entire `TreeView` recursively and returns a node list.
unsafe fn get_node_type_from_tree_node(current_item: Option<Ptr<QStandardItem>>, model: &QStandardItemModel) -> NodeType {
// Try to get the node info. If it fails, this node is not a proper node, but a child of a node.
let item = if let Some(item) = current_item { item } else { model.item_1a(0) };
let mut node = serde_json::from_str(&item.data_1a(CHILDLESS_NODE).to_string().to_std_string()).unwrap();
// If it has no children, just its json.
match node {
NodeType::Record(ref mut node) => {
// Depending if we should have nested blocks or not, get the children in one way or another.
if node.get_ref_record_flags().contains(RecordNodeFlags::HAS_NESTED_BLOCKS) {
// Get the record group nodes, and process the groups one by one.
let record_group_count = item.row_count();
let mut record_group_nodes = Vec::with_capacity(record_group_count as usize);
for row in 0..record_group_count {
let child = item.child_1a(row);
let child_nodes = child.data_1a(CHILD_NODES).to_string().to_std_string();
let mut child_nodes: Vec<NodeType> = if !child_nodes.is_empty() {
match serde_json::from_str(&child_nodes) {
Ok(data) => data,
Err(error) => { dbg!(error); vec![]},
}
} else {
vec![]
};
let mut record_group = Vec::with_capacity(child.row_count() as usize);
for row in 0..child.row_count() {
let child = child.child_1a(row);
record_group.push(get_node_type_from_tree_node(Some(child), model));
}
// If we have record nodes, move their data into the parent node data.
if !record_group.is_empty() {
record_group.reverse();
for child_node in child_nodes.iter_mut() {
if let NodeType::Record(_) = child_node {
if let Some(record_node) = record_group.pop() {
*child_node = record_node;
}
}
}
}
record_group_nodes.push(child_nodes);
}
// Save the children... of our node.
node.set_children(record_group_nodes);
}
// No nested blocks means we can directly get the children.
else {
let child_nodes = item.data_1a(CHILD_NODES).to_string().to_std_string();
let mut child_nodes: Vec<NodeType> = if !child_nodes.is_empty() {
match serde_json::from_str(&child_nodes) {
Ok(data) => data,
Err(error) => { dbg!(error); vec![]},
}
} else {
vec![]
};
// Get the record nodes and their data from the TreeView.
let record_count = item.row_count();
let mut record_nodes = Vec::with_capacity(record_count as usize);
for row in 0..record_count {
let child = item.child_1a(row);
record_nodes.push(get_node_type_from_tree_node(Some(child), model));
}
// If we have record nodes, move their data into the parent node data.
if !record_nodes.is_empty() {
record_nodes.reverse();
for child_node in child_nodes.iter_mut() {
if let NodeType::Record(_) = child_node {
if let Some(record_node) = record_nodes.pop() {
*child_node = record_node;
}
}
}
}
// Save the children... of our node.
let children = vec![child_nodes];
node.set_children(children);
}
},
// Only record nodes are allowed to be nodes on the TreeView.
_ => panic!()
}
node
}
| t_items_from_selection(& |
kv_history.go | // Copyright (c) 2021, R.I. Pienaar and the Choria Project contributors
//
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"fmt"
"strconv"
"sync"
"time"
"github.com/choria-io/go-choria/internal/util"
)
type kvHistoryCommand struct {
command
name string
key string
}
func (k *kvHistoryCommand) Setup() error {
if kv, ok := cmdWithFullCommand("kv"); ok {
k.cmd = kv.Cmd().Command("history", "View the history for a specific key")
k.cmd.Arg("bucket", "The bucket name").Required().StringVar(&k.name)
k.cmd.Arg("key", "The key to retrieve history for").Required().StringVar(&k.key)
}
return nil
}
func (k *kvHistoryCommand) Configure() error {
return commonConfigure()
}
func (k *kvHistoryCommand) Run(wg *sync.WaitGroup) error {
defer wg.Done()
store, err := c.KV(ctx, nil, k.name, false)
if err != nil {
return err
}
history, err := store.History(ctx, k.key)
if err != nil {
return err
}
table := util.NewMarkdownTable("Seq", "Operation", "Time", "Length", "Value")
for _, r := range history {
val := util.Base64IfNotPrintable(r.Value())
if len(val) > 40 {
val = fmt.Sprintf("%s...%s", val[0:15], val[len(val)-15:])
}
table.Append([]string{
strconv.Itoa(int(r.Sequence())),
string(r.Operation()),
r.Created().Format(time.RFC822),
strconv.Itoa(len(r.Value())),
val,
})
}
table.Render()
return nil
}
func init() | {
cli.commands = append(cli.commands, &kvHistoryCommand{})
} |
|
zjuser_model.go | // ==========================================================================
// ORM code auto-generated by Yunjie GO. Do not edit by hand; regeneration will overwrite this file.
// Generated on: 2020-12-07 01:38:53
// Generated path: app/model/module/zjuser/zjuser_model.go
// Generated by: yunjie
// ==========================================================================
package zjuser
import (
"database/sql"
"github.com/gogf/gf/database/gdb"
"github.com/gogf/gf/frame/g"
"time"
)
// arModel is a active record design model for table t_zjuser operations.
type arModel struct {
M *gdb.Model
}
var (
// Table is the table name of t_zjuser.
Table = "t_zjuser"
// Model is the model object of t_zjuser.
Model = &arModel{g.DB("default").Table(Table).Safe()}
)
// FindOne is a convenience method for Model.FindOne.
// See Model.FindOne.
func FindOne(where ...interface{}) (*Entity, error) {
return Model.FindOne(where...)
}
// FindAll is a convenience method for Model.FindAll.
// See Model.FindAll.
func FindAll(where ...interface{}) ([]*Entity, error) {
return Model.FindAll(where...)
}
// FindValue is a convenience method for Model.FindValue.
// See Model.FindValue.
func FindValue(fieldsAndWhere ...interface{}) (gdb.Value, error) {
return Model.FindValue(fieldsAndWhere...)
}
// FindCount is a convenience method for Model.FindCount.
// See Model.FindCount.
func FindCount(where ...interface{}) (int, error) {
return Model.FindCount(where...)
}
// Insert is a convenience method for Model.Insert.
func Insert(data ...interface{}) (result sql.Result, err error) {
return Model.Insert(data...)
}
// Replace is a convenience method for Model.Replace.
func Replace(data ...interface{}) (result sql.Result, err error) {
return Model.Replace(data...)
}
// Save is a convenience method for Model.Save.
func Save(data ...interface{}) (result sql.Result, err error) {
return Model.Save(data...)
}
// Update is a convenience method for Model | re ...interface{}) (result sql.Result, err error) {
return Model.Update(dataAndWhere...)
}
// Delete is a convenience method for Model.Delete.
func Delete(where ...interface{}) (result sql.Result, err error) {
return Model.Delete(where...)
}
// As sets an alias name for current table.
func (m *arModel) As(as string) *arModel {
return &arModel{m.M.As(as)}
}
// TX sets the transaction for current operation.
func (m *arModel) TX(tx *gdb.TX) *arModel {
return &arModel{m.M.TX(tx)}
}
// Master marks the following operation on master node.
func (m *arModel) Master() *arModel {
return &arModel{m.M.Master()}
}
// Slave marks the following operation on slave node.
// Note that it makes sense only if there's any slave node configured.
func (m *arModel) Slave() *arModel {
return &arModel{m.M.Slave()}
}
// LeftJoin does "LEFT JOIN ... ON ..." statement on the model.
func (m *arModel) LeftJoin(joinTable string, on string) *arModel {
return &arModel{m.M.LeftJoin(joinTable, on)}
}
// RightJoin does "RIGHT JOIN ... ON ..." statement on the model.
func (m *arModel) RightJoin(joinTable string, on string) *arModel {
return &arModel{m.M.RightJoin(joinTable, on)}
}
// InnerJoin does "INNER JOIN ... ON ..." statement on the model.
func (m *arModel) InnerJoin(joinTable string, on string) *arModel {
return &arModel{m.M.InnerJoin(joinTable, on)}
}
// Fields sets the operation fields of the model, multiple fields joined using char ','.
func (m *arModel) Fields(fields string) *arModel {
return &arModel{m.M.Fields(fields)}
}
// FieldsEx sets the excluded operation fields of the model, multiple fields joined using char ','.
func (m *arModel) FieldsEx(fields string) *arModel {
return &arModel{m.M.FieldsEx(fields)}
}
// Option sets the extra operation option for the model.
func (m *arModel) Option(option int) *arModel {
return &arModel{m.M.Option(option)}
}
// OmitEmpty sets OPTION_OMITEMPTY option for the model, which automatically filters
// the data and where attributes for empty values.
func (m *arModel) OmitEmpty() *arModel {
return &arModel{m.M.OmitEmpty()}
}
// Filter marks filtering the fields which does not exist in the fields of the operated table.
func (m *arModel) Filter() *arModel {
return &arModel{m.M.Filter()}
}
// Where sets the condition statement for the model. The parameter <where> can be type of
// string/map/gmap/slice/struct/*struct, etc. Note that, if it's called more than one times,
// multiple conditions will be joined into where statement using "AND".
// Eg:
// Where("uid=10000")
// Where("uid", 10000)
// Where("money>? AND name like ?", 99999, "vip_%")
// Where("uid", 1).Where("name", "john")
// Where("status IN (?)", g.Slice{1,2,3})
// Where("age IN(?,?)", 18, 50)
// Where(User{ Id : 1, UserName : "john"})
func (m *arModel) Where(where interface{}, args ...interface{}) *arModel {
return &arModel{m.M.Where(where, args...)}
}
// And adds "AND" condition to the where statement.
func (m *arModel) And(where interface{}, args ...interface{}) *arModel {
return &arModel{m.M.And(where, args...)}
}
// Or adds "OR" condition to the where statement.
func (m *arModel) Or(where interface{}, args ...interface{}) *arModel {
return &arModel{m.M.Or(where, args...)}
}
// Group sets the "GROUP BY" statement for the model.
func (m *arModel) Group(groupBy string) *arModel {
return &arModel{m.M.Group(groupBy)}
}
// Order sets the "ORDER BY" statement for the model.
func (m *arModel) Order(orderBy string) *arModel {
return &arModel{m.M.Order(orderBy)}
}
// Limit sets the "LIMIT" statement for the model.
// The parameter <limit> can be either one or two numbers. If two numbers are passed,
// it then sets "LIMIT limit[0],limit[1]" statement for the model, or else it sets "LIMIT limit[0]"
// statement.
func (m *arModel) Limit(limit ...int) *arModel {
return &arModel{m.M.Limit(limit...)}
}
// Offset sets the "OFFSET" statement for the model.
// It only makes sense for some databases like SQLServer, PostgreSQL, etc.
func (m *arModel) Offset(offset int) *arModel {
return &arModel{m.M.Offset(offset)}
}
// Page sets the paging number for the model.
// The parameter <page> is started from 1 for paging.
// Note that this differs from the Limit function, which starts from 0 for the "LIMIT" statement.
func (m *arModel) Page(page, limit int) *arModel {
return &arModel{m.M.Page(page, limit)}
}
// Batch sets the batch operation number for the model.
func (m *arModel) Batch(batch int) *arModel {
return &arModel{m.M.Batch(batch)}
}
// Cache sets the cache feature for the model. It caches the result of the sql, which means
// that if the same sql request comes again, it just reads and returns the result from the cache
// instead of committing and executing it against the database.
//
// If the parameter <duration> < 0, which means it clear the cache with given <name>.
// If the parameter <duration> = 0, which means it never expires.
// If the parameter <duration> > 0, which means it expires after <duration>.
//
// The optional parameter <name> is used to bind a name to the cache, which means you can later
// control the cache like changing the <duration> or clearing the cache with specified <name>.
//
// Note that, the cache feature is disabled if the model is operating on a transaction.
func (m *arModel) Cache(expire time.Duration, name ...string) *arModel {
return &arModel{m.M.Cache(expire, name...)}
}
// Data sets the operation data for the model.
// The parameter <data> can be type of string/map/gmap/slice/struct/*struct, etc.
// Eg:
// Data("uid=10000")
// Data("uid", 10000)
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
func (m *arModel) Data(data ...interface{}) *arModel {
return &arModel{m.M.Data(data...)}
}
// Insert does "INSERT INTO ..." statement for the model.
// The optional parameter <data> is the same as the parameter of Model.Data function,
// see Model.Data.
func (m *arModel) Insert(data ...interface{}) (result sql.Result, err error) {
return m.M.Insert(data...)
}
// Replace does "REPLACE INTO ..." statement for the model.
// The optional parameter <data> is the same as the parameter of Model.Data function,
// see Model.Data.
func (m *arModel) Replace(data ...interface{}) (result sql.Result, err error) {
return m.M.Replace(data...)
}
// Save does "INSERT INTO ... ON DUPLICATE KEY UPDATE..." statement for the model.
// It updates the record if there's primary or unique index in the saving data,
// or else it inserts a new record into the table.
//
// The optional parameter <data> is the same as the parameter of Model.Data function,
// see Model.Data.
func (m *arModel) Save(data ...interface{}) (result sql.Result, err error) {
return m.M.Save(data...)
}
// Update does "UPDATE ... " statement for the model.
//
// If the optional parameter <dataAndWhere> is given, the dataAndWhere[0] is the updated
// data field, and dataAndWhere[1:] is treated as where condition fields.
// Also see Model.Data and Model.Where functions.
func (m *arModel) Update(dataAndWhere ...interface{}) (result sql.Result, err error) {
return m.M.Update(dataAndWhere...)
}
// Delete does "DELETE FROM ... " statement for the model.
// The optional parameter <where> is the same as the parameter of Model.Where function,
// see Model.Where.
func (m *arModel) Delete(where ...interface{}) (result sql.Result, err error) {
return m.M.Delete(where...)
}
// Count does "SELECT COUNT(x) FROM ..." statement for the model.
// The optional parameter <where> is the same as the parameter of Model.Where function,
// see Model.Where.
func (m *arModel) Count(where ...interface{}) (int, error) {
return m.M.Count(where...)
}
// All does "SELECT FROM ..." statement for the model.
// It retrieves the records from table and returns the result as []*Entity.
// It returns nil if there's no record retrieved with the given conditions from table.
//
// The optional parameter <where> is the same as the parameter of Model.Where function,
// see Model.Where.
func (m *arModel) All(where ...interface{}) ([]*Entity, error) {
all, err := m.M.All(where...)
if err != nil {
return nil, err
}
var entities []*Entity
if err = all.Structs(&entities); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entities, nil
}
// One retrieves one record from table and returns the result as *Entity.
// It returns nil if there's no record retrieved with the given conditions from table.
//
// The optional parameter <where> is the same as the parameter of Model.Where function,
// see Model.Where.
func (m *arModel) One(where ...interface{}) (*Entity, error) {
one, err := m.M.One(where...)
if err != nil {
return nil, err
}
var entity *Entity
if err = one.Struct(&entity); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entity, nil
}
// Value retrieves a specified record value from table and returns the result as interface type.
// It returns nil if there's no record found with the given conditions from table.
//
// If the optional parameter <fieldsAndWhere> is given, the fieldsAndWhere[0] is the selected fields
// and fieldsAndWhere[1:] is treated as where condition fields.
// Also see Model.Fields and Model.Where functions.
func (m *arModel) Value(fieldsAndWhere ...interface{}) (gdb.Value, error) {
return m.M.Value(fieldsAndWhere...)
}
// FindOne retrieves and returns a single Record by Model.WherePri and Model.One.
// Also see Model.WherePri and Model.One.
func (m *arModel) FindOne(where ...interface{}) (*Entity, error) {
one, err := m.M.FindOne(where...)
if err != nil {
return nil, err
}
var entity *Entity
if err = one.Struct(&entity); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entity, nil
}
// FindAll retrieves and returns Result by Model.WherePri and Model.All.
// Also see Model.WherePri and Model.All.
func (m *arModel) FindAll(where ...interface{}) ([]*Entity, error) {
all, err := m.M.FindAll(where...)
if err != nil {
return nil, err
}
var entities []*Entity
if err = all.Structs(&entities); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entities, nil
}
// FindValue retrieves and returns single field value by Model.WherePri and Model.Value.
// Also see Model.WherePri and Model.Value.
func (m *arModel) FindValue(fieldsAndWhere ...interface{}) (gdb.Value, error) {
return m.M.FindValue(fieldsAndWhere...)
}
// FindCount retrieves and returns the record number by Model.WherePri and Model.Count.
// Also see Model.WherePri and Model.Count.
func (m *arModel) FindCount(where ...interface{}) (int, error) {
return m.M.FindCount(where...)
}
// Chunk iterates the table with given size and callback function.
func (m *arModel) Chunk(limit int, callback func(entities []*Entity, err error) bool) {
m.M.Chunk(limit, func(result gdb.Result, err error) bool {
var entities []*Entity
err = result.Structs(&entities)
if err == sql.ErrNoRows {
return false
}
return callback(entities, err)
})
} | .Update.
func Update(dataAndWhe |
utils.py | import numpy as np
def clean_data(df, out_df_dir=""):
df.dropna(axis=1, inplace=True)
if out_df_dir:
|
return df
# Calculate log change of daily price
def log_change(series):
return np.log(series[1] / series[0])
# Calculate correlation
def calculate_cor(df, start, end):
return df[start:end].rolling(
window=2,
min_periods=2
).apply(
log_change,
raw=True
).corr(method="pearson")
# Calculate profit
def take_profit(price, start, end):
return price.iloc[end]/price.iloc[start] - 1 | df.to_csv(out_df_dir) |
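# Illustrative sketch (hypothetical price DataFrame with one column per asset),
# showing how the helpers above compose:
#
#     import pandas as pd
#     prices = pd.DataFrame({'AAA': [10, 11, 12, 11], 'BBB': [20, 19, 21, 22]})
#     cor = calculate_cor(prices, 0, 4)          # pairwise correlation of daily log changes
#     profit = take_profit(prices['AAA'], 0, 3)  # 11/10 - 1 = 0.1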
slug.rs | use lazy_static::lazy_static;
use regex::Regex;
lazy_static! {
static ref SPECIAL: Regex = Regex::new("[\\s_-]+").unwrap(); // replace any run of whitespace, underscore, or hyphen characters with a single '-'
static ref LEADING: Regex = Regex::new("^-+|-+$").unwrap();
}
pub fn slugify(text: &str) -> String | g(test)]
mod tests {
use crate::entry::slug::slugify;
#[test]
fn chinese_slug() {
let string = slugify("你無可奈何asd fsadf+");
assert_eq!("你無可奈何asd-fsadf+", string);
}
#[test]
fn leading_slash() {
let string = slugify("-love");
assert_eq!("love", string);
}
}
| {
let lower = text.trim().to_lowercase();
let result = SPECIAL.replace_all(lower.as_str(), "-").to_string();
let result = LEADING.replace_all(result.as_str(), "").to_string();
result
.replace(',', "")
.replace('。', "")
.replace(' ', "-")
.replace('?', "-")
.replace('#', "-")
.replace(':', "-")
.replace("-/-", "")
.replace('/', "")
.replace("——", "-")
}
#[cf |
listener.go | // Copyright 2018-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package listener
import (
"github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/observer"
"github.com/cilium/cilium/pkg/monitor/agent/listener"
"github.com/cilium/cilium/pkg/monitor/payload"
"github.com/cilium/cilium/pkg/node"
"github.com/golang/protobuf/ptypes"
)
type hubbleListener struct {
observer observer.GRPCServer
}
// NewHubbleListener returns an initialized pointer to hubbleListener.
func NewHubbleListener(observer observer.GRPCServer) listener.MonitorListener |
// Enqueue converts monitor payload to the format accepted by Hubble:
// https://github.com/cilium/hubble/blob/04ab72591faca62a305ce0715108876167182e04/api/v1/flow/flow.proto#L266
func (ml *hubbleListener) Enqueue(pl *payload.Payload) {
grpcPl := &flow.Payload{
Data: pl.Data,
CPU: int32(pl.CPU),
Lost: pl.Lost,
Type: flow.EventType(pl.Type),
Time: ptypes.TimestampNow(),
HostName: node.GetName(),
}
select {
case ml.observer.GetEventsChannel() <- grpcPl:
default:
ml.observer.GetLogger().Debug("Per listener queue is full, dropping message")
}
}
// Version returns the listener version supported by hubbleListener.
func (ml *hubbleListener) Version() listener.Version {
return listener.Version1_2
}
// Close is a no-op for hubbleListener.
func (ml *hubbleListener) Close() {
}
| {
ml := &hubbleListener{observer}
return ml
} |
version.go | // Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package version implements logic to parse version
// of Go tags and release branches.
package version
import (
"strings"
)
// ParseTag parses the major-minor-patch version triplet
// from goX, goX.Y, or goX.Y.Z tag names,
// and reports whether the tag name is valid.
//
// Tags with suffixes like "go1.2beta3" or "go1.2rc1"
// are currently not supported, and get rejected.
//
// For example, "go1" is parsed as version 1.0.0,
// "go1.2" is parsed as version 1.2.0,
// and "go1.2.3" is parsed as version 1.2.3.
func ParseTag(tagName string) (major, minor, patch int, ok bool) {
const prefix = "go"
if !strings.HasPrefix(tagName, prefix) {
return 0, 0, 0, false
}
if strings.HasSuffix(tagName, ".0") {
// Trailing zero version components must be omitted in Go tags,
// so reject if we see one.
return 0, 0, 0, false
}
v := strings.SplitN(tagName[len(prefix):], ".", 4)
if len(v) > 3 {
return 0, 0, 0, false
}
major, ok = parse0To999(v[0])
if !ok || major == 0 {
return 0, 0, 0, false
}
if len(v) == 2 || len(v) == 3 {
minor, ok = parse0To999(v[1])
if !ok {
return 0, 0, 0, false
}
}
if len(v) == 3 {
patch, ok = parse0To999(v[2])
if !ok {
return 0, 0, 0, false
}
}
return major, minor, patch, true
}
// ParseReleaseBranch parses the major-minor version pair
// from release-branch.goX or release-branch.goX.Y release branch names,
// and reports whether the release branch name is valid.
//
// For example, "release-branch.go1" is parsed as version 1.0,
// and "release-branch.go1.2" is parsed as version 1.2.
func ParseReleaseBranch(branchName string) (major, minor int, ok bool) |
// parse0To999 converts the canonical ASCII string representation
// of a number in the range [0, 999] to its integer form.
// strconv.Itoa(n) will equal s if and only if ok is true.
//
// It's similar to strconv.Atoi, except it doesn't permit
// negative numbers, leading '+'/'-' signs, leading zeros,
// or other potential valid but non-canonical string
// representations of numbers.
func parse0To999(s string) (n int, ok bool) {
if len(s) < 1 || 3 < len(s) {
return 0, false
}
if len(s) > 1 && s[0] == '0' {
// Leading zeros are rejected.
return 0, false
}
for _, c := range []byte(s) {
if c < '0' || '9' < c {
return 0, false
}
n = n*10 + int(c-'0')
}
return n, true
}
| {
const prefix = "release-branch.go"
if !strings.HasPrefix(branchName, prefix) {
return 0, 0, false
}
if strings.HasSuffix(branchName, ".0") {
// Trailing zero version components must be omitted in Go release branches,
// so reject if we see one.
return 0, 0, false
}
v := strings.SplitN(branchName[len(prefix):], ".", 3)
if len(v) > 2 {
return 0, 0, false
}
major, ok = parse0To999(v[0])
if !ok || major == 0 {
return 0, 0, false
}
if len(v) == 2 {
minor, ok = parse0To999(v[1])
if !ok {
return 0, 0, false
}
}
return major, minor, true
} |
hashsum.py | #!/usr/bin/env python3
"""Compute and check message digest with different hash algorithms.
The sums are computed as described in [1].
When checking, the input should be a former output of this program.
The default mode is to print a line with checksum, a character indicating
input mode ('*' for binary, space for text), and name for each FILE.
[1] https://docs.python.org/3/library/hashlib.html
"""
import io
import os
import re
import sys
import enum
import codecs
import hashlib
import logging
import argparse
import warnings
import functools
try:
from os import EX_OK
except ImportError:
EX_OK = 0
EX_FAILURE = 1
EX_INTERRUPT = 130
try:
import argcomplete
except ImportError:
argcomplete = False
else:
PYTHON_ARGCOMPLETE_OK = True
__version__ = '1.4.2.dev0'
PROG = os.path.splitext(os.path.basename(__file__))[0]
LOGFMT = '%(levelname)s: %(message)s'
DIGEST_LINE_RE = re.compile(
r'^(?P<digest>\w+) (?P<binary>[ *])(?P<path>.+)$')
DIGEST_LINE_BSD_RE = re.compile(
r'^(?P<algo>\w+) ?\((?P<path>.+)\) ?= (?P<digest>\w+)$')
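# Examples of digest lines matched by the two patterns above (the file name and
# digest are illustrative; the digest shown is the MD5 of an empty input):
#   d41d8cd98f00b204e9800998ecf8427e *data.bin           -> DIGEST_LINE_RE (GNU style)
#   MD5 (data.bin) = d41d8cd98f00b204e9800998ecf8427e    -> DIGEST_LINE_BSD_RE (BSD style)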
BLOCKSIZE = 1024 * 1024 # 1MB
_QUEUE_LEN = 50 # max 50MB
DEFAULT_ALGO = 'md5'
def blockiter(fd, blocksize=io.DEFAULT_BUFFER_SIZE):
"""Iterate on file-like objects reading blocks of the specified size.
The `fd` parameter must be a binary or text file-like object opened
for reading.
The `blocksize` parameter defaults to `io.DEFAULT_BUFFER_SIZE`.
"""
guard = '' if isinstance(fd, io.TextIOBase) else b''
return iter(functools.partial(fd.read, blocksize), guard)
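# Illustrative sketch (hypothetical file name): iterate a binary file in fixed-size
# blocks; iteration stops when fd.read() returns the empty sentinel.
#
#     with open('data.bin', 'rb') as fd:
#         size = sum(len(block) for block in blockiter(fd, blocksize=4096))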
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
super().__init__(errors=errors)
self.buffer = b''
self.from_ = os.linesep.encode('ascii')
self.to = b'\n'
def decode(self, data, final=False):
if self.buffer:
output = self.buffer + data
else:
|
self.buffer = b''
if len(self.from_) > 1:
assert(len(self.from_) == 2)
lastchar = self.from_[-2:-1]
if output.endswith(lastchar) and not final:
output = output[:-1]
self.buffer = lastchar
output = output.replace(self.from_, self.to)
return output
def reset(self):
super().reset()
self.buffer = b''
def getstate(self):
return self.buffer, 0
def setstate(self, state):
self.buffer = state[0]
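# Illustrative behaviour sketch: with os.linesep == '\r\n' (e.g. on Windows) the
# decoder folds CRLF into LF even when the pair is split across chunks:
#
#     dec = IncrementalNewlineDecoder()
#     dec.decode(b'a\r') + dec.decode(b'\nb', final=True)   # == b'a\nb'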
class CheckResult(enum.Enum):
OK = 'OK'
FAILURE = 'FAILURE'
READ_FAILURE = 'READ_FAILURE'
BAD_FORMATTING = 'BAD_FORMATTING'
IGNORED = 'IGNORED'
# TODO: inherit from collections.Counter
class CheckResultData:
def __init__(self, n_ok=0, n_failures=0, n_read_failures=0,
n_improperly_formatted=0, n_ignored=0):
self.n_ok = n_ok
self.n_failures = n_failures
self.n_read_failures = n_read_failures
self.n_improperly_formatted = n_improperly_formatted
self.n_ignored = n_ignored
def update(self, ret):
if ret == CheckResult.OK:
self.n_ok += 1
elif ret == CheckResult.FAILURE:
self.n_failures += 1
elif ret == CheckResult.READ_FAILURE:
self.n_read_failures += 1
elif ret == CheckResult.BAD_FORMATTING:
self.n_improperly_formatted += 1
elif ret == CheckResult.IGNORED:
self.n_ignored += 1
else:
raise ValueError(f'unexpected value: {ret}')
def __repr__(self):
keys = [
'n_ok',
'n_failures',
'n_read_failures',
'n_improperly_formatted',
'n_ignored',
]
kvstr = ', '.join(f'{k}={getattr(self, k)}' for k in keys)
return f'CheckResultData({kvstr})'
def _compute_file_checksum_sequential(fd, algo=DEFAULT_ALGO, binary=True):
hash_obj = hashlib.new(algo)
if not binary and os.linesep != '\n':
decoder = IncrementalNewlineDecoder()
else:
decoder = None
for data in blockiter(fd, BLOCKSIZE):
if decoder:
data = decoder.decode(data)
hash_obj.update(data)
if decoder:
data = decoder.decode(b'', final=True)
hash_obj.update(data)
return hash_obj
class HashObjectData:
def __init__(self, hash_obj):
self.block_size = hash_obj.block_size
self.name = hash_obj.name
self.digest_size = hash_obj.digest_size
self._digest = hash_obj.digest()
self._hexdigest = hash_obj.hexdigest()
def digest(self):
return self._digest
def hexdigest(self):
return self._hexdigest
def _worker(tasks, results, algo=DEFAULT_ALGO, decoder=None):
try:
hash_obj = hashlib.new(algo)
for data in iter(tasks.get, None):
if decoder:
data = decoder.decode(data)
hash_obj.update(data)
tasks.task_done()
else:
if decoder:
data = decoder.decode(b'', final=True)
hash_obj.update(data)
tasks.task_done() # for None
results.put(HashObjectData(hash_obj))
except Exception as exc:
results.put(exc)
def _compute_file_checksum_threading(fd, algo=DEFAULT_ALGO, binary=True):
import queue
import threading
if not binary and os.linesep != '\n':
decoder = IncrementalNewlineDecoder()
else:
decoder = None
task_queue = queue.Queue(_QUEUE_LEN)
result_queue = queue.Queue()
args = (task_queue, result_queue, algo, decoder)
worker = threading.Thread(name='worker', target=_worker, args=args)
worker.start()
try:
for data in blockiter(fd, BLOCKSIZE):
task_queue.put(data)
if not result_queue.empty():
break # fail fast
finally:
task_queue.put(None)
result = result_queue.get()
worker.join()
if isinstance(result, Exception):
raise result
return result
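# Illustrative usage sketch (file name and algorithm are hypothetical): the caller
# thread reads blocks and feeds the bounded queue while the worker thread hashes
# them, overlapping I/O with computation.
#
#     with open('big.iso', 'rb') as fd:
#         hexdigest = _compute_file_checksum_threading(fd, algo='sha256').hexdigest()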
class ChecksumVerifier:
def __init__(self, algo=None, quiet=False, status=False, warn=False,
strict=False, multi_thread=False):
self.algo = algo
self.quiet = quiet
self.status = status
self.warn = warn
self.strict = strict
self.multi_thread = multi_thread
self._log = logging.getLogger('hashsum')
def _compute_file_checksum(self, fd, algo, binary):
if self.multi_thread:
return _compute_file_checksum_threading(fd, algo, binary)
else:
return _compute_file_checksum_sequential(fd, algo, binary)
def _check_algorithm_compatibility(self, algo):
if self.algo is not None and self.algo.lower() != algo.lower():
raise ValueError(
'specified hashing algorithm ({}) is different from '
'the one used in the digest file ({})'.format(
self.algo, algo))
def decode_checksum_file_line(self, line):
mobj = DIGEST_LINE_BSD_RE.match(line)
if mobj:
self._check_algorithm_compatibility(mobj.group('algo'))
algo = mobj.group('algo')
path = mobj.group('path')
hexdigest = mobj.group('digest')
binary = True
else:
mobj = DIGEST_LINE_RE.match(line)
if not mobj:
raise ValueError(
f'unable to decode digest line: "{line}"')
path = mobj.group('path')
hexdigest = mobj.group('digest')
binary = True if mobj.group('binary') == '*' else False
if self.algo is None:
msg = f'no algorithm specified: using {DEFAULT_ALGO!r}'
warnings.warn(msg)
# self._log.warning(msg)
algo = DEFAULT_ALGO
else:
algo = self.algo
return path, hexdigest, binary, algo
def process_checksum_file_line(self, line):
if len(line.strip()) == 0 or line[0] == '#':
# support for comments in the digest-file
return CheckResult.IGNORED
path, hexdigest, binary, algo = self.decode_checksum_file_line(line)
try:
with open(path, 'rb') as fd:
hash_obj = self._compute_file_checksum(fd, algo, binary)
except OSError:
result = CheckResult.READ_FAILURE
if not self.quiet:
print(f'{path}: FAILED open or read')
else:
if hash_obj.hexdigest() == hexdigest:
result = CheckResult.OK
elif len(hash_obj.hexdigest()) != len(hexdigest):
result = CheckResult.BAD_FORMATTING
else:
result = CheckResult.FAILURE
if not self.status:
if (result != CheckResult.OK) or not self.quiet:
print(f'{path}: {result.value}')
return result
def print_check_results(self, check_result, filename):
ret = True
if check_result.n_failures > 0:
if not self.status:
self._log.warning(
'{} computed checksum do NOT match'.format(
check_result.n_failures))
ret = False
if check_result.n_read_failures > 0:
if not self.status:
self._log.warning(
'{} listed file(s) could not be read'.format(
check_result.n_read_failures))
ret = False
if check_result.n_improperly_formatted > 0:
if self.warn:
self._log.warning(
'{} improperly formatted checksum line'.format(
check_result.n_improperly_formatted))
if self.strict:
ret = False
if check_result.n_ok == 0:
self._log.info(
'{}: no properly formatted checksum lines found'.format(
filename))
ret = False
return ret
def verify_checksums(self, filenames):
result = True
if filenames:
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
check_result = CheckResultData()
with open(filename) as fd:
for line in fd:
ret = self.process_checksum_file_line(line)
check_result.update(ret)
ret = self.print_check_results(check_result, filename)
if not ret:
result = False
else:
# filenames is None or an empty list
filename = '-'
check_result = CheckResultData()
for line in sys.stdin:
ret = self.process_checksum_file_line(line)
check_result.update(ret)
ret = self.print_check_results(check_result, filename)
if not ret:
result = False
return result
class ChecksumCalculator:
def __init__(self, algo=None, binary=None, tag=False, multi_thread=False):
self.algo = algo
self.binary = binary
self.tag = tag
self.multi_thread = multi_thread
self._log = logging.getLogger('hashsum')
if self.algo is None:
msg = f'no algorithm specified: using {DEFAULT_ALGO!r}'
warnings.warn(msg)
# self._log.warning(msg)
self.algo = DEFAULT_ALGO
if self.tag and not self.binary:
raise ValueError(
'binary option set to False is incompatible with tag '
'option set to True')
def print_hash_line(self, filename, hash_obj):
if self.tag:
algo = hash_obj.name.upper()
print(f'{algo} ({filename}) = {hash_obj.hexdigest()}')
else:
marker = '*' if self.binary else ' '
print(f'{hash_obj.hexdigest()} {marker}{filename}')
def _compute_file_checksum(self, fd):
if self.multi_thread:
return _compute_file_checksum_threading(fd, self.algo, self.binary)
else:
return _compute_file_checksum_sequential(fd, self.algo,
self.binary)
def compute_checksums(self, filenames):
if filenames:
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
if os.path.isdir(filename):
self._log.info(f'{filename}: is a directory')
continue
with open(filename, 'rb') as fd:
hash_obj = self._compute_file_checksum(fd)
self.print_hash_line(filename, hash_obj)
else:
# filenames is None or an empty list
filename = '-'
# TODO: check
# stdin = io.open(sys.stdin.fileno(), mode='rb', closefd=False)
stdin = sys.stdin.buffer
hash_obj = self._compute_file_checksum(stdin)
self.print_hash_line(filename, hash_obj)
def get_parser():
"""Instantiate the command line argument parser."""
epilog = 'Copyright (C) 2016-2021, Antonio Valentino'
parser = argparse.ArgumentParser(prog=PROG, description=__doc__,
epilog=epilog)
parser.add_argument(
'-a', '--algorithm', choices=hashlib.algorithms_available,
default=None, metavar='',
help='specify the hashing algorithm '
'(default: {!r})'.format(DEFAULT_ALGO))
parser.add_argument(
'--tag', action='store_true', default=False,
help='create a BSD-style checksum')
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument(
'-b', '--binary', action='store_true', default=None,
help='read input data in binary mode')
mode_group.add_argument(
'-t', '--text', dest='binary', action='store_false',
help='read input data in text mode (default)')
group = parser.add_mutually_exclusive_group()
group.add_argument(
'-c', '--check', action='store_true', default=False,
help='read checksum(s) from FILE and check them')
group.add_argument(
'-l', '--list-algorithms', action='store_true', default=False,
help='list available hashing algorithms')
check_group = parser.add_argument_group(
'check', 'Options that are useful only when verifying checksums')
check_group.add_argument(
'--quiet', action='store_true', default=False,
help="don't print OK for each successfully verified file")
check_group.add_argument(
'--status', action='store_true', default=False,
help="don't output anything, status code shows success")
check_group.add_argument(
'--strict', action='store_true', default=False,
help="exit non-zero for improperly formatted checksum lines")
check_group.add_argument(
'-w', '--warn', action='store_true', default=False,
help="warn about improperly formatted checksum lines")
parser.add_argument(
'-m', '--multi-thread', action='store_true', default=False,
help='perform I/O and hash computation in separate threads '
'(default=%(default)s). '
'Can speed up computation on large files, but it is not '
'recommended for small files.')
parser.add_argument(
'--version', action='version',
version=f'%(prog)s v{__version__}')
parser.add_argument(
'filenames', nargs='*', metavar='FILE',
help='name of file to process. '
'If not specified, or set to -, data are read from the '
'standard input')
if argcomplete:
argcomplete.autocomplete(parser)
return parser
def parse_args(args=None, namespace=None, parser=None):
"""Parse command line arguments."""
if parser is None:
parser = get_parser()
args = parser.parse_args(args)
if args.tag:
if args.binary is False:
parser.error('--tag does not support --text mode')
else:
args.binary = True
if args.tag and args.check:
parser.error(
'the --tag option is meaningless when verifying checksums')
if args.binary and args.check:
parser.error('the --binary and --text options are meaningless '
'when verifying checksums')
if args.status and not args.check:
parser.error('the --status option is meaningful only when '
'verifying checksums')
if args.warn and not args.check:
parser.error('the --warn option is meaningful only when '
'verifying checksums')
if args.quiet and not args.check:
parser.error('the --quiet option is meaningful only when '
'verifying checksums')
if args.strict and not args.check:
parser.error('the --strict option is meaningful only when '
'verifying checksums')
if '-' in args.filenames:
if len(args.filenames) > 1:
parser.error('"-" cannot be used if other file names have '
'been specified')
else:
args.filenames.remove('-')
return args
def main(*argv):
# setup logging
logging.basicConfig(format=LOGFMT, level=logging.DEBUG)
logging.captureWarnings(True)
log = logging.getLogger('hashsum')
# parse cmd line arguments
args = parse_args(argv if argv else None)
exitcode = EX_OK
try:
if args.list_algorithms:
algoset = hashlib.algorithms_available
algolist = sorted(
algo for algo in algoset
if algo.islower() or algo.lower() not in algoset
)
print('Available hash algorithms:')
print(' ', '\n '.join(algolist), sep='')
elif args.check:
tool = ChecksumVerifier(args.algorithm, args.quiet, args.status,
args.warn, args.strict, args.multi_thread)
result = tool.verify_checksums(args.filenames)
if not result:
exitcode = EX_FAILURE
else:
tool = ChecksumCalculator(
args.algorithm, args.binary, args.tag, args.multi_thread)
tool.compute_checksums(args.filenames)
except Exception as exc:
log.error(str(exc))
log.debug('stacktrace:', exc_info=True)
exitcode = EX_FAILURE
except KeyboardInterrupt:
log.warning('Keyboard interrupt received: exit the program')
exitcode = EX_INTERRUPT
return exitcode
if __name__ == '__main__':
sys.exit(main())
| output = data |
doc.go | /*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at |
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by Kubeform. DO NOT EDIT.
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=kubeform.dev/provider-aws-api/apis/s3
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
// +groupName=s3.aws.kubeform.com
package v1alpha1 |
http://www.apache.org/licenses/LICENSE-2.0 |
eval_util.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
import collections
import os
import time
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
slim = tf.contrib.slim
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
}
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
"""Write metrics to a summary directory.
Args:
metrics: A dictionary containing metric names and values.
global_step: Global step at which the metrics are computed.
summary_dir: Directory to write tensorflow summaries to.
"""
tf.logging.info('Writing metrics to tf summary.')
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
for key in sorted(metrics):
summary = tf.Summary(value=[
tf.Summary.Value(tag=key, simple_value=metrics[key]),
])
summary_writer.add_summary(summary, global_step)
tf.logging.info('%s: %f', key, metrics[key])
tf.logging.info('Metrics written to tf summary.')
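# Editor's sketch: a typical call of write_metrics(); the metric names, values
# and directory are hypothetical placeholders.
#
#   metrics = {'DetectionBoxes_Precision/mAP': 0.42, 'Losses/total_loss': 1.3}
#   write_metrics(metrics, global_step=1000, summary_dir='/tmp/eval_summaries')
#
# Each entry is written as one scalar tf.Summary value tagged with its name.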
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
tag,
global_step,
categories,
summary_dir='',
export_dir='',
agnostic_mode=False,
show_groundtruth=False,
groundtruth_box_visualization_color='black',
min_score_thresh=.5,
max_num_predictions=20,
skip_scores=False,
skip_labels=False,
keep_image_id_for_visualization_export=False):
"""Visualizes detection results and writes visualizations to image summaries.
This function visualizes an image with its detected bounding boxes and writes
to image summaries which can be viewed on tensorboard. It optionally also
writes images to a directory. In the case of missing entry in the label map,
unknown class name in the visualization is shown as "N/A".
Args:
result_dict: a dictionary holding groundtruth and detection
data corresponding to each image being evaluated. The following keys
are required:
'original_image': a numpy array representing the image with shape
[1, height, width, 3] or [1, height, width, 1]
'detection_boxes': a numpy array of shape [N, 4]
'detection_scores': a numpy array of shape [N]
'detection_classes': a numpy array of shape [N]
The following keys are optional:
'groundtruth_boxes': a numpy array of shape [N, 4]
'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
Detections are assumed to be provided in decreasing order of score and for
display, and we assume that scores are probabilities between 0 and 1.
tag: tensorboard tag (string) to associate with image.
global_step: global step at which the visualization are generated.
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
summary_dir: the output directory to which the image summaries are written.
export_dir: the output directory to which images are written. If this is
empty (default), then images are not exported.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not.
show_groundtruth: boolean (default: False) controlling whether to show
groundtruth boxes in addition to detected boxes
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
min_score_thresh: minimum score threshold for a box to be visualized
max_num_predictions: maximum number of detections to visualize
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
keep_image_id_for_visualization_export: whether to keep image identifier in
filename when exported to export_dir
Raises:
ValueError: if result_dict does not contain the expected keys (i.e.,
'original_image', 'detection_boxes', 'detection_scores',
'detection_classes')
"""
detection_fields = fields.DetectionResultFields
input_fields = fields.InputDataFields
if not set([
input_fields.original_image,
detection_fields.detection_boxes,
detection_fields.detection_scores,
detection_fields.detection_classes,
]).issubset(set(result_dict.keys())):
raise ValueError('result_dict does not contain all expected keys.')
if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
raise ValueError('If show_groundtruth is enabled, result_dict must contain '
'groundtruth_boxes.')
tf.logging.info('Creating detection visualizations.')
category_index = label_map_util.create_category_index(categories)
image = np.squeeze(result_dict[input_fields.original_image], axis=0)
if image.shape[2] == 1: # If one channel image, repeat in RGB.
image = np.tile(image, [1, 1, 3])
detection_boxes = result_dict[detection_fields.detection_boxes]
detection_scores = result_dict[detection_fields.detection_scores]
detection_classes = np.int32((result_dict[
detection_fields.detection_classes]))
detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
detection_masks = result_dict.get(detection_fields.detection_masks)
detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
# Plot groundtruth underneath detections
if show_groundtruth:
groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
vis_utils.visualize_boxes_and_labels_on_image_array(
image=image,
boxes=groundtruth_boxes,
classes=None,
scores=None,
category_index=category_index,
keypoints=groundtruth_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=None,
groundtruth_box_visualization_color=groundtruth_box_visualization_color)
vis_utils.visualize_boxes_and_labels_on_image_array(
image,
detection_boxes,
detection_classes,
detection_scores,
category_index,
instance_masks=detection_masks,
instance_boundaries=detection_boundaries,
keypoints=detection_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=max_num_predictions,
min_score_thresh=min_score_thresh,
agnostic_mode=agnostic_mode,
skip_scores=skip_scores,
skip_labels=skip_labels)
if export_dir:
if keep_image_id_for_visualization_export and result_dict[fields.
InputDataFields()
.key]:
export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
tag, result_dict[fields.InputDataFields().key]))
else:
export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
vis_utils.save_image_array_as_png(image, export_path)
summary = tf.Summary(value=[
tf.Summary.Value(
tag=tag,
image=tf.Summary.Image(
encoded_image_string=vis_utils.encode_image_array_as_png_str(
image)))
])
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
summary_writer.add_summary(summary, global_step)
tf.logging.info('Detection visualizations written to summary with tag %s.',
tag)
def _run_checkpoint_once(tensor_dict,
evaluators=None,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None):
"""Evaluates metrics defined in evaluators and returns summaries.
This function loads the latest checkpoint in checkpoint_dirs and evaluates
all metrics defined in evaluators. The metrics are processed in batch by the
batch_processor.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
evaluators: a list of object of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
batch_processor: a function taking four arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
3. batch_index: an integer representing the index of the batch amongst
all batches
4. counters: a dictionary of 'success' and 'skipped' counters that is
updated in place
By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
To skip an image, it suffices to return an empty dictionary in place of
result_dict.
checkpoint_dirs: list of directories to load into an EnsembleModel. If it
has only one directory, EnsembleModel will not be used --
a DetectionModel
will be instantiated directly. Not used if restore_fn is set.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: None, or a function that takes a tf.Session object and correctly
restores all necessary variables from the correct checkpoint file. If
None, attempts to restore from the first directory in checkpoint_dirs.
num_batches: the number of batches to use for evaluation.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
Returns:
global_step: the count of global steps.
all_evaluator_metrics: A dictionary containing metric names and values.
Raises:
ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
one element.
ValueError: if save_graph is True and save_graph_dir is not defined.
"""
if save_graph and not save_graph_dir:
raise ValueError('`save_graph_dir` must be defined.')
sess = tf.Session(master, graph=tf.get_default_graph())
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
if restore_fn:
restore_fn(sess)
else:
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, checkpoint_file)
if save_graph:
tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
counters = {'skipped': 0, 'success': 0}
aggregate_result_losses_dict = collections.defaultdict(list)
with tf.contrib.slim.queues.QueueRunners(sess):
try:
for batch in range(int(num_batches)):
if (batch + 1) % 100 == 0:
tf.logging.info('Running eval ops batch %d/%d', batch + 1,
num_batches)
if not batch_processor:
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict,
losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
tf.logging.info('Skipping image')
counters['skipped'] += 1
result_dict = {}
else:
result_dict, result_losses_dict = batch_processor(
tensor_dict, sess, batch, counters, losses_dict=losses_dict)
if not result_dict:
continue
for key, value in iter(result_losses_dict.items()):
aggregate_result_losses_dict[key].append(value)
for evaluator in evaluators:
# TODO(b/65130867): Use image_id tensor once we fix the input data
# decoders to return correct image_id.
# TODO(akuznetsa): result_dict contains batches of images, while
# add_single_ground_truth_image_info expects a single image. Fix
if (isinstance(result_dict, dict) and
fields.InputDataFields.key in result_dict and
result_dict[fields.InputDataFields.key]):
image_id = result_dict[fields.InputDataFields.key]
else:
image_id = batch
evaluator.add_single_ground_truth_image_info(
image_id=image_id, groundtruth_dict=result_dict)
evaluator.add_single_detected_image_info(
image_id=image_id, detections_dict=result_dict)
tf.logging.info('Running eval batches done.')
except tf.errors.OutOfRangeError:
tf.logging.info('Done evaluating -- epoch limit reached')
finally:
# When done, ask the threads to stop.
tf.logging.info('# success: %d', counters['success'])
tf.logging.info('# skipped: %d', counters['skipped'])
all_evaluator_metrics = {}
if eval_export_path:
for evaluator in evaluators:
if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
tf.logging.info('Started dumping to json file.')
evaluator.dump_detections_to_json_file(
json_output_path=eval_export_path)
tf.logging.info('Finished dumping to json file.')
for evaluator in evaluators:
metrics = evaluator.evaluate()
evaluator.clear()
if any(key in all_evaluator_metrics for key in metrics):
raise ValueError('Metric names between evaluators must not collide.')
all_evaluator_metrics.update(metrics)
global_step = tf.train.global_step(sess, tf.train.get_global_step())
for key, value in iter(aggregate_result_losses_dict.items()):
all_evaluator_metrics['Losses/' + key] = np.mean(value)
sess.close()
return (global_step, all_evaluator_metrics)
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
summary_dir,
evaluators,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
eval_interval_secs=120,
max_number_of_evaluations=None,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None):
"""Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.
This function repeatedly loads a checkpoint and evaluates a desired
set of tensors (provided by tensor_dict) and hands the resulting numpy
arrays to a function result_processor which can be used to further
process/save/visualize the results.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
summary_dir: a directory to write metrics summaries.
evaluators: a list of object of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
batch_processor: a function taking three arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
3. batch_index: an integer representing the index of the batch amongst
all batches
By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
checkpoint_dirs: list of directories to load into a DetectionModel or an
EnsembleModel if restore_fn isn't set. Also used to determine when to run
next evaluation. Must have at least one element.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: a function that takes a tf.Session object and correctly restores
all necessary variables from the correct checkpoint file.
num_batches: the number of batches to use for evaluation.
eval_interval_secs: the number of seconds between each evaluation run.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as None the evaluation continues indefinitely.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
save_graph_dir: where to save on disk the Tensorflow graph. If store_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
Returns:
metrics: A dictionary containing metric names and values in the latest
evaluation.
Raises:
ValueError: if max_num_of_evaluations is not None or a positive number.
ValueError: if checkpoint_dirs doesn't have at least one element.
"""
if max_number_of_evaluations and max_number_of_evaluations <= 0:
raise ValueError(
'`number_of_steps` must be either None or a positive number.')
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
last_evaluated_model_path = None
number_of_evaluations = 0
while True:
start = time.time()
tf.logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
if not model_path:
tf.logging.info('No model found in %s. Will try again in %d seconds',
checkpoint_dirs[0], eval_interval_secs)
elif model_path == last_evaluated_model_path:
tf.logging.info('Found already evaluated checkpoint. Will try again in '
'%d seconds', eval_interval_secs)
else:
last_evaluated_model_path = model_path
global_step, metrics = _run_checkpoint_once(
tensor_dict,
evaluators,
batch_processor,
checkpoint_dirs,
variables_to_restore,
restore_fn,
num_batches,
master,
save_graph,
save_graph_dir,
losses_dict=losses_dict,
eval_export_path=eval_export_path)
write_metrics(metrics, global_step, summary_dir)
number_of_evaluations += 1
if (max_number_of_evaluations and
number_of_evaluations >= max_number_of_evaluations):
tf.logging.info('Finished evaluation!')
break
time_to_next_eval = start + eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
return metrics
def _scale_box_to_absolute(args):
boxes, image_shape = args
return box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()
def _resize_detection_masks(args):
|
def _resize_groundtruth_masks(args):
mask, image_shape = args
mask = tf.expand_dims(mask, 3)
mask = tf.image.resize_images(
mask,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(mask, 3), tf.uint8)
def _scale_keypoint_to_absolute(args):
keypoints, image_shape = args
return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
key,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
image: A single 4D uint8 image tensor of shape [1, H, W, C].
key: A single string tensor identifying the image.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
Returns:
A dictionary with:
'original_image': A [1, H, W, C] uint8 image tensor.
'key': A string tensor with image identifier.
'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [max_detections] float32 tensor of scores.
'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
'detection_masks': [max_detections, H, W] float32 tensor of binarized
masks, reframed to full image masks.
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
(Optional)
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
"""
if groundtruth:
max_gt_boxes = tf.shape(
groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
for gt_key in groundtruth:
# expand groundtruth dict along the batch dimension.
groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
for detection_key in detections:
detections[detection_key] = tf.expand_dims(
detections[detection_key][0], axis=0)
batched_output_dict = result_dict_for_batched_example(
image,
tf.expand_dims(key, 0),
detections,
groundtruth,
class_agnostic,
scale_to_absolute,
max_gt_boxes=max_gt_boxes)
exclude_keys = [
fields.InputDataFields.original_image,
fields.DetectionResultFields.num_detections,
fields.InputDataFields.num_groundtruth_boxes
]
output_dict = {
fields.InputDataFields.original_image:
batched_output_dict[fields.InputDataFields.original_image]
}
for key in batched_output_dict:
# remove the batch dimension.
if key not in exclude_keys:
output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
return output_dict
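# Editor's sketch: typical call site for the function above inside an eval
# graph. `image`, `key`, `detections` and `groundtruth_dict` are assumed to be
# produced elsewhere (e.g. by the model's postprocess() and the input decoder).
#
#   eval_dict = result_dict_for_single_example(
#       image, key, detections,
#       groundtruth=groundtruth_dict,
#       class_agnostic=False,
#       scale_to_absolute=True)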
def result_dict_for_batched_example(images,
keys,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False,
original_image_spatial_shapes=None,
true_image_shapes=None,
max_gt_boxes=None):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
keys: A [batch_size] string tensor with image identifier.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
of boxes, in normalized coordinates.
'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
1-indexed classes.
'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
bbox area. (Optional)
'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance
masks (Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
used to resize the image. When set to None, the image size is retained.
true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
containing the size of the unpadded original_image.
max_gt_boxes: [batch_size] tensor representing the maximum number of
groundtruth boxes to pad.
Returns:
A dictionary with:
'original_image': A [batch_size, H, W, C] uint8 image tensor.
'original_image_spatial_shape': A [batch_size, 2] tensor containing the
original image sizes.
'true_image_shape': A [batch_size, 3] tensor containing the size of
the unpadded original_image.
'key': A [batch_size] string tensor with image identifier.
'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
in normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [batch_size, max_detections] float32 tensor of scores.
'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
classes.
'detection_masks': [batch_size, max_detections, H, W] float32 tensor of
binarized masks, reframed to full image masks.
'num_detections': [batch_size] int64 tensor containing number of valid
detections.
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes. (Optional)
'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
area. (Optional)
'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance masks
(Optional).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
of groundtruth boxes per image.
Raises:
ValueError: if original_image_spatial_shape is not 2D int32 tensor of shape
[2].
ValueError: if true_image_shapes is not 2D int32 tensor of shape
[3].
"""
label_id_offset = 1 # Applying label id offset (b/63711816)
input_data_fields = fields.InputDataFields
if original_image_spatial_shapes is None:
original_image_spatial_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:3], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
if (len(original_image_spatial_shapes.shape) != 2 or
original_image_spatial_shapes.shape[1] != 2):
raise ValueError(
'`original_image_spatial_shape` should be a 2D tensor of shape '
'[batch_size, 2].')
if true_image_shapes is None:
true_image_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:4], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
if (len(true_image_shapes.shape) != 2
or true_image_shapes.shape[1] != 3):
raise ValueError('`true_image_shapes` should be a 2D tensor of '
'shape [batch_size, 3].')
output_dict = {
input_data_fields.original_image:
images,
input_data_fields.key:
keys,
input_data_fields.original_image_spatial_shape: (
original_image_spatial_shapes),
input_data_fields.true_image_shape:
true_image_shapes
}
detection_fields = fields.DetectionResultFields
detection_boxes = detections[detection_fields.detection_boxes]
detection_scores = detections[detection_fields.detection_scores]
num_detections = tf.to_int32(detections[detection_fields.num_detections])
if class_agnostic:
detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
else:
detection_classes = (
tf.to_int64(detections[detection_fields.detection_classes]) +
label_id_offset)
if scale_to_absolute:
output_dict[detection_fields.detection_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[detection_boxes, original_image_spatial_shapes],
dtype=tf.float32))
else:
output_dict[detection_fields.detection_boxes] = detection_boxes
output_dict[detection_fields.detection_classes] = detection_classes
output_dict[detection_fields.detection_scores] = detection_scores
output_dict[detection_fields.num_detections] = num_detections
if detection_fields.detection_masks in detections:
detection_masks = detections[detection_fields.detection_masks]
# TODO(rathodv): This should be done in model's postprocess
# function ideally.
output_dict[detection_fields.detection_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_detection_masks,
elems=[detection_boxes, detection_masks,
original_image_spatial_shapes],
dtype=tf.uint8))
if detection_fields.detection_keypoints in detections:
detection_keypoints = detections[detection_fields.detection_keypoints]
output_dict[detection_fields.detection_keypoints] = detection_keypoints
if scale_to_absolute:
output_dict[detection_fields.detection_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[detection_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
if groundtruth:
if max_gt_boxes is None:
if input_data_fields.num_groundtruth_boxes in groundtruth:
max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
else:
raise ValueError(
'max_gt_boxes must be provided when processing batched examples.')
if input_data_fields.groundtruth_instance_masks in groundtruth:
masks = groundtruth[input_data_fields.groundtruth_instance_masks]
groundtruth[input_data_fields.groundtruth_instance_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_groundtruth_masks,
elems=[masks, original_image_spatial_shapes],
dtype=tf.uint8))
output_dict.update(groundtruth)
if scale_to_absolute:
groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
output_dict[input_data_fields.groundtruth_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[groundtruth_boxes, original_image_spatial_shapes],
dtype=tf.float32))
# For class-agnostic models, groundtruth classes all become 1.
if class_agnostic:
groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
Returns:
An list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
evaluator_options = evaluator_options or {}
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key
in evaluator_options else {})
evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
categories,
**kwargs_dict))
return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
categories,
eval_dict):
"""Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
eval_dict: An evaluation dictionary, returned from
result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can be
used as eval metric ops in tf.EstimatorSpec.
"""
eval_metric_ops = {}
evaluator_options = evaluator_options_from_eval_config(eval_config)
evaluators_list = get_evaluators(eval_config, categories, evaluator_options)
for evaluator in evaluators_list:
eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(
eval_dict))
return eval_metric_ops
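# Editor's sketch: wiring the returned ops into a tf.estimator.EstimatorSpec
# inside a model_fn. `eval_config`, `categories`, `eval_dict` and `total_loss`
# are assumed to be built elsewhere.
#
#   eval_metric_ops = get_eval_metric_ops_for_evaluators(
#       eval_config, categories, eval_dict)
#   spec = tf.estimator.EstimatorSpec(
#       mode=tf.estimator.ModeKeys.EVAL,
#       loss=total_loss,
#       eval_metric_ops=eval_metric_ops)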
def evaluator_options_from_eval_config(eval_config):
"""Produces a dictionary of evaluation options for each eval metric.
Args:
eval_config: An `eval_pb2.EvalConfig`.
Returns:
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
"""
eval_metric_fn_keys = eval_config.metrics_set
evaluator_options = {}
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key in ('coco_detection_metrics', 'coco_mask_metrics'):
evaluator_options[eval_metric_fn_key] = {
'include_metrics_per_category': (
eval_config.include_metrics_per_category)
}
return evaluator_options
| detection_boxes, detection_masks, image_shape = args
detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image_shape[0], image_shape[1])
return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8) |
Page.tsx | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*/
import {MenuProvider} from 'components/useMenu';
import * as React from 'react';
import {Nav} from './Nav';
import {RouteItem, SidebarContext} from './useRouteMeta';
import {Sidebar} from './Sidebar';
import {Footer} from './Footer';
import SocialBanner from '../SocialBanner'; | interface PageProps {
children: React.ReactNode;
routeTree: RouteItem;
}
export function Page({routeTree, children}: PageProps) {
return (
<>
<SocialBanner />
<MenuProvider>
<SidebarContext.Provider value={routeTree}>
<div className="h-auto lg:h-screen flex flex-row">
<div className="no-bg-scrollbar h-auto lg:h-full lg:overflow-y-scroll fixed flex flex-row lg:flex-col py-0 top-16 sm:top-10 left-0 right-0 lg:max-w-xs w-full shadow lg:shadow-none z-50">
<Nav />
<Sidebar />
</div>
<div className="flex flex-1 w-full h-full self-stretch">
<div className="w-full min-w-0">
<main className="flex flex-1 self-stretch mt-10 flex-col items-end justify-around">
{children}
<Footer />
</main>
</div>
</div>
</div>
</SidebarContext.Provider>
</MenuProvider>
</>
);
} | |
oc.js | /*
Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved. | */
CKEDITOR.plugins.setLang( 'sourcearea', 'oc', {
toolbar: 'Font'
} ); | For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license |
LongestPassword_test.go | package LongestPassword
import "testing"
func | (t *testing.T) {
type args struct {
S string
}
tests := []struct {
name string
args args
want int
}{
{"1", args{"test 5 a0A pass007 ?xy1"}, 7},
{"2", args{"a b 11 2?3"}, -1},
{"3", args{"pass0, logestPass001 111aa c 0"}, 13},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := Solution(tt.args.S); got != tt.want {
t.Errorf("Solution() = %v, want %v", got, tt.want)
}
})
}
} | TestSolution |
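// Editor's sketch (not part of the original package): one possible Solution
// consistent with the test cases above, assuming the usual rule that a valid
// password is purely alphanumeric with an even number of letters and an odd
// number of digits. It would additionally need the strings and unicode imports.
//
//	func Solution(S string) int {
//		best := -1
//		for _, w := range strings.Fields(S) {
//			letters, digits, ok := 0, 0, true
//			for _, r := range w {
//				switch {
//				case unicode.IsLetter(r):
//					letters++
//				case unicode.IsDigit(r):
//					digits++
//				default:
//					ok = false
//				}
//			}
//			if ok && letters%2 == 0 && digits%2 == 1 && len(w) > best {
//				best = len(w)
//			}
//		}
//		return best
//	}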
rapid_resynchronization_request.rs | use std::fmt;
use std::io::{Read, Write};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use util::Error;
use super::errors::*;
use super::header::*;
use crate::util::get_padding;
#[cfg(test)]
mod rapid_resynchronization_request_test;
// The RapidResynchronizationRequest packet informs the encoder about the loss of an undefined amount of coded video data belonging to one or more pictures
#[derive(Debug, PartialEq, Default, Clone)]
pub struct RapidResynchronizationRequest {
// SSRC of sender
pub sender_ssrc: u32,
// SSRC of the media source
pub media_ssrc: u32,
}
const RRR_LENGTH: usize = 2;
const RRR_HEADER_LENGTH: usize = SSRC_LENGTH * 2;
const RRR_MEDIA_OFFSET: usize = 4;
impl fmt::Display for RapidResynchronizationRequest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"RapidResynchronizationRequest {:x} {:x}",
self.sender_ssrc, self.media_ssrc
)
}
}
impl RapidResynchronizationRequest {
fn size(&self) -> usize {
HEADER_LENGTH + RRR_HEADER_LENGTH
}
// Unmarshal decodes the ReceptionReport from binary
pub fn unmarshal<R: Read>(reader: &mut R) -> Result<Self, Error> {
let header = Header::unmarshal(reader)?;
if header.packet_type != PacketType::TransportSpecificFeedback || header.count != FORMAT_RRR
{
return Err(ERR_WRONG_TYPE.clone());
}
let sender_ssrc = reader.read_u32::<BigEndian>()?;
let media_ssrc = reader.read_u32::<BigEndian>()?;
Ok(RapidResynchronizationRequest {
sender_ssrc,
media_ssrc,
})
}
// Header returns the Header associated with this packet.
pub fn header(&self) -> Header {
let l = self.size() + get_padding(self.size());
Header {
padding: get_padding(self.size()) != 0,
count: FORMAT_RRR,
packet_type: PacketType::TransportSpecificFeedback,
length: ((l / 4) - 1) as u16,
}
}
// destination_ssrc returns an array of SSRC values that this packet refers to.
pub fn destination_ssrc(&self) -> Vec<u32> |
// Marshal encodes the packet in binary.
pub fn marshal<W: Write>(&self, writer: &mut W) -> Result<(), Error> {
self.header().marshal(writer)?;
writer.write_u32::<BigEndian>(self.sender_ssrc)?;
writer.write_u32::<BigEndian>(self.media_ssrc)?;
Ok(())
}
}
| {
vec![self.media_ssrc]
} |
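// Editor's sketch: a marshal/unmarshal round trip using only the types above.
// The SSRC values are arbitrary placeholders.
//
//     let pkt = RapidResynchronizationRequest {
//         sender_ssrc: 0x902f_9e2e,
//         media_ssrc: 0x9026_32e1,
//     };
//     let mut buf = vec![];
//     pkt.marshal(&mut buf)?;
//     let decoded = RapidResynchronizationRequest::unmarshal(&mut buf.as_slice())?;
//     assert_eq!(decoded, pkt);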
main.go | // Copyright © 2021 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"github.com/spf13/viper"
"github.com/umarcor/cobra"
)
func main() { |
var (
// Used for flags.
cfgFile string
userLicense string
rootCmd = &cobra.Command{
Use: "cobra",
Short: "A generator for Cobra based Applications",
Long: `Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
}
)
func init() {
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
rootCmd.PersistentFlags().Bool("viper", false, "use Viper for configuration")
cobra.CheckErr(viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")))
cobra.CheckErr(viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")))
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
viper.SetDefault("license", "none")
rootCmd.AddCommand(addCmd)
rootCmd.AddCommand(initCmd)
}
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := os.UserHomeDir()
cobra.CheckErr(err)
// Search config in home directory with name ".cobra" (without extension).
viper.AddConfigPath(home)
viper.SetConfigType("yaml")
viper.SetConfigName(".cobra")
}
viper.AutomaticEnv()
if err := viper.ReadInConfig(); err == nil {
fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed())
}
}
|
err := rootCmd.Execute()
if err != nil {
os.Exit(1)
}
}
|
posts.rs | use crate::blogs::Manifest;
use comrak::ComrakOptions;
use regex::Regex;
use serde_derive::{Deserialize, Serialize};
use std::error::Error;
use std::path::{Path, PathBuf};
use time::{Duration, Tm};
#[derive(Debug, PartialEq, Deserialize)]
struct YamlHeader {
title: String,
author: String,
#[serde(default)]
release: bool,
team: Option<String>,
layout: String,
}
#[derive(Debug, Clone, Serialize)]
pub(crate) struct Post {
pub(crate) filename: String,
pub(crate) layout: String,
pub(crate) title: String,
pub(crate) author: String,
pub(crate) year: u32,
pub(crate) show_year: bool,
pub(crate) month: u32,
pub(crate) day: u32,
pub(crate) contents: String,
pub(crate) url: String,
pub(crate) published: String,
pub(crate) updated: String,
pub(crate) release: bool,
pub(crate) has_team: bool,
pub(crate) team: String,
pub(crate) team_url: String,
}
impl Post {
pub(crate) fn open(path: &Path, manifest: &Manifest) -> Result<Self, Box<dyn Error>> {
// yeah this might blow up, but it won't
let filename = path.file_name().unwrap().to_str().unwrap();
// we need to get the metadata out of the url
let mut split = filename.splitn(4, "-");
// we do some unwraps because these need to be valid
let year = split.next().unwrap().parse::<u32>().unwrap();
let month = split.next().unwrap().parse::<u32>().unwrap();
let day = split.next().unwrap().parse::<u32>().unwrap();
let filename = split.next().unwrap().to_string();
let contents = std::fs::read_to_string(path)?;
// yaml headers.... we know the first four bytes of each file are "---\n"
// so we need to find the end. we need the fours to adjust for those first bytes
let end_of_yaml = contents[4..].find("---").unwrap() + 4;
let yaml = &contents[..end_of_yaml];
let YamlHeader {
author,
title,
release,
team: team_string,
layout,
} = serde_yaml::from_str(yaml)?;
// next, the contents. we add + to get rid of the final "---\n\n"
let options = ComrakOptions {
ext_header_ids: Some(String::new()),
unsafe_: true, // Allow rendering of raw HTML
..ComrakOptions::default()
};
let contents = comrak::markdown_to_html(&contents[end_of_yaml + 5..], &options);
// finally, the url.
let mut url = PathBuf::from(&*filename);
url.set_extension("html");
// this is fine
let url = format!("{}/{}/{}/{}", year, month, day, url.to_str().unwrap());
let published = build_post_time(year, month, day, 0);
let updated = published.clone();
// validate for now that the layout is specified as "post"
match &*layout {
"post" => (),
_ => panic!(
"blog post at path `{}` should have layout `post`",
path.display()
),
};
// Enforce extra conditions
if manifest.requires_team && team_string.is_none() {
panic!("blog post at path `{}` lacks team metadata", path.display());
}
// If they supplied team, it should look like `team-text <team-url>`
let (team, team_url) = match team_string {
Some(s) => {
lazy_static::lazy_static! {
static ref R: Regex = Regex::new(r"(?P<name>[^<]*) <(?P<url>[^>]+)>").unwrap();
}
let captures = match R.captures(&s) {
Some(c) => c,
None => panic!(
"team from path `{}` should have format `$name <$url>`",
path.display()
),
};
(
Some(captures["name"].to_string()),
Some(captures["url"].to_string()),
)
}
None => (None, None),
};
Ok(Self {
filename,
title,
author,
year,
show_year: false,
month,
day,
contents,
url,
published,
updated,
release,
layout,
has_team: team.is_some(),
team: team.unwrap_or_default(),
team_url: team_url.unwrap_or_default(),
})
}
pub fn | (&mut self, hour: u32) {
self.updated = build_post_time(self.year, self.month, self.day, hour);
}
}
fn build_post_time(year: u32, month: u32, day: u32, seconds: u32) -> String {
let seconds = Duration::seconds(seconds as i64);
if seconds >= Duration::days(1) {
panic!("seconds must be less then a day")
};
// build the time. this is only approximate, which is fine.
let mut time = Tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: day as i32,
tm_mon: (month as i32) - 1, // 0-11 not 1-12
tm_year: (year as i32) - 1900, // from the year 1900, not the actual year
// these next two fields are wrong but we never use them to generate our times
tm_wday: 1,
tm_yday: 1,
tm_isdst: 0,
tm_utcoff: 0,
tm_nsec: 0,
};
time = time + seconds;
time.rfc3339().to_string()
}
| set_updated |
list-aquisitions.client.controller.tests.js | (function () {
'use strict';
describe('Aquisitions List Controller Tests', function () {
// Initialize global variables
var AquisitionsListController,
$scope,
$httpBackend,
$state,
Authentication,
AquisitionsService,
mockAquisition;
// The $resource service augments the response object with methods for updating and deleting the resource.
// If we were to use the standard toEqual matcher, our tests would fail because the test values would not match
// the responses exactly. To solve the problem, we define a new toEqualData Jasmine matcher.
// When the toEqualData matcher compares two objects, it takes only object properties into
// account and ignores methods.
beforeEach(function () {
jasmine.addMatchers({
toEqualData: function (util, customEqualityTesters) {
return {
compare: function (actual, expected) {
return {
pass: angular.equals(actual, expected)
};
}
};
}
});
});
// Then we can start by loading the main application module
beforeEach(module(ApplicationConfiguration.applicationModuleName));
// The injector ignores leading and trailing underscores here (i.e. _$httpBackend_).
// This allows us to inject a service but then attach it to a variable
// with the same name as the service.
beforeEach(inject(function ($controller, $rootScope, _$state_, _$httpBackend_, _Authentication_, _AquisitionsService_) {
// Set a new global scope
$scope = $rootScope.$new();
// Point global variables to injected services
$httpBackend = _$httpBackend_;
$state = _$state_;
Authentication = _Authentication_;
AquisitionsService = _AquisitionsService_;
// create mock article
mockAquisition = new AquisitionsService({
_id: '525a8422f6d0f87f0e407a33',
name: 'Aquisition Name'
});
// Mock logged in user
Authentication.user = {
roles: ['user']
};
// Initialize the Aquisitions List controller.
AquisitionsListController = $controller('AquisitionsListController as vm', {
$scope: $scope
});
// Spy on state go
spyOn($state, 'go'); |
beforeEach(function () {
mockAquisitionList = [mockAquisition, mockAquisition];
});
it('should send a GET request and return all Aquisitions', inject(function (AquisitionsService) {
// Set POST response
$httpBackend.expectGET('api/aquisitions').respond(mockAquisitionList);
$httpBackend.flush();
// Test form inputs are reset
expect($scope.vm.aquisitions.length).toEqual(2);
expect($scope.vm.aquisitions[0]).toEqual(mockAquisition);
expect($scope.vm.aquisitions[1]).toEqual(mockAquisition);
}));
});
});
}()); | }));
describe('Instantiate', function () {
var mockAquisitionList; |
bcm2835.go | // BCM2835 SoC support
// https://github.com/f-secure-foundry/tamago
//
// Copyright (c) the bcm2835 package authors
//
// Use of this source code is governed by the license
// that can be found in the LICENSE file.
// Package bcm2835 provides support to Go bare metal unikernels written using
// the TamaGo framework on BCM2835/BCM2836 SoCs.
//
// This package is only meant to be used with `GOOS=tamago GOARCH=arm` as
// supported by the TamaGo framework for bare metal Go on ARM SoCs, see
// https://github.com/f-secure-foundry/tamago.
package bcm2835
import (
_ "unsafe"
"github.com/f-secure-foundry/tamago/arm"
)
// nanos - should be same value as arm/timer.go refFreq
const refFreq int64 = 1000000000
// DRAM_FLAG_NOCACHE disables caching by setting to high bits
const DRAM_FLAG_NOCACHE = 0xC0000000
// peripheralBase represents the (remapped) peripheral base address, it varies
// by model and it is therefore initialized (see Init) by individual board
// packages.
var peripheralBase uint32
// ARM processor instance
var ARM = &arm.CPU{}
//go:linkname ramStackOffset runtime.ramStackOffset
var ramStackOffset uint32 = 0x100000 // 1 MB
//go:linkname nanotime1 runtime.nanotime1
func nanotime1() int64 {
return int64(read_systimer() * ARM.TimerMultiplier)
}
// Init takes care of the lower level SoC initialization triggered early in
// runtime setup.
func Init(base uint32) {
peripheralBase = base
ARM.Init()
ARM.EnableVFP()
// required when booting in SDP mode
ARM.EnableSMP()
// MMU initialization is required to take advantage of data cache
ARM.InitMMU()
ARM.CacheEnable()
ARM.TimerMultiplier = refFreq / SysTimerFreq
ARM.TimerFn = read_systimer
// initialize serial console
MiniUART.Init()
}
| // boards map 'bus addresses' to board specific base addresses but with
// consistent layout otherwise.
func PeripheralAddress(offset uint32) uint32 {
return peripheralBase + offset
} | // PeripheralAddress returns the absolute address for a peripheral. The Pi |
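// Editor's sketch (not part of the original package): how a board support
// package might drive this SoC package. The base and offset values are
// illustrative placeholders rather than values taken from a datasheet.
//
//	func boardInit() {
//		bcm2835.Init(0x3f000000)                        // remapped peripheral base
//		uartBase := bcm2835.PeripheralAddress(0x201000) // bus offset -> absolute address
//		_ = uartBase
//	}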
catconfig.py | from enum import Enum
from typing import Any
from importlib import import_module
class ValidationError(Exception):
"""
Error class for validation failed
"""
def __init__(self, payload: dict):
"""
:param payload: dict of validation errors keyed by field
"""
self.payload = payload
def generate_err_msg(self, payload: dict, indent: int = 0) -> str:
"""
Generate human-friendly error message
example output:
key1: Error message
key2:
inner_key: error message
inner_key2:
key3: error message
"""
make_indent = ''.join([' ' for i in range(0, indent)])
previous_text = ''
for (key, errors) in payload.items():
for err in errors:
if isinstance(err, dict):
previous_text += '{}{}:\n'.format(make_indent, key)
previous_text += self.generate_err_msg(err, indent+1)
else:
previous_text += '{}{}: {}\n'.format(make_indent, key, err)
return previous_text
@property
def message(self):
return self.generate_err_msg(self.payload)
class CatConfig:
def __init__(self, format: str = 'json', validator_schema: dict = None, data: dict = None):
"""
:param format: Format of data used for read (json/toml/yaml)
:param validator_schema: Schema for validator (see https://docs.python-cerberus.org/en/stable/usage.html)
:param data: Config data
"""
self._parser = None
self._data = {}
if not data == None:
self._data = data
self._validator_schema = validator_schema
if format:
self._import_parser(format)
self._config = {}
def _import_parser(self, parser_name: str):
if parser_name == 'json':
self._parser = import_module('json')
elif parser_name == 'toml':
try:
self._parser = import_module('toml')
except ImportError:
raise Exception(
"CatConfig needs toml parser to work, "
"please add `toml` module to your project")
elif parser_name == 'yaml':
try:
self._parser = import_module('yaml')
# it works! I love Python!
self._parser.loads = self._parser.load
except ImportError:
raise Exception(
"CatConfig needs yaml parser to work, "
"please add `pyyaml` module to your project\n")
else:
raise Exception('Unsupported parser type')
def load_from_file(self, file_path: str, format: 'str' = None) -> None:
|
def load_from_string(self, data: str, format: 'str' = None) -> None:
"""
Update config from string and validate
:param data: target data
:param format: format of config file (default: json)
"""
if format:
self._import_parser(format)
return self.load(self._parser.loads(data))
def load(self, data: dict) -> None:
"""
Update config from param `data`
:param data: data
"""
if self._validator_schema:
self.validate(data)
self._data.update(data)
def validate(self, data: str) -> None:
"""
Validate data
:param data: config data
"""
try:
cerberus = import_module('cerberus')
except ImportError:
raise Exception('CatConfig need `cerberus` module to make validation work normally, '
'please add `cerberus` module to your project.')
v = cerberus.Validator(self._validator_schema)
if not v.validate(data):
raise ValidationError(v.errors)
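# Illustrative sketch (the schema and field names are assumptions, not from the
# original source): with a cerberus schema passed as `validator_schema`,
# load() raises ValidationError when the data does not conform.
#
#   config = CatConfig(validator_schema={'port': {'type': 'integer', 'min': 1}})
#   config.load({'port': 8080})   # accepted
#   config.load({'port': 0})      # raises ValidationError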
def update(self, data: dict) -> None:
"""
Update config item
:param data: data to be updated
"""
self._data.update(data)
def set(self, key: str, value: str) -> None:
"""
Set config value
:param key: key of config item
:param value: value of config item
"""
return self.update({key: value})
def get(self, key: str=None) -> Any:
"""
Get item by key
It will return the whole config object if param `key` is None
:param key: key
"""
if key is None:
return self._data
if key in self._data:
data = self._data.get(key)
if isinstance(data, dict):
return CatConfig(data=data)
elif isinstance(data, list):
return [CatConfig(data=x) for x in data]
else:
return data
return CatConfig()
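# Illustrative sketch (keys are assumptions): nested dicts come back wrapped in
# CatConfig, so chained lookups work and missing keys yield a falsy CatConfig().
#
#   cfg = CatConfig(data={'db': {'host': 'localhost'}})
#   cfg.get('db').get('host')    # 'localhost'
#   cfg['db']['host']            # 'localhost'
#   bool(cfg.get('missing'))     # False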
def __getitem__(self, key: str) -> Any:
return self.get(key)
def __bool__(self):
"""
Return False if `self._data` has no item
"""
return len(self._data) != 0
def __getattr__(self, name: str) -> Any:
return self.__getitem__(name)
def __eq__(self, b):
"""
Make sure a CatConfig object without any data compares equal to None
"""
if b is None:
if len(self._data.keys()) == 0:
return True
return self._data == b
def __str__(self):
if self._data == {}:
return 'None'
return str(self._data) | """
Update config from file
:param file_path: config file path
:param format: format of config file (default: json)
"""
with open(file_path, 'r') as f:
self.load_from_string(f.read(), format) |
phases_test.go | package build_test
import (
"context"
"io/ioutil"
"math/rand"
"os"
"testing"
"time"
"github.com/sclevine/spec"
"github.com/docker/docker/api/types/container"
"github.com/heroku/color"
"github.com/sclevine/spec/report"
"github.com/buildpacks/pack/internal/api"
"github.com/buildpacks/pack/internal/build"
"github.com/buildpacks/pack/internal/build/fakes"
h "github.com/buildpacks/pack/testhelpers"
)
func TestPhases(t *testing.T) {
rand.Seed(time.Now().UTC().UnixNano())
color.Disable(true)
defer color.Disable(false)
spec.Run(t, "phases", testPhases, spec.Report(report.Terminal{}), spec.Sequential())
}
func | (t *testing.T, when spec.G, it spec.S) {
// Avoid contaminating tests with existing docker configuration.
// GGCR resolves the default keychain by inspecting DOCKER_CONFIG - this is used by the Analyze step
// when constructing the auth config (see `auth.BuildEnvVar` in phases.go).
var dockerConfigDir string
it.Before(func() {
var err error
dockerConfigDir, err = ioutil.TempDir("", "empty-docker-config-dir")
h.AssertNil(t, err)
h.AssertNil(t, os.Setenv("DOCKER_CONFIG", dockerConfigDir))
})
it.After(func() {
h.AssertNil(t, os.Unsetenv("DOCKER_CONFIG"))
h.AssertNil(t, os.RemoveAll(dockerConfigDir))
})
when("#Create", func() {
it("creates a phase and then run it", func() {
lifecycle := fakeLifecycle(t, false)
fakePhase := &fakes.FakePhase{}
fakePhaseFactory := fakes.NewFakePhaseFactory(fakes.WhichReturnsForNew(fakePhase))
err := lifecycle.Create(context.Background(), false, false, "test", "test", "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
h.AssertEq(t, fakePhase.CleanupCallCount, 1)
h.AssertEq(t, fakePhase.RunCallCount, 1)
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepoName := "some-repo-name"
expectedRunImage := "some-run-image"
err := verboseLifecycle.Create(context.Background(), false, false, expectedRunImage, "test", "test", expectedRepoName, "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "creator")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-log-level", "debug"},
[]string{"-run-image", expectedRunImage},
[]string{expectedRepoName},
)
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Create(context.Background(), false, false, "test", "test", "test", "test", expectedNetworkMode, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
when("clear cache", func() {
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Create(context.Background(), false, true, "test", "test", "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "creator")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-skip-restore"},
)
})
})
when("clear cache is false", func() {
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Create(context.Background(), false, false, "test", "test", "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "creator")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-cache-dir", "/cache"},
)
h.AssertSliceContains(t, configProvider.HostConfig().Binds, "/var/run/docker.sock:/var/run/docker.sock")
})
})
when("publish", func() {
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBinds := []string{"some-cache:/cache"}
err := lifecycle.Create(context.Background(), true, false, "test", "test", "some-cache", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBinds...)
})
it("configures the phase with root", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Create(context.Background(), true, false, "test", "test", "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
})
it("configures the phase with registry access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepos := "some-repo-name"
err := lifecycle.Create(context.Background(), true, false, "test", "test", "test", expectedRepos, "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.ContainerConfig().Env, "CNB_REGISTRY_AUTH={}")
})
})
when("publish is false", func() {
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Create(context.Background(), false, false, "test", "test", "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "creator")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-daemon"},
[]string{"-launch-cache", "/launch-cache"},
)
h.AssertSliceContains(t, configProvider.HostConfig().Binds, "/var/run/docker.sock:/var/run/docker.sock")
})
it("configures the phase with daemon access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Create(context.Background(), false, false, "test", "some-launch-cache", "some-cache", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
h.AssertSliceContains(t, configProvider.HostConfig().Binds, "/var/run/docker.sock:/var/run/docker.sock")
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBinds := []string{"some-cache:/cache", "some-launch-cache:/launch-cache"}
err := lifecycle.Create(context.Background(), false, false, "test", "some-launch-cache", "some-cache", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBinds...)
})
})
})
when("#Detect", func() {
it("creates a phase and then runs it", func() {
lifecycle := fakeLifecycle(t, false)
fakePhase := &fakes.FakePhase{}
fakePhaseFactory := fakes.NewFakePhaseFactory(fakes.WhichReturnsForNew(fakePhase))
err := lifecycle.Detect(context.Background(), "test", []string{}, fakePhaseFactory)
h.AssertNil(t, err)
h.AssertEq(t, fakePhase.CleanupCallCount, 1)
h.AssertEq(t, fakePhase.RunCallCount, 1)
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Detect(context.Background(), "test", []string{"test"}, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "detector")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-log-level", "debug"},
[]string{"-app", "/workspace"},
[]string{"-platform", "/platform"},
)
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Detect(context.Background(), expectedNetworkMode, []string{}, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBind := "some-mount-source:/some-mount-target"
err := lifecycle.Detect(context.Background(), "test", []string{expectedBind}, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBind)
})
})
when("#Analyze", func() {
it("creates a phase and then runs it", func() {
lifecycle := fakeLifecycle(t, false)
fakePhase := &fakes.FakePhase{}
fakePhaseFactory := fakes.NewFakePhaseFactory(fakes.WhichReturnsForNew(fakePhase))
err := lifecycle.Analyze(context.Background(), "test", "test", "test", false, false, fakePhaseFactory)
h.AssertNil(t, err)
h.AssertEq(t, fakePhase.CleanupCallCount, 1)
h.AssertEq(t, fakePhase.RunCallCount, 1)
})
when("clear cache", func() {
it("configures the phase with the expected arguments", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepoName := "some-repo-name"
err := lifecycle.Analyze(context.Background(), expectedRepoName, "test", "test", false, true, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "analyzer")
h.AssertSliceContains(t, configProvider.ContainerConfig().Cmd, "-skip-layers")
})
})
when("clear cache is false", func() {
it("configures the phase with the expected arguments", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepoName := "some-repo-name"
err := lifecycle.Analyze(context.Background(), expectedRepoName, "test", "test", false, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "analyzer")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-cache-dir", "/cache"},
)
})
})
when("publish", func() {
it("configures the phase with registry access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepos := "some-repo-name"
expectedNetworkMode := "some-network-mode"
err := lifecycle.Analyze(context.Background(), expectedRepos, "test", expectedNetworkMode, true, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.ContainerConfig().Env, "CNB_REGISTRY_AUTH={}")
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with root", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Analyze(context.Background(), "test", "test", "test", true, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepoName := "some-repo-name"
err := verboseLifecycle.Analyze(context.Background(), expectedRepoName, "test", "test", true, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "analyzer")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
//[]string{"-log-level", "debug"}, // TODO: fix [https://github.com/buildpacks/pack/issues/419].
[]string{"-layers", "/layers"},
[]string{expectedRepoName},
)
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBind := "some-cache:/cache"
err := lifecycle.Analyze(context.Background(), "test", "some-cache", "test", true, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBind)
})
})
when("publish is false", func() {
it("configures the phase with daemon access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Analyze(context.Background(), "test", "test", "test", false, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
h.AssertSliceContains(t, configProvider.HostConfig().Binds, "/var/run/docker.sock:/var/run/docker.sock")
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepoName := "some-repo-name"
err := verboseLifecycle.Analyze(context.Background(), expectedRepoName, "test", "test", false, true, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "analyzer")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-log-level", "debug"},
[]string{"-daemon"},
[]string{"-layers", "/layers"},
[]string{expectedRepoName},
)
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Analyze(context.Background(), "test", "test", expectedNetworkMode, false, false, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBind := "some-cache:/cache"
err := lifecycle.Analyze(context.Background(), "test", "some-cache", "test", false, true, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBind)
})
})
})
when("#Restore", func() {
it("creates a phase and then runs it", func() {
lifecycle := fakeLifecycle(t, false)
fakePhase := &fakes.FakePhase{}
fakePhaseFactory := fakes.NewFakePhaseFactory(fakes.WhichReturnsForNew(fakePhase))
err := lifecycle.Restore(context.Background(), "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
h.AssertEq(t, fakePhase.CleanupCallCount, 1)
h.AssertEq(t, fakePhase.RunCallCount, 1)
})
it("configures the phase with root access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Restore(context.Background(), "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Restore(context.Background(), "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "restorer")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-log-level", "debug"},
[]string{"-cache-dir", "/cache"},
[]string{"-layers", "/layers"},
)
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Restore(context.Background(), "test", expectedNetworkMode, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBind := "some-cache:/cache"
err := lifecycle.Restore(context.Background(), "some-cache", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBind)
})
})
when("#Build", func() {
it("creates a phase and then runs it", func() {
lifecycle := fakeLifecycle(t, false)
fakePhase := &fakes.FakePhase{}
fakePhaseFactory := fakes.NewFakePhaseFactory(fakes.WhichReturnsForNew(fakePhase))
err := lifecycle.Build(context.Background(), "test", []string{}, fakePhaseFactory)
h.AssertNil(t, err)
h.AssertEq(t, fakePhase.CleanupCallCount, 1)
h.AssertEq(t, fakePhase.RunCallCount, 1)
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Build(context.Background(), "test", []string{}, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "builder")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
//[]string{"-log-level", "debug"}, // TODO: fix [https://github.com/buildpacks/pack/issues/419].
[]string{"-layers", "/layers"},
[]string{"-app", "/workspace"},
[]string{"-platform", "/platform"},
)
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Build(context.Background(), expectedNetworkMode, []string{}, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBind := "some-mount-source:/some-mount-target"
err := lifecycle.Build(context.Background(), "test", []string{expectedBind}, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBind)
})
})
when("#Export", func() {
it("creates a phase and then runs it", func() {
lifecycle := fakeLifecycle(t, false)
fakePhase := &fakes.FakePhase{}
fakePhaseFactory := fakes.NewFakePhaseFactory(fakes.WhichReturnsForNew(fakePhase))
err := lifecycle.Export(context.Background(), "test", "test", false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
h.AssertEq(t, fakePhase.CleanupCallCount, 1)
h.AssertEq(t, fakePhase.RunCallCount, 1)
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepoName := "some-repo-name"
err := verboseLifecycle.Export(context.Background(), expectedRepoName, "test", false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "exporter")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-log-level", "debug"},
[]string{"-cache-dir", "/cache"},
[]string{"-layers", "/layers"},
[]string{"-app", "/workspace"},
[]string{expectedRepoName},
)
})
when("publish", func() {
it("configures the phase with registry access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRepos := []string{"some-repo-name", "some-run-image"}
err := lifecycle.Export(context.Background(), expectedRepos[0], expectedRepos[1], true, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.ContainerConfig().Env, "CNB_REGISTRY_AUTH={}")
})
it("configures the phase with root", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Export(context.Background(), "test", "test", true, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Export(context.Background(), "test", "test", true, "test", "test", expectedNetworkMode, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBind := "some-cache:/cache"
err := lifecycle.Export(context.Background(), "test", "test", true, "test", "some-cache", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBind)
})
})
when("publish is false", func() {
it("configures the phase with daemon access", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := lifecycle.Export(context.Background(), "test", "test", false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.ContainerConfig().User, "root")
h.AssertSliceContains(t, configProvider.HostConfig().Binds, "/var/run/docker.sock:/var/run/docker.sock")
})
it("configures the phase with the expected arguments", func() {
verboseLifecycle := fakeLifecycle(t, true)
fakePhaseFactory := fakes.NewFakePhaseFactory()
err := verboseLifecycle.Export(context.Background(), "test", "test", false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "exporter")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-daemon"},
[]string{"-launch-cache", "/launch-cache"},
)
})
it("configures the phase with the expected network mode", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedNetworkMode := "some-network-mode"
err := lifecycle.Export(context.Background(), "test", "test", false, "test", "test", expectedNetworkMode, fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.HostConfig().NetworkMode, container.NetworkMode(expectedNetworkMode))
})
it("configures the phase with binds", func() {
lifecycle := fakeLifecycle(t, false)
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedBinds := []string{"some-cache:/cache", "some-launch-cache:/launch-cache"}
err := lifecycle.Export(context.Background(), "test", "test", false, "some-launch-cache", "some-cache", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertSliceContains(t, configProvider.HostConfig().Binds, expectedBinds...)
})
})
when("platform api 0.2", func() {
it("uses -image", func() {
platformAPIVersion, err := api.NewVersion("0.2")
h.AssertNil(t, err)
fakeBuilder, err := fakes.NewFakeBuilder(fakes.WithPlatformVersion(platformAPIVersion))
h.AssertNil(t, err)
lifecycle := fakeLifecycle(t, false, fakes.WithBuilder(fakeBuilder))
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRunImage := "some-run-image"
err = lifecycle.Export(context.Background(), "test", expectedRunImage, false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "exporter")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-image", expectedRunImage},
)
})
})
when("platform api 0.3+", func() {
var (
fakeBuilder *fakes.FakeBuilder
err error
)
it.Before(func() {
platformAPIVersion, err := api.NewVersion("0.3")
h.AssertNil(t, err)
fakeBuilder, err = fakes.NewFakeBuilder(fakes.WithPlatformVersion(platformAPIVersion))
h.AssertNil(t, err)
})
it("uses -run-image instead of deprecated -image", func() {
lifecycle := fakeLifecycle(t, false, fakes.WithBuilder(fakeBuilder))
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedRunImage := "some-run-image"
err = lifecycle.Export(context.Background(), "test", expectedRunImage, false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertEq(t, configProvider.Name(), "exporter")
h.AssertIncludeAllExpectedPatterns(t,
configProvider.ContainerConfig().Cmd,
[]string{"-run-image", expectedRunImage},
)
})
it("configures the phase with default arguments", func() {
lifecycle := fakeLifecycle(t, true, fakes.WithBuilder(fakeBuilder), func(options *build.LifecycleOptions) {
options.DefaultProcessType = "test-process"
})
fakePhaseFactory := fakes.NewFakePhaseFactory()
expectedDefaultProc := []string{"-process-type", "test-process"}
err := lifecycle.Export(context.Background(), "test", "test", false, "test", "test", "test", fakePhaseFactory)
h.AssertNil(t, err)
configProvider := fakePhaseFactory.NewCalledWithProvider
h.AssertIncludeAllExpectedPatterns(t, configProvider.ContainerConfig().Cmd, expectedDefaultProc)
})
})
})
}
func fakeLifecycle(t *testing.T, logVerbose bool, ops ...func(*build.LifecycleOptions)) *build.Lifecycle {
lifecycle, err := fakes.NewFakeLifecycle(logVerbose, ops...)
h.AssertNil(t, err)
return lifecycle
}
| testPhases |
SearchChallengeView.js | /*
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
Ext.define('Ssp.view.tools.actionplan.SearchChallengeView', {
extend: 'Ext.form.Panel',
alias: 'widget.searchchallengeview',
mixins: ['Deft.mixin.Injectable', 'Deft.mixin.Controllable'],
inject: {
apiProperties: 'apiProperties',
appEventsController: 'appEventsController',
columnRendererUtils: 'columnRendererUtils',
challengesStore: 'challengesAllUnpagedStore',
challengeCategoriesStore: 'challengeCategoriesStore',
challengeReferralsStore: 'challengeReferralsStore',
textStore: 'sspTextStore'
},
controller: 'Ssp.controller.tool.actionplan.SearchChallengeViewController',
title: 'Add Task',
width: '100%',
height: '100%',
initComponent: function(){
var me = this;
Ext.apply(me, {
layout: {
type: 'fit'
},
padding: 0, | layout: 'vbox',
margin: '0 0 0 0',
padding: '0 0 0 15',
width: '100%',
height: '100%',
items: [{
xtype: 'container',
border: 0,
title: '',
margin: '0 0 5 0',
padding: '5 0 0 5',
layout: 'hbox',
defaults: {
anchor: '100%'
},
items: [{
xtype: 'label',
padding: '0 0 2 3',
text: me.textStore.getValueByCode('ssp.label.action-plan.search-challenge.filter', 'Filter Category, Challenge or Keyword')
}, {
xtype: 'tbspacer',
width: 195
}, {
tooltip: me.textStore.getValueByCode('ssp.tooltip.reset-button', 'Reset'),
text: me.textStore.getValueByCode('ssp.label.reset-button', 'Reset'),
type: 'refresh',
xtype: 'button',
padding: '0 0 2 3',
itemId: 'resetChallengesButton'
}]
}, {
xtype: 'combobox',
fieldLabel: '',
itemId: 'categoryNameCombo',
name: 'categoryNameCombo',
emptyText: me.textStore.getValueByCode('ssp.empty-text.action-plan.search-challenge.category', 'Filter by Category'),
store: me.challengeCategoriesStore,
valueField: 'id',
displayField: 'name',
mode: 'local',
typeAhead: true,
queryMode: 'local',
allowBlank: true,
width: 430,
padding: '0 0 0 10'
}, {
xtype: 'combobox',
fieldLabel: '',
itemId: 'categoryChallengeNameCombo',
name: 'categoryChallengeNameCombo',
emptyText: me.textStore.getValueByCode('ssp.empty-text.action-plan.search-challenge.challenge', 'Filter by Challenge'),
store: me.challengesStore,
valueField: 'id',
displayField: 'name',
mode: 'local',
typeAhead: true,
queryMode: 'local',
allowBlank: true,
width: 430,
padding: '0 0 0 10'
}, {
xtype: 'fieldcontainer',
margin: '0 0 0 10',
layout: {
align: 'stretch',
type: 'hbox'
},
fieldLabel: '',
items: [{
xtype: 'button',
text: me.textStore.getValueByCode('ssp.label.search-button', 'Search'),
itemId: 'searchKeywordButton'
}, {
xtype: 'tbspacer',
width: 10
}, {
xtype: 'textfield',
fieldLabel: '',
itemId: 'searchKeyword',
name: 'searchKeyword',
margin: ' 0 0 0 20',
width: 380,
enableKeyEvents: true,
listeners: {
afterrender: function(field){
field.focus(false, 0);
},
specialkey: {
scope: me,
fn: function(field, el){
if (el.getKey() == Ext.EventObject.ENTER) {
this.appEventsController.getApplication().fireEvent("onSearchKeyword");
}
}
}
}
}]
}, {
xtype: 'fieldset',
width: '100%',
padding: '0 305 0 10',
margin: '2',
layout: {
align: 'stretch',
type: 'hbox'
},
title: me.textStore.getValueByCode('ssp.label.action-plan.search-challenge.add-challenge-referral', 'Add ChallengeReferral'),
items: [{
xtype: 'button',
text: me.textStore.getValueByCode('ssp.label.add-button', 'Add'),
itemId: 'addChallengeReferralButton'
}, {
xtype: 'tbspacer',
width: 10
}, {
xtype: 'button',
text: me.textStore.getValueByCode('ssp.label.add-all-button', 'Add All'),
itemId: 'addAllChallengeReferralButton'
}]
}, {
xtype: 'challengesgrid',
flex: 1,
itemId: 'challengesgrid'
}]
}],
dockedItems: [{
xtype: 'fieldcontainer',
layout: {
align: 'stretch',
type: 'hbox'
},
fieldLabel: '',
items: [{
xtype: 'button',
text: me.textStore.getValueByCode('ssp.label.save-button', 'Save'),
itemId: 'saveBulkActionPlanButton'
}, {
xtype: 'button',
text: me.textStore.getValueByCode('ssp.label.cancel-button', 'Cancel'),
itemId: 'cancelButton'
}]
}]
});
return me.callParent(arguments);
}
}); | preventHeader: true,
items: [{
xtype: 'fieldcontainer',
fieldLabel: '', |
index.js | 'use strict';
module.exports = function (cb) {
return new Promise(function (resolve) {
resolve(cb());
}); | }; | |
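// Illustrative usage sketch (consumer code, not part of this module): the wrapper
// turns both synchronous throws in the callback and rejected promises into a
// single rejected promise.
//
//   const pTry = require('./index');
//   pTry(() => JSON.parse(input)).then(console.log).catch(console.error);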
grabaciones.component.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { HistorialService } from '@services/historial.service';
import { GrabacionesService } from '@services/grabaciones.service';
import * as moment from 'moment';
import { FormControl } from '@angular/forms';
import { debounceTime } from 'rxjs/operators';
import { from } from 'rxjs';
@Component({
selector: 'grabaciones',
templateUrl: './grabaciones.component.html',
})
export class GrabacionesComponent implements OnInit {
public Historial;
filtroValue = '';
search = new FormControl('');
public Historia = [];
public HistAdmin = [];
public HistOper = [];
public HistSaliente = [];
public HistEntrante = [];
public HistPerdida = [];
// get the current user's data from local storage
public us = localStorage.getItem('Usuario');
public obj = JSON.parse(this.us);
// the operator's currently selected number goes here
public numero = localStorage.getItem('NumberSelected');
constructor(
private historialService: HistorialService,
private grabaservice: GrabacionesService,
private router: Router
) {
}
ngOnInit() {
this.preload();
this.search.valueChanges.pipe(debounceTime(300)).subscribe((value) => {
this.Historia = this.HistOper.filter((it) => it.numero.includes(value));
});
}
DescargarAudio(uni, cha) { | type: 'application/zip'
});
const url = window.URL.createObjectURL(blob);
window.open(url);
});
}
preload() {
console.log(this.obj);
if (this.obj == null) {
console.log('the user is null');
return;
}
// Standard
if (this.obj.tipo == 'standard') {
this.llenarHistorialOperador();
}
// Administrators
else {
this.llenarCDRxAdmin();
}
}
llenarCDRxAdmin() {
this.historialService.HistorialLlamadasAdministrador().subscribe(
(response) => {
// response.forEach((element) => {
// var fecha = moment(element.calldate).subtract(10, 'days').calendar();
// element.calldate = fecha;
// console.log(element.calldate);
// });
this.HistAdmin = response;
this.HistAdmin.forEach((element) => {
element.calldate = this.convert(element.calldate);
element.segundos = this.convertseconds(element.segundos);
console.log(element.calldate);
// outgoing calls
if (
element.disposition == 'ANSWERED' &&
element.clid.includes(element.src)
) {
this.HistSaliente.push(element);
}
// incoming calls
if (
element.disposition == 'ANSWERED' &&
element.clid.includes(element.dst)
) {
this.HistEntrante.push(element);
}
// missed calls
if (element.disposition == 'NO ANSWERED') {
this.HistPerdida.push(element);
}
});
},
(er) => console.log(er)
);
this.Historia = this.HistAdmin;
}
llenarHistorialOperador() {
console.log(this.numero);
this.historialService.HistorialxSipoIax(this.numero).subscribe(
(response) => {
response.forEach(it => {
var fec = this.convert(it.fechayhora);
it.fechayhora = fec;
it.segundos = this.convertseconds(it.segundos);
if(it.tipo == 'entrante'){
this.HistEntrante.push(it);
}
if(it.tipo == 'saliente'){
this.HistSaliente.push(it);
}
if(it.tipo == 'perdida'){
this.HistPerdida.push(it);
}
});
console.log(response);
this.HistOper = response;
this.Historia = this.HistOper;
},
(error) => {
console.log(error);
}
);
}
salientes() {
if (this.obj.tipo == 'standard') {
this.Historia = this.HistSaliente;
} else {
}
}
entrantes() {
// here the attribute is checked to see whether the call was incoming
if (this.obj.tipo == 'standard') {
this.Historia = this.HistEntrante;
} else {
}
}
perdidas() {
// here the attribute is checked to see whether the call was missed
if (this.obj.tipo == 'standard') {
this.Historia = this.HistPerdida;
} else {
}
}
defaultHistOpe() {
if (this.obj.tipo == 'standard') {
this.Historia = this.HistOper;
} else {
console.log('all calls');
this.Historia = this.HistAdmin;
}
}
convertseconds(segundos){
let minutos = Math.floor(segundos/60);
let seconds = Math.floor(segundos%60);
let m, s;
m = minutos<10? '0'+minutos : minutos;
s = seconds<10? '0'+seconds : seconds;
return m+':'+s;
}
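// Illustrative examples (not in the original source): convertseconds(125)
// returns '02:05' (2 minutes, 5 seconds, zero-padded); convert() below formats
// a date string as MM/DD/YYYY, e.g. convert('2021-03-05T10:00:00') -> '03/05/2021'.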
convert(str) {
var date = new Date(str),
mnth = ('0' + (date.getMonth() + 1)).slice(-2),
day = ('0' + date.getDate()).slice(-2);
return [mnth, day, date.getFullYear()].join('/');
}
} | this.grabaservice.downloadFile(uni, cha).subscribe(data => {
const blob = new Blob([data], { |
thread.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use alloc::boxed::FnBox;
use cmp;
use io;
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::stack::RED_ZONE;
use sys_common::thread::*;
use time::Duration;
pub struct Thread {
handle: Handle
}
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// For now, the only requirement is that it's big enough to hold the
// red zone. Round up to the next 64 kB because that's what the NT
// kernel does, might as well make it explicit. With the current
// 20 kB red zone, that makes for a 64 kB minimum stack.
let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else {
mem::forget(p); // ownership passed to CreateThread
Ok(Thread { handle: Handle::new(ret) })
};
#[no_stack_check]
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
unsafe { start_thread(main); }
0
}
}
pub fn | (_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub fn join(self) {
use libc::consts::os::extra::INFINITE;
unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
}
pub fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
unsafe { c::SwitchToThread(); }
}
pub fn sleep(dur: Duration) {
unsafe {
if dur < Duration::zero() {
return Thread::yield_now()
}
let ms = dur.num_milliseconds();
// if we have a fractional number of milliseconds then add an extra
// millisecond to sleep for
let extra = dur - Duration::milliseconds(ms);
let ms = ms + if extra.is_zero() {0} else {1};
c::Sleep(ms as DWORD);
}
}
}
pub mod guard {
pub unsafe fn main() -> usize { 0 }
pub unsafe fn current() -> usize { 0 }
pub unsafe fn init() {}
}
| set_name |
kubernetes_secret_controller.go | package controller
import (
"fmt"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller"
"github.com/longhorn/longhorn-manager/datastore"
"github.com/longhorn/longhorn-manager/types"
"github.com/longhorn/longhorn-manager/util"
)
type KubernetesSecretController struct {
*baseController
// use as the OwnerID of the controller
namespace string
controllerID string
kubeClient clientset.Interface
eventRecorder record.EventRecorder
ds *datastore.DataStore
secretSynced cache.InformerSynced
}
func NewKubernetesSecretController(
logger logrus.FieldLogger,
ds *datastore.DataStore,
scheme *runtime.Scheme,
secretInformer coreinformers.SecretInformer,
kubeClient clientset.Interface,
controllerID string,
namespace string) *KubernetesSecretController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(logrus.Infof)
// TODO: remove the wrapper when every client has moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
})
ks := &KubernetesSecretController{
baseController: newBaseController("longhorn-kubernetes-secret-controller", logger),
namespace: namespace,
controllerID: controllerID,
ds: ds,
kubeClient: kubeClient,
eventRecorder: eventBroadcaster.NewRecorder(scheme, v1.EventSource{Component: "longhorn-kubernetes-secret-controller"}),
secretSynced: secretInformer.Informer().HasSynced,
}
secretInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ks.enqueueSecretChange,
UpdateFunc: func(old, cur interface{}) { ks.enqueueSecretChange(cur) },
DeleteFunc: ks.enqueueSecretChange,
})
return ks
}
func (ks *KubernetesSecretController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer ks.queue.ShutDown()
ks.logger.Infof("Start")
defer ks.logger.Infof("Shutting down")
if !cache.WaitForNamedCacheSync(ks.name, stopCh, ks.secretSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(ks.worker, time.Second, stopCh)
}
<-stopCh
}
func (ks *KubernetesSecretController) worker() {
for ks.processNextWorkItem() {
}
}
func (ks *KubernetesSecretController) processNextWorkItem() bool {
key, quit := ks.queue.Get()
if quit {
return false
}
defer ks.queue.Done(key)
err := ks.syncHandler(key.(string))
ks.handleErr(err, key)
return true
}
func (ks *KubernetesSecretController) handleErr(err error, key interface{}) {
if err == nil {
ks.queue.Forget(key)
return | ks.logger.WithError(err).Warnf("Error syncing Secret %v", key)
ks.queue.AddRateLimited(key)
return
}
ks.logger.WithError(err).Warnf("Dropping Secret %v out of the queue", key)
ks.queue.Forget(key)
utilruntime.HandleError(err)
}
func (ks *KubernetesSecretController) syncHandler(key string) (err error) {
defer func() {
err = errors.Wrapf(err, "%v: fail to sync %v", ks.name, key)
}()
namespace, secretName, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
if err := ks.reconcileSecret(namespace, secretName); err != nil {
return err
}
return nil
}
func (ks *KubernetesSecretController) reconcileSecret(namespace, secretName string) error {
if namespace != ks.namespace {
// Not ours, skip it
return nil
}
backupTarget, err := ks.ds.GetSettingValueExisted(types.SettingNameBackupTarget)
if err != nil {
// The backup target does not exist, skip it
return nil
}
backupType, err := util.CheckBackupType(backupTarget)
if err != nil {
// Invalid backup target, skip it
return nil
}
if backupType != types.BackupStoreTypeS3 {
// We only focus on backup target S3, skip it
return nil
}
sn, err := ks.ds.GetSettingValueExisted(types.SettingNameBackupTargetCredentialSecret)
if err != nil {
// The backup target credential secret does not exist, skip it
return nil
}
if sn != secretName {
// Not ours, skip it
return nil
}
secret, err := ks.ds.GetSecretRO(namespace, secretName)
if err != nil {
return err
}
// Annotates AWS IAM role arn to the manager as well as the replica instance managers
awsIAMRoleArn := string(secret.Data[types.AWSIAMRoleArn])
return ks.annotateAWSIAMRoleArn(awsIAMRoleArn)
}
func (ks *KubernetesSecretController) enqueueSecretChange(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", obj, err))
return
}
ks.queue.AddRateLimited(key)
}
// annotateAWSIAMRoleArn ensures that the running pods of the manager, as well as the replica instance managers,
// have the correct AWS IAM role arn assigned to them based on the passed `awsIAMRoleArn`.
func (ks *KubernetesSecretController) annotateAWSIAMRoleArn(awsIAMRoleArn string) error {
managerPods, err := ks.ds.ListManagerPods()
if err != nil {
return err
}
imPods, err := ks.ds.ListInstanceManagerPodsBy(ks.controllerID, "", types.InstanceManagerTypeReplica)
if err != nil {
return err
}
pods := append(managerPods, imPods...)
for _, pod := range pods {
if pod.Spec.NodeName != ks.controllerID {
continue
}
val, exist := pod.Annotations[types.AWSIAMRoleAnnotation]
updateAnnotation := awsIAMRoleArn != "" && awsIAMRoleArn != val
deleteAnnotation := awsIAMRoleArn == "" && exist
if updateAnnotation {
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
pod.Annotations[types.AWSIAMRoleAnnotation] = awsIAMRoleArn
} else if deleteAnnotation {
delete(pod.Annotations, types.AWSIAMRoleAnnotation)
} else {
continue
}
if _, err = ks.kubeClient.CoreV1().Pods(pod.Namespace).Update(pod); err != nil {
return err
}
ks.logger.Infof("AWS IAM role for pod %v/%v updated", pod.Namespace, pod.Name)
}
return nil
} | }
if ks.queue.NumRequeues(key) < maxRetries { |
test_nameserver.py | """
Test file for nameserver.
"""
import multiprocessing
import os
import random
import time
from threading import Timer
import pytest
from osbrain import Agent
from osbrain import AgentProcess
from osbrain import NameServer
from osbrain import NSProxy
from osbrain import Proxy
from osbrain import SocketAddress
from osbrain import run_agent
from osbrain import run_nameserver
from osbrain.helper import wait_agent_attr
from osbrain.nameserver import NameServerProcess
from osbrain.nameserver import random_nameserver_process
from .common import skip_windows_any_port
from .common import skip_windows_port_reuse
def test_nameserver_ping(nsproxy):
"""
Simple name server ping test.
"""
assert nsproxy.ping() == 'pong'
def test_nameserver_list(nsproxy):
"""
A simple test that checks the correct creation of a name server.
"""
agents = nsproxy.list()
name = 'Pyro.NameServer'
assert len(agents) == 1
assert list(agents.keys())[0] == name
assert agents[name] == 'PYRO:%s@%s' % (name, nsproxy.addr())
def test_nameserver_proxy_list(nsproxy):
"""
Verify new agents get registered in the nameserver.
"""
run_agent('a0', nsproxy.addr())
run_agent('a1', nsproxy.addr())
# List registered agents
agent_list = nsproxy.list()
assert 'a0' in agent_list
assert 'a1' in agent_list
def test_run_agents_same_name(nsproxy):
"""
Check that the user cannot create two agents with the same name. A
RuntimeError should be raised.
"""
run_agent('name')
with pytest.raises(RuntimeError) as error:
run_agent('name')
assert 'name already registered' in str(error.value)
def test_nameserver_proxy_shutdown_no_agents():
"""
Shutdown a name server through a proxy when the name server has no
agents registered.
"""
ns = run_nameserver()
ns.shutdown()
def test_nameserver_proxy_shutdown_agents(nsproxy):
"""
Shutdown agents registered in a name server from a name server proxy.
"""
run_agent('Agent0', nsaddr=nsproxy.addr())
run_agent('Agent1', nsaddr=nsproxy.addr())
nsproxy.shutdown_agents()
assert len(nsproxy.agents()) == 0
def test_nameserver_proxy_shutdown_with_agents():
"""
Shutdown a name server from a name server proxy.
"""
ns = run_nameserver()
run_agent('Agent0', nsaddr=ns.addr())
run_agent('Agent1', nsaddr=ns.addr())
ns.shutdown()
def test_nameserver_proxy_shutdown_with_many_agents():
"""
Shutdown a name server from a name server proxy when there are many agents
registered in the name server (make sure proxies do not saturate the name
server on shutdown).
The shutdown process is given a long timeout to avoid raising exceptions.
"""
import Pyro4
Pyro4.config.THREADPOOL_SIZE = 4
ns = run_nameserver()
for i in range(20):
run_agent('Agent%s' % i)
ns.shutdown(timeout=60)
def test_nameserver_proxy_shutdown_with_many_agents_timeout():
"""
Shutdown a name server from a name server proxy when there are many agents
registered in the name server (make sure proxies do not saturate the name
server on shutdown).
The shutdown process is given the shortest timeout to ensure an exception
is raised.
"""
import Pyro4
Pyro4.config.THREADPOOL_SIZE = 4
ns = run_nameserver()
for i in range(20):
run_agent('Agent%s' % i)
with pytest.raises(TimeoutError):
ns.shutdown(timeout=0.0)
ns.shutdown()
@pytest.mark.parametrize('delay', [1, 3, 5])
@pytest.mark.parametrize('timeout', [True, False])
def test_nameserver_proxy_shutdown_lazy_agents(delay, timeout):
"""
Shutdown a name server proxy with agents that wait some time before
shutting down.
The name server shutdown should always succeed. If the agents do not
shutdown cleanly soon they should be hard-killed.
"""
class Lazy(Agent):
def shutdown(self):
time.sleep(delay)
super().shutdown()
ns = run_nameserver()
run_agent('a0', base=Lazy)
run_agent('a1', base=Lazy)
t0 = time.time()
if timeout:
ns.shutdown(timeout=10)
else:
ns.shutdown()
assert time.time() - t0 > delay / 2.0
assert time.time() - t0 < delay + 2
def test_nameserver_proxy_shutdown_raise_timeout():
"""
A name server proxy should raise a TimeoutError if agents were not shut down
or killed before the set timeout.
"""
ns = run_nameserver()
run_agent('a0')
with pytest.raises(TimeoutError) as error:
ns.shutdown(timeout=0.0)
assert 'not shutdown after' in str(error.value)
ns.shutdown()
def test_nameserver_proxy_shutdown_with_pyroerror():
"""
Check that `PyroError`s raised during `async_nameserver_shutdown` are
handled correctly.
"""
nameserver = run_nameserver()
ap = AgentProcess()
name = ap.start()
proxy = Proxy(name)
proxy.run()
ap.kill()
nameserver.async_shutdown_agents(nameserver.addr())
nameserver.shutdown()
def test_oneway_kill_non_running_agent_on_name_server_shutdown():
"""
The agent's `shutdown` method is only executed for running agents. When
agents are not running (i.e.: they raised an exception while running or
their `keep_alive` attribute was simply set to `False`), the `kill` method
is called instead.
When killing a non-running agent (i.e.: when shutting down the
architecture from the name server), this call is expected to be executed
one-way, as otherwise the Pyro daemon will shut down before returning
from the method, resulting in a `ConnectionClosedError`.
"""
class WilliamWallace(Agent):
def kill(self):
super().kill()
time.sleep(2)
ns = run_nameserver()
william = run_agent('william', base=WilliamWallace)
# Stop the agent
william.set_attr(_keep_alive=False)
assert wait_agent_attr(william, name='_running', value=False)
# Shut down should work just fine
ns.shutdown()
def test_nameserverprocess_shutdown():
"""
Name server shutdown can be called directly from the name server process.
"""
nameserver = random_nameserver_process()
run_agent('a0')
run_agent('a1')
while not len(nameserver.agents()) == 2:
continue
assert 'a0' in nameserver.agents()
assert 'a1' in nameserver.agents()
nameserver.shutdown()
assert not nameserver.is_alive()
def test_nameserverprocess_shutdown_lazy_agents():
"""
Shutdown a name server process with agents that wait some time before
shutting down.
"""
class Lazy(Agent):
def shutdown(self):
time.sleep(1)
super().shutdown()
nsprocess = random_nameserver_process()
run_agent('a0', base=Lazy)
run_agent('a1', base=Lazy)
t0 = time.time()
nsprocess.shutdown()
assert time.time() - t0 > 1
def test_nameserver_proxy_timeout():
"""
When creating a proxy to the name server, there should be a timeout
before raising an error if the name server cannot be located.
"""
while True:
try:
# Bind to random port
host = '127.0.0.1'
port = random.randrange(10000, 20000)
addr = SocketAddress(host, port)
nameserver = NameServerProcess(addr)
# Start name server later
Timer(1, nameserver.start).start()
# Locate name server now
pyro_address = NSProxy(addr, timeout=3.0).addr()
except PermissionError:
continue
break
assert pyro_address.host == host
assert pyro_address.port == port
nameserver.shutdown()
def test_nameserver_process_default_host():
"""
A name server process should default to localhost (127.0.0.1).
"""
ns = NameServerProcess(1234)
assert ns.port == 1234
assert ns.host == '127.0.0.1'
def test_nameserver_environ(nsproxy):
"""
When starting a nameserver, a environment variable should be set to ease
the process of running new agents.
"""
assert str(nsproxy.addr()) == os.environ.get('OSBRAIN_NAMESERVER_ADDRESS')
run_agent('a0')
run_agent('a1')
# List registered agents
agent_list = nsproxy.list()
assert 'a0' in agent_list
assert 'a1' in agent_list
def test_nameserver_agents(nsproxy):
"""
Test the agents() method, which should return a list with the names of
the registered agents.
"""
# No agents registered
agents = nsproxy.agents()
assert len(agents) == 0
# One agent registered
run_agent('Agent0')
agents = nsproxy.agents()
assert len(agents) == 1
# Two agents registered
run_agent('Agent1')
agents = nsproxy.agents()
assert len(agents) == 2
assert 'Agent0' in agents
assert 'Agent1' in agents
def test_nameserver_agent_address(nsproxy):
"""
A name server proxy can be used to retrieve an agent's socket address as
well, given the agent's alias and the socket's alias.
"""
a0 = run_agent('a0')
a1 = run_agent('a1')
addr0 = a0.bind('PUB', alias='foo')
addr1 = a1.bind('PUSH', alias='bar')
assert nsproxy.addr('a0', 'foo') == addr0
assert nsproxy.addr('a1', 'bar') == addr1
@skip_windows_any_port
def test_random_nameserver_process():
"""
Basic random_nameserver_process function tests: port range and exceptions.
"""
# Port range
port_start = 11000
port_stop = port_start + 100
nsprocess = random_nameserver_process(
port_start=port_start, port_stop=port_stop
)
address = nsprocess.addr
assert port_start <= address.port <= port_stop
ns = NSProxy(address)
ns.shutdown()
# Raising exceptions
with pytest.raises(ValueError):
random_nameserver_process(port_start=-1, port_stop=-2)
with pytest.raises(RuntimeError):
random_nameserver_process(port_start=22, port_stop=22, timeout=0.5)
@skip_windows_port_reuse
def test_nameserver_oserror(nsproxy):
"""
Name server start() should raise an error if address is already in use.
"""
with pytest.raises(RuntimeError) as error:
run_nameserver(nsproxy.addr())
assert 'OSError' in str(error.value)
assert 'Address already in use' in str(error.value)
@skip_windows_any_port
def test_nameserver_permissionerror():
"""
Name server start() should raise an error if it has not sufficient
permissions.
"""
with pytest.raises(RuntimeError) as error:
run_nameserver('127.0.0.1:22')
assert 'PermissionError' in str(error.value)
assert 'Permission denied' in str(error.value)
def test_run_nameserver_base():
"""
The `run_nameserver` function should accept a `base` parameter to specify
the base NameServer class.
"""
class BobMarley(NameServer): |
ns = run_nameserver(base=BobMarley)
assert ns.get_up() == 'stand up!'
ns.shutdown()
def test_nameserver_spawn_process(nsproxy):
"""
A name server should be able to spawn child processes.
It is a way to make sure name servers are run as non-daemonic processes,
which are not allowed to have children.
"""
class Spawner(NameServer):
def spawn_process(self):
p = multiprocessing.Process()
p.start()
return True
ns = run_nameserver(base=Spawner)
assert ns.spawn_process()
ns.shutdown() | def get_up(self):
return 'stand up!' |
app.module.ts | import { Connection } from 'typeorm';
import { Module } from '@nestjs/common';
import { TypeOrmModule } from '@nestjs/typeorm';
import { AppController } from './app.controller';
import { AppService } from './app.service';
import { AuthModule } from './auth/auth.module';
import { typeOrmConfig } from './config';
import { OrdersModule } from './orders/orders.module';
import { ProductsModule } from './products/products.module';
import { UsersModule } from './users/users.module';
import { ProductOrdersModule } from './product-orders/product-orders.module';
| imports: [
TypeOrmModule.forRoot(typeOrmConfig),
UsersModule,
ProductsModule,
OrdersModule,
AuthModule,
ProductOrdersModule,
],
controllers: [AppController],
providers: [AppService],
})
export class AppModule {
constructor(private connection: Connection) {}
} | @Module({ |
initial.ts | import { MainArticlesState } from '../namespace'; | export const initial: MainArticlesState = {
isLoading: false,
isErr: false,
page: 1,
data: [],
topic: '',
sortBy: '',
date: '',
}; | |
httprule.go | // Package httprule provides utilities to map google.api.http annotation
// to net/http Request and Response types. These utilities make it possible to
// generate HTTP clients for a given proto service. The methods of this
// service have their HTTP mappings specified via `google.api.http`
// method options, e.g.:
//
// service HelloService {
// rpc Hello (HelloRequest) returns (HelloResponse) {
// option (google.api.http) = { post:"/api/hello" body:"*" };
// };
// };
//
// HttpRule proto: https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
// HttpRule codegen: https://pkg.go.dev/google.golang.org/genproto/googleapis/api/annotations
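//
// As an illustrative sketch only (the HelloRequest/HelloResponse types and the
// helloRule variable are assumed for the example, not part of this package),
// a caller might combine the helpers in this package roughly as follows:
//
//	req, err := httprule.NewHTTPRequest(helloRule, "https://example.com", &HelloRequest{})
//	if err != nil {
//		// handle error
//	}
//	resp, err := http.DefaultClient.Do(req)
//	if err != nil {
//		// handle error
//	}
//	defer resp.Body.Close()
//	var out HelloResponse
//	err = httprule.ParseProtoResponse(helloRule, resp, &out)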
package httprule
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
pb "google.golang.org/genproto/googleapis/api/annotations"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
)
var (
ErrInvalidHttpRule = fmt.Errorf("invalid HttpRule")
)
// ParseProtoResponse parses a http.Response using a HttpRule into a target
// message. The HttpRule contains a specification of how the response body and
// headers are mapped into the target proto message. The body JSON may map
// directly to the target message, or it may map to a top-level field of the
// target message. Response headers may reference any top-level scalar or
// repeated scalar fields of the target message.
//
// The http.Response body is consumed but not closed.
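//
// As an assumed, illustrative example (not taken from the proto snippet in the
// package comment), a response header can be mapped through an additional
// binding whose custom pattern has kind "response_header", e.g.
//
//	option (google.api.http) = {
//	  post: "/api/hello"
//	  body: "*"
//	  additional_bindings: {custom: {kind: "response_header" path: "X-Request-Id: {request_id}"}}
//	};
//
// which would copy the X-Request-Id header value into the target message's
// request_id field (see parseResponseHeaders below).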
func ParseProtoResponse(rule *pb.HttpRule, resp *http.Response, target proto.Message) error {
if err := ValidateHTTPRule(rule); err != nil {
return err
}
if err := parseResponseBody(rule, resp.Body, target); err != nil {
return err
}
if err := parseResponseHeaders(rule, resp.Header, target); err != nil {
return err
}
return nil
}
func parseResponseBody(rule *pb.HttpRule, body io.Reader, target proto.Message) error {
b, err := io.ReadAll(body)
if err != nil {
return fmt.Errorf("reading body: %w", err)
}
if len(bytes.TrimSpace(b)) == 0 {
return nil
}
if rule.ResponseBody != "" {
target, err = newField(rule.ResponseBody, target)
if err != nil {
return err
}
}
if err := protojson.Unmarshal(b, target); err != nil {
return fmt.Errorf("protojson unmarshal: %w", err)
}
return nil
}
func parseResponseHeaders(rule *pb.HttpRule, header http.Header, target proto.Message) error {
for _, rule := range rule.AdditionalBindings {
custom := rule.GetCustom()
if custom == nil || custom.Kind != "response_header" {
continue
}
if err := parseResponseHeader(custom.Path, header, target); err != nil {
return err
}
}
return nil
}
func parseResponseHeader(spec string, header http.Header, target proto.Message) error {
// "Header: value"
parts := strings.SplitN(spec, ":", 2)
key, pattern := http.CanonicalHeaderKey(parts[0]), strings.TrimSpace(parts[1])
re, err := newResponseHeaderParser(pattern)
if err != nil {
return fmt.Errorf("%w: response header '%s': %s", ErrInvalidHttpRule, key, err)
}
for _, val := range header.Values(key) {
matches := re.FindStringSubmatch(val)
if len(matches) < 2 {
// no match, nothing to extract
continue
}
fields := re.SubexpNames()
for i := 1; i < len(matches); i++ {
if err := setField(target, fields[i], matches[i]); err != nil {
return fmt.Errorf("%w: %s", ErrInvalidHttpRule, err)
}
}
}
return nil
}
func newResponseHeaderParser(pattern string) (*regexp.Regexp, error) {
// A pattern is an alternation of string literals and a braced field
// name. e.g. the pattern "hello {name}." could match the string "hello
// julia." where "julia" is to be extracted into the "name" field.
// Multiple fields are allowed.
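	// For example, under this grammar the pattern "hello {name}." compiles to
	// the regexp ^hello (?P<name>.+)\.$ so the header value "hello julia."
	// yields name="julia". (Illustrative example, not from the original code.)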
result := strings.Builder{}
result.WriteString("^")
for i := 0; i < len(pattern); {
var segment string
var length int
if pattern[i] != '{' {
segment, length = extractLiteral(pattern[i:])
segment = regexp.QuoteMeta(segment)
} else {
var err error
segment, length, err = extractField(pattern[i:])
if err != nil {
return nil, err
}
segment = "(?P<" + segment + ">.+)"
}
result.WriteString(segment)
i += length
}
result.WriteString("$")
return regexp.Compile(result.String())
}
var validFieldName = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_]*$`)
func extractField(s string) (string, int, error) {
closeBrace := strings.Index(s, "}")
if closeBrace == -1 {
return "", 0, fmt.Errorf("no closing brace on '%s'", s)
}
if closeBrace == 1 {
return "", 0, fmt.Errorf("empty field name")
}
fieldName := s[1:closeBrace]
if !validFieldName.MatchString(fieldName) {
return "", 0, fmt.Errorf("invalid field name '%s'", fieldName)
}
return fieldName, closeBrace + 1, nil
}
func extractLiteral(s string) (string, int) {
openBrace := strings.Index(s, "{")
if openBrace == -1 {
return s, len(s)
}
if openBrace > 0 && s[openBrace-1] == '\\' {
// Remove the backslash and advance past the open brace
return s[:openBrace-1] + "{", openBrace + 1
}
return s[:openBrace], openBrace
}
func setField(target proto.Message, name, valstr string) error {
m := target.ProtoReflect()
fd := m.Descriptor().Fields().ByTextName(name)
if fd == nil {
return fmt.Errorf("field '%s' not in message", name)
}
var val interface{}
var err error
switch fd.Kind() {
case protoreflect.BoolKind:
val, err = strconv.ParseBool(valstr)
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
var v int64
v, err = strconv.ParseInt(valstr, 10, 32)
val = int32(v)
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
var v uint64
v, err = strconv.ParseUint(valstr, 10, 32)
val = uint32(v)
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
var v int64
v, err = strconv.ParseInt(valstr, 10, 64)
val = int64(v)
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
var v uint64
v, err = strconv.ParseUint(valstr, 10, 64)
val = uint64(v)
case protoreflect.FloatKind:
var v float64
v, err = strconv.ParseFloat(valstr, 32)
val = float32(v)
case protoreflect.DoubleKind:
val, err = strconv.ParseFloat(valstr, 64)
case protoreflect.StringKind:
val, err = valstr, nil
case protoreflect.BytesKind:
val, err = []byte(valstr), nil
default:
err = fmt.Errorf("field '%s' of unsupported type", name)
}
if err != nil {
return err
}
value := protoreflect.ValueOf(val)
if fd.IsList() | else {
m.Set(fd, value)
}
return nil
}
func ValidateHTTPRule(rule *pb.HttpRule) error {
if method(rule) == "" {
return fmt.Errorf("%w: invalid method or empty path", ErrInvalidHttpRule)
}
return nil
}
// NewHTTPRequest creates an *http.Request for a given proto message and
// HTTPRule, containing the request mapping information.
func NewHTTPRequest(rule *pb.HttpRule, baseURL string, req proto.Message) (*http.Request, error) {
u, err := url.Parse(baseURL)
if err != nil {
return nil, fmt.Errorf("cannot parse baseURL: %w", err)
}
if err := ValidateHTTPRule(rule); err != nil {
return nil, err
}
templPath := templatePath(rule) // e.g. /v1/messages/{message_id}/{sub.subfield}
keys := map[string]bool{}
p, err := interpolate(templPath, req, keys)
if err != nil {
return nil, err
}
u.Path = path.Join(u.Path, p)
body, err := jsonBody(rule.Body, req, keys)
if err != nil {
return nil, err
}
header, err := requestHeaders(rule, req, keys)
if err != nil {
return nil, err
}
u.RawQuery, err = urlRawQuery(rule.Body, req, keys)
if err != nil {
return nil, err
}
r, err := http.NewRequest(method(rule), u.String(), body)
if err != nil {
return nil, fmt.Errorf("cannot create HTTP request: %w", err)
}
r.Header = header
return r, nil
}
func newField(fieldName string, msg proto.Message) (proto.Message, error) {
m := msg.ProtoReflect()
fd := m.Descriptor().Fields().ByTextName(fieldName)
if fd == nil {
return nil, fmt.Errorf("%w: field '%s' not in message", ErrInvalidHttpRule, fieldName)
}
if fd.Kind() != protoreflect.MessageKind {
return nil, fmt.Errorf("%w: field '%s' is not a message type", ErrInvalidHttpRule, fieldName)
}
val := m.NewField(fd)
m.Set(fd, val)
return val.Message().Interface(), nil
}
func requestHeaders(httpRule *pb.HttpRule, req proto.Message, skip map[string]bool) (http.Header, error) {
h := http.Header{}
for _, rule := range httpRule.AdditionalBindings {
if custom := rule.GetCustom(); custom != nil {
if custom.Kind == "header" {
key, val, err := parseHeader(custom.Path, req, skip)
if err != nil {
return nil, err
}
h.Add(key, val)
}
}
}
return h, nil
}
func parseHeader(s string, m proto.Message, skip map[string]bool) (key string, val string, err error) {
// "Content-Type: application/json"
parts := strings.SplitN(s, ":", 2)
key, val = parts[0], strings.TrimSpace(parts[1])
key = http.CanonicalHeaderKey(key)
val, err = interpolate(val, m, skip)
return key, val, err
}
// jsonBody returns an io.Reader for the given top-level message field, or the whole message
// if bodyField is set to "*".
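// For example (illustrative only): body:"*" marshals the whole request message
// as the JSON body, while body:"some_field" (an assumed field name) marshals
// only that top-level message-typed field and marks it as consumed in skip.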
func jsonBody(bodyField string, msg proto.Message, skip map[string]bool) (io.Reader, error) {
if bodyField == "" {
return nil, nil
}
if (bodyField == "*" && len(skip) != 0) || skip[bodyField] {
return nil, fmt.Errorf("%w: body and path fields overlap", ErrInvalidHttpRule)
}
if bodyField != "*" {
m := msg.ProtoReflect()
fds := m.Descriptor().Fields()
fd := fds.ByTextName(bodyField)
if fd == nil {
return nil, fmt.Errorf("%w: field '%s' not in message", ErrInvalidHttpRule, bodyField)
}
if fd.Kind() != protoreflect.MessageKind {
return nil, fmt.Errorf("%w: field '%s' is not a message type", ErrInvalidHttpRule, bodyField)
}
skip[bodyField] = true
msg = m.Get(fd).Message().Interface()
}
b, err := protojson.Marshal(msg)
if err != nil {
return nil, fmt.Errorf("cannot create bodyJSON for '%s': %w", bodyField, err)
}
return bytes.NewReader(b), nil
}
// interpolate returns a path from a templated path and a proto message
// whose values are substituted in the path template. For example:
//
// templatePath: "/v1/messages/{message_id}"
// proto message definition: message M { string message_id = 1; }
// proto message value: { message_id: 123 }
//
// => result path: "/v1/messages/123"
//
// Referenced message fields must have primitive types; they cannot be
// repeated or message types. See:
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
//
// Only basic substitutions via {var}, {var=*} and {var=**} of top-level
// fields are supported. {var} is a shorthand for {var=*} and
// substitutes the value of a message field with path escaping (%2...).
// {var=**} will substitute without path escaping. This may be useful for
// expansions where the values include slashes and is a deviation from
// the spec, which only allows {var=**} for the last path segment.
//
// The extended syntax for `*` and `**` substitutions with further path
// segments is not implemented. Nested field values are not supported
// (e.g.{msg_field.sub_field}).
//
// TODO: Complete interpolate implementation for full substitution grammar
func interpolate(templ string, msg proto.Message, skipKeys map[string]bool) (string, error) {
m := msg.ProtoReflect()
fds := m.Descriptor().Fields()
re := regexp.MustCompile(`{([a-zA-Z0-9_-]+)(=\*\*?)?}`)
result := templ
for _, match := range re.FindAllStringSubmatch(templ, -1) {
fullMatch, fieldName := match[0], match[1]
if skipKeys[fieldName] {
return "", fmt.Errorf("%w: field %q already in use", ErrInvalidHttpRule, fieldName)
}
fd := fds.ByTextName(fieldName)
if fd == nil {
return "", fmt.Errorf("cannot find %s in request proto message: %w", fieldName, ErrInvalidHttpRule)
}
if fd.Kind() == protoreflect.MessageKind || fd.Cardinality() == protoreflect.Repeated {
return "", fmt.Errorf("only primitive types supported in path substitution")
}
val := m.Get(fd).String()
if match[2] != "=**" {
val = url.PathEscape(val)
}
result = strings.ReplaceAll(result, fullMatch, val)
skipKeys[fieldName] = true
}
return result, nil
}
// urlRawQuery converts a proto message into url.Values.
//
// {"a": "A", "b": {"nested": "🐣"}, "SLICE": [1, 2]}}
// => ?a=A&b.nested=🐣&SLICE=1&SLICE=2
//
// TODO: Investigate zero value encoding for optional and default types.
func urlRawQuery(bodyRule string, m proto.Message, skip map[string]bool) (string, error) {
if bodyRule == "*" {
return "", nil
}
pm := &protojson.MarshalOptions{UseProtoNames: true}
b, err := pm.Marshal(m)
if err != nil {
return "", fmt.Errorf("cannot marshal message for URL query encoding: %w", err)
}
var obj map[string]interface{}
if err := json.Unmarshal(b, &obj); err != nil {
return "", fmt.Errorf("cannot query encode: error unmarshalling '%v': %w", m, err)
}
vals := url.Values{}
if err := queryEnc(obj, "", vals, skip); err != nil {
return "", err
}
return vals.Encode(), nil
}
func queryEnc(m map[string]interface{}, path string, vals url.Values, skip map[string]bool) error {
for key, val := range m {
p := path + key
if skip[p] {
continue
}
switch v := val.(type) {
case int, bool, string, float64:
vals.Add(p, fmt.Sprintf("%v", v))
case []interface{}:
if err := addSlice(v, p, vals); err != nil {
return err
}
case map[string]interface{}:
if err := queryEnc(v, p+".", vals, skip); err != nil {
return err
}
default:
return fmt.Errorf("cannot query encode %T", v)
}
}
return nil
}
func addSlice(s []interface{}, path string, vals url.Values) error {
for _, el := range s {
switch v := el.(type) {
case int, bool, string, float64:
vals.Add(path, fmt.Sprintf("%v", v))
default:
return fmt.Errorf("cannot query encode slices of non-basic type %T", v)
}
}
return nil
}
func templatePath(rule *pb.HttpRule) string {
switch {
case rule.GetGet() != "":
return rule.GetGet()
case rule.GetPut() != "":
return rule.GetPut()
case rule.GetPost() != "":
return rule.GetPost()
case rule.GetDelete() != "":
return rule.GetDelete()
case rule.GetCustom() != nil && rule.GetCustom().GetKind() == "HEAD":
return rule.GetCustom().GetPath()
}
return ""
}
func method(rule *pb.HttpRule) string {
switch {
case rule.GetGet() != "":
return http.MethodGet
case rule.GetPut() != "":
return http.MethodPut
case rule.GetPost() != "":
return http.MethodPost
case rule.GetDelete() != "":
return http.MethodDelete
case rule.GetPatch() != "":
return http.MethodPatch
case rule.GetCustom() != nil && rule.GetCustom().GetKind() == "HEAD":
return http.MethodHead
}
return ""
}
| {
m.Mutable(fd).List().Append(value)
} |
views.py | from django.shortcuts import render, redirect
from django.views.generic.detail import DetailView
from .models import Fly,Airport
from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/login/')
def | (request):
ls_airports = Airport.objects.all()
context = {'airports' : ls_airports}
return render(request,'relazioni/form_prenotazione.html',context)
def visualizza_voli(request):
ls_voli = []
if request.method == 'POST':
aeroporto_partenza = request.POST['a_partenza']
aeroporto_arrivo = request.POST['a_arrivo']
data = request.POST['data']
ls_voli = Fly.objects.filter(Q(aeroporto_partenza=aeroporto_partenza) & Q(aeroporto_arrivo=aeroporto_arrivo) & Q(data_partenza=data))
messages.success(request, 'Ecco tutti i voli disponibili')
voli = []
for index in ls_voli:
volo = Fly.objects.get(code_volo=index)
voli.append(volo)
context = {
'voli': voli,
}
    else:
        messages.error(request, 'Non ci sono voli disponibili!')
        # define context so the render call below does not fail on GET requests
        context = {'voli': []}
return render(request,'relazioni/voli.html',context)
| selection_airport |
service.pb.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.13.0
// source: google/cloud/servicedirectory/v1beta1/service.proto
package servicedirectory
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// An individual service. A service contains a name and optional metadata.
// A service must exist before
// [endpoints][google.cloud.servicedirectory.v1beta1.Endpoint] can be
// added to it.
type Service struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Immutable. The resource name for the service in the format
// 'projects/*/locations/*/namespaces/*/services/*'.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. Metadata for the service. This data can be consumed by service
// clients. The entire metadata dictionary may contain up to 2000 characters,
	// spread across all key-value pairs. Metadata that goes beyond any of these
// limits will be rejected.
Metadata map[string]string `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. Endpoints associated with this service. Returned on LookupService.Resolve.
// Control plane clients should use RegistrationService.ListEndpoints.
Endpoints []*Endpoint `protobuf:"bytes,3,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
}
func (x *Service) Reset() {
*x = Service{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_servicedirectory_v1beta1_service_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Service) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Service) ProtoMessage() {}
func (x *Service) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_servicedirectory_v1beta1_service_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Service.ProtoReflect.Descriptor instead.
func (*Service) Descriptor() ([]byte, []int) {
return file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescGZIP(), []int{0}
}
func (x *Service) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Service) GetMetadata() map[string]string {
if x != nil {
return x.Metadata
}
return nil
}
func (x *Service) GetEndpoints() []*Endpoint {
if x != nil {
return x.Endpoints
}
return nil
}
var File_google_cloud_servicedirectory_v1beta1_service_proto protoreflect.FileDescriptor
var file_google_cloud_servicedirectory_v1beta1_service_proto_rawDesc = []byte{
0x0a, 0x33, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2f,
0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63,
0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62,
0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69,
0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f,
0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x03, 0x0a,
0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x5d, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x12, 0x52, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x64, 0x70,
0x6f, 0x69, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f,
0x69, 0x6e, 0x74, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x3a, 0x7f, 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64,
0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x51, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c,
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
0x63, 0x65, 0x73, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x7d, 0x42, 0x90, 0x02, 0x0a, 0x29, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64,
0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
0x5a, 0x55, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76,
0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69,
0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x25, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x56, 0x31, 0x42, 0x65, 0x74,
0x61, 0x31, 0xca, 0x02, 0x25, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75,
0x64, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x79, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0xea, 0x02, 0x28, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31,
0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescOnce sync.Once
file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescData = file_google_cloud_servicedirectory_v1beta1_service_proto_rawDesc
)
func file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescGZIP() []byte |
var file_google_cloud_servicedirectory_v1beta1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_google_cloud_servicedirectory_v1beta1_service_proto_goTypes = []interface{}{
(*Service)(nil), // 0: google.cloud.servicedirectory.v1beta1.Service
nil, // 1: google.cloud.servicedirectory.v1beta1.Service.MetadataEntry
(*Endpoint)(nil), // 2: google.cloud.servicedirectory.v1beta1.Endpoint
}
var file_google_cloud_servicedirectory_v1beta1_service_proto_depIdxs = []int32{
1, // 0: google.cloud.servicedirectory.v1beta1.Service.metadata:type_name -> google.cloud.servicedirectory.v1beta1.Service.MetadataEntry
2, // 1: google.cloud.servicedirectory.v1beta1.Service.endpoints:type_name -> google.cloud.servicedirectory.v1beta1.Endpoint
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_google_cloud_servicedirectory_v1beta1_service_proto_init() }
func file_google_cloud_servicedirectory_v1beta1_service_proto_init() {
if File_google_cloud_servicedirectory_v1beta1_service_proto != nil {
return
}
file_google_cloud_servicedirectory_v1beta1_endpoint_proto_init()
if !protoimpl.UnsafeEnabled {
file_google_cloud_servicedirectory_v1beta1_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Service); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_cloud_servicedirectory_v1beta1_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_cloud_servicedirectory_v1beta1_service_proto_goTypes,
DependencyIndexes: file_google_cloud_servicedirectory_v1beta1_service_proto_depIdxs,
MessageInfos: file_google_cloud_servicedirectory_v1beta1_service_proto_msgTypes,
}.Build()
File_google_cloud_servicedirectory_v1beta1_service_proto = out.File
file_google_cloud_servicedirectory_v1beta1_service_proto_rawDesc = nil
file_google_cloud_servicedirectory_v1beta1_service_proto_goTypes = nil
file_google_cloud_servicedirectory_v1beta1_service_proto_depIdxs = nil
}
| {
file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescOnce.Do(func() {
file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescData)
})
return file_google_cloud_servicedirectory_v1beta1_service_proto_rawDescData
} |
sign.rs | use crate::operators::{Additive, ClosedNeg};
use crate::properties::general::Identity;
pub trait Signed: ClosedNeg {
fn abs(&self) -> Self;
fn abs_sub(&self, rhs: &Self) -> Self;
fn signum(&self) -> Self;
fn is_positive(&self) -> bool;
fn is_negative(&self) -> bool;
}
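// Illustrative expectations for the implementations below (a sketch added for
// clarity, not part of the original source): `abs_sub` saturates at zero
// rather than taking the absolute value of the difference.
//
//     assert_eq!(7i32.abs_sub(&5), 2);
//     assert_eq!(5i32.abs_sub(&7), 0);
//     assert_eq!((-3i32).signum(), -1);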
macro_rules! impl_signed_int {
($($t:ty)*) => {
$(
impl Signed for $t {
#[inline]
fn abs(&self) -> Self {
<$t>::abs(*self)
}
#[inline]
fn abs_sub(&self, rhs: &Self) -> Self {
if *self <= *rhs {
<$t as Identity<Additive>>::identity()
} else {
*self - *rhs
}
}
#[inline]
fn signum(&self) -> Self {
<$t>::signum(*self)
}
#[inline]
fn is_positive(&self) -> bool {
<$t>::is_positive(*self)
}
#[inline]
fn is_negative(&self) -> bool {
                    <$t>::is_negative(*self)
}
}
)*
}
}
macro_rules! impl_signed_float {
($($t:ty)*) => {
$(
impl Signed for $t {
#[inline]
fn abs(&self) -> Self {
<$t>::abs(*self)
}
#[inline]
fn abs_sub(&self, rhs: &Self) -> Self {
if *self <= *rhs {
<$t as Identity<Additive>>::identity()
} else {
*self - *rhs
}
}
#[inline]
fn signum(&self) -> Self {
<$t>::signum(*self)
}
#[inline]
fn is_positive(&self) -> bool {
<$t>::is_sign_positive(*self) | fn is_negative(&self) -> bool {
                    <$t>::is_sign_negative(*self)
}
}
)*
}
}
impl_signed_int!(i8 i16 i32 i64 i128 isize);
impl_signed_float!(f32 f64); | }
#[inline] |
pools.pb.go | // Copyright 2017 The LUCI Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0-devel
// protoc v3.12.1
// source: go.chromium.org/luci/swarming/proto/config/pools.proto
package configpb
import (
proto "github.com/golang/protobuf/proto"
_ "go.chromium.org/luci/common/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Schema for pools.cfg service config file in luci-config.
//
// It defines a set of Pool objects, each one corresponding to a single Swarming
// pool dimension. Each Swarming task resides in some pool, and each Swarming
// bot belongs to at least one pool.
//
// Pools are used to isolate groups of tasks/bots from each other for security
// and capacity reasons. Two different pools should not interfere with each
// other at all (unless explicitly configured to share bots or accounts).
type PoolsCfg struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// List of all defined pools.
Pool []*Pool `protobuf:"bytes,1,rep,name=pool,proto3" json:"pool,omitempty"`
// Configures the default isolate and CIPD services to use for all pools on
// this server.
DefaultExternalServices *ExternalServices `protobuf:"bytes,6,opt,name=default_external_services,json=defaultExternalServices,proto3" json:"default_external_services,omitempty"`
// This is the "shared namespace" of task templates.
//
// Task templates allow pools to specify some property defaults (particularly
// around caches, CIPD packages and Environment variables) for tasks created
// within the pool. These templates can have 'include' statements, and those
// include statements draw from this namespace.
//
// Swarming will do a 2-pass parse of these so order doesn't matter (i.e. If
// A includes B, but is defined B-then-A, it's not an error).
TaskTemplate []*TaskTemplate `protobuf:"bytes,3,rep,name=task_template,json=taskTemplate,proto3" json:"task_template,omitempty"`
// This is the "shared namespace" of deployments.
//
// When pools specify a task_template_deployment, it draws from this
// namespace.
TaskTemplateDeployment []*TaskTemplateDeployment `protobuf:"bytes,4,rep,name=task_template_deployment,json=taskTemplateDeployment,proto3" json:"task_template_deployment,omitempty"`
// Defines about how to monitor bots in a pool. Each pool above may refer to
// one of the BotMonitoring message by name, which permits reusing
// BotMonitoring definitions.
BotMonitoring []*BotMonitoring `protobuf:"bytes,5,rep,name=bot_monitoring,json=botMonitoring,proto3" json:"bot_monitoring,omitempty"`
}
func (x *PoolsCfg) Reset() {
*x = PoolsCfg{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PoolsCfg) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PoolsCfg) ProtoMessage() {}
func (x *PoolsCfg) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PoolsCfg.ProtoReflect.Descriptor instead.
func (*PoolsCfg) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{0}
}
func (x *PoolsCfg) GetPool() []*Pool {
if x != nil {
return x.Pool
}
return nil
}
func (x *PoolsCfg) GetDefaultExternalServices() *ExternalServices {
if x != nil {
return x.DefaultExternalServices
}
return nil
}
func (x *PoolsCfg) GetTaskTemplate() []*TaskTemplate {
if x != nil {
return x.TaskTemplate
}
return nil
}
func (x *PoolsCfg) GetTaskTemplateDeployment() []*TaskTemplateDeployment {
if x != nil {
return x.TaskTemplateDeployment
}
return nil
}
func (x *PoolsCfg) GetBotMonitoring() []*BotMonitoring {
if x != nil {
return x.BotMonitoring
}
return nil
}
// Properties of a single pool or a bunch of identically configured pools.
//
// In particular contains authorization configuration.
type Pool struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Names of the pools this config applies to.
//
// Tasks target the pool by specifying its name as 'pool' dimension, thus
// names here should be valid dimension value.
Name []string `protobuf:"bytes,1,rep,name=name,proto3" json:"name,omitempty"`
// Contact information for people that own this pool.
//
// Not used in any ACLs, just informational field.
Owners []string `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners,omitempty"`
// Defines who can schedule tasks in this pool.
//
// The checks here act as a second authorization layer, consulted after the
// first server-global one (defined based on groups set in settings.cfg, see
// AuthSettings in config.proto).
Schedulers *Schedulers `protobuf:"bytes,3,opt,name=schedulers,proto3" json:"schedulers,omitempty"`
// List of service account emails allowed to be used for tasks that target
// this pool. Tasks specify the service account via 'service_account' field
// in the tasks.New RPC.
//
// By associating accounts with pools in the config we make it more explicit
// that a bot belonging to a pool eventually can get access to service
// accounts of all tasks running in this pool (just by sitting there, grabbing
// tasks and sniffing service account tokens).
AllowedServiceAccount []string `protobuf:"bytes,4,rep,name=allowed_service_account,json=allowedServiceAccount,proto3" json:"allowed_service_account,omitempty"`
// Same as 'allowed_service_account', but the set of service accounts is
// specified through an auth group.
AllowedServiceAccountGroup []string `protobuf:"bytes,5,rep,name=allowed_service_account_group,json=allowedServiceAccountGroup,proto3" json:"allowed_service_account_group,omitempty"`
// Types that are assignable to TaskDeploymentScheme:
// *Pool_TaskTemplateDeployment
// *Pool_TaskTemplateDeploymentInline
TaskDeploymentScheme isPool_TaskDeploymentScheme `protobuf_oneof:"task_deployment_scheme"`
// Refer to one bot_monitoring at the file level by name.
BotMonitoring string `protobuf:"bytes,8,opt,name=bot_monitoring,json=botMonitoring,proto3" json:"bot_monitoring,omitempty"`
// If specified, this is the description of the external schedulers to be used
// for tasks and bots for this pool that match the dimension set of a scheduler.
// For a given task or bot, the first entry in this list that matches based on
// dimension eligibility will be used.
ExternalSchedulers []*ExternalSchedulerConfig `protobuf:"bytes,9,rep,name=external_schedulers,json=externalSchedulers,proto3" json:"external_schedulers,omitempty"`
// Realm name that the pool is associated with.
//
// e.g.
// 'infra:pool/flex/try' for 'luci.flex.try' pool
//
// See also
// https://chromium.googlesource.com/infra/luci/luci-go/+/HEAD/server/auth/service/protocol/components/auth/proto/realms.proto
Realm string `protobuf:"bytes,10,opt,name=realm,proto3" json:"realm,omitempty"`
// Enforcements of permissions can be controlled by pool during migration
// from legacy ACLs configs to Realms configs.
//
// When scheduling tasks:
// * If a task doesn't have a realm (i.e. it is a legacy task), it will be
// assigned `default_task_realm` and only permissions listed here will be
// enforced. If some permission is not enforced, Swarming will use a
// legacy ACL check for it instead.
// * If a task has a realm (i.e. it is a modern task aware of realms), all
// permissions will always be enforced for it. Legacy ACLs will not be
// used at all.
//
// This field is not used for permissions not related to task scheduling.
//
// This field will be deprecated after migration. All scheduling permissions
// will be enforced at all times.
EnforcedRealmPermissions []RealmPermission `protobuf:"varint,11,rep,packed,name=enforced_realm_permissions,json=enforcedRealmPermissions,proto3,enum=swarming.config.RealmPermission" json:"enforced_realm_permissions,omitempty"`
// Realm name to use for tasks if they don't have a realm associated.
DefaultTaskRealm string `protobuf:"bytes,12,opt,name=default_task_realm,json=defaultTaskRealm,proto3" json:"default_task_realm,omitempty"`
}
func (x *Pool) Reset() {
*x = Pool{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Pool) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Pool) ProtoMessage() {}
func (x *Pool) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Pool.ProtoReflect.Descriptor instead.
func (*Pool) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{1}
}
func (x *Pool) GetName() []string {
if x != nil {
return x.Name
}
return nil
}
func (x *Pool) GetOwners() []string {
if x != nil {
return x.Owners
}
return nil
}
func (x *Pool) GetSchedulers() *Schedulers {
if x != nil {
return x.Schedulers
}
return nil
}
func (x *Pool) GetAllowedServiceAccount() []string {
if x != nil {
return x.AllowedServiceAccount
}
return nil
}
func (x *Pool) GetAllowedServiceAccountGroup() []string {
if x != nil {
return x.AllowedServiceAccountGroup
}
return nil
}
func (m *Pool) GetTaskDeploymentScheme() isPool_TaskDeploymentScheme {
if m != nil {
return m.TaskDeploymentScheme
}
return nil
}
func (x *Pool) GetTaskTemplateDeployment() string {
if x, ok := x.GetTaskDeploymentScheme().(*Pool_TaskTemplateDeployment); ok {
return x.TaskTemplateDeployment
}
return ""
}
func (x *Pool) GetTaskTemplateDeploymentInline() *TaskTemplateDeployment {
if x, ok := x.GetTaskDeploymentScheme().(*Pool_TaskTemplateDeploymentInline); ok {
return x.TaskTemplateDeploymentInline
}
return nil
}
func (x *Pool) GetBotMonitoring() string {
if x != nil {
return x.BotMonitoring
}
return ""
}
func (x *Pool) GetExternalSchedulers() []*ExternalSchedulerConfig {
if x != nil {
return x.ExternalSchedulers | func (x *Pool) GetRealm() string {
if x != nil {
return x.Realm
}
return ""
}
func (x *Pool) GetEnforcedRealmPermissions() []RealmPermission {
if x != nil {
return x.EnforcedRealmPermissions
}
return nil
}
func (x *Pool) GetDefaultTaskRealm() string {
if x != nil {
return x.DefaultTaskRealm
}
return ""
}
type isPool_TaskDeploymentScheme interface {
isPool_TaskDeploymentScheme()
}
type Pool_TaskTemplateDeployment struct {
// Most Pools will include a task_template_deployment by name.
TaskTemplateDeployment string `protobuf:"bytes,6,opt,name=task_template_deployment,json=taskTemplateDeployment,proto3,oneof"`
}
type Pool_TaskTemplateDeploymentInline struct {
// However, pools which substantially differ from other ones can define an
// entire deployment inline without putting it in the shared namespace.
//
// The name fields in this deployment and any embedded task_templates must
// not be specified.
TaskTemplateDeploymentInline *TaskTemplateDeployment `protobuf:"bytes,7,opt,name=task_template_deployment_inline,json=taskTemplateDeploymentInline,proto3,oneof"`
}
func (*Pool_TaskTemplateDeployment) isPool_TaskDeploymentScheme() {}
func (*Pool_TaskTemplateDeploymentInline) isPool_TaskDeploymentScheme() {}
// Defines who can schedule tasks in a pool.
type Schedulers struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Emails of individual end-users.
//
// Useful to avoid creating one-person groups.
User []string `protobuf:"bytes,1,rep,name=user,proto3" json:"user,omitempty"`
// List of groups with end-users.
Group []string `protobuf:"bytes,2,rep,name=group,proto3" json:"group,omitempty"`
// See TrustedDelegation comment.
TrustedDelegation []*TrustedDelegation `protobuf:"bytes,3,rep,name=trusted_delegation,json=trustedDelegation,proto3" json:"trusted_delegation,omitempty"`
}
func (x *Schedulers) Reset() {
*x = Schedulers{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Schedulers) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Schedulers) ProtoMessage() {}
func (x *Schedulers) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Schedulers.ProtoReflect.Descriptor instead.
func (*Schedulers) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{2}
}
func (x *Schedulers) GetUser() []string {
if x != nil {
return x.User
}
return nil
}
func (x *Schedulers) GetGroup() []string {
if x != nil {
return x.Group
}
return nil
}
func (x *Schedulers) GetTrustedDelegation() []*TrustedDelegation {
if x != nil {
return x.TrustedDelegation
}
return nil
}
// Defines a delegatee trusted to make authorization decisions for who can use
// a pool.
//
// This is based on LUCI delegation protocol. Imagine an end user U calling
// Swarming through an intermediary service X. In this case U is a delegator and
// X is a delegatee. When X calls Swarming, it makes an RPC to the token server
// to make a delegation token that says "<X can call Swarming on behalf of U>".
//
// This token is then sent to the Swarming with the RPC. Swarming sees that
// the direct peer it's talking to is X, but the call should be performed under
// the authority of U.
//
// We extend this to also allow X make authorization decisions about whether U
// can use particular Swarming resource or not. The result of this decision is
// encoded in the delegation token as a set of "key:value" tags. Swarming then
// can treat presence of such tags as a signal that the particular call is
// allowed.
//
// In this scenario we totally trust X to make the correct decision.
type TrustedDelegation struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Email of a trusted delegatee (the one who's minting the delegation token).
PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
// A list of tags to expected in the delegation token to allow the usage of
// a pool.
//
// Presence of any of the specified tags are enough. The format of these tags
// generally depends on what service is doing the delegation.
RequireAnyOf *TrustedDelegation_TagList `protobuf:"bytes,2,opt,name=require_any_of,json=requireAnyOf,proto3" json:"require_any_of,omitempty"`
}
func (x *TrustedDelegation) Reset() {
*x = TrustedDelegation{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TrustedDelegation) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TrustedDelegation) ProtoMessage() {}
func (x *TrustedDelegation) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TrustedDelegation.ProtoReflect.Descriptor instead.
func (*TrustedDelegation) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{3}
}
func (x *TrustedDelegation) GetPeerId() string {
if x != nil {
return x.PeerId
}
return ""
}
func (x *TrustedDelegation) GetRequireAnyOf() *TrustedDelegation_TagList {
if x != nil {
return x.RequireAnyOf
}
return nil
}
// A TaskTemplate describes a set of properties (caches, CIPD packages and
// envvars) which apply to tasks created within a swarming pool.
//
// TaskTemplates may either be defined inline inside of
// a TaskTemplateDeployment, or in "shared namespace" of the
// PoolsCfg.task_template field.
//
// TaskTemplates may also include other TaskTemplates by name from the "shared
// namespace" in PoolsCfg. Swarming calculates the final value for a given
// TaskTemplate by applying all of its `include` fields depth-first, and then by
// applying the properties in the body of the TaskTemplate. Includes may never
// be repeated, including transitively. This means that "diamond shaped
// dependencies" are forbidden (i.e. A<-B<-D and A<-C<-D would be forbidden
// because `A` is included in `D` twice (via both C and B)).
type TaskTemplate struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// This gives the template a name for the 'include' field below. This only
// applies to templates defined within the PoolsCfg message (i.e. the
// top-level message), not to templates inlined into a TaskTemplateDeployment.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Includes properties from the named other TaskTemplate. This can only
// include templates defined in the top-level PoolsCfg message.
Include []string `protobuf:"bytes,2,rep,name=include,proto3" json:"include,omitempty"`
// CacheEntries are keyed by `name`, and `path` is overridden wholesale.
//
// It is illegal to have any TaskTemplate with multiple cache entries mapping
// to the same path. It is illegal to have any cache paths overlap with cipd
// package paths.
Cache []*TaskTemplate_CacheEntry `protobuf:"bytes,3,rep,name=cache,proto3" json:"cache,omitempty"`
// CipdPackages are keyed by (path, name), and `version` is overridden
// wholesale.
//
// It is illegal to have any cipd paths overlap with cache entry paths.
CipdPackage []*TaskTemplate_CipdPackage `protobuf:"bytes,4,rep,name=cipd_package,json=cipdPackage,proto3" json:"cipd_package,omitempty"`
// Env vars are keyed by the `var` field,
//
// `value` fields overwrite included values.
// `soft` fields overwrite included values.
// `prefix` fields append to included values. For example, Doing:
//
// {name: "1" env { var: "PATH" prefix: "a" }}
// {name: "2" env { var: "PATH" prefix: "b" }}
// {name: "3" include: "1" include: "2" }
//
// Is equivalent to:
//
// {name: "3" env { var: "PATH" prefix: "a" prefix: "b" }}
//
//
// Full Example:
//
// env {
// var: "PATH"
// value: "/disable_system_path"
// prefix: "a"
// prefix: "b"
// prefix: "c"
// soft: true
// }
// env {
// var: "OTHER"
// value: "1"
// }
// env {
// var: "PYTHONPATH"
// prefix: "a"
// }
//
// Results in, essentially:
//
// $PATH=/path/to/a:/path/to/b:/path/to/c:/disable_system_path
// $OTHER=1
// $PYTHONPATH=/path/to/a:$PYTHONPATH
Env []*TaskTemplate_Env `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"`
}
func (x *TaskTemplate) Reset() {
*x = TaskTemplate{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskTemplate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskTemplate) ProtoMessage() {}
func (x *TaskTemplate) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskTemplate.ProtoReflect.Descriptor instead.
func (*TaskTemplate) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{4}
}
func (x *TaskTemplate) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *TaskTemplate) GetInclude() []string {
if x != nil {
return x.Include
}
return nil
}
func (x *TaskTemplate) GetCache() []*TaskTemplate_CacheEntry {
if x != nil {
return x.Cache
}
return nil
}
func (x *TaskTemplate) GetCipdPackage() []*TaskTemplate_CipdPackage {
if x != nil {
return x.CipdPackage
}
return nil
}
func (x *TaskTemplate) GetEnv() []*TaskTemplate_Env {
if x != nil {
return x.Env
}
return nil
}
// This is a tuple of (prod template, canary template, canary_chance), so that it
// can be referenced from multiple pools simultaneously as a single unit.
type TaskTemplateDeployment struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// This gives the deployment a name for the 'task_template_deployment' field
// in PoolCfg.
//
// When this TaskTemplateDeployment is inlined into another message (e.g.
// `TaskTemplate.task_template_deployment_inline`), the name field must not be
// specified.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Most Deployments will have a TaskTemplate with just a single include
// directive.
//
// However, pools which substantially differ from other ones could define an
// entire template inline without being forced to put it in the shared
// namespace.
//
// The name field in this template (and the canary template) must not be
// specified.
Prod *TaskTemplate `protobuf:"bytes,2,opt,name=prod,proto3" json:"prod,omitempty"`
// The canary template can be defined like the `prod` field above. If this is
// defined and `canary_chance` is greater than 0, then this template will be
// selected instead of `prod`.
Canary *TaskTemplate `protobuf:"bytes,3,opt,name=canary,proto3" json:"canary,omitempty"`
	// range [0, 9999] where each tick corresponds to a 0.01% chance of selecting
// the template. Exactly 0 means 'canary is disabled', meaning that tasks
// in this pool will always get the prod template.
//
// Examples:
// * 1 ".01% chance of picking canary"
// * 10 ".1% chance of picking canary"
// * 100 "1% chance of picking canary"
// * 1000 "10% chance of picking canary"
// * 5000 "50% chance of picking canary"
// * 7500 "75% chance of picking canary"
// * 9999 "99.99% chance of picking canary"
CanaryChance int32 `protobuf:"varint,4,opt,name=canary_chance,json=canaryChance,proto3" json:"canary_chance,omitempty"`
}
func (x *TaskTemplateDeployment) Reset() {
*x = TaskTemplateDeployment{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskTemplateDeployment) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskTemplateDeployment) ProtoMessage() {}
func (x *TaskTemplateDeployment) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskTemplateDeployment.ProtoReflect.Descriptor instead.
func (*TaskTemplateDeployment) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{5}
}
func (x *TaskTemplateDeployment) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *TaskTemplateDeployment) GetProd() *TaskTemplate {
if x != nil {
return x.Prod
}
return nil
}
func (x *TaskTemplateDeployment) GetCanary() *TaskTemplate {
if x != nil {
return x.Canary
}
return nil
}
func (x *TaskTemplateDeployment) GetCanaryChance() int32 {
if x != nil {
return x.CanaryChance
}
return 0
}
// Defines about how to monitor bots.
type BotMonitoring struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Name is used by Pool to describe how to monitor bots in this pool.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Dimension keys to be used to bucket the bots.
//
// The algorithm for a key with multiple values is:
// def simplify(values):
// values = sorted(values)
// return '|'.join(
// v for i, v in enumerate(values)
// if not any(v.startswith(value) for v in values[i+1:]))
//
// For example, if 'os' is specified and a bot has the values
// ['Linux', 'Ubuntu', 'Ubuntu-16.04'], the bucket value used for this bot
// will be 'Linux|Ubuntu-16.04'.
//
	// The whole algorithm then works for each key:
// def bucket(keys, bot_dimensions):
// return ';'.join(
// '%s:%s' % (key, simplify(bot_dimensions.get(values, []))
// for key in keys)
//
// so the end result may look like: 'os:Linux|Ubuntu-16.04;pool:Testers'.
//
// More precisely, when this is used, the other bot dimensions are ignored.
// 'pool' is always implicit.
DimensionKey []string `protobuf:"bytes,2,rep,name=dimension_key,json=dimensionKey,proto3" json:"dimension_key,omitempty"`
}
func (x *BotMonitoring) Reset() {
*x = BotMonitoring{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *BotMonitoring) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BotMonitoring) ProtoMessage() {}
func (x *BotMonitoring) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BotMonitoring.ProtoReflect.Descriptor instead.
func (*BotMonitoring) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{6}
}
func (x *BotMonitoring) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *BotMonitoring) GetDimensionKey() []string {
if x != nil {
return x.DimensionKey
}
return nil
}
// Describes an external scheduler used by a particular swarming pool and
// dimension set, via the external scheduler API.
type ExternalSchedulerConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Service address of external scheduler.
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// Scheduler id within the external scheduler service to use. This value
// is opaque to swarming.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
// Dimensions is a list of dimension strings in "key:value" format (e.g.
// ["os:foo", "featureX:bar"]) that determines eligibility for a bot or task
// to use this external scheduler. In particular:
// - a bot will be eligible if it contains all of these dimensions.
// - a task will be eligible if all its slices contain all these dimensions.
//
// Note of care: if this list is empty, that means all requests in the pool
// are eligible to be forwarded to it.
//
// Note: to be deprecated in favor of any_dimensions and all_dimensions.
Dimensions []string `protobuf:"bytes,3,rep,name=dimensions,proto3" json:"dimensions,omitempty"`
// If not enabled, this external scheduler config will be ignored. This
// makes it safer to add new configs (the naive approach of adding a config
// with an empty dimensions list would cause all requests to be routed to
// that config).
Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"`
// Deprecated: Do not use.
FallbackWhenEmpty bool `protobuf:"varint,5,opt,name=fallback_when_empty,json=fallbackWhenEmpty,proto3" json:"fallback_when_empty,omitempty"`
// A task or bot must have all of these dimensions in order to match this
// dimension set.
//
// Note: Support not yet implemented.
AllDimensions []string `protobuf:"bytes,6,rep,name=all_dimensions,json=allDimensions,proto3" json:"all_dimensions,omitempty"`
// If any_dimensions is defined, a task or bot must have any of these
// dimensions in order to match this dimension set.
//
// Note: Support not yet implemented.
AnyDimensions []string `protobuf:"bytes,7,rep,name=any_dimensions,json=anyDimensions,proto3" json:"any_dimensions,omitempty"`
// If true, allows the swarming native scheduler to reap tasks that would
// otherwise be owned by this external scheduler, if the external scheduler
// returns no results.
//
// This field should be enabled temporarily when first turning on a new
// external scheduler config, to allow tasks that existed prior to that time
// to still have a chance to run. After prior tasks have aged out of the
// system, this flag should be disabled, to get stricter consistency between
// swarming's state and external scheduler's state.
AllowEsFallback bool `protobuf:"varint,8,opt,name=allow_es_fallback,json=allowEsFallback,proto3" json:"allow_es_fallback,omitempty"`
}
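// Illustrative sketch (not part of the generated bindings): the bot
// eligibility rule described for the dimensions field above. The helper name
// is hypothetical; botDimensions is the bot's flattened set of "key:value"
// dimension strings.
func botEligibleForExternalScheduler(cfg *ExternalSchedulerConfig, botDimensions map[string]bool) bool {
	if !cfg.GetEnabled() {
		return false
	}
	// An empty dimensions list means every request in the pool is eligible.
	for _, d := range cfg.GetDimensions() {
		if !botDimensions[d] {
			return false
		}
	}
	return true
}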
func (x *ExternalSchedulerConfig) Reset() {
*x = ExternalSchedulerConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExternalSchedulerConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExternalSchedulerConfig) ProtoMessage() {}
func (x *ExternalSchedulerConfig) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExternalSchedulerConfig.ProtoReflect.Descriptor instead.
func (*ExternalSchedulerConfig) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{7}
}
func (x *ExternalSchedulerConfig) GetAddress() string {
if x != nil {
return x.Address
}
return ""
}
func (x *ExternalSchedulerConfig) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *ExternalSchedulerConfig) GetDimensions() []string {
if x != nil {
return x.Dimensions
}
return nil
}
func (x *ExternalSchedulerConfig) GetEnabled() bool {
if x != nil {
return x.Enabled
}
return false
}
// Deprecated: Do not use.
func (x *ExternalSchedulerConfig) GetFallbackWhenEmpty() bool {
if x != nil {
return x.FallbackWhenEmpty
}
return false
}
func (x *ExternalSchedulerConfig) GetAllDimensions() []string {
if x != nil {
return x.AllDimensions
}
return nil
}
func (x *ExternalSchedulerConfig) GetAnyDimensions() []string {
if x != nil {
return x.AnyDimensions
}
return nil
}
func (x *ExternalSchedulerConfig) GetAllowEsFallback() bool {
if x != nil {
return x.AllowEsFallback
}
return false
}
type ExternalServices struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Isolate *ExternalServices_Isolate `protobuf:"bytes,1,opt,name=isolate,proto3" json:"isolate,omitempty"`
Cipd *ExternalServices_CIPD `protobuf:"bytes,2,opt,name=cipd,proto3" json:"cipd,omitempty"`
}
func (x *ExternalServices) Reset() {
*x = ExternalServices{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExternalServices) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExternalServices) ProtoMessage() {}
func (x *ExternalServices) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExternalServices.ProtoReflect.Descriptor instead.
func (*ExternalServices) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{8}
}
func (x *ExternalServices) GetIsolate() *ExternalServices_Isolate {
if x != nil {
return x.Isolate
}
return nil
}
func (x *ExternalServices) GetCipd() *ExternalServices_CIPD {
if x != nil {
return x.Cipd
}
return nil
}
type TrustedDelegation_TagList struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Tag []string `protobuf:"bytes,1,rep,name=tag,proto3" json:"tag,omitempty"`
}
func (x *TrustedDelegation_TagList) Reset() {
*x = TrustedDelegation_TagList{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TrustedDelegation_TagList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TrustedDelegation_TagList) ProtoMessage() {}
func (x *TrustedDelegation_TagList) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TrustedDelegation_TagList.ProtoReflect.Descriptor instead.
func (*TrustedDelegation_TagList) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{3, 0}
}
func (x *TrustedDelegation_TagList) GetTag() []string {
if x != nil {
return x.Tag
}
return nil
}
type TaskTemplate_CacheEntry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The name of the cache (required).
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The path relative to the task root to mount the cache (required).
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
}
func (x *TaskTemplate_CacheEntry) Reset() {
*x = TaskTemplate_CacheEntry{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskTemplate_CacheEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskTemplate_CacheEntry) ProtoMessage() {}
func (x *TaskTemplate_CacheEntry) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskTemplate_CacheEntry.ProtoReflect.Descriptor instead.
func (*TaskTemplate_CacheEntry) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{4, 0}
}
func (x *TaskTemplate_CacheEntry) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *TaskTemplate_CacheEntry) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
type TaskTemplate_CipdPackage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The path relative to the task root to unpack the CIPD package. A blank value
// is permitted and means 'the root directory of the task'.
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
// The CIPD package name template to use (required).
Pkg string `protobuf:"bytes,2,opt,name=pkg,proto3" json:"pkg,omitempty"`
// The version of the CIPD package to use (required).
Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
}
func (x *TaskTemplate_CipdPackage) Reset() {
*x = TaskTemplate_CipdPackage{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskTemplate_CipdPackage) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskTemplate_CipdPackage) ProtoMessage() {}
func (x *TaskTemplate_CipdPackage) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskTemplate_CipdPackage.ProtoReflect.Descriptor instead.
func (*TaskTemplate_CipdPackage) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{4, 1}
}
func (x *TaskTemplate_CipdPackage) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
func (x *TaskTemplate_CipdPackage) GetPkg() string {
if x != nil {
return x.Pkg
}
return ""
}
func (x *TaskTemplate_CipdPackage) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
type TaskTemplate_Env struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The envvar you want to set (required).
Var string `protobuf:"bytes,1,opt,name=var,proto3" json:"var,omitempty"`
// The envvar value you want to set. Any prefixes are prepended to this
// value. If the value is unset, prefixes will be prepended to the bot's
// current value of this envvar (see examples)
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
// Paths relative to the task root to prepend to this envvar on the bot.
// These will be resolved to absolute paths on the bot.
Prefix []string `protobuf:"bytes,3,rep,name=prefix,proto3" json:"prefix,omitempty"`
// If true, tasks setting this EnvVar can overwrite the value and/or the
// prefix. Otherwise, tasks will not be permitted to set any env var or
// env_prefix for this var.
//
// This should be True for envvars you expect tasks to extend, like $PATH.
// Note that this only affects envvar manipulation at the Swarming API
// level; once the task is running it can (of course) manipulate the env
// however it wants.
Soft bool `protobuf:"varint,4,opt,name=soft,proto3" json:"soft,omitempty"`
}
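// Illustrative sketch (not part of the generated bindings): how Value and
// Prefix combine according to the comments above. The resolveEnvVar name and
// the use of the platform path-list separator are assumptions for this
// example; it presumes "path/filepath" and "strings" are imported and that
// taskRoot is an absolute path on the bot.
func resolveEnvVar(taskRoot, currentValue string, e *TaskTemplate_Env) string {
	base := e.GetValue()
	if base == "" {
		// Value unset: prefixes are prepended to the bot's current value.
		base = currentValue
	}
	parts := make([]string, 0, len(e.GetPrefix())+1)
	for _, p := range e.GetPrefix() {
		// Prefixes are task-root-relative paths, resolved on the bot.
		parts = append(parts, filepath.Join(taskRoot, p))
	}
	if base != "" {
		parts = append(parts, base)
	}
	return strings.Join(parts, string(filepath.ListSeparator))
}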
func (x *TaskTemplate_Env) Reset() {
*x = TaskTemplate_Env{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskTemplate_Env) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskTemplate_Env) ProtoMessage() {}
func (x *TaskTemplate_Env) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskTemplate_Env.ProtoReflect.Descriptor instead.
func (*TaskTemplate_Env) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{4, 2}
}
func (x *TaskTemplate_Env) GetVar() string {
if x != nil {
return x.Var
}
return ""
}
func (x *TaskTemplate_Env) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
func (x *TaskTemplate_Env) GetPrefix() []string {
if x != nil {
return x.Prefix
}
return nil
}
func (x *TaskTemplate_Env) GetSoft() bool {
if x != nil {
return x.Soft
}
return false
}
type ExternalServices_Isolate struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// (required) URL of the default isolate server to use if it is not
// specified in the task.
//
// Must start with "https://" or "http://".
//
// e.g. "https://isolateserver.appspot.com"
Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"`
// (required) Default namespace to use if it is not specified in a task,
// e.g. "default-gzip"
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
}
func (x *ExternalServices_Isolate) Reset() {
*x = ExternalServices_Isolate{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExternalServices_Isolate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExternalServices_Isolate) ProtoMessage() {}
func (x *ExternalServices_Isolate) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExternalServices_Isolate.ProtoReflect.Descriptor instead.
func (*ExternalServices_Isolate) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{8, 0}
}
func (x *ExternalServices_Isolate) GetServer() string {
if x != nil {
return x.Server
}
return ""
}
func (x *ExternalServices_Isolate) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
type ExternalServices_CIPD struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// (required) URL of the default CIPD server to use, if it is not specified
// in the task.
//
// Must start with "https://" or "http://".
//
// e.g. "https://chrome-infra-packages.appspot.com"
Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"`
// (required) The version of the cipd client to use. This is likely the
// 'infra/tools/cipd/${platform}' package, but because it's part of the
// bootstrap needs special logic to handle its installation.
ClientPackage *CipdPackage `protobuf:"bytes,3,opt,name=client_package,json=clientPackage,proto3" json:"client_package,omitempty"`
}
func (x *ExternalServices_CIPD) Reset() {
*x = ExternalServices_CIPD{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExternalServices_CIPD) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExternalServices_CIPD) ProtoMessage() {}
func (x *ExternalServices_CIPD) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExternalServices_CIPD.ProtoReflect.Descriptor instead.
func (*ExternalServices_CIPD) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{8, 1}
}
func (x *ExternalServices_CIPD) GetServer() string {
if x != nil {
return x.Server
}
return ""
}
func (x *ExternalServices_CIPD) GetClientPackage() *CipdPackage {
if x != nil {
return x.ClientPackage
}
return nil
}
var File_go_chromium_org_luci_swarming_proto_config_pools_proto protoreflect.FileDescriptor
var file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDesc = []byte{
0x0a, 0x36, 0x67, 0x6f, 0x2e, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x69, 0x75, 0x6d, 0x2e, 0x6f, 0x72,
0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2f, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x70, 0x6f, 0x6f,
0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69,
0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x2f, 0x67, 0x6f, 0x2e, 0x63, 0x68,
0x72, 0x6f, 0x6d, 0x69, 0x75, 0x6d, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2f,
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x37, 0x67, 0x6f, 0x2e, 0x63,
0x68, 0x72, 0x6f, 0x6d, 0x69, 0x75, 0x6d, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69,
0x2f, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x37, 0x67, 0x6f, 0x2e, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x69, 0x75, 0x6d,
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2f, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69,
0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
0x72, 0x65, 0x61, 0x6c, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x03, 0x0a,
0x08, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x43, 0x66, 0x67, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x6f,
0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69,
0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x04,
0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x5d, 0x0a, 0x19, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69,
0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x17, 0x64, 0x65, 0x66, 0x61,
0x75, 0x6c, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x65, 0x6d, 0x70,
0x6c, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x77, 0x61,
0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73,
0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x54,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x61, 0x0a, 0x18, 0x74, 0x61, 0x73, 0x6b, 0x5f,
0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d,
0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x77, 0x61, 0x72,
0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b,
0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65,
0x6e, 0x74, 0x52, 0x16, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65,
0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0e, 0x62, 0x6f,
0x74, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x42, 0x6f, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
0x6e, 0x67, 0x52, 0x0d, 0x62, 0x6f, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xd8, 0x05, 0x0a, 0x04, 0x50, 0x6f, 0x6f, 0x6c,
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02,
0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x0a,
0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1b, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x73,
0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x6c, 0x6c,
0x6f, 0x77, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x61, 0x6c, 0x6c, 0x6f,
0x77, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
0x74, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x67, 0x72, 0x6f,
0x75, 0x70, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65,
0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x47,
0x72, 0x6f, 0x75, 0x70, 0x12, 0x3a, 0x0a, 0x18, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x65, 0x6d,
0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74,
0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x65,
0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74,
0x12, 0x70, 0x0a, 0x1f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
0x65, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x6c,
0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x77, 0x61, 0x72,
0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b,
0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65,
0x6e, 0x74, 0x48, 0x00, 0x52, 0x1c, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61,
0x74, 0x65, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x6c, 0x69,
0x6e, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x6f, 0x74, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x6f, 0x74, 0x4d,
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x59, 0x0a, 0x13, 0x65, 0x78, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73,
0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e,
0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x12, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
0x6c, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x6c, 0x6d, 0x18, 0x0a, 0x20,
0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x65, 0x61, 0x6c, 0x6d, 0x12, 0x5e, 0x0a, 0x1a, 0x65, 0x6e,
0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x6c, 0x6d, 0x5f, 0x70, 0x65, 0x72,
0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x20,
0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x2e, 0x52, 0x65, 0x61, 0x6c, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
0x52, 0x18, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x52, 0x65, 0x61, 0x6c, 0x6d, 0x50,
0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x64, 0x65,
0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x6c, 0x6d,
0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54,
0x61, 0x73, 0x6b, 0x52, 0x65, 0x61, 0x6c, 0x6d, 0x42, 0x18, 0x0a, 0x16, 0x74, 0x61, 0x73, 0x6b,
0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65,
0x6d, 0x65, 0x22, 0x89, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72,
0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02,
0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x51, 0x0a, 0x12, 0x74,
0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69,
0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65,
0x64, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x74, 0x72, 0x75,
0x73, 0x74, 0x65, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x9b,
0x01, 0x0a, 0x11, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x50, 0x0a,
0x0e, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x6f, 0x66, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67,
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44,
0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x61, 0x67, 0x4c, 0x69, 0x73,
0x74, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x41, 0x6e, 0x79, 0x4f, 0x66, 0x1a,
0x1b, 0x0a, 0x07, 0x54, 0x61, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61,
0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xdf, 0x03, 0x0a,
0x0c, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x03,
0x28, 0x09, 0x52, 0x07, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x63,
0x61, 0x63, 0x68, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x73, 0x77, 0x61,
0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73,
0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x63, 0x61, 0x63, 0x68, 0x65, 0x12, 0x4c, 0x0a, 0x0c, 0x63,
0x69, 0x70, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x29, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65,
0x2e, 0x43, 0x69, 0x70, 0x64, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x63, 0x69,
0x70, 0x64, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x03, 0x65, 0x6e, 0x76,
0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e,
0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d,
0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x1a, 0x34,
0x0a, 0x0a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x70, 0x61, 0x74, 0x68, 0x1a, 0x4d, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x64, 0x50, 0x61, 0x63, 0x6b,
0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x6b, 0x67, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x6b, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x1a, 0x59, 0x0a, 0x03, 0x45, 0x6e, 0x76, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x61,
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x76, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x03,
0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6f,
0x66, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x6f, 0x66, 0x74, 0x22, 0xbb,
0x01, 0x0a, 0x16, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x44,
0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a,
0x04, 0x70, 0x72, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x77,
0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61,
0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x04, 0x70, 0x72, 0x6f, 0x64,
0x12, 0x35, 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1d, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52,
0x06, 0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x6e, 0x61, 0x72,
0x79, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c,
0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x48, 0x0a, 0x0d,
0x42, 0x6f, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
0x65, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x22, 0xab, 0x02, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x0e, 0x0a, 0x02,
0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a,
0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
0x52, 0x0a, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65,
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x13, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61,
0x63, 0x6b, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x05, 0x20,
0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x11, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63,
0x6b, 0x57, 0x68, 0x65, 0x6e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c,
0x6c, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03,
0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6e, 0x79, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6e, 0x79, 0x44, 0x69,
0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f,
0x77, 0x5f, 0x65, 0x73, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x08, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x73, 0x46, 0x61, 0x6c, 0x6c,
0x62, 0x61, 0x63, 0x6b, 0x22, 0xcf, 0x02, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x07, 0x69, 0x73, 0x6f,
0x6c, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x73, 0x77, 0x61,
0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x49, 0x73,
0x6f, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x07, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x3a,
0x0a, 0x04, 0x63, 0x69, 0x70, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73,
0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45,
0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
0x43, 0x49, 0x50, 0x44, 0x52, 0x04, 0x63, 0x69, 0x70, 0x64, 0x1a, 0x3f, 0x0a, 0x07, 0x49, 0x73,
0x6f, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1c, 0x0a,
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x79, 0x0a, 0x04, 0x43,
0x49, 0x50, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0e, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x63,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x69, 0x70, 0x64, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
0x65, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7e, 0x5a, 0x33, 0x67, 0x6f, 0x2e, 0x63, 0x68, 0x72,
0x6f, 0x6d, 0x69, 0x75, 0x6d, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2f, 0x73,
0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x3b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0xa2, 0xfe, 0x23,
0x45, 0x0a, 0x43, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2d,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x70, 0x6f, 0x74, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2f, 0x73, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x3a, 0x70, 0x6f, 0x6f,
0x6c, 0x73, 0x2e, 0x63, 0x66, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescOnce sync.Once
file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescData = file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDesc
)
func file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP() []byte {
file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescOnce.Do(func() {
file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescData = protoimpl.X.CompressGZIP(file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescData)
})
return file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescData
}
var file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
var file_go_chromium_org_luci_swarming_proto_config_pools_proto_goTypes = []interface{}{
(*PoolsCfg)(nil), // 0: swarming.config.PoolsCfg
(*Pool)(nil), // 1: swarming.config.Pool
(*Schedulers)(nil), // 2: swarming.config.Schedulers
(*TrustedDelegation)(nil), // 3: swarming.config.TrustedDelegation
(*TaskTemplate)(nil), // 4: swarming.config.TaskTemplate
(*TaskTemplateDeployment)(nil), // 5: swarming.config.TaskTemplateDeployment
(*BotMonitoring)(nil), // 6: swarming.config.BotMonitoring
(*ExternalSchedulerConfig)(nil), // 7: swarming.config.ExternalSchedulerConfig
(*ExternalServices)(nil), // 8: swarming.config.ExternalServices
(*TrustedDelegation_TagList)(nil), // 9: swarming.config.TrustedDelegation.TagList
(*TaskTemplate_CacheEntry)(nil), // 10: swarming.config.TaskTemplate.CacheEntry
(*TaskTemplate_CipdPackage)(nil), // 11: swarming.config.TaskTemplate.CipdPackage
(*TaskTemplate_Env)(nil), // 12: swarming.config.TaskTemplate.Env
(*ExternalServices_Isolate)(nil), // 13: swarming.config.ExternalServices.Isolate
(*ExternalServices_CIPD)(nil), // 14: swarming.config.ExternalServices.CIPD
(RealmPermission)(0), // 15: swarming.config.RealmPermission
(*CipdPackage)(nil), // 16: swarming.config.CipdPackage
}
var file_go_chromium_org_luci_swarming_proto_config_pools_proto_depIdxs = []int32{
1, // 0: swarming.config.PoolsCfg.pool:type_name -> swarming.config.Pool
8, // 1: swarming.config.PoolsCfg.default_external_services:type_name -> swarming.config.ExternalServices
4, // 2: swarming.config.PoolsCfg.task_template:type_name -> swarming.config.TaskTemplate
5, // 3: swarming.config.PoolsCfg.task_template_deployment:type_name -> swarming.config.TaskTemplateDeployment
6, // 4: swarming.config.PoolsCfg.bot_monitoring:type_name -> swarming.config.BotMonitoring
2, // 5: swarming.config.Pool.schedulers:type_name -> swarming.config.Schedulers
5, // 6: swarming.config.Pool.task_template_deployment_inline:type_name -> swarming.config.TaskTemplateDeployment
7, // 7: swarming.config.Pool.external_schedulers:type_name -> swarming.config.ExternalSchedulerConfig
15, // 8: swarming.config.Pool.enforced_realm_permissions:type_name -> swarming.config.RealmPermission
3, // 9: swarming.config.Schedulers.trusted_delegation:type_name -> swarming.config.TrustedDelegation
9, // 10: swarming.config.TrustedDelegation.require_any_of:type_name -> swarming.config.TrustedDelegation.TagList
10, // 11: swarming.config.TaskTemplate.cache:type_name -> swarming.config.TaskTemplate.CacheEntry
11, // 12: swarming.config.TaskTemplate.cipd_package:type_name -> swarming.config.TaskTemplate.CipdPackage
12, // 13: swarming.config.TaskTemplate.env:type_name -> swarming.config.TaskTemplate.Env
4, // 14: swarming.config.TaskTemplateDeployment.prod:type_name -> swarming.config.TaskTemplate
4, // 15: swarming.config.TaskTemplateDeployment.canary:type_name -> swarming.config.TaskTemplate
13, // 16: swarming.config.ExternalServices.isolate:type_name -> swarming.config.ExternalServices.Isolate
14, // 17: swarming.config.ExternalServices.cipd:type_name -> swarming.config.ExternalServices.CIPD
16, // 18: swarming.config.ExternalServices.CIPD.client_package:type_name -> swarming.config.CipdPackage
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
}
func init() { file_go_chromium_org_luci_swarming_proto_config_pools_proto_init() }
func file_go_chromium_org_luci_swarming_proto_config_pools_proto_init() {
if File_go_chromium_org_luci_swarming_proto_config_pools_proto != nil {
return
}
file_go_chromium_org_luci_swarming_proto_config_config_proto_init()
file_go_chromium_org_luci_swarming_proto_config_realms_proto_init()
if !protoimpl.UnsafeEnabled {
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PoolsCfg); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Pool); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Schedulers); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TrustedDelegation); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskTemplate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskTemplateDeployment); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BotMonitoring); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExternalSchedulerConfig); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExternalServices); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TrustedDelegation_TagList); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskTemplate_CacheEntry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskTemplate_CipdPackage); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskTemplate_Env); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExternalServices_Isolate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExternalServices_CIPD); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes[1].OneofWrappers = []interface{}{
(*Pool_TaskTemplateDeployment)(nil),
(*Pool_TaskTemplateDeploymentInline)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDesc,
NumEnums: 0,
NumMessages: 15,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_go_chromium_org_luci_swarming_proto_config_pools_proto_goTypes,
DependencyIndexes: file_go_chromium_org_luci_swarming_proto_config_pools_proto_depIdxs,
MessageInfos: file_go_chromium_org_luci_swarming_proto_config_pools_proto_msgTypes,
}.Build()
File_go_chromium_org_luci_swarming_proto_config_pools_proto = out.File
file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDesc = nil
file_go_chromium_org_luci_swarming_proto_config_pools_proto_goTypes = nil
file_go_chromium_org_luci_swarming_proto_config_pools_proto_depIdxs = nil
} | }
return nil
}
|
ReadBuffer.go | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the |
package utils
import (
"math/big"
)
type ReadBuffer interface {
// GetPos returns the current byte position
GetPos() uint16
// HasMore returns true if there are bitLength bits available
HasMore(bitLength uint8) bool
// PullContext signals that we now expect a context with the supplied logical name
PullContext(logicalName string, readerArgs ...WithReaderArgs) error
ReadBit(logicalName string, readerArgs ...WithReaderArgs) (bool, error)
ReadByte(logicalName string, readerArgs ...WithReaderArgs) (byte, error)
ReadByteArray(logicalName string, numberOfBytes int, readerArgs ...WithReaderArgs) ([]byte, error)
ReadUint8(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (uint8, error)
ReadUint16(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (uint16, error)
ReadUint32(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (uint32, error)
ReadUint64(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (uint64, error)
ReadInt8(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (int8, error)
ReadInt16(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (int16, error)
ReadInt32(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (int32, error)
ReadInt64(logicalName string, bitLength uint8, readerArgs ...WithReaderArgs) (int64, error)
ReadBigInt(logicalName string, bitLength uint64, readerArgs ...WithReaderArgs) (*big.Int, error)
ReadFloat32(logicalName string, signed bool, exponentBitLength uint8, mantissaBitLength uint8, readerArgs ...WithReaderArgs) (float32, error)
ReadFloat64(logicalName string, signed bool, exponentBitLength uint8, mantissaBitLength uint8, readerArgs ...WithReaderArgs) (float64, error)
ReadBigFloat(logicalName string, signed bool, exponentBitLength uint8, mantissaBitLength uint8, readerArgs ...WithReaderArgs) (*big.Float, error)
ReadString(logicalName string, bitLength uint32, readerArgs ...WithReaderArgs) (string, error)
// CloseContext signals that we expect the end of the context with the supplied logical name
CloseContext(logicalName string, readerArgs ...WithReaderArgs) error
}
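// Illustrative sketch (not part of this interface): reading a small structure
// with the PullContext/CloseContext pairing described above. The context and
// field names are hypothetical; any concrete ReadBuffer implementation can be
// passed in.
func readExampleHeader(rb ReadBuffer) (uint8, uint16, error) {
	if err := rb.PullContext("ExampleHeader"); err != nil {
		return 0, 0, err
	}
	// An 8-bit version field followed by a 16-bit payload length.
	version, err := rb.ReadUint8("version", 8)
	if err != nil {
		return 0, 0, err
	}
	length, err := rb.ReadUint16("payloadLength", 16)
	if err != nil {
		return 0, 0, err
	}
	if err := rb.CloseContext("ExampleHeader"); err != nil {
		return 0, 0, err
	}
	return version, length, nil
}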
// WithReaderArgs is a marker interface for reader args supplied by the builders
type WithReaderArgs interface {
isReaderArgs() bool
}
///////////////////////////////////////
///////////////////////////////////////
//
// Internal section
//
type readerArg struct {
}
func (_ readerArg) isReaderArgs() bool {
return true
}
//
// Internal section
//
///////////////////////////////////////
/////////////////////////////////////// | * specific language governing permissions and limitations
* under the License.
*/ |
Dispatcher.js | "use strict";
var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
/* tslint:disable */
var path = require("path");
var util_1 = require("../util");
var index_1 = require("../Output/index");
var Plugins_1 = require("../Plugin/Plugins");
var CommandManagerBase = /** @class */ (function () {
function CommandManagerBase(config) {
this.config = config;
}
CommandManagerBase.prototype.findCommand = function (id) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2 /*return*/, null];
});
});
};
CommandManagerBase.prototype.listTopics = function (prefix) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2 /*return*/, []];
});
});
};
CommandManagerBase.prototype.findTopic = function (id) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2 /*return*/, null];
});
});
};
CommandManagerBase.prototype.require = function (p) {
return util_1.undefault(require(p));
};
return CommandManagerBase;
}());
exports.CommandManagerBase = CommandManagerBase;
var BuiltinCommandManager = /** @class */ (function (_super) {
__extends(BuiltinCommandManager, _super);
function BuiltinCommandManager() {
return _super !== null && _super.apply(this, arguments) || this;
}
BuiltinCommandManager.prototype.findCommand = function (id) {
return __awaiter(this, void 0, void 0, function () {
var builtins, p;
return __generator(this, function (_a) {
builtins = {
version: 'version',
help: 'help',
};
p = builtins[id];
if (p) {
p = path.join(__dirname, 'commands', p);
return [2 /*return*/, this.require(p)];
}
return [2 /*return*/];
});
});
};
BuiltinCommandManager.prototype.listTopics = function (prefix) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2 /*return*/, ['version', 'help']];
});
});
};
return BuiltinCommandManager;
}(CommandManagerBase));
exports.BuiltinCommandManager = BuiltinCommandManager;
var CLICommandManager = /** @class */ (function (_super) {
__extends(CLICommandManager, _super);
function CLICommandManager() {
return _super !== null && _super.apply(this, arguments) || this;
}
CLICommandManager.prototype.findCommand = function (id) {
return __awaiter(this, void 0, void 0, function () {
var root, p;
return __generator(this, function (_a) {
root = this.config.commandsDir;
if (!root)
return [2 /*return*/];
try {
p = require.resolve(path.join.apply(path, [root].concat(id.split(':'))));
}
catch (err) {
if (err.code !== 'MODULE_NOT_FOUND')
throw err;
}
if (p)
return [2 /*return*/, this.require(p)];
return [2 /*return*/];
});
});
};
return CLICommandManager;
}(CommandManagerBase));
exports.CLICommandManager = CLICommandManager;
// TODO look into this later: https://sourcegraph.com/github.com/heroku/cli-engine/-/blob/src/plugins/index.js#L9:33
// not needed right now
//
var PluginCommandManager = /** @class */ (function (_super) {
__extends(PluginCommandManager, _super);
function PluginCommandManager() {
return _super !== null && _super.apply(this, arguments) || this;
}
PluginCommandManager.prototype.findCommand = function (id) {
return __awaiter(this, void 0, void 0, function () {
var out, plugins, foundCommand;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
out = new index_1.Output(this.config);
plugins = new Plugins_1.default(out);
return [4 /*yield*/, plugins.load()];
case 1:
_a.sent();
foundCommand = plugins.findCommand(id || this.config.defaultCommand || 'help');
return [2 /*return*/, foundCommand];
}
});
});
};
PluginCommandManager.prototype.findTopic = function (id) {
return __awaiter(this, void 0, void 0, function () {
var out, plugins;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
out = new index_1.Output(this.config);
plugins = new Plugins_1.default(out);
return [4 /*yield*/, plugins.load()];
case 1:
_a.sent();
return [2 /*return*/, plugins.findTopic(id)];
}
});
});
};
return PluginCommandManager;
}(CommandManagerBase));
var Dispatcher = /** @class */ (function () {
function | (config) {
this.config = config;
this.managers = [
new CLICommandManager(config),
new BuiltinCommandManager(config),
new PluginCommandManager(config),
];
}
Dispatcher.prototype.findCommand = function (id) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, manager, Command;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (!id)
return [2 /*return*/, {}];
_i = 0, _a = this.managers;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3 /*break*/, 4];
manager = _a[_i];
return [4 /*yield*/, manager.findCommand(id)];
case 2:
Command = _b.sent();
if (Command)
return [2 /*return*/, { Command: Command }];
_b.label = 3;
case 3:
_i++;
return [3 /*break*/, 1];
case 4: return [2 /*return*/, {}];
}
});
});
};
Dispatcher.prototype.findTopic = function (id) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, manager, topic;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (!id)
return [2 /*return*/, {}
// TODO: Fix this hack for "cluster".
// Find why cache does not invalidate for cluster command
];
// TODO: Fix this hack for "cluster".
// Find why cache does not invalidate for cluster command
if (id.trim() === 'cluster')
return [2 /*return*/, null];
_i = 0, _a = this.managers;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3 /*break*/, 4];
manager = _a[_i];
return [4 /*yield*/, manager.findTopic(id)];
case 2:
topic = _b.sent();
if (topic)
return [2 /*return*/, topic];
_b.label = 3;
case 3:
_i++;
return [3 /*break*/, 1];
case 4: return [2 /*return*/, null];
}
});
});
};
Dispatcher.prototype.listTopics = function (prefix) {
return __awaiter(this, void 0, void 0, function () {
var arrs;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, Promise.all(this.managers.map(function (m) { return m.listTopics(prefix); }))];
case 1:
arrs = _a.sent();
return [2 /*return*/, arrs.reduce(function (next, res) { return res.concat(next); }, [])];
}
});
});
};
Object.defineProperty(Dispatcher.prototype, "cmdAskingForHelp", {
get: function () {
for (var _i = 0, _a = this.config.argv; _i < _a.length; _i++) {
var arg = _a[_i];
if (['--help', '-h'].includes(arg))
return true;
if (arg === '--')
return false;
}
return false;
},
enumerable: true,
configurable: true
});
return Dispatcher;
}());
exports.Dispatcher = Dispatcher;
//# sourceMappingURL=Dispatcher.js.map | Dispatcher |
command.rs | use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::OsStrExt;
use heim_common::prelude::*;
use crate::sys::macos::{pid_exists, wrappers};
use crate::{Pid, ProcessError, ProcessResult};
#[derive(Debug)]
pub struct Command(wrappers::ProcArgs);
impl Command {
pub fn to_os_string(&self) -> OsString {
self.0.to_command()
}
pub fn into_os_string(self) -> OsString {
// TODO: Performance could be better
self.to_os_string()
}
}
impl<'a> IntoIterator for &'a Command {
type Item = &'a OsStr;
type IntoIter = CommandIter<'a>;
fn into_iter(self) -> Self::IntoIter {
CommandIter(self.0.arguments())
}
}
#[derive(Debug)]
pub struct | <'a>(wrappers::ProcArgsArguments<'a>);
impl<'a> Iterator for CommandIter<'a> {
type Item = &'a OsStr;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(OsStr::from_bytes)
}
}
pub fn command(pid: Pid) -> impl Future<Output = ProcessResult<Command>> {
future::lazy(move |_| wrappers::ProcArgs::get(pid))
.map_ok(Command)
.or_else(move |e| {
// TODO: Will look better with `async_await`
match e.raw_os_error() {
// `KERN_PROCARGS2` syscall might return `EINVAL` in case of zombie process
Some(libc::EINVAL) => {
let f = pid_exists(pid).and_then(move |is_exists| {
if is_exists {
future::err(ProcessError::ZombieProcess(pid))
} else {
future::err(e.into())
}
});
future::Either::Left(f)
}
_ => {
let f = future::err(e.into());
future::Either::Right(f)
}
}
})
}
| CommandIter |
cfg1.rs | #[doc = "Register `CFG1` reader"]
pub struct R(crate::R<CFG1_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<CFG1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<CFG1_SPEC>> for R {
fn from(reader: crate::R<CFG1_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `CFG1` writer"]
pub struct W(crate::W<CFG1_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<CFG1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<CFG1_SPEC>> for W {
fn from(writer: crate::W<CFG1_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `HSYNC_POL` reader - Horizontal Synchronization Polarity"]
pub struct HSYNC_POL_R(crate::FieldReader<bool, bool>);
impl HSYNC_POL_R {
pub(crate) fn new(bits: bool) -> Self {
HSYNC_POL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for HSYNC_POL_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `HSYNC_POL` writer - Horizontal Synchronization Polarity"]
pub struct HSYNC_POL_W<'a> {
w: &'a mut W,
}
impl<'a> HSYNC_POL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `VSYNC_POL` reader - Vertical Synchronization Polarity"]
pub struct VSYNC_POL_R(crate::FieldReader<bool, bool>);
impl VSYNC_POL_R {
pub(crate) fn new(bits: bool) -> Self {
VSYNC_POL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for VSYNC_POL_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `VSYNC_POL` writer - Vertical Synchronization Polarity"]
pub struct VSYNC_POL_W<'a> {
w: &'a mut W,
}
impl<'a> VSYNC_POL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
self.w
}
}
#[doc = "Field `PIXCLK_POL` reader - Pixel Clock Polarity"]
pub struct PIXCLK_POL_R(crate::FieldReader<bool, bool>);
impl PIXCLK_POL_R { | }
impl core::ops::Deref for PIXCLK_POL_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PIXCLK_POL` writer - Pixel Clock Polarity"]
pub struct PIXCLK_POL_W<'a> {
w: &'a mut W,
}
impl<'a> PIXCLK_POL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
self.w
}
}
#[doc = "Field `EMB_SYNC` reader - Embedded Synchronization"]
pub struct EMB_SYNC_R(crate::FieldReader<bool, bool>);
impl EMB_SYNC_R {
pub(crate) fn new(bits: bool) -> Self {
EMB_SYNC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for EMB_SYNC_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EMB_SYNC` writer - Embedded Synchronization"]
pub struct EMB_SYNC_W<'a> {
w: &'a mut W,
}
impl<'a> EMB_SYNC_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
self.w
}
}
#[doc = "Field `CRC_SYNC` reader - Embedded Synchronization Correction"]
pub struct CRC_SYNC_R(crate::FieldReader<bool, bool>);
impl CRC_SYNC_R {
pub(crate) fn new(bits: bool) -> Self {
CRC_SYNC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for CRC_SYNC_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CRC_SYNC` writer - Embedded Synchronization Correction"]
pub struct CRC_SYNC_W<'a> {
w: &'a mut W,
}
impl<'a> CRC_SYNC_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7);
self.w
}
}
#[doc = "Field `FRATE` reader - Frame Rate \\[0..7\\]"]
pub struct FRATE_R(crate::FieldReader<u8, u8>);
impl FRATE_R {
pub(crate) fn new(bits: u8) -> Self {
FRATE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for FRATE_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `FRATE` writer - Frame Rate \\[0..7\\]"]
pub struct FRATE_W<'a> {
w: &'a mut W,
}
impl<'a> FRATE_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 8)) | ((value as u32 & 0x07) << 8);
self.w
}
}
#[doc = "Field `DISCR` reader - Disable Codec Request"]
pub struct DISCR_R(crate::FieldReader<bool, bool>);
impl DISCR_R {
pub(crate) fn new(bits: bool) -> Self {
DISCR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DISCR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DISCR` writer - Disable Codec Request"]
pub struct DISCR_W<'a> {
w: &'a mut W,
}
impl<'a> DISCR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11);
self.w
}
}
#[doc = "Field `FULL` reader - Full Mode is Allowed"]
pub struct FULL_R(crate::FieldReader<bool, bool>);
impl FULL_R {
pub(crate) fn new(bits: bool) -> Self {
FULL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for FULL_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `FULL` writer - Full Mode is Allowed"]
pub struct FULL_W<'a> {
w: &'a mut W,
}
impl<'a> FULL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12);
self.w
}
}
#[doc = "Threshold Mask\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum THMASK_A {
#[doc = "0: Only 4 beats AHB burst allowed"]
BEATS_4 = 0,
#[doc = "1: Only 4 and 8 beats AHB burst allowed"]
BEATS_8 = 1,
#[doc = "2: 4, 8 and 16 beats AHB burst allowed"]
BEATS_16 = 2,
}
impl From<THMASK_A> for u8 {
#[inline(always)]
fn from(variant: THMASK_A) -> Self {
variant as _
}
}
#[doc = "Field `THMASK` reader - Threshold Mask"]
pub struct THMASK_R(crate::FieldReader<u8, THMASK_A>);
impl THMASK_R {
pub(crate) fn new(bits: u8) -> Self {
THMASK_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<THMASK_A> {
match self.bits {
0 => Some(THMASK_A::BEATS_4),
1 => Some(THMASK_A::BEATS_8),
2 => Some(THMASK_A::BEATS_16),
_ => None,
}
}
#[doc = "Checks if the value of the field is `BEATS_4`"]
#[inline(always)]
pub fn is_beats_4(&self) -> bool {
**self == THMASK_A::BEATS_4
}
#[doc = "Checks if the value of the field is `BEATS_8`"]
#[inline(always)]
pub fn is_beats_8(&self) -> bool {
**self == THMASK_A::BEATS_8
}
#[doc = "Checks if the value of the field is `BEATS_16`"]
#[inline(always)]
pub fn is_beats_16(&self) -> bool {
**self == THMASK_A::BEATS_16
}
}
impl core::ops::Deref for THMASK_R {
type Target = crate::FieldReader<u8, THMASK_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `THMASK` writer - Threshold Mask"]
pub struct THMASK_W<'a> {
w: &'a mut W,
}
impl<'a> THMASK_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: THMASK_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "Only 4 beats AHB burst allowed"]
#[inline(always)]
pub fn beats_4(self) -> &'a mut W {
self.variant(THMASK_A::BEATS_4)
}
#[doc = "Only 4 and 8 beats AHB burst allowed"]
#[inline(always)]
pub fn beats_8(self) -> &'a mut W {
self.variant(THMASK_A::BEATS_8)
}
#[doc = "4, 8 and 16 beats AHB burst allowed"]
#[inline(always)]
pub fn beats_16(self) -> &'a mut W {
self.variant(THMASK_A::BEATS_16)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 13)) | ((value as u32 & 0x03) << 13);
self.w
}
}
#[doc = "Field `SLD` reader - Start of Line Delay"]
pub struct SLD_R(crate::FieldReader<u8, u8>);
impl SLD_R {
pub(crate) fn new(bits: u8) -> Self {
SLD_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SLD_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SLD` writer - Start of Line Delay"]
pub struct SLD_W<'a> {
w: &'a mut W,
}
impl<'a> SLD_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 16)) | ((value as u32 & 0xff) << 16);
self.w
}
}
#[doc = "Field `SFD` reader - Start of Frame Delay"]
pub struct SFD_R(crate::FieldReader<u8, u8>);
impl SFD_R {
pub(crate) fn new(bits: u8) -> Self {
SFD_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SFD_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SFD` writer - Start of Frame Delay"]
pub struct SFD_W<'a> {
w: &'a mut W,
}
impl<'a> SFD_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 24)) | ((value as u32 & 0xff) << 24);
self.w
}
}
impl R {
#[doc = "Bit 2 - Horizontal Synchronization Polarity"]
#[inline(always)]
pub fn hsync_pol(&self) -> HSYNC_POL_R {
HSYNC_POL_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Vertical Synchronization Polarity"]
#[inline(always)]
pub fn vsync_pol(&self) -> VSYNC_POL_R {
VSYNC_POL_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Pixel Clock Polarity"]
#[inline(always)]
pub fn pixclk_pol(&self) -> PIXCLK_POL_R {
PIXCLK_POL_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 6 - Embedded Synchronization"]
#[inline(always)]
pub fn emb_sync(&self) -> EMB_SYNC_R {
EMB_SYNC_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - Embedded Synchronization Correction"]
#[inline(always)]
pub fn crc_sync(&self) -> CRC_SYNC_R {
CRC_SYNC_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bits 8:10 - Frame Rate \\[0..7\\]"]
#[inline(always)]
pub fn frate(&self) -> FRATE_R {
FRATE_R::new(((self.bits >> 8) & 0x07) as u8)
}
#[doc = "Bit 11 - Disable Codec Request"]
#[inline(always)]
pub fn discr(&self) -> DISCR_R {
DISCR_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 12 - Full Mode is Allowed"]
#[inline(always)]
pub fn full(&self) -> FULL_R {
FULL_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bits 13:14 - Threshold Mask"]
#[inline(always)]
pub fn thmask(&self) -> THMASK_R {
THMASK_R::new(((self.bits >> 13) & 0x03) as u8)
}
#[doc = "Bits 16:23 - Start of Line Delay"]
#[inline(always)]
pub fn sld(&self) -> SLD_R {
SLD_R::new(((self.bits >> 16) & 0xff) as u8)
}
#[doc = "Bits 24:31 - Start of Frame Delay"]
#[inline(always)]
pub fn sfd(&self) -> SFD_R {
SFD_R::new(((self.bits >> 24) & 0xff) as u8)
}
}
impl W {
#[doc = "Bit 2 - Horizontal Synchronization Polarity"]
#[inline(always)]
pub fn hsync_pol(&mut self) -> HSYNC_POL_W {
HSYNC_POL_W { w: self }
}
#[doc = "Bit 3 - Vertical Synchronization Polarity"]
#[inline(always)]
pub fn vsync_pol(&mut self) -> VSYNC_POL_W {
VSYNC_POL_W { w: self }
}
#[doc = "Bit 4 - Pixel Clock Polarity"]
#[inline(always)]
pub fn pixclk_pol(&mut self) -> PIXCLK_POL_W {
PIXCLK_POL_W { w: self }
}
#[doc = "Bit 6 - Embedded Synchronization"]
#[inline(always)]
pub fn emb_sync(&mut self) -> EMB_SYNC_W {
EMB_SYNC_W { w: self }
}
#[doc = "Bit 7 - Embedded Synchronization Correction"]
#[inline(always)]
pub fn crc_sync(&mut self) -> CRC_SYNC_W {
CRC_SYNC_W { w: self }
}
#[doc = "Bits 8:10 - Frame Rate \\[0..7\\]"]
#[inline(always)]
pub fn frate(&mut self) -> FRATE_W {
FRATE_W { w: self }
}
#[doc = "Bit 11 - Disable Codec Request"]
#[inline(always)]
pub fn discr(&mut self) -> DISCR_W {
DISCR_W { w: self }
}
#[doc = "Bit 12 - Full Mode is Allowed"]
#[inline(always)]
pub fn full(&mut self) -> FULL_W {
FULL_W { w: self }
}
#[doc = "Bits 13:14 - Threshold Mask"]
#[inline(always)]
pub fn thmask(&mut self) -> THMASK_W {
THMASK_W { w: self }
}
#[doc = "Bits 16:23 - Start of Line Delay"]
#[inline(always)]
pub fn sld(&mut self) -> SLD_W {
SLD_W { w: self }
}
#[doc = "Bits 24:31 - Start of Frame Delay"]
#[inline(always)]
pub fn sfd(&mut self) -> SFD_W {
SFD_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "ISI Configuration 1 Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cfg1](index.html) module"]
pub struct CFG1_SPEC;
impl crate::RegisterSpec for CFG1_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [cfg1::R](R) reader structure"]
impl crate::Readable for CFG1_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [cfg1::W](W) writer structure"]
impl crate::Writable for CFG1_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets CFG1 to value 0"]
impl crate::Resettable for CFG1_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | pub(crate) fn new(bits: bool) -> Self {
PIXCLK_POL_R(crate::FieldReader::new(bits))
} |
at.go | package rfc4512
type RFC4512AttributeTypes []RFC4512AttributeType
type RFC4512AttributeType string | OperationalAttributeTypes RFC4512AttributeTypes
DSAAttributeTypes RFC4512AttributeTypes
AllAttributeTypes RFC4512AttributeTypes
)
// Operational AttributeTypes
var (
AliasedObjectName RFC4512AttributeType
ObjectClass RFC4512AttributeType
CreatorsName RFC4512AttributeType
CreateTimestamp RFC4512AttributeType
ModifiersName RFC4512AttributeType
ModifyTimestamp RFC4512AttributeType
StructuralObjectClass RFC4512AttributeType
GoverningStructureRule RFC4512AttributeType
)
// Directory Schema AttributeTypes
var (
ObjectClasses RFC4512AttributeType
SubschemaSubentry RFC4512AttributeType
AttributeTypes RFC4512AttributeType
MatchingRules RFC4512AttributeType
MatchingRuleUse RFC4512AttributeType
LDAPSyntaxes RFC4512AttributeType
DITContentRules RFC4512AttributeType
DITStructureRules RFC4512AttributeType
NameForms RFC4512AttributeType
)
// DSA AttributeTypes
var (
AltServer RFC4512AttributeType
NamingContexts RFC4512AttributeType
SupportedControl RFC4512AttributeType
SupportedExtension RFC4512AttributeType
SupportedFeatures RFC4512AttributeType
SupportedLDAPVersion RFC4512AttributeType
SupportedSASLMechanisms RFC4512AttributeType
)
func init() {
ObjectClasses = RFC4512AttributeType(`( 2.5.21.6 NAME 'objectClasses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.37 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
SubschemaSubentry = RFC4512AttributeType(`( 2.5.18.10 NAME 'subschemaSubentry' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
AttributeTypes = RFC4512AttributeType(`( 2.5.21.5 NAME 'attributeTypes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.3 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
MatchingRules = RFC4512AttributeType(`( 2.5.21.4 NAME 'matchingRules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.30 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
MatchingRuleUse = RFC4512AttributeType(`( 2.5.21.8 NAME 'matchingRuleUse' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.31 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
LDAPSyntaxes = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.16 NAME 'ldapSyntaxes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.54 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
DITContentRules = RFC4512AttributeType(`( 2.5.21.2 NAME 'dITContentRules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.16 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
DITStructureRules = RFC4512AttributeType(`( 2.5.21.1 NAME 'dITStructureRules' EQUALITY integerFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.17 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
NameForms = RFC4512AttributeType(`( 2.5.21.7 NAME 'nameForms' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.35 USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
AltServer = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.6 NAME 'altServer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
NamingContexts = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.5 NAME 'namingContexts' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
SupportedControl = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.13 NAME 'supportedControl' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
SupportedExtension = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.7 NAME 'supportedExtension' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
SupportedFeatures = RFC4512AttributeType(`( 1.3.6.1.4.1.4203.1.3.5 NAME 'supportedFeatures' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
SupportedLDAPVersion = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.15 NAME 'supportedLDAPVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
SupportedSASLMechanisms = RFC4512AttributeType(`( 1.3.6.1.4.1.1466.101.120.14 NAME 'supportedSASLMechanisms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 USAGE dSAOperation X-ORIGIN 'RFC4512' )`)
AliasedObjectName = RFC4512AttributeType(`( 2.5.4.1 NAME 'aliasedObjectName' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-ORIGIN 'RFC4512' )`)
ObjectClass = RFC4512AttributeType(`( 2.5.4.0 NAME 'objectClass' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'RFC4512' )`)
CreatorsName = RFC4512AttributeType(`( 2.5.18.3 NAME 'creatorsName' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
CreateTimestamp = RFC4512AttributeType(`( 2.5.18.1 NAME 'createTimestamp' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
ModifiersName = RFC4512AttributeType(`( 2.5.18.4 NAME 'modifiersName' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
ModifyTimestamp = RFC4512AttributeType(`( 2.5.18.2 NAME 'modifyTimestamp' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
StructuralObjectClass = RFC4512AttributeType(`( 2.5.21.9 NAME 'structuralObjectClass' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
GoverningStructureRule = RFC4512AttributeType(`( 2.5.21.10 NAME 'governingStructureRule' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'RFC4512' )`)
}
func init() {
OperationalAttributeTypes = RFC4512AttributeTypes{
AliasedObjectName,
ObjectClass,
CreatorsName,
CreateTimestamp,
ModifiersName,
ModifyTimestamp,
StructuralObjectClass,
GoverningStructureRule,
}
opATlen := len(OperationalAttributeTypes)
DirectorySchemaAttributeTypes = RFC4512AttributeTypes{
ObjectClasses,
SubschemaSubentry,
AttributeTypes,
MatchingRules,
MatchingRuleUse,
LDAPSyntaxes,
DITContentRules,
DITStructureRules,
NameForms,
}
dsATlen := len(DirectorySchemaAttributeTypes)
DSAAttributeTypes = RFC4512AttributeTypes{
AltServer,
NamingContexts,
SupportedControl,
SupportedExtension,
SupportedFeatures,
SupportedLDAPVersion,
SupportedSASLMechanisms,
}
dsaATlen := len(DSAAttributeTypes)
total := opATlen + dsATlen + dsaATlen
cur := 0
AllAttributeTypes = make(RFC4512AttributeTypes, total, total)
for _, v := range []RFC4512AttributeTypes{
OperationalAttributeTypes,
DirectorySchemaAttributeTypes,
DSAAttributeTypes} {
for i := 0; i < len(v); i++ {
cur++
AllAttributeTypes[cur-1] = v[i]
}
}
} |
var (
DirectorySchemaAttributeTypes RFC4512AttributeTypes |
generated.rs | //! Generated by `sourcegen_assists_docs`, do not edit by hand.
use super::check_doc_test;
#[test]
fn doctest_add_explicit_type() {
check_doc_test(
"add_explicit_type",
r#####"
fn main() {
let x$0 = 92;
}
"#####,
r#####"
fn main() {
let x: i32 = 92;
}
"#####,
)
}
#[test]
fn doctest_add_hash() {
check_doc_test(
"add_hash",
r#####"
fn main() {
r#"Hello,$0 World!"#;
}
"#####,
r#####"
fn main() {
r##"Hello, World!"##;
}
"#####,
)
}
#[test]
fn doctest_add_impl_default_members() {
check_doc_test(
"add_impl_default_members",
r#####"
trait Trait {
type X;
fn foo(&self);
fn bar(&self) {}
}
impl Trait for () {
type X = ();
fn foo(&self) {}$0
}
"#####, | trait Trait {
type X;
fn foo(&self);
fn bar(&self) {}
}
impl Trait for () {
type X = ();
fn foo(&self) {}
$0fn bar(&self) {}
}
"#####,
)
}
#[test]
fn doctest_add_impl_missing_members() {
check_doc_test(
"add_impl_missing_members",
r#####"
trait Trait<T> {
type X;
fn foo(&self) -> T;
fn bar(&self) {}
}
impl Trait<u32> for () {$0
}
"#####,
r#####"
trait Trait<T> {
type X;
fn foo(&self) -> T;
fn bar(&self) {}
}
impl Trait<u32> for () {
$0type X;
fn foo(&self) -> u32 {
todo!()
}
}
"#####,
)
}
#[test]
fn doctest_add_lifetime_to_type() {
check_doc_test(
"add_lifetime_to_type",
r#####"
struct Point {
x: &$0u32,
y: u32,
}
"#####,
r#####"
struct Point<'a> {
x: &'a u32,
y: u32,
}
"#####,
)
}
#[test]
fn doctest_add_turbo_fish() {
check_doc_test(
"add_turbo_fish",
r#####"
fn make<T>() -> T { todo!() }
fn main() {
let x = make$0();
}
"#####,
r#####"
fn make<T>() -> T { todo!() }
fn main() {
let x = make::<${0:_}>();
}
"#####,
)
}
#[test]
fn doctest_apply_demorgan() {
check_doc_test(
"apply_demorgan",
r#####"
fn main() {
if x != 4 ||$0 y < 3.14 {}
}
"#####,
r#####"
fn main() {
if !(x == 4 && !(y < 3.14)) {}
}
"#####,
)
}
#[test]
fn doctest_auto_import() {
check_doc_test(
"auto_import",
r#####"
fn main() {
let map = HashMap$0::new();
}
pub mod std { pub mod collections { pub struct HashMap { } } }
"#####,
r#####"
use std::collections::HashMap;
fn main() {
let map = HashMap::new();
}
pub mod std { pub mod collections { pub struct HashMap { } } }
"#####,
)
}
#[test]
fn doctest_change_visibility() {
check_doc_test(
"change_visibility",
r#####"
$0fn frobnicate() {}
"#####,
r#####"
pub(crate) fn frobnicate() {}
"#####,
)
}
#[test]
fn doctest_convert_if_to_bool_then() {
check_doc_test(
"convert_if_to_bool_then",
r#####"
//- minicore: option
fn main() {
if$0 cond {
Some(val)
} else {
None
}
}
"#####,
r#####"
fn main() {
cond.then(|| val)
}
"#####,
)
}
#[test]
fn doctest_convert_integer_literal() {
check_doc_test(
"convert_integer_literal",
r#####"
const _: i32 = 10$0;
"#####,
r#####"
const _: i32 = 0b1010;
"#####,
)
}
#[test]
fn doctest_convert_into_to_from() {
check_doc_test(
"convert_into_to_from",
r#####"
//- minicore: from
impl $0Into<Thing> for usize {
fn into(self) -> Thing {
Thing {
b: self.to_string(),
a: self
}
}
}
"#####,
r#####"
impl From<usize> for Thing {
fn from(val: usize) -> Self {
Thing {
b: val.to_string(),
a: val
}
}
}
"#####,
)
}
#[test]
fn doctest_convert_iter_for_each_to_for() {
check_doc_test(
"convert_iter_for_each_to_for",
r#####"
//- minicore: iterators
use core::iter;
fn main() {
let iter = iter::repeat((9, 2));
iter.for_each$0(|(x, y)| {
println!("x: {}, y: {}", x, y);
});
}
"#####,
r#####"
use core::iter;
fn main() {
let iter = iter::repeat((9, 2));
for (x, y) in iter {
println!("x: {}, y: {}", x, y);
}
}
"#####,
)
}
#[test]
fn doctest_convert_to_guarded_return() {
check_doc_test(
"convert_to_guarded_return",
r#####"
fn main() {
$0if cond {
foo();
bar();
}
}
"#####,
r#####"
fn main() {
if !cond {
return;
}
foo();
bar();
}
"#####,
)
}
#[test]
fn doctest_convert_tuple_struct_to_named_struct() {
check_doc_test(
"convert_tuple_struct_to_named_struct",
r#####"
struct Point$0(f32, f32);
impl Point {
pub fn new(x: f32, y: f32) -> Self {
Point(x, y)
}
pub fn x(&self) -> f32 {
self.0
}
pub fn y(&self) -> f32 {
self.1
}
}
"#####,
r#####"
struct Point { field1: f32, field2: f32 }
impl Point {
pub fn new(x: f32, y: f32) -> Self {
Point { field1: x, field2: y }
}
pub fn x(&self) -> f32 {
self.field1
}
pub fn y(&self) -> f32 {
self.field2
}
}
"#####,
)
}
#[test]
fn doctest_expand_glob_import() {
check_doc_test(
"expand_glob_import",
r#####"
mod foo {
pub struct Bar;
pub struct Baz;
}
use foo::*$0;
fn qux(bar: Bar, baz: Baz) {}
"#####,
r#####"
mod foo {
pub struct Bar;
pub struct Baz;
}
use foo::{Baz, Bar};
fn qux(bar: Bar, baz: Baz) {}
"#####,
)
}
#[test]
fn doctest_extract_function() {
check_doc_test(
"extract_function",
r#####"
fn main() {
let n = 1;
$0let m = n + 2;
let k = m + n;$0
let g = 3;
}
"#####,
r#####"
fn main() {
let n = 1;
fun_name(n);
let g = 3;
}
fn $0fun_name(n: i32) {
let m = n + 2;
let k = m + n;
}
"#####,
)
}
#[test]
fn doctest_extract_struct_from_enum_variant() {
check_doc_test(
"extract_struct_from_enum_variant",
r#####"
enum A { $0One(u32, u32) }
"#####,
r#####"
struct One(pub u32, pub u32);
enum A { One(One) }
"#####,
)
}
#[test]
fn doctest_extract_type_alias() {
check_doc_test(
"extract_type_alias",
r#####"
struct S {
field: $0(u8, u8, u8)$0,
}
"#####,
r#####"
type $0Type = (u8, u8, u8);
struct S {
field: Type,
}
"#####,
)
}
#[test]
fn doctest_extract_variable() {
check_doc_test(
"extract_variable",
r#####"
fn main() {
$0(1 + 2)$0 * 4;
}
"#####,
r#####"
fn main() {
let $0var_name = (1 + 2);
var_name * 4;
}
"#####,
)
}
#[test]
fn doctest_fill_match_arms() {
check_doc_test(
"fill_match_arms",
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
$0
}
}
"#####,
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
$0Action::Move { distance } => todo!(),
Action::Stop => todo!(),
}
}
"#####,
)
}
#[test]
fn doctest_fix_visibility() {
check_doc_test(
"fix_visibility",
r#####"
mod m {
fn frobnicate() {}
}
fn main() {
m::frobnicate$0() {}
}
"#####,
r#####"
mod m {
$0pub(crate) fn frobnicate() {}
}
fn main() {
m::frobnicate() {}
}
"#####,
)
}
#[test]
fn doctest_flip_binexpr() {
check_doc_test(
"flip_binexpr",
r#####"
fn main() {
let _ = 90 +$0 2;
}
"#####,
r#####"
fn main() {
let _ = 2 + 90;
}
"#####,
)
}
#[test]
fn doctest_flip_comma() {
check_doc_test(
"flip_comma",
r#####"
fn main() {
((1, 2),$0 (3, 4));
}
"#####,
r#####"
fn main() {
((3, 4), (1, 2));
}
"#####,
)
}
#[test]
fn doctest_flip_trait_bound() {
check_doc_test(
"flip_trait_bound",
r#####"
fn foo<T: Clone +$0 Copy>() { }
"#####,
r#####"
fn foo<T: Copy + Clone>() { }
"#####,
)
}
#[test]
fn doctest_generate_default_from_enum_variant() {
check_doc_test(
"generate_default_from_enum_variant",
r#####"
enum Version {
Undefined,
Minor$0,
Major,
}
"#####,
r#####"
enum Version {
Undefined,
Minor,
Major,
}
impl Default for Version {
fn default() -> Self {
Self::Minor
}
}
"#####,
)
}
#[test]
fn doctest_generate_default_from_new() {
check_doc_test(
"generate_default_from_new",
r#####"
struct Example { _inner: () }
impl Example {
pub fn n$0ew() -> Self {
Self { _inner: () }
}
}
"#####,
r#####"
struct Example { _inner: () }
impl Example {
pub fn new() -> Self {
Self { _inner: () }
}
}
impl Default for Example {
fn default() -> Self {
Self::new()
}
}
"#####,
)
}
#[test]
fn doctest_generate_deref() {
check_doc_test(
"generate_deref",
r#####"
struct A;
struct B {
$0a: A
}
"#####,
r#####"
struct A;
struct B {
a: A
}
impl std::ops::Deref for B {
type Target = A;
fn deref(&self) -> &Self::Target {
&self.a
}
}
"#####,
)
}
#[test]
fn doctest_generate_derive() {
check_doc_test(
"generate_derive",
r#####"
struct Point {
x: u32,
y: u32,$0
}
"#####,
r#####"
#[derive($0)]
struct Point {
x: u32,
y: u32,
}
"#####,
)
}
#[test]
fn doctest_generate_enum_as_method() {
check_doc_test(
"generate_enum_as_method",
r#####"
enum Value {
Number(i32),
Text(String)$0,
}
"#####,
r#####"
enum Value {
Number(i32),
Text(String),
}
impl Value {
fn as_text(&self) -> Option<&String> {
if let Self::Text(v) = self {
Some(v)
} else {
None
}
}
}
"#####,
)
}
#[test]
fn doctest_generate_enum_is_method() {
check_doc_test(
"generate_enum_is_method",
r#####"
enum Version {
Undefined,
Minor$0,
Major,
}
"#####,
r#####"
enum Version {
Undefined,
Minor,
Major,
}
impl Version {
/// Returns `true` if the version is [`Minor`].
fn is_minor(&self) -> bool {
matches!(self, Self::Minor)
}
}
"#####,
)
}
#[test]
fn doctest_generate_enum_try_into_method() {
check_doc_test(
"generate_enum_try_into_method",
r#####"
enum Value {
Number(i32),
Text(String)$0,
}
"#####,
r#####"
enum Value {
Number(i32),
Text(String),
}
impl Value {
fn try_into_text(self) -> Result<String, Self> {
if let Self::Text(v) = self {
Ok(v)
} else {
Err(self)
}
}
}
"#####,
)
}
#[test]
fn doctest_generate_from_impl_for_enum() {
check_doc_test(
"generate_from_impl_for_enum",
r#####"
enum A { $0One(u32) }
"#####,
r#####"
enum A { One(u32) }
impl From<u32> for A {
fn from(v: u32) -> Self {
Self::One(v)
}
}
"#####,
)
}
#[test]
fn doctest_generate_function() {
check_doc_test(
"generate_function",
r#####"
struct Baz;
fn baz() -> Baz { Baz }
fn foo() {
bar$0("", baz());
}
"#####,
r#####"
struct Baz;
fn baz() -> Baz { Baz }
fn foo() {
bar("", baz());
}
fn bar(arg: &str, baz: Baz) ${0:-> ()} {
todo!()
}
"#####,
)
}
#[test]
fn doctest_generate_getter() {
check_doc_test(
"generate_getter",
r#####"
struct Person {
nam$0e: String,
}
"#####,
r#####"
struct Person {
name: String,
}
impl Person {
/// Get a reference to the person's name.
fn $0name(&self) -> &str {
self.name.as_str()
}
}
"#####,
)
}
#[test]
fn doctest_generate_getter_mut() {
check_doc_test(
"generate_getter_mut",
r#####"
struct Person {
nam$0e: String,
}
"#####,
r#####"
struct Person {
name: String,
}
impl Person {
/// Get a mutable reference to the person's name.
fn $0name_mut(&mut self) -> &mut String {
&mut self.name
}
}
"#####,
)
}
#[test]
fn doctest_generate_impl() {
check_doc_test(
"generate_impl",
r#####"
struct Ctx<T: Clone> {
data: T,$0
}
"#####,
r#####"
struct Ctx<T: Clone> {
data: T,
}
impl<T: Clone> Ctx<T> {
$0
}
"#####,
)
}
#[test]
fn doctest_generate_is_empty_from_len() {
check_doc_test(
"generate_is_empty_from_len",
r#####"
struct MyStruct { data: Vec<String> }
impl MyStruct {
p$0ub fn len(&self) -> usize {
self.data.len()
}
}
"#####,
r#####"
struct MyStruct { data: Vec<String> }
impl MyStruct {
pub fn len(&self) -> usize {
self.data.len()
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
"#####,
)
}
#[test]
fn doctest_generate_new() {
check_doc_test(
"generate_new",
r#####"
struct Ctx<T: Clone> {
data: T,$0
}
"#####,
r#####"
struct Ctx<T: Clone> {
data: T,
}
impl<T: Clone> Ctx<T> {
fn $0new(data: T) -> Self { Self { data } }
}
"#####,
)
}
#[test]
fn doctest_generate_setter() {
check_doc_test(
"generate_setter",
r#####"
struct Person {
nam$0e: String,
}
"#####,
r#####"
struct Person {
name: String,
}
impl Person {
/// Set the person's name.
fn set_name(&mut self, name: String) {
self.name = name;
}
}
"#####,
)
}
#[test]
fn doctest_infer_function_return_type() {
check_doc_test(
"infer_function_return_type",
r#####"
fn foo() { 4$02i32 }
"#####,
r#####"
fn foo() -> i32 { 42i32 }
"#####,
)
}
#[test]
fn doctest_inline_call() {
check_doc_test(
"inline_call",
r#####"
//- minicore: option
fn foo(name: Option<&str>) {
let name = name.unwrap$0();
}
"#####,
r#####"
fn foo(name: Option<&str>) {
let name = match name {
Some(val) => val,
None => panic!("called `Option::unwrap()` on a `None` value"),
};
}
"#####,
)
}
#[test]
fn doctest_inline_local_variable() {
check_doc_test(
"inline_local_variable",
r#####"
fn main() {
let x$0 = 1 + 2;
x * 4;
}
"#####,
r#####"
fn main() {
(1 + 2) * 4;
}
"#####,
)
}
#[test]
fn doctest_introduce_named_lifetime() {
check_doc_test(
"introduce_named_lifetime",
r#####"
impl Cursor<'_$0> {
fn node(self) -> &SyntaxNode {
match self {
Cursor::Replace(node) | Cursor::Before(node) => node,
}
}
}
"#####,
r#####"
impl<'a> Cursor<'a> {
fn node(self) -> &SyntaxNode {
match self {
Cursor::Replace(node) | Cursor::Before(node) => node,
}
}
}
"#####,
)
}
#[test]
fn doctest_invert_if() {
check_doc_test(
"invert_if",
r#####"
fn main() {
if$0 !y { A } else { B }
}
"#####,
r#####"
fn main() {
if y { B } else { A }
}
"#####,
)
}
#[test]
fn doctest_make_raw_string() {
check_doc_test(
"make_raw_string",
r#####"
fn main() {
"Hello,$0 World!";
}
"#####,
r#####"
fn main() {
r#"Hello, World!"#;
}
"#####,
)
}
#[test]
fn doctest_make_usual_string() {
check_doc_test(
"make_usual_string",
r#####"
fn main() {
r#"Hello,$0 "World!""#;
}
"#####,
r#####"
fn main() {
"Hello, \"World!\"";
}
"#####,
)
}
#[test]
fn doctest_merge_imports() {
check_doc_test(
"merge_imports",
r#####"
use std::$0fmt::Formatter;
use std::io;
"#####,
r#####"
use std::{fmt::Formatter, io};
"#####,
)
}
#[test]
fn doctest_merge_match_arms() {
check_doc_test(
"merge_match_arms",
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
$0Action::Move(..) => foo(),
Action::Stop => foo(),
}
}
"#####,
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
Action::Move(..) | Action::Stop => foo(),
}
}
"#####,
)
}
#[test]
fn doctest_move_arm_cond_to_match_guard() {
check_doc_test(
"move_arm_cond_to_match_guard",
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
Action::Move { distance } => $0if distance > 10 { foo() },
_ => (),
}
}
"#####,
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
Action::Move { distance } if distance > 10 => foo(),
_ => (),
}
}
"#####,
)
}
#[test]
fn doctest_move_bounds_to_where_clause() {
check_doc_test(
"move_bounds_to_where_clause",
r#####"
fn apply<T, U, $0F: FnOnce(T) -> U>(f: F, x: T) -> U {
f(x)
}
"#####,
r#####"
fn apply<T, U, F>(f: F, x: T) -> U where F: FnOnce(T) -> U {
f(x)
}
"#####,
)
}
#[test]
fn doctest_move_guard_to_arm_body() {
check_doc_test(
"move_guard_to_arm_body",
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
Action::Move { distance } $0if distance > 10 => foo(),
_ => (),
}
}
"#####,
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
Action::Move { distance } => if distance > 10 {
foo()
},
_ => (),
}
}
"#####,
)
}
#[test]
fn doctest_move_module_to_file() {
check_doc_test(
"move_module_to_file",
r#####"
mod $0foo {
fn t() {}
}
"#####,
r#####"
mod foo;
"#####,
)
}
#[test]
fn doctest_pull_assignment_up() {
check_doc_test(
"pull_assignment_up",
r#####"
fn main() {
let mut foo = 6;
if true {
$0foo = 5;
} else {
foo = 4;
}
}
"#####,
r#####"
fn main() {
let mut foo = 6;
foo = if true {
5
} else {
4
};
}
"#####,
)
}
#[test]
fn doctest_qualify_path() {
check_doc_test(
"qualify_path",
r#####"
fn main() {
let map = HashMap$0::new();
}
pub mod std { pub mod collections { pub struct HashMap { } } }
"#####,
r#####"
fn main() {
let map = std::collections::HashMap::new();
}
pub mod std { pub mod collections { pub struct HashMap { } } }
"#####,
)
}
#[test]
fn doctest_remove_dbg() {
check_doc_test(
"remove_dbg",
r#####"
fn main() {
$0dbg!(92);
}
"#####,
r#####"
fn main() {
92;
}
"#####,
)
}
#[test]
fn doctest_remove_hash() {
check_doc_test(
"remove_hash",
r#####"
fn main() {
r#"Hello,$0 World!"#;
}
"#####,
r#####"
fn main() {
r"Hello, World!";
}
"#####,
)
}
#[test]
fn doctest_remove_mut() {
check_doc_test(
"remove_mut",
r#####"
impl Walrus {
fn feed(&mut$0 self, amount: u32) {}
}
"#####,
r#####"
impl Walrus {
fn feed(&self, amount: u32) {}
}
"#####,
)
}
#[test]
fn doctest_remove_unused_param() {
check_doc_test(
"remove_unused_param",
r#####"
fn frobnicate(x: i32$0) {}
fn main() {
frobnicate(92);
}
"#####,
r#####"
fn frobnicate() {}
fn main() {
frobnicate();
}
"#####,
)
}
#[test]
fn doctest_reorder_fields() {
check_doc_test(
"reorder_fields",
r#####"
struct Foo {foo: i32, bar: i32};
const test: Foo = $0Foo {bar: 0, foo: 1}
"#####,
r#####"
struct Foo {foo: i32, bar: i32};
const test: Foo = Foo {foo: 1, bar: 0}
"#####,
)
}
#[test]
fn doctest_reorder_impl() {
check_doc_test(
"reorder_impl",
r#####"
trait Foo {
fn a() {}
fn b() {}
fn c() {}
}
struct Bar;
$0impl Foo for Bar {
fn b() {}
fn c() {}
fn a() {}
}
"#####,
r#####"
trait Foo {
fn a() {}
fn b() {}
fn c() {}
}
struct Bar;
impl Foo for Bar {
fn a() {}
fn b() {}
fn c() {}
}
"#####,
)
}
#[test]
fn doctest_replace_char_with_string() {
check_doc_test(
"replace_char_with_string",
r#####"
fn main() {
find('{$0');
}
"#####,
r#####"
fn main() {
find("{");
}
"#####,
)
}
#[test]
fn doctest_replace_derive_with_manual_impl() {
check_doc_test(
"replace_derive_with_manual_impl",
r#####"
trait Debug { fn fmt(&self, f: &mut Formatter) -> Result<()>; }
#[derive(Deb$0ug, Display)]
struct S;
"#####,
r#####"
trait Debug { fn fmt(&self, f: &mut Formatter) -> Result<()>; }
#[derive(Display)]
struct S;
impl Debug for S {
$0fn fmt(&self, f: &mut Formatter) -> Result<()> {
f.debug_struct("S").finish()
}
}
"#####,
)
}
#[test]
fn doctest_replace_for_loop_with_for_each() {
check_doc_test(
"replace_for_loop_with_for_each",
r#####"
fn main() {
let x = vec![1, 2, 3];
for$0 v in x {
let y = v * 2;
}
}
"#####,
r#####"
fn main() {
let x = vec![1, 2, 3];
x.into_iter().for_each(|v| {
let y = v * 2;
});
}
"#####,
)
}
#[test]
fn doctest_replace_if_let_with_match() {
check_doc_test(
"replace_if_let_with_match",
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
$0if let Action::Move { distance } = action {
foo(distance)
} else {
bar()
}
}
"#####,
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
match action {
Action::Move { distance } => foo(distance),
_ => bar(),
}
}
"#####,
)
}
#[test]
fn doctest_replace_impl_trait_with_generic() {
check_doc_test(
"replace_impl_trait_with_generic",
r#####"
fn foo(bar: $0impl Bar) {}
"#####,
r#####"
fn foo<B: Bar>(bar: B) {}
"#####,
)
}
#[test]
fn doctest_replace_let_with_if_let() {
check_doc_test(
"replace_let_with_if_let",
r#####"
enum Option<T> { Some(T), None }
fn main(action: Action) {
$0let x = compute();
}
fn compute() -> Option<i32> { None }
"#####,
r#####"
enum Option<T> { Some(T), None }
fn main(action: Action) {
if let Some(x) = compute() {
}
}
fn compute() -> Option<i32> { None }
"#####,
)
}
#[test]
fn doctest_replace_match_with_if_let() {
check_doc_test(
"replace_match_with_if_let",
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
$0match action {
Action::Move { distance } => foo(distance),
_ => bar(),
}
}
"#####,
r#####"
enum Action { Move { distance: u32 }, Stop }
fn handle(action: Action) {
if let Action::Move { distance } = action {
foo(distance)
} else {
bar()
}
}
"#####,
)
}
#[test]
fn doctest_replace_qualified_name_with_use() {
check_doc_test(
"replace_qualified_name_with_use",
r#####"
mod std { pub mod collections { pub struct HashMap<T, U>(T, U); } }
fn process(map: std::collections::$0HashMap<String, String>) {}
"#####,
r#####"
use std::collections::HashMap;
mod std { pub mod collections { pub struct HashMap<T, U>(T, U); } }
fn process(map: HashMap<String, String>) {}
"#####,
)
}
#[test]
fn doctest_replace_string_with_char() {
check_doc_test(
"replace_string_with_char",
r#####"
fn main() {
find("{$0");
}
"#####,
r#####"
fn main() {
find('{');
}
"#####,
)
}
#[test]
fn doctest_sort_items() {
check_doc_test(
"sort_items",
r#####"
struct $0Foo$0 { second: u32, first: String }
"#####,
r#####"
struct Foo { first: String, second: u32 }
"#####,
)
}
#[test]
fn doctest_sort_items_1() {
check_doc_test(
"sort_items",
r#####"
trait $0Bar$0 {
fn second(&self) -> u32;
fn first(&self) -> String;
}
"#####,
r#####"
trait Bar {
fn first(&self) -> String;
fn second(&self) -> u32;
}
"#####,
)
}
#[test]
fn doctest_sort_items_2() {
check_doc_test(
"sort_items",
r#####"
struct Baz;
impl $0Baz$0 {
fn second(&self) -> u32;
fn first(&self) -> String;
}
"#####,
r#####"
struct Baz;
impl Baz {
fn first(&self) -> String;
fn second(&self) -> u32;
}
"#####,
)
}
#[test]
fn doctest_sort_items_3() {
check_doc_test(
"sort_items",
r#####"
enum $0Animal$0 {
Dog(String, f64),
Cat { weight: f64, name: String },
}
"#####,
r#####"
enum Animal {
Cat { weight: f64, name: String },
Dog(String, f64),
}
"#####,
)
}
#[test]
fn doctest_sort_items_4() {
check_doc_test(
"sort_items",
r#####"
enum Animal {
Dog(String, f64),
Cat $0{ weight: f64, name: String }$0,
}
"#####,
r#####"
enum Animal {
Dog(String, f64),
Cat { name: String, weight: f64 },
}
"#####,
)
}
#[test]
fn doctest_split_import() {
check_doc_test(
"split_import",
r#####"
use std::$0collections::HashMap;
"#####,
r#####"
use std::{collections::HashMap};
"#####,
)
}
#[test]
fn doctest_toggle_ignore() {
check_doc_test(
"toggle_ignore",
r#####"
$0#[test]
fn arithmetics {
assert_eq!(2 + 2, 5);
}
"#####,
r#####"
#[test]
#[ignore]
fn arithmetics {
assert_eq!(2 + 2, 5);
}
"#####,
)
}
#[test]
fn doctest_unmerge_use() {
check_doc_test(
"unmerge_use",
r#####"
use std::fmt::{Debug, Display$0};
"#####,
r#####"
use std::fmt::{Debug};
use std::fmt::Display;
"#####,
)
}
#[test]
fn doctest_unwrap_block() {
check_doc_test(
"unwrap_block",
r#####"
fn foo() {
if true {$0
println!("foo");
}
}
"#####,
r#####"
fn foo() {
println!("foo");
}
"#####,
)
}
#[test]
fn doctest_wrap_return_type_in_result() {
check_doc_test(
"wrap_return_type_in_result",
r#####"
//- minicore: result
fn foo() -> i32$0 { 42i32 }
"#####,
r#####"
fn foo() -> Result<i32, ${0:_}> { Ok(42i32) }
"#####,
)
} | r#####" |
board.ts | /// <reference path="./types.d.ts" />
class Board {
private content: cellContent[][];
constructor(defaultContent: number[][]) {
this.content = [];
for (let i = 0; i < 9; i++) {
this.content.push([]);
for (let j = 0; j < 9; j++) {
this.content[i].push(defaultContent[i][j] === 0 ? { state: "BLANK" } : { state: "DEFAULT", num: defaultContent[i][j] });
}
}
}
setValue = (i: number, j: number, val: number) => {
if (this.content[i][j].state === "DEFAULT") return;
if (this.content[i][j].state === "BLANK") this.content[i][j] = { state: "FILLED", num: val };
else {
if (this.content[i][j].num === val) this.content[i][j] = { state: "BLANK" };
else this.content[i][j] = { state: "FILLED", num: val };
}
};
getValue = (i: number, j: number) => {
if (this.content[i][j].state === "BLANK") return undefined;
return this.content[i][j].num;
};
isDefault = (i: number, j: number) => {
return this.content[i][j].state === "DEFAULT";
};
private testGroup = (group: number[]) => {
return [1, 2, 3, 4, 5, 6, 7, 8, 9].reduce((acc, i) => acc + (group.includes(i) ? 0 : 1), 0) === 0;
};
isValid = () => {
if (this.content.filter((row) => row.filter((cell) => cell.state === "BLANK").length > 0).length > 0) return false;
return (
[0, 1, 2, 3, 4, 5, 6, 7, 8].reduce((acc, i) => {
let act = acc;
act += this.testGroup(this.content[i].map((c) => c.num)) ? 0 : 1;
act += this.testGroup(this.content.map((c) => c[i].num)) ? 0 : 1;
const iStart = 3 * Math.floor(i / 3);
const jStart = 3 * (i % 3); | [
[iStart, jStart],
[iStart, jStart + 1],
[iStart, jStart + 2],
[iStart + 1, jStart],
[iStart + 1, jStart + 1],
[iStart + 1, jStart + 2],
[iStart + 2, jStart],
[iStart + 2, jStart + 1],
[iStart + 2, jStart + 2],
].map((c) => this.content[c[0]][c[1]].num)
)
? 0
: 1;
return act;
}, 0) === 0
);
};
} | act += this.testGroup( |