Dataset columns:
file_name: string (length 3 to 137)
prefix: string (length 0 to 918k)
suffix: string (length 0 to 962k)
middle: string (length 0 to 812k)
prover.rs
use rand::Rng;
use rayon::prelude::*;

use algebra::msm::VariableBaseMSM;
use algebra::{AffineCurve, PairingEngine, PrimeField, ProjectiveCurve, UniformRand};

use crate::gm17::r1cs_to_sap::R1CStoSAP;
use crate::gm17::{Parameters, Proof};

use r1cs_core::{ConstraintSynthesizer, ConstraintSystem, SynthesisError, SynthesisMode};

use std::{ops::AddAssign, sync::Arc};

pub fn create_random_proof<E, C, R>(
    circuit: C,
    params: &Parameters<E>,
    rng: &mut R,
) -> Result<Proof<E>, SynthesisError>
where
    E: PairingEngine,
    C: ConstraintSynthesizer<E::Fr>,
    R: Rng,
{
    let d1 = E::Fr::rand(rng);
    let d2 = E::Fr::rand(rng);
    let r = E::Fr::rand(rng);

    create_proof::<E, C>(circuit, params, d1, d2, r)
}

pub fn create_proof<E, C>(
    circuit: C,
    params: &Parameters<E>,
    d1: E::Fr,
    d2: E::Fr,
    r: E::Fr,
) -> Result<Proof<E>, SynthesisError>
where
    E: PairingEngine,
    C: ConstraintSynthesizer<E::Fr>,
{
    let prover_time = start_timer!(|| "Prover");
    let mode = SynthesisMode::Prove {
        construct_matrices: true,
    };
    let mut prover = ConstraintSystem::<E::Fr>::new(mode);

    // Synthesize the circuit.
    let synthesis_time = start_timer!(|| "Constraint synthesis");
    circuit.generate_constraints(&mut prover)?;
    end_timer!(synthesis_time);

    let witness_map_time = start_timer!(|| "R1CS to SAP witness map");
    let (full_input_assignment, h, _) = R1CStoSAP::witness_map::<E>(&prover, &d1, &d2)?;
    end_timer!(witness_map_time);

    let input_assignment = Arc::new(
        full_input_assignment[1..prover.num_inputs]
            .iter()
            .map(|s| s.into_repr())
            .collect::<Vec<_>>(),
    );

    let aux_assignment = Arc::new(
        full_input_assignment[prover.num_inputs..]
            .into_par_iter()
            .map(|s| s.into_repr())
    );
    drop(full_input_assignment);

    let h_input = Arc::new(
        h[0..prover.num_inputs]
            .iter()
            .map(|s| s.into_repr())
            .collect::<Vec<_>>(),
    );
    let h_aux = Arc::new(
        h[prover.num_inputs..]
            .into_par_iter()
            .map(|s| s.into_repr())
            .collect::<Vec<_>>(),
    );
    drop(h);

    // Compute A
    let a_acc_time = start_timer!(|| "Compute A");
    let (a_inputs_source, a_aux_source) = params.get_a_query(prover.num_inputs)?;
    let a_inputs_acc = VariableBaseMSM::multi_scalar_mul(a_inputs_source, &input_assignment)?;
    let a_aux_acc = VariableBaseMSM::multi_scalar_mul(a_aux_source, &aux_assignment)?;

    let r_g = params.get_g_gamma_z()?.mul(r);
    let d1_g = params.get_g_gamma_z()?.mul(d1);

    let mut g_a = r_g;
    g_a.add_assign(&params.get_a_query_full()?[0].into_projective());
    g_a.add_assign(&d1_g);
    g_a.add_assign(&a_inputs_acc);
    g_a.add_assign(&a_aux_acc);
    end_timer!(a_acc_time);

    // Compute B
    let b_acc_time = start_timer!(|| "Compute B");
    let (b_inputs_source, b_aux_source) = params.get_b_query(prover.num_inputs)?;
    let b_inputs_acc = VariableBaseMSM::multi_scalar_mul(b_inputs_source, &input_assignment)?;
    let b_aux_acc = VariableBaseMSM::multi_scalar_mul(b_aux_source, &aux_assignment)?;

    let r_h = params.get_h_gamma_z()?.mul(r);
    let d1_h = params.get_h_gamma_z()?.mul(d1);

    let mut g_b = r_h;
    g_b.add_assign(&params.get_b_query_full()?[0].into_projective());
    g_b.add_assign(&d1_h);
    g_b.add_assign(&b_inputs_acc);
    g_b.add_assign(&b_aux_acc);
    end_timer!(b_acc_time);

    // Compute C
    let c_acc_time = start_timer!(|| "Compute C");
    let r_2 = r + &r;
    let r2 = r * &r;
    let d1_r_2 = d1 * &r_2;

    let c1_acc_time = start_timer!(|| "Compute C1");
    let (_, c1_aux_source) = params.get_c_query_1(0)?;
    let c1_acc = VariableBaseMSM::multi_scalar_mul(c1_aux_source, &aux_assignment)?;
    end_timer!(c1_acc_time);

    let c2_acc_time = start_timer!(|| "Compute C2");
    let (c2_inputs_source, c2_aux_source) = params.get_c_query_2(prover.num_inputs)?;
    let c2_inputs_acc = VariableBaseMSM::multi_scalar_mul(c2_inputs_source, &input_assignment)?;
    let c2_aux_acc = VariableBaseMSM::multi_scalar_mul(c2_aux_source, &aux_assignment)?;
    let c2_acc = c2_inputs_acc + &c2_aux_acc;
    end_timer!(c2_acc_time);

    // Compute G
    let g_acc_time = start_timer!(|| "Compute G");
    let (g_inputs_source, g_aux_source) = params.get_g_gamma2_z_t(prover.num_inputs)?;
    let g_inputs_acc = VariableBaseMSM::multi_scalar_mul(g_inputs_source, &h_input)?;
    let g_aux_acc = VariableBaseMSM::multi_scalar_mul(g_aux_source, &h_aux)?;
    let g_acc = g_inputs_acc + &g_aux_acc;
    end_timer!(g_acc_time);

    let r2_g_gamma2_z2 = params.get_g_gamma2_z2()?.mul(r2);
    let r_g_ab_gamma_z = params.get_g_ab_gamma_z()?.mul(r);
    let d1_g_ab_gamma_z = params.get_g_ab_gamma_z()?.mul(d1);
    let r_c0 = params.get_c_query_2_full()?[0].mul(r);
    let r2_d1_g_gamma2_z2 = params.get_g_gamma2_z2()?.mul(d1_r_2);
    let d2_g_gamma2_z_t0 = params.get_g_gamma2_z_t_full()?[0].mul(d2);
    let mut r_c2_exp = c2_acc;
    r_c2_exp.mul_assign(r);

    let mut g_c = c1_acc;
    g_c.add_assign(&r2_g_gamma2_z2);
    g_c.add_assign(&r_g_ab_gamma_z);
    g_c.add_assign(&d1_g_ab_gamma_z);
    g_c.add_assign(&r_c0);
    g_c.add_assign(&r2_d1_g_gamma2_z2);
    g_c.add_assign(&r_c2_exp);
    g_c.add_assign(&d2_g_gamma2_z_t0);
    g_c.add_assign(&g_acc);
    end_timer!(c_acc_time);

    end_timer!(prover_time);

    Ok(Proof {
        a: g_a.into_affine(),
        b: g_b.into_affine(),
        c: g_c.into_affine(),
    })
}
.collect::<Vec<_>>(),
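A minimal sketch of how this entry point is typically driven, using only items defined or imported in the file above; the wrapper function name is illustrative, not part of this file.

// Hypothetical caller: prove a user-supplied circuit with fresh randomness.
fn prove_with_circuit<E, C>(circuit: C, params: &Parameters<E>) -> Result<Proof<E>, SynthesisError>
where
    E: PairingEngine,
    C: ConstraintSynthesizer<E::Fr>,
{
    let mut rng = rand::thread_rng();
    // `create_random_proof` samples the blinding scalars d1, d2, r itself.
    create_random_proof(circuit, params, &mut rng)
}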
GuidedTourText.tsx
import * as React from 'react'; import { Trans, useTranslation } from 'react-i18next'; import { PlusCircleIcon } from '@patternfly/react-icons'; import { useOpenshiftVersion } from '@console/shared/src/hooks/version'; import { useK8sWatchResource } from '@console/internal/components/utils/k8s-watch-hook'; import { K8sResourceKind, referenceForModel } from '@console/internal/module/k8s'; import { ConsoleLinkModel } from '@console/internal/models'; const DevPerspectiveTourText: React.FC = () => { const { t } = useTranslation(); const openshiftVersion = useOpenshiftVersion(); return ( <> {t( 'devconsole~Get started with a tour of some of the key areas in OpenShift {{version}} Developer perspective that can help you complete workflows and be more productive.', { version: openshiftVersion ? [openshiftVersion?.slice(0, 3), "'s"].join('') : '4.x' }, )} </> ); }; export const devPerspectiveTourText = <DevPerspectiveTourText />; const PerspectiveSwitcherTourText: React.FC = () => { const { t } = useTranslation(); return ( <> <p>{t('devconsole~Switch between the Developer and Administrator perspectives.')}</p> <p> {t( 'devconsole~Use the Administrator perspective to manage workload storage, networking, cluster settings, and more. This may require additional user access.', )} </p> <p> {t( 'devconsole~Use the Developer perspective to build applications and associated components and services, define how they work together, and monitor their health over time.', )} </p> </> ); }; export const perspectiveSwitcherTourText = <PerspectiveSwitcherTourText />; export const SearchTourText: React.FC = () => { const { t } = useTranslation(); return ( <> <p> {t( 'devconsole~Search for resources in your Project by simply starting to type or scrolling through a list of existing resources.', )} </p> <p> {t( 'devconsole~Add frequently accessed resources to your side navigation for quick access. Look for the', )}{' '} <span style={{ color: 'var(--pf-global--palette--blue-400)' }}> <PlusCircleIcon /> {t('devconsole~Add to navigation')} </span>{' '} {t('devconsole~link next to your search result.')} </p> </> ); }; export const searchTourText = <SearchTourText />; const FinishTourText: React.FC = () => { const [consoleLinks] = useK8sWatchResource<K8sResourceKind[]>({ isList: true, kind: referenceForModel(ConsoleLinkModel), optional: true, }); const { t } = useTranslation(); const openshiftBlogLink = consoleLinks.filter( (link: K8sResourceKind) => link.metadata.name === 'openshift-blog', )[0]?.spec?.href; // declaring openshiftHelpBase instead of importing because it throws error while using it as tour extension const openshiftHelpBase = window.SERVER_FLAGS.documentationBaseURL || 'https://docs.okd.io/latest/'; return ( <Trans t={t} ns="devconsole"> Stay up-to-date with everything OpenShift on our{' '} <a href={openshiftBlogLink} target="_blank" rel="noopener noreferrer"> blog </a>{' '} or continue to learn more in our{' '} <a href={openshiftHelpBase} target="_blank" rel="noopener noreferrer"> documentation
</a> . </Trans> ); }; export const finishTourText = <FinishTourText />;
main.rs
//! Demonstrates loading a custom prefab using the Amethyst engine.

use std::fmt::Debug;

use amethyst::{
    assets::{
        AssetStorage, Handle, Prefab, PrefabData, PrefabLoader, PrefabLoaderSystemDesc,
        ProgressCounter, RonFormat,
    },
    // `Component`, `DenseVecStorage`, `Entities`, and `Join` are required by the
    // `#[derive(Component)]` below and the `world.exec` join further down; they
    // were missing from the original import list.
    ecs::{Component, DenseVecStorage, Entities, Entity, Join, ReadStorage, World, WriteStorage},
    prelude::*,
    utils::application_root_dir,
    Error,
};
use derive_new::new;
use serde::{Deserialize, Serialize};

#[derive(Clone, Copy, Component, Debug, Default)]
pub struct Position(pub f32, pub f32, pub f32);

impl From<(i32, i32, i32)> for Position {
    fn from((x, y, z): (i32, i32, i32)) -> Position {
        Position(x as f32, y as f32, z as f32)
    }
}

impl From<(f32, f32, f32)> for Position {
    fn from((x, y, z): (f32, f32, f32)) -> Position {
        Position(x, y, z)
    }
}

#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub enum PositionPrefab {
    Pos3f { x: f32, y: f32, z: f32 },
    Pos3i { x: i32, y: i32, z: i32 },
}

impl<'a> PrefabData<'a> for PositionPrefab {
    // To attach the `Position` to the constructed entity,
    // we write to the `Position` component storage.
    type SystemData = WriteStorage<'a, Position>;

    // This associated type is not used in this pattern,
    // so the empty tuple is specified.
    type Result = ();

    fn add_to_entity(
        &self,
        entity: Entity,
        positions: &mut Self::SystemData,
        _entities: &[Entity],
        _children: &[Entity],
    ) -> Result<(), Error> {
        let position = match *self {
            PositionPrefab::Pos3f { x, y, z } => (x, y, z).into(),
            PositionPrefab::Pos3i { x, y, z } => (x, y, z).into(),
        };
        positions.insert(entity, position).map(|_| ())?;
        Ok(())
    }
}

#[derive(new)]
pub struct CustomPrefabState {
    /// Tracks loaded assets.
    #[new(default)]
    pub progress_counter: ProgressCounter,
    /// Handle to the loaded prefab.
    #[new(default)]
    pub prefab_handle: Option<Handle<Prefab<PositionPrefab>>>,
}

impl SimpleState for CustomPrefabState {
    fn on_start(&mut self, data: StateData<'_, GameData>)
    fn update(&mut self, data: &mut StateData<'_, GameData>) -> SimpleTrans {
        if self.progress_counter.is_complete() {
            self.display_loaded_prefab(&data.world);
            self.display_loaded_entities(&mut data.world);
            Trans::Quit
        } else {
            Trans::None
        }
    }
}

impl CustomPrefabState {
    // Displays the contents of the loaded prefab.
    fn display_loaded_prefab(&self, world: &World) {
        let prefab_assets = world.read_resource::<AssetStorage<Prefab<PositionPrefab>>>();
        if let Some(handle) = self.prefab_handle.as_ref() {
            let prefab = prefab_assets
                .get(handle)
                .expect("Expected prefab to be loaded.");

            println!("Prefab");
            println!("======");
            prefab
                .entities()
                .for_each(|entity| println!("{:?}", entity));
            println!();
        }
    }

    // Displays the `Component`s of entities in the `World`.
    fn display_loaded_entities(&self, world: &mut World) {
        println!("Entities");
        println!("========");
        println!();
        println!(
            "| {e:24} | {prefab_handle:30} | {pos:23} |",
            e = "Entity",
            prefab_handle = "Handle<Prefab<PositionPrefab>>",
            pos = "Position",
        );
        println!("| {c:-^24} | {c:-^30} | {c:-^23} |", c = "",);
        world.exec(
            |(entities, prefab_handles, positions): (
                Entities,
                ReadStorage<Handle<Prefab<PositionPrefab>>>,
                ReadStorage<Position>,
            )| {
                (&entities, prefab_handles.maybe(), positions.maybe())
                    .join()
                    .for_each(|(e, prefab_handle, pos)| {
                        println!(
                            "| {e:24} | {prefab_handle:30} | {pos:23} |",
                            e = format!("{:?}", e),
                            prefab_handle = Self::display(prefab_handle),
                            pos = Self::display(pos),
                        )
                    });
            },
        )
    }

    // Formats an optional component for display. The original had identical
    // `if`/`else` branches, which only worked because `{:?}` on `None` prints
    // "None"; this spells that out.
    fn display<T: Debug>(component: Option<T>) -> String {
        match component {
            Some(component) => format!("{:?}", component),
            None => "None".to_string(),
        }
    }
}

/// Wrapper around the main, so we can return errors easily.
fn main() -> Result<(), Error> {
    amethyst::start_logger(Default::default());

    let app_root = application_root_dir()?;

    // Add our assets directory to the asset loader.
    let assets_dir = app_root.join("assets");

    let mut game_data = DispatcherBuilder::default().with_system_desc(
        PrefabLoaderSystemDesc::<PositionPrefab>::default(),
        "",
        &[],
    );

    let game = Application::build(assets_dir, CustomPrefabState::new())?.build(game_data)?;
    game.run();
    Ok(())
}
    {
        let prefab_handle = data.world.exec(|loader: PrefabLoader<'_, PositionPrefab>| {
            loader.load(
                "prefab/prefab_adapter.ron",
                RonFormat,
                &mut self.progress_counter,
            )
        });

        // Create one set of entities from the prefab.
        (0..1).for_each(|_| {
            data.world.push((prefab_handle.clone(),));
        });

        self.prefab_handle = Some(prefab_handle);
    }
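The loader above expects prefab/prefab_adapter.ron on disk. A plausible shape for that file, inferred from the PositionPrefab enum rather than copied from the example's assets:

// Hypothetical prefab/prefab_adapter.ron (RON); field names follow the enum variants above.
Prefab(
    entities: [
        ( data: Pos3f(x: 1.0, y: 2.0, z: 3.0) ),
        ( data: Pos3i(x: 4, y: 5, z: 6) ),
    ],
)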
spinner.js
/**
 * Spinner component.
 *
 * Site Kit by Google, Copyright 2019 Google LLC
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

const { Component } = wp.element;

/**
 * Saving spinner. Rendered only while `isSaving` is true.
 * (The original docblock, "A single module...", was copied from another file.)
 */
class Spinner extends Component {
	render() {
		const { isSaving } = this.props;
		return (
			<span
				className="spinner"
				style={ {
					display: ( isSaving ? 'inline-block' : 'none' ),
					float: 'none',
					marginTop: '0',
					visibility: 'visible',
				} }
			/>
		);
	}
}

export default Spinner;
*
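A hypothetical usage sketch; every name except Spinner and its isSaving prop is illustrative:

import Spinner from './spinner';

// Render the spinner only while a save request is in flight.
const SaveIndicator = ( { isSaving } ) => (
	<span>
		{ isSaving ? 'Saving' : 'Saved' }
		<Spinner isSaving={ isSaving } />
	</span>
);

export default SaveIndicator;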
msi_bundle.rs
// Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT

use super::{settings::Settings, wix};

use std::{self, path::PathBuf};

/// Runs all of the commands to build the MSI installer.
/// Returns a vector of PathBuf that shows where the MSI was created.
pub fn bundle_project(settings: &Settings) -> crate::Result<Vec<PathBuf>>
{
  let wix_path = PathBuf::from("./WixTools");
  if !wix_path.exists() {
    wix::get_and_extract_wix(&wix_path)?;
  }
  let msi_path = wix::build_wix_app_installer(&settings, &wix_path)?;

  Ok(vec![msi_path])
}
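A hypothetical caller sketching how the returned paths might be consumed; `Settings` construction is assumed to happen elsewhere in the bundler:

fn bundle_and_report(settings: &Settings) -> crate::Result<()> {
  // `bundle_project` fetches the WiX toolset on first use, then builds the MSI.
  for msi in bundle_project(settings)? {
    println!("created installer at {}", msi.display());
  }
  Ok(())
}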
Capabilities.js
/**
 * The copyright in this software is being made available under the BSD License,
 * included below. This software may be subject to other third party and contributor
 * rights, including patent rights, and no such rights are granted under this license.
 *
 * Copyright (c) 2013, Dash Industry Forum.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 * *  Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 * *  Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation and/or
 *    other materials provided with the distribution.
 * *  Neither the name of Dash Industry Forum nor the names of its
 *    contributors may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
'use strict';

Object.defineProperty(exports, '__esModule', { value: true });

function _interopRequireDefault(obj) {
    return obj && obj.__esModule ? obj : { 'default': obj };
}

var _coreFactoryMaker = require('../../core/FactoryMaker');
var _coreFactoryMaker2 = _interopRequireDefault(_coreFactoryMaker);

function Capabilities() {
    var instance = undefined,
        encryptedMediaSupported = undefined;

    function setup() {
        encryptedMediaSupported = false;
    }

    function supportsMediaSource() {
        var hasWebKit = ('WebKitMediaSource' in window);
        var hasMediaSource = ('MediaSource' in window);
        return hasWebKit || hasMediaSource;
    }

    /**
     * Returns whether Encrypted Media Extensions are supported on this
     * user agent
     *
     * @return {boolean} true if EME is supported, false otherwise
     */
    function supportsEncryptedMedia() {
        return encryptedMediaSupported;
    }

    function setEncryptedMediaSupported(value) {
        encryptedMediaSupported = value;
    }

    function
(codec) {
        if ('MediaSource' in window && MediaSource.isTypeSupported(codec)) {
            return true;
        }
        if ('WebKitMediaSource' in window && WebKitMediaSource.isTypeSupported(codec)) {
            return true;
        }
        return false;
    }

    instance = {
        supportsMediaSource: supportsMediaSource,
        supportsEncryptedMedia: supportsEncryptedMedia,
        supportsCodec: supportsCodec,
        setEncryptedMediaSupported: setEncryptedMediaSupported
    };

    setup();
    return instance;
}

Capabilities.__dashjs_factory_name = 'Capabilities';
exports['default'] = _coreFactoryMaker2['default'].getSingletonFactory(Capabilities);
module.exports = exports['default'];
//# sourceMappingURL=Capabilities.js.map
supportsCodec
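A usage sketch following the dash.js FactoryMaker singleton pattern; `context` and the codec string are assumptions for illustration:

var capabilities = Capabilities(context).getInstance();
if (capabilities.supportsMediaSource() &&
    capabilities.supportsCodec('video/mp4; codecs="avc1.42E01E"')) {
    // Safe to set up MSE-based playback for this codec.
}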
live_hash_add_transaction.rs
use hedera_derive::{TransactionExecute, TransactionProto};

use crate::transaction::Transaction;
use crate::Client;
use crate::Hbar;
use crate::HederaError;
use crate::LiveHash;

#[derive(TransactionExecute, Debug, Clone)]
#[hedera_derive(service(method_service_name = "crypto", method_service_fn = "add_live_hash"))]
pub struct LiveHashAddTransaction {
    transaction: Transaction,
    services: Proto,
}

impl LiveHashAddTransaction {
    pub fn new() -> LiveHashAddTransaction {
        let transaction = Transaction::with_max_transaction_fee(Hbar::new(2.0));
        let services = Proto::new();
        LiveHashAddTransaction { transaction, services }
    }

    fn validate_network_on_ids(&self, _client: &Client) -> Result<(), HederaError> {
        Ok(())
    }

    // live_hash
    gen_transaction_live_hash_fns!();
}

#[derive(Debug, Clone, TransactionProto)]
#[hedera_derive(proto(
    proto_enum = "CryptoAddLiveHash",
    proto_type = "CryptoAddLiveHashTransactionBody"
}

impl Proto {
    pub fn new() -> Self {
        Proto { live_hash: None }
    }
}
))]
struct Proto {
    #[hedera_derive(to_option_proto)]
    pub live_hash: Option<LiveHash>,
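A hypothetical construction sketch; the setter is assumed to be among the builder methods generated by gen_transaction_live_hash_fns!() and its name is illustrative, not confirmed by this file:

fn build_add_live_hash(live_hash: LiveHash) -> LiveHashAddTransaction {
    let mut tx = LiveHashAddTransaction::new(); // max fee pre-set to 2 Hbar above
    tx.live_hash(live_hash); // assumed generated setter; name is a guess
    tx
}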
iam_handwriting_word_database.py
import os
from os.path import isfile

from lxml import etree
import torch
from PIL import Image
from torch.utils.data.dataset import Dataset

from utils.image import image_pillow_to_numpy


class IAMHandwritingWordDatabase(Dataset):

    def __init__(self, path, height=32, loss=None):
        self.height = height
        self.loss = loss

        self.images_path = os.path.join(path, "words")
        self.xmls_path = os.path.join(path, "xml")

        # Collected (id, text) pairs; must exist before parsing starts.
        # (The original never initialized this attribute, so parsing crashed.)
        self.labels = []

        for xml_name in os.listdir(self.xmls_path):
            self.parse_xml(os.path.join(self.xmls_path, xml_name))

    def parse_xml(self, xml_path):
        tree = etree.parse(xml_path)
        self.parse_xml_tree(xml_path, tree.getroot())

    def parse_xml_tree(self, xml_path, root):
        # Recurse until "Line" elements are found.
        for children in root.getchildren():
            if children.tag.title() == "Line":
                self.parse_xml_tree_line(children)
            else:
                self.parse_xml_tree(xml_path, children)

    def parse_xml_tree_line(self, root):
        for children in root.getchildren():
            if children.tag.title() == "Word":
                text, id = self.parse_xml_tree_word(children)
                ids = id.split("-")
                image_path = os.path.join(
                    self.images_path,
                    ids[0] + "/" + ids[0] + "-" + ids[1] + "/" +
                    ids[0] + "-" + ids[1] + "-" + ids[2] + "-" + ids[3] + ".png")
                if isfile(image_path):
                    # Only keep samples whose image file actually opens.
                    try:
                        Image.open(image_path)
                        self.labels.append((id, text))
                    except Exception:
                        pass

    def parse_xml_tree_word(self, root):
        root_dict = {}
        for name, value in root.attrib.items():
            root_dict[name] = value
        return root_dict["text"], root_dict["id"]

    def get_corpus(self):
        corpus = ""
        for id, text in self.labels:
            if corpus == "":
                corpus = text
            else:
                corpus = corpus + ". " + text
        return corpus

    def __getitem__(self, index):
    def __len__(self):
        return len(self.labels)
        id, text = self.labels[index]
        ids = id.split("-")
        image_path = os.path.join(
            self.images_path,
            ids[0] + "/" + ids[0] + "-" + ids[1] + "/" +
            ids[0] + "-" + ids[1] + "-" + ids[2] + "-" + ids[3] + ".png")

        # Load the image, then scale it to the fixed target height while
        # preserving the aspect ratio.
        image = Image.open(image_path).convert('RGB')
        width, height = image.size
        image = image.resize((width * self.height // height, self.height), Image.ANTIALIAS)
        image = image_pillow_to_numpy(image)

        return torch.from_numpy(image), (
            self.loss.preprocess_label(text, width * self.height // height),
            text,
            image.shape[2],
        )
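A hypothetical training-side sketch; the dataset path and the loss object are assumptions, and batch_size=1 sidesteps the variable image widths:

from torch.utils.data import DataLoader

dataset = IAMHandwritingWordDatabase("/data/iam", height=32, loss=ctc_loss)  # path/loss assumed
loader = DataLoader(dataset, batch_size=1, shuffle=True)

for image, (label, text, width) in loader:
    # image: (1, C, 32, W) tensor; label and width feed the CTC-style loss.
    pass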
regionck.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! The region check is a final pass that runs over the AST after we have inferred the type constraints but before we have actually finalized the types. Its purpose is to embed some final region constraints. The reason that this is not done earlier is that sometimes we don't know whether a given type will be a region pointer or not until this phase. In particular, we ensure that, if the type of an expression or variable is `&'r T`, then the expression or variable must occur within the region scope `r`. Note that in some cases `r` may still be a region variable, so this gives us a chance to influence the value for `r` that we infer to ensure we choose a value large enough to enclose all uses. There is a lengthy comment in visit_node() that explains this point a bit better. */ use middle::freevars::get_freevars; use middle::ty::{ReScope}; use middle::ty; use middle::typeck::astconv::AstConv; use middle::typeck::check::FnCtxt; use middle::typeck::check::regionmanip::relate_nested_regions; use middle::typeck::infer::resolve_and_force_all_but_regions; use middle::typeck::infer::resolve_type; use middle::typeck::infer; use middle::pat_util; use util::ppaux::{ty_to_str, region_to_str, Repr}; use syntax::ast::{ManagedSigil, OwnedSigil, BorrowedSigil}; use syntax::ast::{DefArg, DefBinding, DefLocal, DefSelf, DefUpvar}; use syntax::ast; use syntax::codemap::Span; use syntax::visit; use syntax::visit::Visitor; pub struct Rcx { fcx: @FnCtxt, errors_reported: uint, // id of innermost fn or loop repeating_scope: ast::NodeId, } fn encl_region_of_def(fcx: @FnCtxt, def: ast::Def) -> ty::Region { let tcx = fcx.tcx(); match def { DefLocal(node_id, _) | DefArg(node_id, _) | DefSelf(node_id, _) | DefBinding(node_id, _) => { tcx.region_maps.encl_region(node_id) } DefUpvar(_, subdef, closure_id, body_id) => { match ty::ty_closure_sigil(fcx.node_ty(closure_id)) { BorrowedSigil => encl_region_of_def(fcx, *subdef), ManagedSigil | OwnedSigil => ReScope(body_id) } } _ => { tcx.sess.bug(format!("unexpected def in encl_region_of_def: {:?}", def)) } } } impl Rcx { pub fn tcx(&self) -> ty::ctxt { self.fcx.ccx.tcx } pub fn set_repeating_scope(&mut self, scope: ast::NodeId) -> ast::NodeId { let old_scope = self.repeating_scope; self.repeating_scope = scope; old_scope } pub fn resolve_type(&mut self, unresolved_ty: ty::t) -> ty::t { /*! * Try to resolve the type for the given node, returning * t_err if an error results. Note that we never care * about the details of the error, the same error will be * detected and reported in the writeback phase. * * Note one important point: we do not attempt to resolve * *region variables* here. This is because regionck is * essentially adding constraints to those region variables * and so may yet influence how they are resolved. * * Consider this silly example: * * fn borrow(x: &int) -> &int {x} * fn foo(x: @int) -> int { // block: B * let b = borrow(x); // region: <R0> * *b * } * * Here, the region of `b` will be `<R0>`. `<R0>` is * constrainted to be some subregion of the block B and some * superregion of the call. 
If we forced it now, we'd choose * the smaller region (the call). But that would make the *b * illegal. Since we don't resolve, the type of b will be * `&<R0>.int` and then `*b` will require that `<R0>` be * bigger than the let and the `*b` expression, so we will * effectively resolve `<R0>` to be the block B. */ match resolve_type(self.fcx.infcx(), unresolved_ty, resolve_and_force_all_but_regions) { Ok(t) => t, Err(_) => ty::mk_err() } } /// Try to resolve the type for the given node. pub fn resolve_node_type(&mut self, id: ast::NodeId) -> ty::t { let t = self.fcx.node_ty(id); self.resolve_type(t) } /// Try to resolve the type for the given node. pub fn resolve_expr_type_adjusted(&mut self, expr: &ast::Expr) -> ty::t { let ty_unadjusted = self.resolve_node_type(expr.id); if ty::type_is_error(ty_unadjusted) || ty::type_is_bot(ty_unadjusted) { ty_unadjusted } else { let tcx = self.fcx.tcx(); let adjustment = { let adjustments = self.fcx.inh.adjustments.borrow(); adjustments.get().find_copy(&expr.id) }; ty::adjust_ty(tcx, expr.span, ty_unadjusted, adjustment) } } } pub fn regionck_expr(fcx: @FnCtxt, e: &ast::Expr) { let mut rcx = Rcx { fcx: fcx, errors_reported: 0, repeating_scope: e.id }; let rcx = &mut rcx; if fcx.err_count_since_creation() == 0 { // regionck assumes typeck succeeded rcx.visit_expr(e, ()); } fcx.infcx().resolve_regions(); } pub fn regionck_fn(fcx: @FnCtxt, blk: &ast::Block) { let mut rcx = Rcx { fcx: fcx, errors_reported: 0, repeating_scope: blk.id }; let rcx = &mut rcx; if fcx.err_count_since_creation() == 0 { // regionck assumes typeck succeeded rcx.visit_block(blk, ()); } fcx.infcx().resolve_regions(); } impl Visitor<()> for Rcx { // (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local, // However, right now we run into an issue whereby some free // regions are not properly related if they appear within the // types of arguments that must be inferred. This could be // addressed by deferring the construction of the region // hierarchy, and in particular the relationships between free // regions, until regionck, as described in #3238. fn
(&mut self, i: &ast::Item, _: ()) { visit_item(self, i); } fn visit_expr(&mut self, ex: &ast::Expr, _: ()) { visit_expr(self, ex); } //visit_pat: visit_pat, // (..) see above fn visit_arm(&mut self, a: &ast::Arm, _: ()) { visit_arm(self, a); } fn visit_local(&mut self, l: &ast::Local, _: ()) { visit_local(self, l); } fn visit_block(&mut self, b: &ast::Block, _: ()) { visit_block(self, b); } } fn visit_item(_rcx: &mut Rcx, _item: &ast::Item) { // Ignore items } fn visit_block(rcx: &mut Rcx, b: &ast::Block) { rcx.fcx.tcx().region_maps.record_cleanup_scope(b.id); visit::walk_block(rcx, b, ()); } fn visit_arm(rcx: &mut Rcx, arm: &ast::Arm) { // see above for &p in arm.pats.iter() { constrain_bindings_in_pat(p, rcx); } visit::walk_arm(rcx, arm, ()); } fn visit_local(rcx: &mut Rcx, l: &ast::Local) { // see above constrain_bindings_in_pat(l.pat, rcx); visit::walk_local(rcx, l, ()); } fn constrain_bindings_in_pat(pat: &ast::Pat, rcx: &mut Rcx) { let tcx = rcx.fcx.tcx(); debug!("regionck::visit_pat(pat={})", pat.repr(tcx)); pat_util::pat_bindings(tcx.def_map, pat, |_, id, span, _| { // If we have a variable that contains region'd data, that // data will be accessible from anywhere that the variable is // accessed. We must be wary of loops like this: // // // from src/test/compile-fail/borrowck-lend-flow.rs // let mut v = ~3, w = ~4; // let mut x = &mut w; // loop { // **x += 1; // (2) // borrow(v); //~ ERROR cannot borrow // x = &mut v; // (1) // } // // Typically, we try to determine the region of a borrow from // those points where it is dereferenced. In this case, one // might imagine that the lifetime of `x` need only be the // body of the loop. But of course this is incorrect because // the pointer that is created at point (1) is consumed at // point (2), meaning that it must be live across the loop // iteration. The easiest way to guarantee this is to require // that the lifetime of any regions that appear in a // variable's type enclose at least the variable's scope. let encl_region = tcx.region_maps.encl_region(id); constrain_regions_in_type_of_node( rcx, id, encl_region, infer::BindingTypeIsNotValidAtDecl(span)); }) } fn visit_expr(rcx: &mut Rcx, expr: &ast::Expr) { debug!("regionck::visit_expr(e={}, repeating_scope={:?})", expr.repr(rcx.fcx.tcx()), rcx.repeating_scope); let has_method_map = { let method_map = rcx.fcx.inh.method_map; method_map.get().contains_key(&expr.id) }; // Record cleanup scopes, which are used by borrowck to decide the // maximum lifetime of a temporary rvalue. These were derived by // examining where trans creates block scopes, not because this // reflects some principled decision around temporary lifetimes. // Ordinarily this would seem like something that should be setup // in region, but we need to know which uses of operators are // overloaded. See #3511. let tcx = rcx.fcx.tcx(); match expr.node { // You'd think that x += y where `+=` is overloaded would be a // cleanup scope. You'd be... kind of right. In fact the // handling of `+=` and friends in trans for overloaded // operators is a hopeless mess and I can't figure out how to // represent it. - ndm // // ast::expr_assign_op(..) | ast::ExprIndex(..) | ast::ExprBinary(..) | ast::ExprUnary(..) if has_method_map => { tcx.region_maps.record_cleanup_scope(expr.id); } ast::ExprBinary(_, ast::BiAnd, lhs, rhs) | ast::ExprBinary(_, ast::BiOr, lhs, rhs) => { tcx.region_maps.record_cleanup_scope(lhs.id); tcx.region_maps.record_cleanup_scope(rhs.id); } ast::ExprCall(..) | ast::ExprMethodCall(..) 
=> { tcx.region_maps.record_cleanup_scope(expr.id); } ast::ExprMatch(_, ref arms) => { tcx.region_maps.record_cleanup_scope(expr.id); for arm in arms.iter() { for guard in arm.guard.iter() { tcx.region_maps.record_cleanup_scope(guard.id); } } } ast::ExprLoop(ref body, _) => { tcx.region_maps.record_cleanup_scope(body.id); } ast::ExprWhile(cond, ref body) => { tcx.region_maps.record_cleanup_scope(cond.id); tcx.region_maps.record_cleanup_scope(body.id); } _ => {} } // Check any autoderefs or autorefs that appear. { let adjustments = rcx.fcx.inh.adjustments.borrow(); let r = adjustments.get().find(&expr.id); for &adjustment in r.iter() { debug!("adjustment={:?}", adjustment); match *adjustment { @ty::AutoDerefRef( ty::AutoDerefRef {autoderefs: autoderefs, autoref: opt_autoref}) => { let expr_ty = rcx.resolve_node_type(expr.id); constrain_derefs(rcx, expr, autoderefs, expr_ty); for autoref in opt_autoref.iter() { guarantor::for_autoref(rcx, expr, autoderefs, autoref); // Require that the resulting region encompasses // the current node. // // FIXME(#6268) remove to support nested method calls constrain_regions_in_type_of_node( rcx, expr.id, ty::ReScope(expr.id), infer::AutoBorrow(expr.span)); } } @ty::AutoObject(ast::BorrowedSigil, Some(trait_region), _, _, _, _) => { // Determine if we are casting `expr` to an trait // instance. If so, we have to be sure that the type of // the source obeys the trait's region bound. // // Note: there is a subtle point here concerning type // parameters. It is possible that the type of `source` // contains type parameters, which in turn may contain // regions that are not visible to us (only the caller // knows about them). The kind checker is ultimately // responsible for guaranteeing region safety in that // particular case. There is an extensive comment on the // function check_cast_for_escaping_regions() in kind.rs // explaining how it goes about doing that. let source_ty = rcx.fcx.expr_ty(expr); constrain_regions_in_type(rcx, trait_region, infer::RelateObjectBound(expr.span), source_ty); } _ => {} } } } match expr.node { ast::ExprCall(callee, ref args, _) => { constrain_callee(rcx, callee.id, expr, callee); constrain_call(rcx, callee.id, expr, None, *args, false); visit::walk_expr(rcx, expr, ()); } ast::ExprMethodCall(callee_id, arg0, _, _, ref args, _) => { constrain_call(rcx, callee_id, expr, Some(arg0), *args, false); visit::walk_expr(rcx, expr, ()); } ast::ExprIndex(callee_id, lhs, rhs) | ast::ExprAssignOp(callee_id, _, lhs, rhs) | ast::ExprBinary(callee_id, _, lhs, rhs) if has_method_map => { // As `expr_method_call`, but the call is via an // overloaded op. Note that we (sadly) currently use an // implicit "by ref" sort of passing style here. This // should be converted to an adjustment! constrain_call(rcx, callee_id, expr, Some(lhs), [rhs], true); visit::walk_expr(rcx, expr, ()); } ast::ExprUnary(callee_id, _, lhs) if has_method_map => { // As above. 
constrain_call(rcx, callee_id, expr, Some(lhs), [], true); visit::walk_expr(rcx, expr, ()); } ast::ExprUnary(_, ast::UnDeref, base) => { // For *a, the lifetime of a must enclose the deref let base_ty = rcx.resolve_node_type(base.id); constrain_derefs(rcx, expr, 1, base_ty); visit::walk_expr(rcx, expr, ()); } ast::ExprIndex(_, vec_expr, _) => { // For a[b], the lifetime of a must enclose the deref let vec_type = rcx.resolve_expr_type_adjusted(vec_expr); constrain_index(rcx, expr, vec_type); visit::walk_expr(rcx, expr, ()); } ast::ExprCast(source, _) => { // Determine if we are casting `source` to an trait // instance. If so, we have to be sure that the type of // the source obeys the trait's region bound. // // Note: there is a subtle point here concerning type // parameters. It is possible that the type of `source` // contains type parameters, which in turn may contain // regions that are not visible to us (only the caller // knows about them). The kind checker is ultimately // responsible for guaranteeing region safety in that // particular case. There is an extensive comment on the // function check_cast_for_escaping_regions() in kind.rs // explaining how it goes about doing that. let target_ty = rcx.resolve_node_type(expr.id); match ty::get(target_ty).sty { ty::ty_trait(_, _, ty::RegionTraitStore(trait_region), _, _) => { let source_ty = rcx.resolve_expr_type_adjusted(source); constrain_regions_in_type( rcx, trait_region, infer::RelateObjectBound(expr.span), source_ty); } _ => () } visit::walk_expr(rcx, expr, ()); } ast::ExprAddrOf(_, base) => { guarantor::for_addr_of(rcx, expr, base); // Require that when you write a `&expr` expression, the // resulting pointer has a lifetime that encompasses the // `&expr` expression itself. Note that we constraining // the type of the node expr.id here *before applying // adjustments*. // // FIXME(#6268) nested method calls requires that this rule change let ty0 = rcx.resolve_node_type(expr.id); constrain_regions_in_type(rcx, ty::ReScope(expr.id), infer::AddrOf(expr.span), ty0); visit::walk_expr(rcx, expr, ()); } ast::ExprMatch(discr, ref arms) => { guarantor::for_match(rcx, discr, *arms); visit::walk_expr(rcx, expr, ()); } ast::ExprFnBlock(..) | ast::ExprProc(..) => { check_expr_fn_block(rcx, expr); } ast::ExprLoop(body, _) => { let repeating_scope = rcx.set_repeating_scope(body.id); visit::walk_expr(rcx, expr, ()); rcx.set_repeating_scope(repeating_scope); } ast::ExprWhile(cond, body) => { let repeating_scope = rcx.set_repeating_scope(cond.id); rcx.visit_expr(cond, ()); rcx.set_repeating_scope(body.id); rcx.visit_block(body, ()); rcx.set_repeating_scope(repeating_scope); } _ => { visit::walk_expr(rcx, expr, ()); } } } fn check_expr_fn_block(rcx: &mut Rcx, expr: &ast::Expr) { let tcx = rcx.fcx.tcx(); match expr.node { ast::ExprFnBlock(_, ref body) | ast::ExprProc(_, ref body) => { let function_type = rcx.resolve_node_type(expr.id); match ty::get(function_type).sty { ty::ty_closure( ty::ClosureTy { sigil: ast::BorrowedSigil, region: region, ..}) => { if get_freevars(tcx, expr.id).is_empty() { // No free variables means that the environment // will be NULL at runtime and hence the closure // has static lifetime. } else { // Otherwise, the closure must not outlive the // variables it closes over, nor can it // outlive the innermost repeating scope // (since otherwise that would require // infinite stack). 
constrain_free_variables(rcx, region, expr); let repeating_scope = ty::ReScope(rcx.repeating_scope); rcx.fcx.mk_subr(true, infer::InfStackClosure(expr.span), region, repeating_scope); } } _ => () } let repeating_scope = rcx.set_repeating_scope(body.id); visit::walk_expr(rcx, expr, ()); rcx.set_repeating_scope(repeating_scope); } _ => { tcx.sess.span_bug( expr.span, "Expected expr_fn_block"); } } } fn constrain_callee(rcx: &mut Rcx, callee_id: ast::NodeId, call_expr: &ast::Expr, callee_expr: &ast::Expr) { let call_region = ty::ReScope(call_expr.id); let callee_ty = rcx.resolve_node_type(callee_id); match ty::get(callee_ty).sty { ty::ty_bare_fn(..) => { } ty::ty_closure(ref closure_ty) => { rcx.fcx.mk_subr(true, infer::InvokeClosure(callee_expr.span), call_region, closure_ty.region); } _ => { // this should not happen, but it does if the program is // erroneous // // tcx.sess.span_bug( // callee_expr.span, // format!("Calling non-function: {}", callee_ty.repr(tcx))); } } } fn constrain_call(rcx: &mut Rcx, // might be expr_call, expr_method_call, or an overloaded // operator callee_id: ast::NodeId, call_expr: &ast::Expr, receiver: Option<@ast::Expr>, arg_exprs: &[@ast::Expr], implicitly_ref_args: bool) { //! Invoked on every call site (i.e., normal calls, method calls, //! and overloaded operators). Constrains the regions which appear //! in the type of the function. Also constrains the regions that //! appear in the arguments appropriately. let tcx = rcx.fcx.tcx(); debug!("constrain_call(call_expr={}, \ receiver={}, \ arg_exprs={}, \ implicitly_ref_args={:?})", call_expr.repr(tcx), receiver.repr(tcx), arg_exprs.repr(tcx), implicitly_ref_args); let callee_ty = rcx.resolve_node_type(callee_id); if ty::type_is_error(callee_ty) { // Bail, as function type is unknown return; } let fn_sig = ty::ty_fn_sig(callee_ty); // `callee_region` is the scope representing the time in which the // call occurs. // // FIXME(#6268) to support nested method calls, should be callee_id let callee_scope = call_expr.id; let callee_region = ty::ReScope(callee_scope); for &arg_expr in arg_exprs.iter() { debug!("Argument"); // ensure that any regions appearing in the argument type are // valid for at least the lifetime of the function: constrain_regions_in_type_of_node( rcx, arg_expr.id, callee_region, infer::CallArg(arg_expr.span)); // unfortunately, there are two means of taking implicit // references, and we need to propagate constraints as a // result. modes are going away and the "DerefArgs" code // should be ported to use adjustments if implicitly_ref_args { guarantor::for_by_ref(rcx, arg_expr, callee_scope); } } // as loop above, but for receiver for &r in receiver.iter() { debug!("Receiver"); constrain_regions_in_type_of_node( rcx, r.id, callee_region, infer::CallRcvr(r.span)); if implicitly_ref_args { guarantor::for_by_ref(rcx, r, callee_scope); } } // constrain regions that may appear in the return type to be // valid for the function call: constrain_regions_in_type( rcx, callee_region, infer::CallReturn(call_expr.span), fn_sig.output); } fn constrain_derefs(rcx: &mut Rcx, deref_expr: &ast::Expr, derefs: uint, mut derefd_ty: ty::t) { /*! * Invoked on any dereference that occurs, whether explicitly * or through an auto-deref. Checks that if this is a region * pointer being derefenced, the lifetime of the pointer includes * the deref expr. 
*/ let r_deref_expr = ty::ReScope(deref_expr.id); for i in range(0u, derefs) { debug!("constrain_derefs(deref_expr=?, derefd_ty={}, derefs={:?}/{:?}", rcx.fcx.infcx().ty_to_str(derefd_ty), i, derefs); match ty::get(derefd_ty).sty { ty::ty_rptr(r_ptr, _) => { mk_subregion_due_to_derefence(rcx, deref_expr.span, r_deref_expr, r_ptr); } _ => {} } match ty::deref(derefd_ty, true) { Some(mt) => derefd_ty = mt.ty, /* if this type can't be dereferenced, then there's already an error in the session saying so. Just bail out for now */ None => break } } } pub fn mk_subregion_due_to_derefence(rcx: &mut Rcx, deref_span: Span, minimum_lifetime: ty::Region, maximum_lifetime: ty::Region) { rcx.fcx.mk_subr(true, infer::DerefPointer(deref_span), minimum_lifetime, maximum_lifetime) } fn constrain_index(rcx: &mut Rcx, index_expr: &ast::Expr, indexed_ty: ty::t) { /*! * Invoked on any index expression that occurs. Checks that if * this is a slice being indexed, the lifetime of the pointer * includes the deref expr. */ debug!("constrain_index(index_expr=?, indexed_ty={}", rcx.fcx.infcx().ty_to_str(indexed_ty)); let r_index_expr = ty::ReScope(index_expr.id); match ty::get(indexed_ty).sty { ty::ty_estr(ty::vstore_slice(r_ptr)) | ty::ty_evec(_, ty::vstore_slice(r_ptr)) => { rcx.fcx.mk_subr(true, infer::IndexSlice(index_expr.span), r_index_expr, r_ptr); } _ => {} } } fn constrain_free_variables(rcx: &mut Rcx, region: ty::Region, expr: &ast::Expr) { /*! * Make sure that all free variables referenced inside the closure * outlive the closure itself. */ let tcx = rcx.fcx.ccx.tcx; debug!("constrain_free_variables({}, {})", region.repr(tcx), expr.repr(tcx)); for freevar in get_freevars(tcx, expr.id).iter() { debug!("freevar def is {:?}", freevar.def); let def = freevar.def; let en_region = encl_region_of_def(rcx.fcx, def); debug!("en_region = {}", en_region.repr(tcx)); rcx.fcx.mk_subr(true, infer::FreeVariable(freevar.span), region, en_region); } } fn constrain_regions_in_type_of_node( rcx: &mut Rcx, id: ast::NodeId, minimum_lifetime: ty::Region, origin: infer::SubregionOrigin) -> bool { //! Guarantees that any lifetimes which appear in the type of //! the node `id` (after applying adjustments) are valid for at //! least `minimum_lifetime` let tcx = rcx.fcx.tcx(); // Try to resolve the type. If we encounter an error, then typeck // is going to fail anyway, so just stop here and let typeck // report errors later on in the writeback phase. let ty0 = rcx.resolve_node_type(id); let adjustment = { let adjustments = rcx.fcx.inh.adjustments.borrow(); adjustments.get().find_copy(&id) }; let ty = ty::adjust_ty(tcx, origin.span(), ty0, adjustment); debug!("constrain_regions_in_type_of_node(\ ty={}, ty0={}, id={}, minimum_lifetime={:?}, adjustment={:?})", ty_to_str(tcx, ty), ty_to_str(tcx, ty0), id, minimum_lifetime, adjustment); constrain_regions_in_type(rcx, minimum_lifetime, origin, ty) } fn constrain_regions_in_type( rcx: &mut Rcx, minimum_lifetime: ty::Region, origin: infer::SubregionOrigin, ty: ty::t) -> bool { /*! * Requires that any regions which appear in `ty` must be * superregions of `minimum_lifetime`. Also enforces the constraint * that given a pointer type `&'r T`, T must not contain regions * that outlive 'r, as well as analogous constraints for other * lifetime'd types. * * This check prevents regions from being used outside of the block in * which they are valid. 
Recall that regions represent blocks of * code or expressions: this requirement basically says "any place * that uses or may use a region R must be within the block of * code that R corresponds to." */ let e = rcx.errors_reported; let tcx = rcx.fcx.ccx.tcx; debug!("constrain_regions_in_type(minimum_lifetime={}, ty={})", region_to_str(tcx, "", false, minimum_lifetime), ty_to_str(tcx, ty)); relate_nested_regions(tcx, Some(minimum_lifetime), ty, |r_sub, r_sup| { debug!("relate_nested_regions(r_sub={}, r_sup={})", r_sub.repr(tcx), r_sup.repr(tcx)); if r_sup.is_bound() || r_sub.is_bound() { // a bound region is one which appears inside an fn type. // (e.g., the `&` in `fn(&T)`). Such regions need not be // constrained by `minimum_lifetime` as they are placeholders // for regions that are as-yet-unknown. } else if r_sub == minimum_lifetime { rcx.fcx.mk_subr( true, origin, r_sub, r_sup); } else { rcx.fcx.mk_subr( true, infer::ReferenceOutlivesReferent(ty, origin.span()), r_sub, r_sup); } }); return (e == rcx.errors_reported); } pub mod guarantor { /*! * The routines in this module are aiming to deal with the case * where a the contents of a reference are re-borrowed. * Imagine you have a reference `b` with lifetime L1 and * you have an expression `&*b`. The result of this borrow will * be another reference with lifetime L2 (which is an * inference variable). The borrow checker is going to enforce * the constraint that L2 < L1, because otherwise you are * re-borrowing data for a lifetime larger than the original loan. * However, without the routines in this module, the region * inferencer would not know of this dependency and thus it might * infer the lifetime of L2 to be greater than L1 (issue #3148). * * There are a number of troublesome scenarios in the tests * `region-dependent-*.rs`, but here is one example: * * struct Foo { i: int } * struct Bar { foo: Foo } * fn get_i(x: &'a Bar) -> &'a int { * let foo = &x.foo; // Lifetime L1 * &foo.i // Lifetime L2 * } * * Note that this comes up either with `&` expressions, `ref` * bindings, and `autorefs`, which are the three ways to introduce * a borrow. * * The key point here is that when you are borrowing a value that * is "guaranteed" by a reference, you must link the * lifetime of that reference (L1, here) to the lifetime of * the borrow itself (L2). What do I mean by "guaranteed" by a * reference? I mean any data that is reached by first * dereferencing a reference and then either traversing * interior offsets or owned pointers. We say that the guarantor * of such data it the region of the reference that was * traversed. This is essentially the same as the ownership * relation, except that a reference never owns its * contents. * * NB: I really wanted to use the `mem_categorization` code here * but I cannot because final type resolution hasn't happened yet, * and `mem_categorization` requires that all types be known. * So this is very similar logic to what you would find there, * but more special purpose. */ use middle::typeck::astconv::AstConv; use middle::typeck::check::regionck::Rcx; use middle::typeck::check::regionck::mk_subregion_due_to_derefence; use middle::typeck::infer; use middle::ty; use syntax::ast; use syntax::codemap::Span; use util::ppaux::{ty_to_str, Repr}; pub fn for_addr_of(rcx: &mut Rcx, expr: &ast::Expr, base: &ast::Expr) { /*! * Computes the guarantor for an expression `&base` and then * ensures that the lifetime of the resulting pointer is linked * to the lifetime of its guarantor (if any). 
*/ debug!("guarantor::for_addr_of(base=?)"); let guarantor = guarantor(rcx, base); link(rcx, expr.span, expr.id, guarantor); } pub fn for_match(rcx: &mut Rcx, discr: &ast::Expr, arms: &[ast::Arm]) { /*! * Computes the guarantors for any ref bindings in a match and * then ensures that the lifetime of the resulting pointer is * linked to the lifetime of its guarantor (if any). */ debug!("regionck::for_match()"); let discr_guarantor = guarantor(rcx, discr); debug!("discr_guarantor={}", discr_guarantor.repr(rcx.tcx())); for arm in arms.iter() { for pat in arm.pats.iter() { link_ref_bindings_in_pat(rcx, *pat, discr_guarantor); } } } pub fn for_autoref(rcx: &mut Rcx, expr: &ast::Expr, autoderefs: uint, autoref: &ty::AutoRef) { /*! * Computes the guarantor for an expression that has an * autoref adjustment and links it to the lifetime of the * autoref. This is only important when auto re-borrowing * region pointers. */ debug!("guarantor::for_autoref(autoref={:?})", autoref); let mut expr_ct = categorize_unadjusted(rcx, expr); debug!(" unadjusted cat={:?}", expr_ct.cat); expr_ct = apply_autoderefs( rcx, expr, autoderefs, expr_ct); match *autoref { ty::AutoPtr(r, _) => { // In this case, we are implicitly adding an `&`. maybe_make_subregion(rcx, expr, r, expr_ct.cat.guarantor); } ty::AutoBorrowVec(r, _) | ty::AutoBorrowVecRef(r, _) | ty::AutoBorrowFn(r) | ty::AutoBorrowObj(r, _) => { // In each of these cases, what is being borrowed is // not the (autoderef'd) expr itself but rather the // contents of the autoderef'd expression (i.e., what // the pointer points at). maybe_make_subregion(rcx, expr, r, guarantor_of_deref(&expr_ct.cat)); } ty::AutoUnsafe(_) => {} } fn maybe_make_subregion( rcx: &mut Rcx, expr: &ast::Expr, sub_region: ty::Region, sup_region: Option<ty::Region>) { for r in sup_region.iter() { rcx.fcx.mk_subr(true, infer::Reborrow(expr.span), sub_region, *r); } } } pub fn for_by_ref(rcx: &mut Rcx, expr: &ast::Expr, callee_scope: ast::NodeId) { /*! * Computes the guarantor for cases where the `expr` is * being passed by implicit reference and must outlive * `callee_scope`. */ let tcx = rcx.tcx(); debug!("guarantor::for_by_ref(expr={}, callee_scope={:?})", expr.repr(tcx), callee_scope); let expr_cat = categorize(rcx, expr); debug!("guarantor::for_by_ref(expr={:?}, callee_scope={:?}) category={:?}", expr.id, callee_scope, expr_cat); let minimum_lifetime = ty::ReScope(callee_scope); for guarantor in expr_cat.guarantor.iter() { mk_subregion_due_to_derefence(rcx, expr.span, minimum_lifetime, *guarantor); } } fn link( rcx: &mut Rcx, span: Span, id: ast::NodeId, guarantor: Option<ty::Region>) { /*! * * Links the lifetime of the reference resulting from a borrow * to the lifetime of its guarantor (if any). */ debug!("link(id={:?}, guarantor={:?})", id, guarantor); let bound = match guarantor { None => { // If guarantor is None, then the value being borrowed // is not guaranteed by a region pointer, so there are // no lifetimes to link. return; } Some(r) => { r } }; // this routine is used for the result of ref bindings and & // expressions, both of which always yield a region variable, so // mk_subr should never fail. let rptr_ty = rcx.resolve_node_type(id); if !ty::type_is_bot(rptr_ty) { let tcx = rcx.fcx.ccx.tcx; debug!("rptr_ty={}", ty_to_str(tcx, rptr_ty)); let r = ty::ty_region(tcx, span, rptr_ty); rcx.fcx.mk_subr(true, infer::Reborrow(span), r, bound); } } /// Categorizes types based on what kind of pointer they are. 
/// Note that we don't bother to distinguish between rptrs (&T) /// and slices (&[T], &str)---they are all just `BorrowedPointer`. enum PointerCategorization { NotPointer, OwnedPointer, BorrowedPointer(ty::Region), OtherPointer } /// Guarantor of an expression paired with the /// PointerCategorization` of its type. struct ExprCategorization { guarantor: Option<ty::Region>, pointer: PointerCategorization } /// ExprCategorization paired with the full type of the expr struct ExprCategorizationType { cat: ExprCategorization, ty: ty::t } fn guarantor(rcx: &mut Rcx, expr: &ast::Expr) -> Option<ty::Region> { /*! * * Computes the guarantor of `expr`, or None if `expr` is * not guaranteed by any region. Here `expr` is some expression * whose address is being taken (e.g., there is an expression * `&expr`). */ debug!("guarantor()"); match expr.node { ast::ExprUnary(_, ast::UnDeref, b) => { let cat = categorize(rcx, b); guarantor_of_deref(&cat) } ast::ExprField(b, _, _) => { categorize(rcx, b).guarantor } ast::ExprIndex(_, b, _) => { let cat = categorize(rcx, b); guarantor_of_deref(&cat) } ast::ExprParen(e) => { guarantor(rcx, e) } ast::ExprPath(..) | ast::ExprSelf => { // Either a variable or constant and hence resides // in constant memory or on the stack frame. Either way, // not guaranteed by a region pointer. None } // All of these expressions are rvalues and hence their // value is not guaranteed by a region pointer. ast::ExprInlineAsm(..) | ast::ExprMac(..) | ast::ExprLit(_) | ast::ExprUnary(..) | ast::ExprAddrOf(..) | ast::ExprBinary(..) | ast::ExprVstore(..) | ast::ExprBox(..) | ast::ExprBreak(..) | ast::ExprAgain(..) | ast::ExprRet(..) | ast::ExprLogLevel | ast::ExprWhile(..) | ast::ExprLoop(..) | ast::ExprAssign(..) | ast::ExprAssignOp(..) | ast::ExprCast(..) | ast::ExprCall(..) | ast::ExprMethodCall(..) | ast::ExprStruct(..) | ast::ExprTup(..) | ast::ExprIf(..) | ast::ExprMatch(..) | ast::ExprFnBlock(..) | ast::ExprProc(..) | ast::ExprDoBody(..) | ast::ExprBlock(..) | ast::ExprRepeat(..) | ast::ExprVec(..) => { assert!(!ty::expr_is_lval( rcx.fcx.tcx(), rcx.fcx.inh.method_map, expr)); None } ast::ExprForLoop(..) => fail!("non-desugared expr_for_loop"), } } fn categorize(rcx: &mut Rcx, expr: &ast::Expr) -> ExprCategorization { debug!("categorize()"); let mut expr_ct = categorize_unadjusted(rcx, expr); debug!("before adjustments, cat={:?}", expr_ct.cat); let adjustments = rcx.fcx.inh.adjustments.borrow(); match adjustments.get().find(&expr.id) { Some(&@ty::AutoAddEnv(..)) => { // This is basically an rvalue, not a pointer, no regions // involved. 
expr_ct.cat = ExprCategorization { guarantor: None, pointer: NotPointer }; } Some(&@ty::AutoObject(ast::BorrowedSigil, Some(region), _, _, _, _)) => { expr_ct.cat = ExprCategorization { guarantor: None, pointer: BorrowedPointer(region) }; } Some(&@ty::AutoObject(ast::OwnedSigil, _, _, _, _, _)) => { expr_ct.cat = ExprCategorization { guarantor: None, pointer: OwnedPointer }; } Some(&@ty::AutoObject(ast::ManagedSigil, _, _, _, _, _)) => { expr_ct.cat = ExprCategorization { guarantor: None, pointer: OtherPointer }; } Some(&@ty::AutoDerefRef(ref adjustment)) => { debug!("adjustment={:?}", adjustment); expr_ct = apply_autoderefs( rcx, expr, adjustment.autoderefs, expr_ct); match adjustment.autoref { None => { } Some(ty::AutoUnsafe(_)) => { expr_ct.cat.guarantor = None; expr_ct.cat.pointer = OtherPointer; debug!("autoref, cat={:?}", expr_ct.cat); } Some(ty::AutoPtr(r, _)) | Some(ty::AutoBorrowVec(r, _)) | Some(ty::AutoBorrowVecRef(r, _)) | Some(ty::AutoBorrowFn(r)) | Some(ty::AutoBorrowObj(r, _)) => { // If there is an autoref, then the result of this // expression will be some sort of reference. expr_ct.cat.guarantor = None; expr_ct.cat.pointer = BorrowedPointer(r); debug!("autoref, cat={:?}", expr_ct.cat); } } } Some(..) => fail!("invalid or unhandled adjustment"), None => {} } debug!("result={:?}", expr_ct.cat); return expr_ct.cat; } fn categorize_unadjusted(rcx: &mut Rcx, expr: &ast::Expr) -> ExprCategorizationType { debug!("categorize_unadjusted()"); let guarantor = { let method_map = rcx.fcx.inh.method_map.borrow(); if method_map.get().contains_key(&expr.id) { None } else { guarantor(rcx, expr) } }; let expr_ty = rcx.resolve_node_type(expr.id); ExprCategorizationType { cat: ExprCategorization { guarantor: guarantor, pointer: pointer_categorize(expr_ty) }, ty: expr_ty } } fn apply_autoderefs( rcx: &mut Rcx, expr: &ast::Expr, autoderefs: uint, ct: ExprCategorizationType) -> ExprCategorizationType { let mut ct = ct; let tcx = rcx.fcx.ccx.tcx; if (ty::type_is_error(ct.ty)) { ct.cat.pointer = NotPointer; return ct; } for _ in range(0u, autoderefs) { ct.cat.guarantor = guarantor_of_deref(&ct.cat); match ty::deref(ct.ty, true) { Some(mt) => { ct.ty = mt.ty; ct.cat.pointer = pointer_categorize(ct.ty); } None => { tcx.sess.span_bug( expr.span, format!("Autoderef but type not derefable: {}", ty_to_str(tcx, ct.ty))); } } debug!("autoderef, cat={:?}", ct.cat); } return ct; } fn pointer_categorize(ty: ty::t) -> PointerCategorization { match ty::get(ty).sty { ty::ty_rptr(r, _) | ty::ty_evec(_, ty::vstore_slice(r)) | ty::ty_trait(_, _, ty::RegionTraitStore(r), _, _) | ty::ty_estr(ty::vstore_slice(r)) => { BorrowedPointer(r) } ty::ty_uniq(..) | ty::ty_estr(ty::vstore_uniq) | ty::ty_trait(_, _, ty::UniqTraitStore, _, _) | ty::ty_evec(_, ty::vstore_uniq) => { OwnedPointer } ty::ty_box(..) | ty::ty_ptr(..) | ty::ty_evec(_, ty::vstore_box) | ty::ty_trait(_, _, ty::BoxTraitStore, _, _) | ty::ty_estr(ty::vstore_box) => { OtherPointer } ty::ty_closure(ref closure_ty) => { match closure_ty.sigil { ast::BorrowedSigil => BorrowedPointer(closure_ty.region), ast::OwnedSigil => OwnedPointer, ast::ManagedSigil => OtherPointer, } } _ => { NotPointer } } } fn guarantor_of_deref(cat: &ExprCategorization) -> Option<ty::Region> { match cat.pointer { NotPointer => cat.guarantor, BorrowedPointer(r) => Some(r), OwnedPointer => cat.guarantor, OtherPointer => None } } fn link_ref_bindings_in_pat( rcx: &mut Rcx, pat: &ast::Pat, guarantor: Option<ty::Region>) { /*! 
* * Descends through the pattern, tracking the guarantor * of the value being matched. When a ref binding is encountered, * links the lifetime of that ref binding to the lifetime of * the guarantor. We begin with the guarantor of the * discriminant but of course as we go we may pass through * other pointers. */ debug!("link_ref_bindings_in_pat(pat={}, guarantor={:?})", rcx.fcx.pat_to_str(pat), guarantor); match pat.node { ast::PatWild | ast::PatWildMulti => {} ast::PatIdent(ast::BindByRef(_), _, opt_p) => { link(rcx, pat.span, pat.id, guarantor); for p in opt_p.iter() { link_ref_bindings_in_pat(rcx, *p, guarantor); } } ast::PatIdent(_, _, opt_p) => { for p in opt_p.iter() { link_ref_bindings_in_pat(rcx, *p, guarantor); } } ast::PatEnum(_, None) => {} ast::PatEnum(_, Some(ref pats)) => { link_ref_bindings_in_pats(rcx, pats, guarantor); } ast::PatStruct(_, ref fpats, _) => { for fpat in fpats.iter() { link_ref_bindings_in_pat(rcx, fpat.pat, guarantor); } } ast::PatTup(ref ps) => { link_ref_bindings_in_pats(rcx, ps, guarantor) } ast::PatBox(p) => { link_ref_bindings_in_pat(rcx, p, None) } ast::PatUniq(p) => { link_ref_bindings_in_pat(rcx, p, guarantor) } ast::PatRegion(p) => { let rptr_ty = rcx.resolve_node_type(pat.id); let r = ty::ty_region(rcx.fcx.tcx(), pat.span, rptr_ty); link_ref_bindings_in_pat(rcx, p, Some(r)); } ast::PatLit(..) => {} ast::PatRange(..) => {} ast::PatVec(ref before, ref slice, ref after) => { let vec_ty = rcx.resolve_node_type(pat.id); let vstore = ty::ty_vstore(vec_ty); let guarantor1 = match vstore { ty::vstore_fixed(_) | ty::vstore_uniq => guarantor, ty::vstore_slice(r) => Some(r), ty::vstore_box => None }; link_ref_bindings_in_pats(rcx, before, guarantor1); for &p in slice.iter() { link_ref_bindings_in_pat(rcx, p, guarantor); } link_ref_bindings_in_pats(rcx, after, guarantor1); } } } fn link_ref_bindings_in_pats(rcx: &mut Rcx, pats: &~[@ast::Pat], guarantor: Option<ty::Region>) { for pat in pats.iter() { link_ref_bindings_in_pat(rcx, *pat, guarantor); } } }
visit_item
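The `link_ref_bindings_in_pat` pass above links the lifetime of each `ref` binding to the guarantor of the value being matched. A minimal sketch of the invariant it enforces, written in modern Rust rather than the pre-1.0 dialect of this source (names are hypothetical, not part of the compiler code):

```rust
fn main() {
    let discriminant = (String::from("guarded"), 5u32);
    let borrowed: &String;
    // The `ref` binding borrows from `discriminant`, the guarantor:
    // it must outlive every use of `borrowed`.
    match discriminant {
        (ref s, _) => borrowed = s,
    }
    println!("{}", borrowed); // fine: the guarantor is still alive here
    // Uncommenting the next two lines is rejected by the borrow checker,
    // because the borrow would then outlive its guarantor:
    // drop(discriminant);
    // println!("{}", borrowed);
}
```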
product-item.component.ts
import { Component, Input } from '@angular/core'; @Component({ selector: 'sellit-product-item', templateUrl: './product-item.component.html', styleUrls: ['./product-item.component.scss'] }) export class
{ @Input() public item = {}; }
ProductItemComponent
lib.rs
//! Client library for the [Discord](https://discord.com) API. //! //! The Discord API can be divided into three main components: the RESTful API //! to which calls can be made to take actions, a websocket-based permanent //! connection over which state updates are received, and the voice calling //! system. //! //! Log in to Discord with `Discord::new`, `new_cache`, or `from_bot_token` as appropriate. //! The resulting value can be used to make REST API calls to post messages and manipulate Discord //! state. Calling `connect()` will open a websocket connection, through which events can be //! received. These two channels are enough to write a simple chatbot which can //! read and respond to messages. //! //! For more in-depth tracking of Discord state, a `State` can be seeded with //! the `ReadyEvent` obtained when opening a `Connection` and kept updated with //! the events received over it. //! #![cfg_attr( not(feature = "voice"), doc = "*<b>NOTE</b>: The library has been compiled without voice support.*" )] //! To join voice servers, call `Connection::voice` to get a `VoiceConnection` and use `connect` //! to join a channel, then `play` and `stop` to control playback. Manipulating deaf/mute state //! and receiving audio are also possible. //! //! For examples, see the `examples` directory in the source tree. #![warn(missing_docs)] #![allow(deprecated)] extern crate base64; extern crate chrono; extern crate flate2; extern crate hyper; extern crate hyper_native_tls; extern crate multipart; extern crate serde; extern crate websocket; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; #[macro_use] extern crate bitflags; #[macro_use] extern crate log; #[cfg(feature = "voice")] extern crate byteorder; #[cfg(feature = "voice")] extern crate opus; #[cfg(feature = "voice")] extern crate sodiumoxide; use std::collections::BTreeMap; use std::time; type Object = serde_json::Map<String, serde_json::Value>; mod connection; mod error; mod ratelimit; mod state; #[cfg(feature = "voice")] pub mod voice; macro_rules! cdn_concat { ($e:expr) => { // Out of everything, only the CDN still uses the old domain. concat!("https://cdn.discordapp.com", $e) }; } #[macro_use] mod serial; pub mod builders; pub mod model; use builders::*; pub use connection::Connection; pub use error::{Error, Result}; use model::*; use ratelimit::RateLimits; pub use state::{ChannelRef, State}; const USER_AGENT: &'static str = concat!( "DiscordBot (https://github.com/SpaceManiac/discord-rs, ", env!("CARGO_PKG_VERSION"), ")" ); macro_rules! api_concat { ($e:expr) => { concat!("https://discord.com/api/v6", $e) }; } macro_rules! status_concat { ($e:expr) => { concat!("https://status.discord.com/api/v2", $e) }; } macro_rules! request { ($self_:ident, $method:ident($body:expr), $url:expr, $($rest:tt)*) => {{ let path = format!(api_concat!($url), $($rest)*); $self_.request(&path, || $self_.client.$method(&path).body(&$body))? }}; ($self_:ident, $method:ident, $url:expr, $($rest:tt)*) => {{ let path = format!(api_concat!($url), $($rest)*); $self_.request(&path, || $self_.client.$method(&path))? }}; ($self_:ident, $method:ident($body:expr), $url:expr) => {{ let path = api_concat!($url); $self_.request(path, || $self_.client.$method(path).body(&$body))? }}; ($self_:ident, $method:ident, $url:expr) => {{ let path = api_concat!($url); $self_.request(path, || $self_.client.$method(path))? }}; } /// Client for the Discord REST API. /// /// Log in to the API with a user's email and password using `new()`. 
Call /// `connect()` to create a `Connection` on which to receive events. If desired, /// use `logout()` to invalidate the token when done. Other methods manipulate /// the Discord REST API. pub struct Discord { rate_limits: RateLimits, client: hyper::Client, token: String, } fn tls_client() -> hyper::Client { let tls = hyper_native_tls::NativeTlsClient::new().expect("Error initializing NativeTlsClient"); let connector = hyper::net::HttpsConnector::new(tls); hyper::Client::with_connector(connector) } impl Discord { /// Log in to the Discord Rest API and acquire a token. #[deprecated(note = "Login automation is not recommended. Use `from_user_token` instead.")] pub fn new(email: &str, password: &str) -> Result<Discord> { let mut map = BTreeMap::new(); map.insert("email", email); map.insert("password", password); let client = tls_client(); let response = check_status( client .post(api_concat!("/auth/login")) .header(hyper::header::ContentType::json()) .header(hyper::header::UserAgent(USER_AGENT.to_owned())) .body(&serde_json::to_string(&map)?) .send(), )?; let mut json: BTreeMap<String, String> = serde_json::from_reader(response)?; let token = match json.remove("token") { Some(token) => token, None => { return Err(Error::Protocol( "Response missing \"token\" in Discord::new()", )) } }; Ok(Discord { rate_limits: RateLimits::default(), client: client, token: token, }) } /// Log in to the Discord Rest API, possibly using a cached login token. /// /// Cached login tokens are keyed to the email address and will be read from /// and written to the specified path. If no cached token was found and no /// password was specified, an error is returned. #[deprecated(note = "Login automation is not recommended. Use `from_user_token` instead.")] #[allow(deprecated)] pub fn new_cache<P: AsRef<std::path::Path>>( path: P, email: &str, password: Option<&str>, ) -> Result<Discord> { use std::fs::File; use std::io::{BufRead, BufReader, Write}; // Read the cache, looking for our token let path = path.as_ref(); let mut initial_token: Option<String> = None; if let Ok(file) = File::open(path) { for line in BufReader::new(file).lines() { let line = line?; let parts: Vec<_> = line.split('\t').collect(); if parts.len() == 2 && parts[0] == email { initial_token = Some(parts[1].trim().into()); break; } } } // Perform the login let discord = if let Some(ref initial_token) = initial_token { let mut map = BTreeMap::new(); map.insert("email", email); if let Some(password) = password { map.insert("password", password); } let client = tls_client(); let response = check_status( client .post(api_concat!("/auth/login")) .header(hyper::header::ContentType::json()) .header(hyper::header::UserAgent(USER_AGENT.to_owned())) .header(hyper::header::Authorization(initial_token.clone())) .body(&serde_json::to_string(&map)?) .send(), )?; let mut json: BTreeMap<String, String> = serde_json::from_reader(response)?; let token = match json.remove("token") { Some(token) => token, None => { return Err(Error::Protocol( "Response missing \"token\" in Discord::new()", )) } }; Discord { rate_limits: RateLimits::default(), client: client, token: token, } } else if let Some(password) = password { Discord::new(email, password)? 
} else { return Err(Error::Other( "No password was specified and no cached token was found", )); }; // Write the token back out, if needed if initial_token.as_ref() != Some(&discord.token) { let mut tokens = Vec::new(); tokens.push(format!("{}\t{}", email, discord.token)); if let Ok(file) = File::open(path) { for line in BufReader::new(file).lines() { let line = line?; if line.split('\t').next() != Some(email) { tokens.push(line); } } } let mut file = File::create(path)?; for line in tokens { file.write_all(line.as_bytes())?; file.write_all(&[b'\n'])?; } } Ok(discord) } fn from_token_raw(token: String) -> Discord { Discord { rate_limits: RateLimits::default(), client: tls_client(), token: token, } } /// Log in as a bot account using the given authentication token. /// /// The token will automatically be prefixed with "Bot ". pub fn from_bot_token(token: &str) -> Result<Discord> { Ok(Discord::from_token_raw(format!("Bot {}", token.trim()))) } /// Log in as a user account using the given authentication token. pub fn from_user_token(token: &str) -> Result<Discord> { Ok(Discord::from_token_raw(token.trim().to_owned())) } /// Log out from the Discord API, invalidating this client's token. #[deprecated(note = "Accomplishes nothing and may fail for no reason.")] pub fn logout(self) -> Result<()> { let map = json! {{ "provider": null, "token": null, }}; let body = serde_json::to_string(&map)?; check_empty(request!(self, post(body), "/auth/logout")) } fn request<'a, F: Fn() -> hyper::client::RequestBuilder<'a>>( &self, url: &str, f: F, ) -> Result<hyper::client::Response> { self.rate_limits.pre_check(url); let f2 = || { f().header(hyper::header::ContentType::json()) .header(hyper::header::Authorization(self.token.clone())) }; let result = retry(&f2); if let Ok(response) = result.as_ref() { if self.rate_limits.post_update(url, response) { // we were rate limited, we have slept, it is time to retry // the request once. if it fails the second time, give up debug!("Retrying after having been ratelimited"); let result = retry(f2); if let Ok(response) = result.as_ref() { self.rate_limits.post_update(url, response); } return check_status(result); } } check_status(result) } /// Create a channel. pub fn create_channel( &self, server: ServerId, name: &str, kind: ChannelType, ) -> Result<Channel> { let map = json! {{ "name": name, "type": kind.name(), }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/guilds/{}/channels", server); Channel::decode(serde_json::from_reader(response)?) } /// Get the list of channels in a server. pub fn get_server_channels(&self, server: ServerId) -> Result<Vec<PublicChannel>> { let response = request!(self, get, "/guilds/{}/channels", server); decode_array(serde_json::from_reader(response)?, PublicChannel::decode) } /// Get information about a channel. pub fn get_channel(&self, channel: ChannelId) -> Result<Channel> { let response = request!(self, get, "/channels/{}", channel); Channel::decode(serde_json::from_reader(response)?) } /// Edit a channel's details. See `EditChannel` for the editable fields. /// /// ```ignore /// // Edit a channel's name and topic /// discord.edit_channel(channel_id, "general", |ch| ch /// .topic("Welcome to the general chat!") /// ); /// ``` pub fn edit_channel<F: FnOnce(EditChannel) -> EditChannel>( &self, channel: ChannelId, f: F, ) -> Result<PublicChannel> { // Work around the fact that this supposed PATCH call actually requires all fields let mut map = Object::new(); match self.get_channel(channel)?
{ Channel::Private(_) => return Err(Error::Other("Can not edit private channels")), Channel::Public(channel) => { map.insert("name".into(), channel.name.into()); map.insert("position".into(), channel.position.into()); match channel.kind { ChannelType::Text => { map.insert("topic".into(), json!(channel.topic)); } ChannelType::Voice => { map.insert("bitrate".into(), json!(channel.bitrate)); map.insert("user_limit".into(), json!(channel.user_limit)); } _ => { return Err(Error::Other(stringify!(format!( "Unreachable channel type: {:?}", channel.kind )))) } } } Channel::Group(group) => { map.insert("name".into(), json!(group.name)); } Channel::Category(_) => {} Channel::News => {} Channel::Store => {} }; let map = EditChannel::__apply(f, map); let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/channels/{}", channel); PublicChannel::decode(serde_json::from_reader(response)?) } /// Delete a channel. pub fn delete_channel(&self, channel: ChannelId) -> Result<Channel> { let response = request!(self, delete, "/channels/{}", channel); Channel::decode(serde_json::from_reader(response)?) } /// Indicate typing on a channel for the next 5 seconds. pub fn broadcast_typing(&self, channel: ChannelId) -> Result<()> { check_empty(request!(self, post, "/channels/{}/typing", channel)) } /// Get a single message by ID from a given channel. pub fn get_message(&self, channel: ChannelId, message: MessageId) -> Result<Message> { let response = request!(self, get, "/channels/{}/messages/{}", channel, message); from_reader(response) } /// Get messages in the backlog for a given channel. /// /// The `what` argument should be one of the options in the `GetMessages` /// enum, and will determine which messages will be returned. A message /// limit can also be specified, and defaults to 50. More recent messages /// will appear first in the list. pub fn get_messages( &self, channel: ChannelId, what: GetMessages, limit: Option<u64>, ) -> Result<Vec<Message>> { use std::fmt::Write; let mut url = format!( api_concat!("/channels/{}/messages?limit={}"), channel, limit.unwrap_or(50) ); match what { GetMessages::MostRecent => {} GetMessages::Before(id) => { let _ = write!(url, "&before={}", id); } GetMessages::After(id) => { let _ = write!(url, "&after={}", id); } GetMessages::Around(id) => { let _ = write!(url, "&around={}", id); } } let response = self.request(&url, || self.client.get(&url))?; from_reader(response) } /// Gets the pinned messages for a given channel. pub fn get_pinned_messages(&self, channel: ChannelId) -> Result<Vec<Message>> { let response = request!(self, get, "/channels/{}/pins", channel); from_reader(response) } /// Pin the given message to the given channel. /// /// Requires that the logged in account have the "MANAGE_MESSAGES" permission. pub fn pin_message(&self, channel: ChannelId, message: MessageId) -> Result<()> { check_empty(request!( self, put, "/channels/{}/pins/{}", channel, message )) } /// Removes the given message from being pinned to the given channel. /// /// Requires that the logged in account have the "MANAGE_MESSAGES" permission. pub fn unpin_message(&self, channel: ChannelId, message: MessageId) -> Result<()> { check_empty(request!( self, delete, "/channels/{}/pins/{}", channel, message )) } /// Send a message to a given channel. /// /// The `nonce` will be returned in the result and also transmitted to other /// clients. The empty string is a good default if you don't care. 
pub fn send_message_ex<F: FnOnce(SendMessage) -> SendMessage>( &self, channel: ChannelId, f: F, ) -> Result<Message> { let map = SendMessage::__build(f); let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/channels/{}/messages", channel); from_reader(response) } /// Edit a previously posted message. /// /// Requires that either the message was posted by this user, or this user /// has permission to manage other members' messages. /// /// Not all fields can be edited; see the [docs] for more. /// [docs]: https://discord.com/developers/docs/resources/channel#edit-message pub fn edit_message_ex<F: FnOnce(SendMessage) -> SendMessage>( &self, channel: ChannelId, message: MessageId, f: F, ) -> Result<Message> { let map = SendMessage::__build(f); let body = serde_json::to_string(&map)?; let response = request!( self, patch(body), "/channels/{}/messages/{}", channel, message ); from_reader(response) } /// Send a message to a given channel. /// /// The `nonce` will be returned in the result and also transmitted to other /// clients. The empty string is a good default if you don't care. pub fn send_message( &self, channel: ChannelId, text: &str, nonce: &str, tts: bool, ) -> Result<Message> { self.send_message_ex(channel, |b| b.content(text).nonce(nonce).tts(tts)) } /// Edit a previously posted message. /// /// Requires that either the message was posted by this user, or this user /// has permission to manage other members' messages. pub fn edit_message( &self, channel: ChannelId, message: MessageId, text: &str, ) -> Result<Message> { self.edit_message_ex(channel, message, |b| b.content(text)) } /// Delete a previously posted message. /// /// Requires that either the message was posted by this user, or this user /// has permission to manage other members' messages. pub fn delete_message(&self, channel: ChannelId, message: MessageId) -> Result<()> { check_empty(request!( self, delete, "/channels/{}/messages/{}", channel, message )) } /// Bulk deletes a list of `MessageId`s from a given channel. /// /// A minimum of 2 unique messages and a maximum of 100 unique messages may /// be supplied, otherwise an `Error::Other` will be returned. /// /// Each MessageId *should* be unique as duplicates will be removed from the /// array before being sent to the Discord API. /// /// Only bots can use this endpoint. Regular user accounts can not use this /// endpoint under any circumstance. /// /// Requires that either the message was posted by this user, or this user /// has permission to manage other members' messages. pub fn delete_messages(&self, channel: ChannelId, messages: &[MessageId]) -> Result<()> { // Create a Vec of the underlying u64's of the message ids, then remove // duplicates in it. let mut ids: Vec<u64> = messages.into_iter().map(|m| m.0).collect(); ids.sort(); ids.dedup(); if ids.len() < 2 { return Err(Error::Other("A minimum of 2 message ids must be supplied")); } else if ids.len() > 100 { return Err(Error::Other("A maximum of 100 message ids may be supplied")); } let map = json! {{ "messages": ids }}; let body = serde_json::to_string(&map)?; check_empty(request!( self, post(body), "/channels/{}/messages/bulk_delete", channel )) } /// Send some embedded rich content attached to a message on a given channel. /// /// See the `EmbedBuilder` struct for the editable fields. /// `text` may be empty. 
pub fn send_embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>( &self, channel: ChannelId, text: &str, f: F, ) -> Result<Message> { self.send_message_ex(channel, |b| b.content(text).embed(f)) } /// Edit the embed portion of a previously posted message. /// /// The text is unmodified, but the previous embed is entirely replaced. pub fn edit_embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>( &self, channel: ChannelId, message: MessageId, f: F, ) -> Result<Message> { self.edit_message_ex(channel, message, |b| b.embed(f)) } /// Send a file attached to a message on a given channel. /// /// The `text` is allowed to be empty, but the filename must always be specified. pub fn send_file<R: ::std::io::Read>( &self, channel: ChannelId, text: &str, mut file: R, filename: &str, ) -> Result<Message> { use std::io::Write; let url = match hyper::Url::parse(&format!(api_concat!("/channels/{}/messages"), channel)) { Ok(url) => url, Err(_) => return Err(Error::Other("Invalid URL in send_file")), }; // NB: We're NOT using the Hyper integration of multipart in order not to wrestle with the openssl-sys dependency hell. let cr = multipart::mock::ClientRequest::default(); let mut multi = multipart::client::Multipart::from_request(cr)?; multi.write_text("content", text)?; multi.write_stream("file", &mut file, Some(filename), None)?; let http_buffer: multipart::mock::HttpBuffer = multi.send()?; fn multipart_mime(bound: &str) -> hyper::mime::Mime { use hyper::mime::{Attr, Mime, SubLevel, TopLevel, Value}; Mime( TopLevel::Multipart, SubLevel::Ext("form-data".into()), vec![(Attr::Ext("boundary".into()), Value::Ext(bound.into()))], ) } let tls = hyper_native_tls::NativeTlsClient::new().expect("Error initializing NativeTlsClient"); let connector = hyper::net::HttpsConnector::new(tls); let mut request = hyper::client::Request::with_connector(hyper::method::Method::Post, url, &connector)?; request .headers_mut() .set(hyper::header::Authorization(self.token.clone())); request .headers_mut() .set(hyper::header::UserAgent(USER_AGENT.to_owned())); request .headers_mut() .set(hyper::header::ContentType(multipart_mime( &http_buffer.boundary, ))); let mut request = request.start()?; request.write(&http_buffer.buf[..])?; Message::decode(serde_json::from_reader(check_status(request.send())?)?) } /// Acknowledge this message as "read" by this client. pub fn ack_message(&self, channel: ChannelId, message: MessageId) -> Result<()> { check_empty(request!( self, post, "/channels/{}/messages/{}/ack", channel, message )) } /// Create permissions for a `Channel` for a `Member` or `Role`. /// /// # Examples /// /// An example of creating channel role permissions for a `Member`: /// /// ```ignore /// use discord::model::{PermissionOverwriteType, permissions}; /// /// // Assuming that a `Discord` instance, member, and channel have already /// // been defined previously. /// let target = PermissionOverwrite { /// kind: PermissionOverwriteType::Member(member.user.id), /// allow: permissions::VOICE_CONNECT | permissions::VOICE_SPEAK, /// deny: permissions::VOICE_MUTE_MEMBERS | permissions::VOICE_MOVE_MEMBERS, /// }; /// let result = discord.create_permission(channel.id, target); /// ``` /// /// The same can similarly be accomplished for a `Role`: /// /// ```ignore /// use discord::model::{PermissionOverwriteType, permissions}; /// /// // Assuming that a `Discord` instance, role, and channel have already /// // been defined previously.
/// let target = PermissionOverwrite { /// kind: PermissionOverwriteType::Role(role.id), /// allow: permissions::VOICE_CONNECT | permissions::VOICE_SPEAK, /// deny: permissions::VOICE_MUTE_MEMBERS | permissions::VOICE_MOVE_MEMBERS, /// }; /// let result = discord.create_permission(channel.id, target); /// ``` pub fn create_permission(&self, channel: ChannelId, target: PermissionOverwrite) -> Result<()> { let (id, kind) = match target.kind { PermissionOverwriteType::Member(id) => (id.0, "member"), PermissionOverwriteType::Role(id) => (id.0, "role"), }; let map = json! {{ "id": id, "kind": kind, "allow": target.allow.bits(), "deny": target.deny.bits(), }}; let body = serde_json::to_string(&map)?; check_empty(request!( self, put(body), "/channels/{}/permissions/{}", channel, id )) } /// Delete a `Member` or `Role`'s permissions for a `Channel`. /// /// # Examples /// /// Delete a `Member`'s permissions for a `Channel`: /// /// ```ignore /// use discord::model::PermissionOverwriteType; /// /// // Assuming that a `Discord` instance, channel, and member have already /// // been previously defined. /// let target = PermissionOverwriteType::Member(member.user.id); /// let response = discord.delete_permission(channel.id, target); /// ``` /// /// The same can be accomplished for a `Role` similarly: /// /// ```ignore /// use discord::model::PermissionOverwriteType; /// /// // Assuming that a `Discord` instance, channel, and role have already /// // been previously defined. /// let target = PermissionOverwriteType::Role(role.id); /// let response = discord.delete_permission(channel.id, target); /// ``` pub fn delete_permission( &self, channel: ChannelId, permission_type: PermissionOverwriteType, ) -> Result<()> { let id = match permission_type { PermissionOverwriteType::Member(id) => id.0, PermissionOverwriteType::Role(id) => id.0, }; check_empty(request!( self, delete, "/channels/{}/permissions/{}", channel, id )) } /// Add a `Reaction` to a `Message`. /// /// # Examples /// Add a unicode emoji to a `Message`: /// /// ```ignore /// // Assuming that a `Discord` instance, channel, message have /// // already been previously defined. /// use discord::model::ReactionEmoji; /// /// let _ = discord.add_reaction(&channel.id, message.id, ReactionEmoji::Unicode("👌".to_string())); /// ``` /// /// Add a custom emoji to a `Message`: /// /// ```ignore /// // Assuming that a `Discord` instance, channel, message have /// // already been previously defined. /// use discord::model::{EmojiId, ReactionEmoji}; /// /// let _ = discord.add_reaction(&channel.id, message.id, ReactionEmoji::Custom { /// name: "ThisIsFine", /// id: EmojiId(1234) /// }); /// ``` /// /// Requires the `ADD_REACTIONS` permission. pub fn add_reaction( &self, channel: ChannelId, message: MessageId, emoji: ReactionEmoji, ) -> Result<()> { let emoji = match emoji { ReactionEmoji::Custom { name, id } => format!("{}:{}", name, id.0), ReactionEmoji::Unicode(name) => name, }; check_empty(request!( self, put, "/channels/{}/messages/{}/reactions/{}/@me", channel, message, emoji )) } /// Delete a `Reaction` from a `Message`. /// /// # Examples /// Delete a `Reaction` from a `Message` (unicode emoji): /// /// ```ignore /// // Assuming that a `Discord` instance, channel, message, state have /// // already been previously defined.
/// use discord::model::ReactionEmoji; /// /// let _ = discord.delete_reaction(&channel.id, message.id, None, ReactionEmoji::Unicode("👌".to_string())); /// ``` /// /// Delete your `Reaction` from a `Message` (custom emoji): /// /// ```ignore /// // Assuming that a `Discord` instance, channel, message have /// // already been previously defined. /// use discord::model::ReactionEmoji; /// /// let _ = discord.delete_reaction(&channel.id, message.id, None, ReactionEmoji::Custom { /// name: "ThisIsFine", /// id: EmojiId(1234) /// }); /// ``` /// /// Delete someone else's `Reaction` from a `Message` (custom emoji): /// /// ```ignore /// // Assuming that a `Discord` instance, channel, message have /// // already been previously defined. /// use discord::model::{EmojiId, ReactionEmoji}; /// /// let _ = discord.delete_reaction(&channel.id, message.id, Some(UserId(1234)), ReactionEmoji::Custom { /// name: "ThisIsFine", /// id: EmojiId(1234) /// }); /// ``` /// /// Requires `MANAGE_MESSAGES` if deleting someone else's `Reaction`. pub fn delete_reaction( &self, channel: ChannelId, message: MessageId, user_id: Option<UserId>, emoji: ReactionEmoji, ) -> Result<()> { let emoji = match emoji { ReactionEmoji::Custom { name, id } => format!("{}:{}", name, id.0), ReactionEmoji::Unicode(name) => name, }; let endpoint = format!( "/channels/{}/messages/{}/reactions/{}/{}", channel, message, emoji, match user_id { Some(id) => id.0.to_string(), None => "@me".to_string(), } ); check_empty(request!(self, delete, "{}", endpoint)) } /// Get reactors for the `Emoji` in a `Message`. /// /// The default `limit` is 50. The optional value of `after` is the ID of /// the user to retrieve the next reactions after. pub fn get_reactions( &self, channel: ChannelId, message: MessageId, emoji: ReactionEmoji, limit: Option<i32>, after: Option<UserId>, ) -> Result<Vec<User>> { let emoji = match emoji { ReactionEmoji::Custom { name, id } => format!("{}:{}", name, id.0), ReactionEmoji::Unicode(name) => name, }; let mut endpoint = format!( "/channels/{}/messages/{}/reactions/{}?limit={}", channel, message, emoji, limit.unwrap_or(50) ); if let Some(amount) = after { use std::fmt::Write; let _ = write!(endpoint, "&after={}", amount); } let response = request!(self, get, "{}", endpoint); from_reader(response) } /// Get the list of servers this user knows about. pub fn get_servers(&self) -> Result<Vec<ServerInfo>> { let response = request!(self, get, "/users/@me/guilds"); from_reader(response) } /// Gets a specific server. pub fn get_server(&self, server_id: ServerId) -> Result<Server> { let response = request!(self, get, "/guilds/{}", server_id); from_reader(response) } /// Gets the list of a specific server's members. pub fn get_server_members(&self, server_id: ServerId) -> Result<Vec<User>> { let response = request!(self, get, "/guilds/{}/members", server_id); from_reader(response) } /// Create a new server with the given name. pub fn create_server(&self, name: &str, region: &str, icon: Option<&str>) -> Result<Server> { let map = json! {{ "name": name, "region": region, "icon": icon, }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/guilds"); from_reader(response) } /// Edit a server's information. See `EditServer` for the editable fields. 
/// /// ```ignore /// // Rename a server /// discord.edit_server(server_id, |server| server.name("My Cool Server")); /// // Edit many properties at once /// discord.edit_server(server_id, |server| server /// .name("My Cool Server") /// .icon(Some("data:image/jpg;base64,...")) /// .afk_timeout(300) /// .region("us-south") /// ); /// ``` pub fn edit_server<F: FnOnce(EditServer) -> EditServer>( &self, server_id: ServerId, f: F, ) -> Result<Server> { let map = EditServer::__build(f); let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/guilds/{}", server_id); from_reader(response) } /// Leave the given server. pub fn leave_server(&self, server: ServerId) -> Result<Server> { let response = request!(self, delete, "/users/@me/guilds/{}", server); from_reader(response) } /// Delete the given server. Only available to the server owner. pub fn delete_server(&self, server: ServerId) -> Result<Server> { let response = request!(self, delete, "/guilds/{}", server); from_reader(response) } /// Creates an emoji in a server. /// /// `read_image` may be used to build an `image` string. Requires that the /// logged in account be a user and have the `ADMINISTRATOR` or /// `MANAGE_EMOJIS` permission. pub fn create_emoji(&self, server: ServerId, name: &str, image: &str) -> Result<Emoji> { let map = json! {{ "name": name, "image": image, }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/guilds/{}/emojis", server); from_reader(response) } /// Edits a server's emoji. /// /// Requires that the logged in account be a user and have the /// `ADMINISTRATOR` or `MANAGE_EMOJIS` permission. pub fn edit_emoji(&self, server: ServerId, emoji: EmojiId, name: &str) -> Result<Emoji> { let map = json! {{ "name": name }}; let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/guilds/{}/emojis/{}", server, emoji); from_reader(response) } /// Delete an emoji in a server. /// /// Requires that the logged in account be a user and have the /// `ADMINISTRATOR` or `MANAGE_EMOJIS` permission. pub fn delete_emoji(&self, server: ServerId, emoji: EmojiId) -> Result<()> { check_empty(request!( self, delete, "/guilds/{}/emojis/{}", server, emoji )) } /// Get the ban list for the given server. pub fn get_bans(&self, server: ServerId) -> Result<Vec<Ban>> { let response = request!(self, get, "/guilds/{}/bans", server); from_reader(response) } /// Ban a user from the server, optionally deleting their recent messages. /// /// Zero may be passed for `delete_message_days` if no deletion is desired. pub fn add_ban(&self, server: ServerId, user: UserId, delete_message_days: u32) -> Result<()> { check_empty(request!( self, put, "/guilds/{}/bans/{}?delete_message_days={}", server, user, delete_message_days )) } /// Unban a user from the server. pub fn remove_ban(&self, server: ServerId, user: UserId) -> Result<()> { check_empty(request!(self, delete, "/guilds/{}/bans/{}", server, user)) } /// Extract information from an invite. /// /// The invite should either be a URL of the form `http://discord.gg/CODE`, /// or a string containing just the `CODE`. pub fn get_invite(&self, invite: &str) -> Result<Invite> { let invite = resolve_invite(invite); let response = request!(self, get, "/invite/{}", invite); Invite::decode(serde_json::from_reader(response)?) } /// Get the active invites for a server. 
pub fn get_server_invites(&self, server: ServerId) -> Result<Vec<RichInvite>> { let response = request!(self, get, "/guilds/{}/invites", server); decode_array(serde_json::from_reader(response)?, RichInvite::decode) } /// Get the active invites for a channel. pub fn get_channel_invites(&self, channel: ChannelId) -> Result<Vec<RichInvite>> { let response = request!(self, get, "/channels/{}/invites", channel); decode_array(serde_json::from_reader(response)?, RichInvite::decode) } /// Accept an invite. See `get_invite` for details. pub fn accept_invite(&self, invite: &str) -> Result<Invite> { let invite = resolve_invite(invite); let response = request!(self, post, "/invite/{}", invite); Invite::decode(serde_json::from_reader(response)?) } /// Create an invite to a channel. /// /// Passing 0 for `max_age` or `max_uses` means no limit. `max_age` should /// be specified in seconds. pub fn create_invite( &self, channel: ChannelId, max_age: u64, max_uses: u64, temporary: bool, ) -> Result<RichInvite> { let map = json! {{ "validate": null, "max_age": max_age, "max_uses": max_uses, "temporary": temporary, }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/channels/{}/invites", channel); RichInvite::decode(serde_json::from_reader(response)?) } /// Delete an invite. See `get_invite` for details. pub fn delete_invite(&self, invite: &str) -> Result<Invite> { let invite = resolve_invite(invite); let response = request!(self, delete, "/invite/{}", invite); Invite::decode(serde_json::from_reader(response)?) } /// Retrieve a member object for a server given the member's user id. pub fn get_member(&self, server: ServerId, user: UserId) -> Result<Member> { let response = request!(self, get, "/guilds/{}/members/{}", server, user); from_reader(response) } /// Edit the list of roles assigned to a member of a server. pub fn edit_member_roles( &self, server: ServerId, user: UserId, roles: &[RoleId], ) -> Result<()> { self.edit_member(server, user, |m| m.roles(roles)) } /// Add a role to a member of a server. pub fn add_member_role(&self, server: ServerId, user: UserId, role: RoleId) -> Result<()> { check_empty(request!( self, put, "/guilds/{}/members/{}/roles/{}", server, user, role )) } /// Remove a role for a member of a server. pub fn remove_member_role(&self, server: ServerId, user: UserId, role: RoleId) -> Result<()> { check_empty(request!( self, delete, "/guilds/{}/members/{}/roles/{}", server, user, role )) } /// Edit member information, including roles, nickname, and voice state. /// /// See the `EditMember` struct for the editable fields. pub fn edit_member<F: FnOnce(EditMember) -> EditMember>( &self, server: ServerId, user: UserId, f: F, ) -> Result<()> { let map = EditMember::__build(f); let body = serde_json::to_string(&map)?; check_empty(request!( self, patch(body), "/guilds/{}/members/{}", server, user )) } /// Nickname current user. /// /// Similar to `edit_member` pub fn edit_nickname(&self, server: ServerId, nick: &str) -> Result<()> { let map = json! {{ "nick": nick }}; let body = serde_json::to_string(&map)?; check_empty(request!( self, patch(body), "/guilds/{}/members/@me/nick", server )) } /// Kick a member from a server. pub fn kick_member(&self, server: ServerId, user: UserId) -> Result<()> { check_empty(request!( self, delete, "/guilds/{}/members/{}", server, user )) } /// Retrieve the list of roles for a server. 
pub fn get_roles(&self, server: ServerId) -> Result<Vec<Role>> { let response = request!(self, get, "/guilds/{}/roles", server); decode_array(serde_json::from_reader(response)?, Role::decode) } /// Create a new role on a server. pub fn create_role( &self, server: ServerId, name: Option<&str>, permissions: Option<Permissions>, color: Option<u64>, hoist: Option<bool>, mentionable: Option<bool>, ) -> Result<Role> { let map = json! {{ "name": name, "permissions": permissions, "color": color, "hoist": hoist, "mentionable": mentionable, }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/guilds/{}/roles", server); Role::decode(serde_json::from_reader(response)?) } /// Create a new role on a server. pub fn create_role_from_builder<F: FnOnce(EditRole) -> EditRole>( &self, server: ServerId, f: F, ) -> Result<Role> { let map = EditRole::__build(f); let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/guilds/{}/roles", server); Role::decode(serde_json::from_reader(response)?) } /// Modify a role on a server. pub fn edit_role<F: FnOnce(EditRole) -> EditRole>( &self, server: ServerId, role: RoleId, f: F, ) -> Result<Role> { let map = EditRole::__build(f); let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/guilds/{}/roles/{}", server, role); Role::decode(serde_json::from_reader(response)?) } /// Reorder the roles on a server. pub fn reorder_roles(&self, server: ServerId, roles: &[(RoleId, usize)]) -> Result<Vec<Role>> { let map: serde_json::Value = roles .iter() .map(|&(id, pos)| { json! {{ "id": id, "position": pos }} }) .collect(); let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/guilds/{}/roles", server); decode_array(serde_json::from_reader(response)?, Role::decode) } /// Remove specified role from a server. pub fn delete_role(&self, server: ServerId, role: RoleId) -> Result<()> { check_empty(request!(self, delete, "/guilds/{}/roles/{}", server, role)) } /// Create a private channel with the given user, or return the existing /// one if it exists. pub fn create_private_channel(&self, recipient: UserId) -> Result<PrivateChannel> { let map = json! {{ "recipient_id": recipient }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/users/@me/channels"); PrivateChannel::decode(serde_json::from_reader(response)?) } /// Get the URL at which a user's avatar is located. pub fn get_user_avatar_url(&self, user: UserId, avatar: &str) -> String { format!(api_concat!("/users/{}/avatars/{}.jpg"), user, avatar) } /// Download a user's avatar. pub fn get_user_avatar(&self, user: UserId, avatar: &str) -> Result<Vec<u8>> { use std::io::Read; let mut response = retry(|| self.client.get(&self.get_user_avatar_url(user, avatar)))?; let mut vec = Vec::new(); response.read_to_end(&mut vec)?; Ok(vec) } /// Get information about a user. /// https://discord.com/developers/docs/resources/user#get-user pub fn get_user(&self, user: UserId) -> Result<User> { let response = request!(self, get, "/users/{}", user); from_reader(response) } /// Create a new DM channel with a user. /// https://discord.com/developers/docs/resources/user#create-dm pub fn create_dm(&self, recipient_id: UserId) -> Result<PrivateChannel> { let map = json! 
{{ "recipient_id": recipient_id.0, }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/users/@me/channels"); let json: serde_json::Value = from_reader(response)?; PrivateChannel::decode(json) } /// Get the logged-in user's profile. pub fn get_current_user(&self) -> Result<CurrentUser> { let response = request!(self, get, "/users/@me"); from_reader(response) } /// Edit the logged-in bot or user's profile. See `EditProfile` for editable fields. /// /// Usable for bot and user accounts. Only allows updating the username and /// avatar. pub fn edit_profile<F: FnOnce(EditProfile) -> EditProfile>(&self, f: F) -> Result<CurrentUser> { // First, get the current profile, so that providing username and avatar is optional. let response = request!(self, get, "/users/@me"); let user: CurrentUser = from_reader(response)?; let mut map = Object::new(); map.insert("username".into(), json!(user.username)); map.insert("avatar".into(), json!(user.avatar)); // Then, send the profile patch. let map = EditProfile::__apply(f, map); let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/users/@me"); from_reader(response) } /// Edit the logged-in non-bot user's profile. See `EditUserProfile` for editable fields. /// /// Usable only for user (non-bot) accounts. Requires mutable access in order /// to keep the login token up to date in the event of a password change. pub fn edit_user_profile<F: FnOnce(EditUserProfile) -> EditUserProfile>( &mut self, f: F, ) -> Result<CurrentUser> { // First, get the current profile, so that providing username and avatar is optional. let response = request!(self, get, "/users/@me"); let user: CurrentUser = from_reader(response)?; if user.bot { return Err(Error::Other( "Cannot call edit_user_profile on a bot account", )); } let mut map = Object::new(); map.insert("username".into(), json!(user.username)); map.insert("avatar".into(), json!(user.avatar)); if let Some(email) = user.email.as_ref() { map.insert("email".into(), email.as_str().into()); } // Then, send the profile patch. let map = EditUserProfile::__apply(f, map); let body = serde_json::to_string(&map)?; let response = request!(self, patch(body), "/users/@me"); let mut json: Object = serde_json::from_reader(response)?; // If a token was included in the response, switch to it. Important because if the // password was changed, the old token is invalidated. if let Some(serde_json::Value::String(token)) = json.remove("token") { self.token = token; } CurrentUser::decode(serde_json::Value::Object(json)) } /// Get the list of available voice regions for a server. pub fn get_voice_regions(&self) -> Result<Vec<VoiceRegion>> { let response = request!(self, get, "/voice/regions"); from_reader(response) } /// Move a server member to another voice channel. pub fn move_member_voice( &self, server: ServerId, user: UserId, channel: ChannelId, ) -> Result<()> { let map = json! {{ "channel_id": channel }}; let body = serde_json::to_string(&map)?; check_empty(request!( self, patch(body), "/guilds/{}/members/{}", server, user )) } /// Start a prune operation, kicking members who have been inactive for the /// specified number of days. Members with a role assigned will never be /// pruned. pub fn begin_server_prune(&self, server: ServerId, days: u16) -> Result<ServerPrune> { let map = json! 
{{ "days": days }}; let body = serde_json::to_string(&map)?; let response = request!(self, post(body), "/guilds/{}/prune", server); from_reader(response) } /// Get the number of members who have been inactive for the specified /// number of days and would be pruned by a prune operation. Members with a /// role assigned will never be pruned. pub fn get_server_prune_count(&self, server: ServerId, days: u16) -> Result<ServerPrune> { let map = json! {{ "days": days }}; let body = serde_json::to_string(&map)?; let response = request!(self, get(body), "/guilds/{}/prune", server); from_reader(response) } /// Sets a note for the user that is readable only to the currently logged /// in user. /// /// This endpoint is only available for users, and so does not work for /// bots. pub fn edit_note(&self, user: UserId, note: &str) -> Result<()> { let map = json! {{ "note": note }}; let body = serde_json::to_string(&map)?; check_empty(request!(self, put(body), "/users/@me/notes/{}", user)) } /// Retrieves information about the application and the owner. pub fn get_application_info(&self) -> Result<ApplicationInfo> { let response = request!(self, get, "/oauth2/applications/@me"); from_reader(response) } /// Retrieves the number of guild shards Discord suggests to use based on /// the number of guilds. /// /// This endpoint is only available for bots. pub fn suggested_shard_count(&self) -> Result<u64> { let response = request!(self, get, "/gateway/bot"); let mut value: Object = serde_json::from_reader(response)?; match value.remove("shards") { Some(value) => match value.as_u64() { Some(shards) => Ok(shards), None => Err(Error::Decode("Invalid \"shards\"", value)), }, None => Err(Error::Decode( "suggested_shard_count missing \"shards\"", serde_json::Value::Object(value), )), } } /// Establish a websocket connection over which events can be received. /// /// Also returns the `ReadyEvent` sent by Discord upon establishing the /// connection, which contains the initial state as seen by the client. /// /// See `connect_sharded` if you want to use guild sharding. pub fn connect(&self) -> Result<(Connection, ReadyEvent)> { self.connection_builder()?.connect() } /// Establish a sharded websocket connection over which events can be /// received. /// /// The `shard_id` is indexed at 0 while `total_shards` is indexed at 1. /// /// Also returns the `ReadyEvent` sent by Discord upon establishing the /// connection, which contains the initial state as seen by the client. /// /// See `connect` if you do not want to use guild sharding. pub fn connect_sharded( &self, shard_id: u8, total_shards: u8, ) -> Result<(Connection, ReadyEvent)> { self.connection_builder()?.with_shard(shard_id, total_shards).connect() } /// Prepare to establish a websocket connection over which events can be /// received. pub fn connection_builder(&self) -> Result<connection::ConnectionBuilder> { let url = self.get_gateway_url()?; Ok(connection::ConnectionBuilder::new(url, &self.token)) } fn get_gateway_url(&self) -> Result<String> { let response = request!(self, get, "/gateway"); let mut value: BTreeMap<String, String> = serde_json::from_reader(response)?; match value.remove("url") { Some(url) => Ok(url), None => Err(Error::Protocol("Response missing \"url\" in Discord::get_gateway_url()")) } } } fn from_reader<T: serde::de::DeserializeOwned, R: std::io::Read>(r: R) -> Result<T> { serde_json::from_reader(r).map_err(From::from) } /// Read an image from a file into a string suitable for upload. 
/// /// If the file's extension is `.png`, the claimed media type will be `image/png`, or `image/jpg` /// otherwise. Note that Discord may convert the image to JPEG or another format after upload. pub fn read_image<P: AsRef<::std::path::Path>>(path: P) -> Result<String> { use std::io::Read; let path = path.as_ref(); let mut vec = Vec::new(); std::fs::File::open(path)?.read_to_end(&mut vec)?; Ok(format!( "data:image/{};base64,{}", if path.extension() == Some("png".as_ref()) { "png" } else { "jpg" }, base64::encode(&vec), )) } /// Retrieves the current unresolved incidents from the status page. pub fn get_unresolved_incidents() -> Result<Vec<Incident>> { let client = tls_client(); let response = retry(|| client.get(status_concat!("/incidents/unresolved.json")))?; let mut json: Object = serde_json::from_reader(response)?; match json.remove("incidents") { Some(incidents) => decode_array(incidents, Incident::decode), None => Ok(vec![]), } } /// Retrieves the active maintenances from the status page. pub fn get_active_maintenances() -> Result<Vec<Maintenance>> { let client = tls_client(); let response = check_status(retry(|| { client.get(status_concat!("/scheduled-maintenances/active.json")) }))?; let mut json: Object = serde_json::from_reader(response)?; match json.remove("scheduled_maintenances") { Some(scheduled_maintenances) => decode_array(scheduled_maintenances, Maintenance::decode), None => Ok(vec![]), } } /// Retrieves the upcoming maintenances from the status page. pub fn get_upcoming_maintenances() -> Result<Vec<Maintenance>> { let client = tls_client(); let response = check_status(retry(|| { client.get(status_concat!("/scheduled-maintenances/upcoming.json")) }))?; let mut json: Object = serde_json::from_reader(response)?; match json.remove("scheduled_maintenances") { Some(scheduled_maintenances) => decode_array(scheduled_maintenances, Maintenance::decode), None => Ok(vec![]), } } /// Argument to `get_messages` to specify the desired message retrieval. pub enum GetMessages { /// Get the N most recent messages. MostRecent, /// Get the first N messages before the specified message. Before(MessageId), /// Get the first N messages after the specified message. After(MessageId), /// Get N/2 messages before, N/2 messages after, and the specified message. Around(MessageId), } /// Send a request with the correct `UserAgent`, retrying it a second time if the /// connection is aborted the first time. fn retry<'a, F: Fn() -> hyper::client::RequestBuilder<'a>>( f: F, ) -> hyper::Result<hyper::client::Response> { let f2 = || { f().header(hyper::header::UserAgent(USER_AGENT.to_owned())) .send() }; // retry on a ConnectionAborted, which occurs if it's been a while since the last request match f2() { Err(hyper::error::Error::Io(ref io)) if io.kind() == std::io::ErrorKind::ConnectionAborted => { f2() } other => other, } } /// Convert non-success hyper statuses to discord crate errors, tossing info. fn check_status( response: hyper::Result<hyper::client::Response>, ) -> Result<hyper::client::Response> { let response: hyper::client::Response = response?; if !response.status.is_success() { return Err(Error::from_response(response)); } Ok(response) } /// Validate a request that is expected to return 204 No Content and print /// debug information if it does not. 
fn check_empty(mut response: hyper::client::Response) -> Result<()> { if response.status != hyper::status::StatusCode::NoContent { use std::io::Read; debug!("Expected 204 No Content, got {}", response.status); for header in response.headers.iter() { debug!("Header: {}", header); } let mut content = String::new(); response.read_to_string(&mut content)?; debug!("Content: {}", content); } Ok(()) } fn resolve_invite(invite: &str) -> &str { if invite.starts_with("http://discord.gg/") { &invite[18..] } else if invite.starts_with("https://discord.gg/") { &invite[19..] } else if invite.starts_with("discord.gg/") { &invite[11..] } else { invite } } fn sleep_ms(millis: u64) { std::thread::sleep(time::Duration::from_millis(millis)) } // Timer that remembers when it is supposed to go off struct Timer { next_tick_at: time::Instant, tick_len: time::Duration, } #[cfg_attr(not(feature = "voice"), allow(dead_code))] impl Timer { fn new(tick_len_ms: u64) -> Timer { let tick_len = time::Duration::from_millis(tick_len_ms); Timer { next_tick_at: time::Instant::now() + tick_len, tick_len: tick_len, } } #[allow(dead_code)] fn immediately(&mut self) { self.next_tick_at = time::Instant::now(); } fn defer(&mut self) { self.next_tick_at = time::Instant::now() + self.tick_len; } fn check_tick(&mut self) -> bool { if time::Instant::now() >= self.next_tick_at { s
{ false } } fn sleep_until_tick(&mut self) { let now = time::Instant::now(); if self.next_tick_at > now { std::thread::sleep(self.next_tick_at - now); } self.next_tick_at = self.next_tick_at + self.tick_len; } } trait ReceiverExt { fn recv_json<F, T>(&mut self, decode: F) -> Result<T> where F: FnOnce(serde_json::Value) -> Result<T>; } trait SenderExt { fn send_json(&mut self, value: &serde_json::Value) -> Result<()>; } impl ReceiverExt for websocket::client::Receiver<websocket::stream::WebSocketStream> { fn recv_json<F, T>(&mut self, decode: F) -> Result<T> where F: FnOnce(serde_json::Value) -> Result<T>, { use websocket::message::{Message, Type}; use websocket::ws::receiver::Receiver; let message: Message = self.recv_message()?; if message.opcode == Type::Close { Err(Error::Closed( message.cd_status_code, String::from_utf8_lossy(&message.payload).into_owned(), )) } else if message.opcode == Type::Binary || message.opcode == Type::Text { let mut payload_vec; let payload = if message.opcode == Type::Binary { use std::io::Read; payload_vec = Vec::new(); flate2::read::ZlibDecoder::new(&message.payload[..]) .read_to_end(&mut payload_vec)?; &payload_vec[..] } else { &message.payload[..] }; serde_json::from_reader(payload) .map_err(From::from) .and_then(decode) .map_err(|e| { warn!("Error decoding: {}", String::from_utf8_lossy(payload)); e }) } else { Err(Error::Closed( None, String::from_utf8_lossy(&message.payload).into_owned(), )) } } } impl SenderExt for websocket::client::Sender<websocket::stream::WebSocketStream> { fn send_json(&mut self, value: &serde_json::Value) -> Result<()> { use websocket::message::Message; use websocket::ws::sender::Sender; serde_json::to_string(value) .map(Message::text) .map_err(Error::from) .and_then(|m| self.send_message(&m).map_err(Error::from)) } } mod internal { pub enum Status { SendMessage(::serde_json::Value), Sequence(u64), ChangeInterval(u64), ChangeSender(::websocket::client::Sender<::websocket::stream::WebSocketStream>), Aborted, } }
elf.next_tick_at = self.next_tick_at + self.tick_len; true } else
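Taken together, `from_bot_token`, `connect`, and `send_message` above are enough for a minimal bot. A hedged sketch of typical usage, assuming `Connection::recv_event` and the `Event::MessageCreate` variant from the `connection` and `model` modules, which are not shown here:

```rust
use discord::model::Event;
use discord::Discord;

fn main() {
    // Hypothetical token; in practice read it from the environment.
    let discord = Discord::from_bot_token("TOKEN").expect("login failed");
    let (mut connection, _ready) = discord.connect().expect("connect failed");
    loop {
        // `recv_event` is assumed to block until the gateway delivers an event.
        match connection.recv_event() {
            Ok(Event::MessageCreate(message)) => {
                if message.content == "!ping" {
                    // send_message(channel, text, nonce, tts), as defined above.
                    let _ = discord.send_message(message.channel_id, "pong", "", false);
                }
            }
            Ok(_) => {}
            Err(err) => {
                eprintln!("receive error: {:?}", err);
                break;
            }
        }
    }
}
```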
_grid.py
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import numpy import scipy import scipy.linalg # Exceptions. class OrbitalSpecificationError(Exception): pass class Grid: """ A multi-dimension grid of points with an assigned length scale. This grid acts as a helper class for parallelepiped super cells. It tracks a mapping from indices to grid points and stores the associated reciprocal lattice with respect to the original real-space lattice. This enables calculations with non-trivial unit cells. Attributes: dimensions (int): Number of spatial dimensions the grid occupies. length (tuple of ints): d-length tuple specifying number of points along each dimension. shifts (list of ints): Integer shifts in position to center grid. scale (ndarray): Vectors defining the super cell being simulated, vectors are stored as columns in the matrix. volume (float): Total volume of the supercell parallelepiped. num_points (int): Total number of points in the grid. reciprocal_scale (ndarray): Vectors defining the reciprocal lattice. The vectors are stored as the columns in the matrix. """ def __init__(self, dimensions, length, scale): """ Args: dimensions (int): The number of dimensions the grid lives in. length (int or tuple): The number of points along each grid axis that will be taken in both reciprocal and real space. If tuple, it is read for each dimension, otherwise assumed uniform. scale (float or ndarray): The total length of each grid dimension. If a float is passed, the uniform cubic unit cell is assumed. For an ndarray, dimensions independent vectors of the correct dimension must be passed. We assume column vectors define the supercell vectors. """ if not isinstance(dimensions, int) or dimensions <= 0: raise ValueError( 'dimensions must be a positive int but was {} {}'.format( type(dimensions), repr(dimensions))) if ((not isinstance(length, int) or length < 0) and (not isinstance(length, tuple)) and (not isinstance(length, list))): raise ValueError( 'length must be a non-negative int or tuple ' 'but was {} {}'.format( type(length), repr(length))) if ((not isinstance(scale, float) or not scale > 0) and (not isinstance(scale, numpy.ndarray))): raise ValueError( 'scale must be a positive float or ndarray but was ' '{} {}'.format( type(scale), repr(scale))) self.dimensions = dimensions # If single integer, assume uniform if isinstance(length, int): self.length = (length, ) * dimensions else: self.length = length self.shifts = [self.length[i] // 2 for i in range(dimensions)] # If single float, construct cubic unit cell if isinstance(scale, float): self.scale = numpy.diag([scale] * self.dimensions) else: self.scale = scale # Compute the volume of the super cell self.volume = numpy.abs(scipy.linalg.det(self.scale)) # Compute total number of points self.num_points = numpy.prod(self.length) # Compute the reciprocal lattice basis self.reciprocal_scale = 2 * numpy.pi * scipy.linalg.inv(self.scale).T def volume_scale(self): """ Returns: float: The volume of a length-scale hypercube within the grid.
""" return self.volume def all_points_indices(self): """ Returns: iterable[tuple[int]]: The index-coordinate tuple of each point in the grid. """ return itertools.product(*[range(self.length[i]) for i in range(self.dimensions)]) def position_vector(self, position_indices): """Given grid point coordinate, return position vector with dimensions. Args: position_indices (int|iterable[int]): List or tuple of integers giving grid point coordinate. Allowed values are ints in [0, grid_length). Returns: position_vector (numpy.ndarray[float]) """ # Raise exceptions. if isinstance(position_indices, int): position_indices = [position_indices] if not all(0 <= e < self.length[i] for i, e in enumerate(position_indices)): raise OrbitalSpecificationError( 'Position indices must be integers in [0, grid_length).') # Compute position vector vector = sum([(float(n - self.shifts[i]) / self.length[i]) * self.scale[:, i] for i, n in enumerate(position_indices)]) return vector def momentum_vector(self, momentum_indices, periodic=True): """Given grid point coordinate, return momentum vector with dimensions. Args: momentum_indices (list): integers giving momentum indices. Allowed values are ints in [0, grid_length). periodic (bool): Wrap the momentum indices according to periodicity Returns: momentum_vector: A numpy array giving the momentum vector with dimensions. """ # Raise exceptions. if isinstance(momentum_indices, int): momentum_indices = [momentum_indices] if (not all(0 <= e < self.length[i] for i, e in enumerate(momentum_indices))): raise OrbitalSpecificationError( 'Momentum indices must be integers in [0, grid_length).') # Compute momentum vector. momentum_ints = self.index_to_momentum_ints(momentum_indices) vector = self.momentum_ints_to_value(momentum_ints, periodic) return vector def index_to_momentum_ints(self, index): """ Args: index (tuple): d-dimensional tuple specifying index in the grid Returns: Integer momentum vector """ # Set baseline for grid between [-N//2, N//2] momentum_int = [index[i] - self.shifts[i] for i in range(self.dimensions)] return numpy.array(momentum_int, dtype=int) def momentum_ints_to_index(self, momentum_ints):
def momentum_ints_to_value(self, momentum_ints, periodic=True):
    """
    Args:
        momentum_ints (tuple): d-dimensional tuple of momentum integers
        periodic (bool): Alias the momentum

    Returns:
        ndarray containing the momentum vector.
    """
    # Alias the higher momentum modes
    if periodic:
        momentum_ints = self.index_to_momentum_ints(
            self.momentum_ints_to_index(momentum_ints))

    momentum_vector = sum([n * self.reciprocal_scale[:, i]
                           for i, n in enumerate(momentum_ints)])
    return momentum_vector

def orbital_id(self, grid_coordinates, spin=None):
    """Return the tensor factor of an orbital with given coordinates and
    spin.

    Args:
        grid_coordinates: List or tuple of ints giving coordinates of grid
            element. Acceptable to provide an int (instead of tuple or
            list) for 1D case.
        spin (bool): 0 means spin down and 1 means spin up. If None,
            assume spinless model.

    Returns:
        tensor_factor (int): tensor factor associated with provided
            orbital label.
    """
    # Initialize.
    if isinstance(grid_coordinates, int):
        grid_coordinates = [grid_coordinates]

    # Loop through dimensions of coordinate tuple.
    tensor_factor = 0
    for dimension, grid_coordinate in enumerate(grid_coordinates):

        # Make sure coordinate is an integer in the correct bounds.
        if (isinstance(grid_coordinate, int) and
                grid_coordinate < self.length[dimension]):
            tensor_factor += (grid_coordinate *
                              int(numpy.prod(self.length[:dimension])))
        else:
            # Raise for invalid model.
            raise OrbitalSpecificationError(
                'Invalid orbital coordinates provided.')

    # Account for spin and return.
    if spin is None:
        return tensor_factor
    else:
        tensor_factor *= 2
        tensor_factor += spin
        return tensor_factor

def grid_indices(self, qubit_id, spinless):
    """This function is the inverse of orbital_id.

    Args:
        qubit_id (int): The tensor factor to map to grid indices.
        spinless (bool): Whether to use the spinless model or not.

    Returns:
        grid_indices (numpy.ndarray[int]): The location of the qubit on
            the grid.
    """
    if not (numpy.prod(self.length) * (2 - spinless) > qubit_id >= 0):
        raise OrbitalSpecificationError('Invalid qubit_id provided.')

    # Remove spin degree of freedom if it exists.
    orbital_id = qubit_id
    if not spinless:
        orbital_id //= 2

    # Get grid indices.
    grid_indices = []
    for dimension in range(self.dimensions):
        remainder = (orbital_id %
                     int(numpy.prod(self.length[:dimension + 1])))
        grid_index = (remainder //
                      int(numpy.prod(self.length[:dimension])))
        grid_indices += [grid_index]
    return grid_indices

def __eq__(self, other):
    if not isinstance(other, type(self)):
        return NotImplemented
    return (self.dimensions == other.dimensions and
            (self.scale == other.scale).all() and
            self.length == other.length)

def __ne__(self, other):
    return not self == other

"""
Args:
    momentum_ints (tuple): d-dimensional tuple of momentum integers

Returns:
    d-dimensional tuple of indices
"""
indices = momentum_ints

# Shift to indices
indices = [n + self.shifts[i] for i, n in enumerate(indices)]

# Wrap dimensions
indices = [n % self.length[i] for i, n in enumerate(indices)]

return indices
permanent.py
# # Copyright 2021 Budapest Quantum Computing Group # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from theboss.boson_sampling_utilities.permanent_calculators.glynn_gray_permanent_calculator import ( # noqa: E501 GlynnGrayPermanentCalculator, ) def
(matrix, rows, columns, calculator_class): calculator = calculator_class(matrix, rows, columns) return calculator.compute_permanent() def glynn_gray_permanent(matrix, rows, columns): return _permanent( matrix, rows, columns, calculator_class=GlynnGrayPermanentCalculator )
_permanent
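# --- Editor's usage sketch (not part of the original file) ---
# A hypothetical call of glynn_gray_permanent above, assuming the `theboss`
# package is installed; the 2 x 2 matrix is a made-up value, and the
# occupation vectors [1, 1] / [1, 1] are taken to mean "use every row and
# every column once", i.e. the permanent of the full matrix.
import numpy as np

example_matrix = np.array([[1, 2],
                           [3, 4]], dtype=complex)
value = glynn_gray_permanent(example_matrix, [1, 1], [1, 1])
# For a 2 x 2 matrix the permanent is ad + bc = 1*4 + 2*3 = 10.
print(value)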
n0021_merge_two_sorted_lists.rs
/** * [21] Merge Two Sorted Lists * * Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists. * * Example: * * Input: 1->2->4, 1->3->4 * Output: 1->1->2->3->4->4 * * */ pub struct Solution {} use super::util::linked_list::{to_list, ListNode}; // submission codes start here use std::mem::replace; impl Solution { pub fn merge_two_lists( l1: Option<Box<ListNode>>, l2: Option<Box<ListNode>>, ) -> Option<Box<ListNode>> { let mut dummy_head = Some(Box::new(ListNode { val: 0, next: None })); let mut prev = &mut dummy_head; let mut lh = &l1; let mut rh = &l2; while lh.is_some() || rh.is_some() { let cur = match (lh, rh) { (Some(a), Some(b)) => { if a.val > b.val { replace(&mut rh, &b.next); Some(ListNode::new(b.val)) } else { replace(&mut lh, &a.next); Some(ListNode::new(a.val)) } } (Some(a), None) => { replace(&mut lh, &a.next); Some(ListNode::new(a.val)) } (None, Some(b)) => { replace(&mut rh, &b.next); Some(ListNode::new(b.val)) } (_, _) => None, }; if let Some(prev_box) = prev { if let Some(cur) = cur { replace(&mut prev_box.next, Some(Box::new(cur))); } prev = &mut prev_box.next; } } dummy_head.unwrap().next } } // submission codes end #[cfg(test)] mod tests {
assert_eq!( Solution::merge_two_lists(to_list(vec![1, 2, 4]), to_list(vec![1, 3, 4])), to_list(vec![1, 1, 2, 3, 4, 4]) ); } }
use super::*; #[test] fn test_21() {
modify_i_pv6_translator_entry.go
package vpc //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // ModifyIPv6TranslatorEntry invokes the vpc.ModifyIPv6TranslatorEntry API synchronously // api document: https://help.aliyun.com/api/vpc/modifyipv6translatorentry.html func (client *Client) ModifyIPv6TranslatorEntry(request *ModifyIPv6TranslatorEntryRequest) (response *ModifyIPv6TranslatorEntryResponse, err error) { response = CreateModifyIPv6TranslatorEntryResponse() err = client.DoAction(request, response) return } // ModifyIPv6TranslatorEntryWithChan invokes the vpc.ModifyIPv6TranslatorEntry API asynchronously // api document: https://help.aliyun.com/api/vpc/modifyipv6translatorentry.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) ModifyIPv6TranslatorEntryWithChan(request *ModifyIPv6TranslatorEntryRequest) (<-chan *ModifyIPv6TranslatorEntryResponse, <-chan error) { responseChan := make(chan *ModifyIPv6TranslatorEntryResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.ModifyIPv6TranslatorEntry(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // ModifyIPv6TranslatorEntryWithCallback invokes the vpc.ModifyIPv6TranslatorEntry API asynchronously // api document: https://help.aliyun.com/api/vpc/modifyipv6translatorentry.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) ModifyIPv6TranslatorEntryWithCallback(request *ModifyIPv6TranslatorEntryRequest, callback func(response *ModifyIPv6TranslatorEntryResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *ModifyIPv6TranslatorEntryResponse var err error defer close(result) response, err = client.ModifyIPv6TranslatorEntry(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // ModifyIPv6TranslatorEntryRequest is the request struct for api ModifyIPv6TranslatorEntry type ModifyIPv6TranslatorEntryRequest struct { *requests.RpcRequest BackendIpv4Port requests.Integer `position:"Query" name:"BackendIpv4Port"` ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` EntryName string `position:"Query" name:"EntryName"` AclStatus string `position:"Query" name:"AclStatus"` EntryBandwidth requests.Integer `position:"Query" name:"EntryBandwidth"` AclType string `position:"Query" name:"AclType"` AllocateIpv6Port requests.Integer `position:"Query" name:"AllocateIpv6Port"` EntryDescription string `position:"Query" name:"EntryDescription"` BackendIpv4Addr string 
`position:"Query" name:"BackendIpv4Addr"` AclId string `position:"Query" name:"AclId"` Ipv6TranslatorEntryId string `position:"Query" name:"Ipv6TranslatorEntryId"` ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` OwnerAccount string `position:"Query" name:"OwnerAccount"` OwnerId requests.Integer `position:"Query" name:"OwnerId"` TransProtocol string `position:"Query" name:"TransProtocol"` } // ModifyIPv6TranslatorEntryResponse is the response struct for api ModifyIPv6TranslatorEntry type ModifyIPv6TranslatorEntryResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` } // CreateModifyIPv6TranslatorEntryRequest creates a request to invoke ModifyIPv6TranslatorEntry API func CreateModifyIPv6TranslatorEntryRequest() (request *ModifyIPv6TranslatorEntryRequest) { request = &ModifyIPv6TranslatorEntryRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Vpc", "2016-04-28", "ModifyIPv6TranslatorEntry", "Vpc", "openAPI") return } // CreateModifyIPv6TranslatorEntryResponse creates a response to parse from ModifyIPv6TranslatorEntry response func CreateModifyIPv6TranslatorEntryResponse() (response *ModifyIPv6TranslatorEntryResponse)
{ response = &ModifyIPv6TranslatorEntryResponse{ BaseResponse: &responses.BaseResponse{}, } return }
index.js
import Fetcher from './main' Object.freeze(Fetcher) export default Fetcher
expire.go
package policy import ( "context" "strings" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/snapshot" ) // ApplyRetentionPolicy applies retention policy to a given source by deleting expired snapshots. func ApplyRetentionPolicy(ctx context.Context, rep *repo.Repository, sourceInfo snapshot.SourceInfo, reallyDelete bool) ([]*snapshot.Manifest, error) { snapshots, err := snapshot.ListSnapshots(ctx, rep, sourceInfo) if err != nil { return nil, err } toDelete, err := getExpiredSnapshots(ctx, rep, snapshots) if err != nil { return nil, err } if reallyDelete { for _, it := range toDelete { if err := rep.Manifests.Delete(ctx, it.ID); err != nil { return toDelete, err } } } return toDelete, nil } func getExpiredSnapshots(ctx context.Context, rep *repo.Repository, snapshots []*snapshot.Manifest) ([]*snapshot.Manifest, error) { var toDelete []*snapshot.Manifest for _, snapshotGroup := range snapshot.GroupBySource(snapshots) { td, err := getExpiredSnapshotsForSource(ctx, rep, snapshotGroup) if err != nil { return nil, err } toDelete = append(toDelete, td...) } return toDelete, nil } func getExpiredSnapshotsForSource(ctx context.Context, rep *repo.Repository, snapshots []*snapshot.Manifest) ([]*snapshot.Manifest, error)
{ src := snapshots[0].Source pol, _, err := GetEffectivePolicy(ctx, rep, src) if err != nil { return nil, err } pol.RetentionPolicy.ComputeRetentionReasons(snapshots) var toDelete []*snapshot.Manifest for _, s := range snapshots { if len(s.RetentionReasons) == 0 { log.Debugf(" deleting %v", s.StartTime) toDelete = append(toDelete, s) } else { log.Debugf(" keeping %v reasons: [%v]", s.StartTime, strings.Join(s.RetentionReasons, ",")) } } return toDelete, nil }
register.go
/* Copyright The Pharmer Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) const GroupName = "cluster.pharmer.io" var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} var ( SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) func init() { localSchemeBuilder.Register(addKnownTypes) } func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion) return nil }
evaluate_mAP.py
import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' import cv2 import numpy as np import tensorflow as tf from tensorflow.python.saved_model import tag_constants from yolov3.dataset import Dataset from yolov3.yolov4 import Create_Yolo from yolov3.utils import load_yolo_weights, detect_image, image_preprocess, postprocess_boxes, nms, read_class_names from yolov3.configs import * import shutil import json import time gpus = tf.config.experimental.list_physical_devices('GPU') if len(gpus) > 0: try: tf.config.experimental.set_memory_growth(gpus[0], True) except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')") def voc_ap(rec, prec): """ --- Official matlab code VOC2012--- mrec=[0 ; rec ; 1]; mpre=[0 ; prec ; 0]; for i=numel(mpre)-1:-1:1 mpre(i)=max(mpre(i),mpre(i+1)); end i=find(mrec(2:end)~=mrec(1:end-1))+1; ap=sum((mrec(i)-mrec(i-1)).*mpre(i)); """ rec.insert(0, 0.0) # insert 0.0 at begining of list rec.append(1.0) # insert 1.0 at end of list mrec = rec[:] prec.insert(0, 0.0) # insert 0.0 at begining of list prec.append(0.0) # insert 0.0 at end of list mpre = prec[:] """ This part makes the precision monotonically decreasing (goes from the end to the beginning) matlab: for i=numel(mpre)-1:-1:1 mpre(i)=max(mpre(i),mpre(i+1)); """ # matlab indexes start in 1 but python in 0, so I have to do: # range(start=(len(mpre) - 2), end=0, step=-1) # also the python function range excludes the end, resulting in: # range(start=(len(mpre) - 2), end=-1, step=-1) for i in range(len(mpre)-2, -1, -1): mpre[i] = max(mpre[i], mpre[i+1]) """ This part creates a list of indexes where the recall changes matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1; """ i_list = [] for i in range(1, len(mrec)): if mrec[i] != mrec[i-1]: i_list.append(i) # if it was matlab would be i + 1 """ The Average Precision (AP) is the area under the curve (numerical integration) matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i)); """ ap = 0.0 for i in i_list: ap += ((mrec[i]-mrec[i-1])*mpre[i]) return ap, mrec, mpre def get_mAP(Yolo, dataset, score_threshold=0.25, iou_threshold=0.50, TEST_INPUT_SIZE=TEST_INPUT_SIZE): MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge) NUM_CLASS = read_class_names(TRAIN_CLASSES) ground_truth_dir_path = 'mAP/ground-truth' if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path) if not os.path.exists('mAP'): os.mkdir('mAP') os.mkdir(ground_truth_dir_path) print(f'\ncalculating mAP{int(iou_threshold*100)}...\n') gt_counter_per_class = {} for index in range(dataset.num_samples): ann_dataset = dataset.annotations[index] original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True) if len(bbox_data_gt) == 0: bboxes_gt = [] classes_gt = [] else: bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4] ground_truth_path = os.path.join(ground_truth_dir_path, str(index) + '.txt') num_bbox_gt = len(bboxes_gt) bounding_boxes = [] for i in range(num_bbox_gt): class_name = NUM_CLASS[classes_gt[i]] xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i])) bbox = xmin + " " + ymin + " " + xmax + " " +ymax bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False}) # count that object if class_name in gt_counter_per_class: gt_counter_per_class[class_name] += 1 else: # if class didn't exist yet gt_counter_per_class[class_name] = 1 bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n' with open(f'{ground_truth_dir_path}/{str(index)}_ground_truth.json', 'w') as outfile: json.dump(bounding_boxes, outfile) 
gt_classes = list(gt_counter_per_class.keys()) # sort the classes alphabetically gt_classes = sorted(gt_classes) n_classes = len(gt_classes) times = [] json_pred = [[] for i in range(n_classes)] for index in range(dataset.num_samples): ann_dataset = dataset.annotations[index] image_name = ann_dataset[0].split('/')[-1] original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True) image = image_preprocess(np.copy(original_image), [TEST_INPUT_SIZE, TEST_INPUT_SIZE]) image_data = image[np.newaxis, ...].astype(np.float32) t1 = time.time() if YOLO_FRAMEWORK == "tf": pred_bbox = Yolo.predict(image_data) elif YOLO_FRAMEWORK == "trt": batched_input = tf.constant(image_data) result = Yolo(batched_input) pred_bbox = [] for key, value in result.items(): value = value.numpy() pred_bbox.append(value) t2 = time.time() times.append(t2-t1) pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox] pred_bbox = tf.concat(pred_bbox, axis=0) bboxes = postprocess_boxes(pred_bbox, original_image, TEST_INPUT_SIZE, score_threshold) bboxes = nms(bboxes, iou_threshold, method='nms') for bbox in bboxes: coor = np.array(bbox[:4], dtype=np.int32) score = bbox[4] class_ind = int(bbox[5]) class_name = NUM_CLASS[class_ind] score = '%.4f' % score xmin, ymin, xmax, ymax = list(map(str, coor)) bbox = xmin + " " + ymin + " " + xmax + " " +ymax json_pred[gt_classes.index(class_name)].append({"confidence": str(score), "file_id": str(index), "bbox": str(bbox)}) ms = sum(times)/len(times)*1000 fps = 1000 / ms for class_name in gt_classes: json_pred[gt_classes.index(class_name)].sort(key=lambda x:float(x['confidence']), reverse=True) with open(f'{ground_truth_dir_path}/{class_name}_predictions.json', 'w') as outfile: json.dump(json_pred[gt_classes.index(class_name)], outfile) # Calculate the AP for each class sum_AP = 0.0 ap_dictionary = {} # open file to store the results with open("mAP/results.txt", 'w') as results_file: results_file.write("# AP and precision/recall per class\n") count_true_positives = {} for class_index, class_name in enumerate(gt_classes): count_true_positives[class_name] = 0 # Load predictions of that class predictions_file = f'{ground_truth_dir_path}/{class_name}_predictions.json' predictions_data = json.load(open(predictions_file)) # Assign predictions to ground truth objects nd = len(predictions_data) tp = [0] * nd # creates an array of zeros of size nd fp = [0] * nd for idx, prediction in enumerate(predictions_data): file_id = prediction["file_id"] # assign prediction to ground truth object if any # open ground-truth with that file_id gt_file = f'{ground_truth_dir_path}/{str(file_id)}_ground_truth.json' ground_truth_data = json.load(open(gt_file)) ovmax = -1 gt_match = -1 # load prediction bounding-box bb = [ float(x) for x in prediction["bbox"].split() ] # bounding box of prediction for obj in ground_truth_data: # look for a class_name match if obj["class_name"] == class_name: bbgt = [ float(x) for x in obj["bbox"].split() ] # bounding box of ground truth bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])] iw = bi[2] - bi[0] + 1 ih = bi[3] - bi[1] + 1 if iw > 0 and ih > 0: # compute overlap (IoU) = area of intersection / area of union ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0] + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih ov = iw * ih / ua if ov > ovmax: ovmax = ov gt_match = obj # assign prediction as true positive/don't care/false positive if ovmax >= MINOVERLAP:# if ovmax > minimum overlap if not bool(gt_match["used"]): # 
true positive tp[idx] = 1 gt_match["used"] = True count_true_positives[class_name] += 1 # update the ".json" file with open(gt_file, 'w') as f: f.write(json.dumps(ground_truth_data)) else: # false positive (multiple detection) fp[idx] = 1 else: # false positive fp[idx] = 1 # compute precision/recall cumsum = 0 for idx, val in enumerate(fp): fp[idx] += cumsum cumsum += val cumsum = 0 for idx, val in enumerate(tp): tp[idx] += cumsum cumsum += val #print(tp) rec = tp[:] for idx, val in enumerate(tp): rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name] #print(rec) prec = tp[:] for idx, val in enumerate(tp): prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx]) #print(prec)
text = "{0:.3f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100) rounded_prec = [ '%.3f' % elem for elem in prec ] rounded_rec = [ '%.3f' % elem for elem in rec ] # Write to results.txt results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n") print(text) ap_dictionary[class_name] = ap results_file.write("\n# mAP of all classes\n") mAP = sum_AP / n_classes text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP*100, fps) results_file.write(text + "\n") print(text) return mAP*100 if __name__ == '__main__': if YOLO_FRAMEWORK == "tf": # TensorFlow detection if YOLO_TYPE == "yolov4": Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS if YOLO_TYPE == "yolov3": Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS if YOLO_CUSTOM_WEIGHTS == False: yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES) load_yolo_weights(yolo, Darknet_weights) # use Darknet weights else: yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES) yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights elif YOLO_FRAMEWORK == "trt": # TensorRT detection saved_model_loaded = tf.saved_model.load(f"./checkpoints/{TRAIN_MODEL_NAME}", tags=[tag_constants.SERVING]) signature_keys = list(saved_model_loaded.signatures.keys()) yolo = saved_model_loaded.signatures['serving_default'] testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE) get_mAP(yolo, testset, score_threshold=0.05, iou_threshold=0.50, TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
ap, mrec, mprec = voc_ap(rec, prec) sum_AP += ap
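# --- Editor's usage sketch (not part of the original file) ---
# voc_ap on a toy precision/recall curve (made-up numbers). Note that voc_ap
# mutates the lists it receives, so copies are passed here.
toy_rec = [0.5, 0.5, 1.0]
toy_prec = [1.0, 0.5, 2.0 / 3.0]
toy_ap, toy_mrec, toy_mpre = voc_ap(toy_rec[:], toy_prec[:])
# Area under the interpolated curve: 0.5 * 1.0 + 0.5 * (2/3) = 5/6.
assert abs(toy_ap - 5.0 / 6.0) < 1e-9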
proxy.rs
//! A proxy that forwards data to another server and forwards that server's //! responses back to clients. //! //! Because the Tokio runtime uses a thread pool, each TCP connection is //! processed concurrently with all other TCP connections across multiple //! threads. //! //! You can showcase this by running this in one terminal: //! //! cargo run --example proxy //! //! This in another terminal //! //! cargo run --example echo //! //! And finally this in another terminal //! //! cargo run --example connect 127.0.0.1:8081 //! //! This final terminal will connect to our proxy, which will in turn connect to //! the echo server, and you'll be able to see data flowing between them. #![warn(rust_2018_idioms)] use futures::{future::try_join, FutureExt, StreamExt}; use std::{env, error::Error}; use tokio::{ io::AsyncReadExt, net::{TcpListener, TcpStream}, }; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string()); let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string());
println!("Listening on: {}", listen_addr); println!("Proxying to: {}", server_addr); let mut incoming = TcpListener::bind(listen_addr).await?.incoming(); while let Some(Ok(inbound)) = incoming.next().await { let transfer = transfer(inbound, server_addr.clone()).map(|r| { if let Err(e) = r { println!("Failed to transfer; error={}", e); } }); tokio::spawn(transfer); } Ok(()) } async fn transfer(inbound: TcpStream, proxy_addr: String) -> Result<(), Box<dyn Error>> { let outbound = TcpStream::connect(proxy_addr).await?; let (mut ri, mut wi) = inbound.split(); let (mut ro, mut wo) = outbound.split(); let client_to_server = ri.copy(&mut wo); let server_to_client = ro.copy(&mut wi); try_join(client_to_server, server_to_client).await?; Ok(()) }
GetHdfsCapacityStatisticInfoRequest.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkemr.endpoint import endpoint_data class GetHdfsCapacityStatisticInfoRequest(RpcRequest): def
(self): RpcRequest.__init__(self, 'Emr', '2016-04-08', 'GetHdfsCapacityStatisticInfo') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_FromDatetime(self): return self.get_query_params().get('FromDatetime') def set_FromDatetime(self,FromDatetime): self.add_query_param('FromDatetime',FromDatetime) def get_ResourceOwnerId(self): return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self,ResourceOwnerId): self.add_query_param('ResourceOwnerId',ResourceOwnerId) def get_ClusterId(self): return self.get_query_params().get('ClusterId') def set_ClusterId(self,ClusterId): self.add_query_param('ClusterId',ClusterId) def get_ToDatetime(self): return self.get_query_params().get('ToDatetime') def set_ToDatetime(self,ToDatetime): self.add_query_param('ToDatetime',ToDatetime)
__init__
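# --- Editor's usage sketch (not part of the original file) ---
# Building and sending the request defined above with an aliyunsdkcore
# AcsClient; the region, credentials, cluster id and time range are
# placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = GetHdfsCapacityStatisticInfoRequest()
request.set_ClusterId('C-XXXXXXXXXXXXXXXX')
request.set_FromDatetime('2021-01-01 00:00:00')
request.set_ToDatetime('2021-01-02 00:00:00')
response = client.do_action_with_exception(request)
print(response)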
functionOverloads40.js
//// [functionOverloads40.ts] function foo(bar:{a:number;}[]):string; function foo(bar:{a:boolean;}[]):number; function foo(bar:{a:any;}[]):any{ return bar } var x = foo([{a:'bar'}]);
//// [functionOverloads40.js] function foo(bar) { return bar; } var x = foo([{ a: 'bar' }]);
struct_proxy.py
# GPLv3 License # # Copyright (C) 2020 Ubisoft # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ Proxy of a bpy.types.Struct, excluding bpy.types.ID that is implemented in datablock_proxy.py See synchronization.md """ from __future__ import annotations from functools import lru_cache import logging from typing import Optional, Tuple, TYPE_CHECKING, Union import bpy.types as T # noqa from mixer.blender_data import specifics from mixer.blender_data.attributes import apply_attribute, diff_attribute, read_attribute, write_attribute from mixer.blender_data.json_codec import serialize from mixer.blender_data.misc_proxies import NonePtrProxy from mixer.blender_data.proxy import Delta, DeltaReplace, DeltaUpdate, Proxy if TYPE_CHECKING: from mixer.blender_data.proxy import Context logger = logging.getLogger(__name__) def _create_clear_animation_data(incoming_proxy: StructProxy, existing_struct: T.bpy_struct) -> Optional[T.AnimData]: if existing_struct.animation_data is None: if not isinstance(incoming_proxy, NonePtrProxy): # None (current blender value) -> not None (incoming proxy) existing_struct.animation_data_create() else: if isinstance(incoming_proxy, NonePtrProxy): # not None (current blender value) -> None (incoming proxy) existing_struct.animation_data_clear() return existing_struct.animation_data @lru_cache() def _proxy_types(): from mixer.blender_data.modifier_proxies import NodesModifierProxy proxy_types = {} try: proxy_types[T.NodesModifier] = NodesModifierProxy except AttributeError: pass return proxy_types @serialize class StructProxy(Proxy): """ Holds a copy of a Blender bpy_struct """ _serialize: Tuple[str, ...] = ("_data",) def __init__(self): self._data = {} pass def copy_data(self, other: StructProxy): self._data = other._data def clear_data(self): self._data.clear() @classmethod def make(cls, bpy_struct: T.bpy_struct) -> StructProxy: proxy_class = _proxy_types().get(type(bpy_struct), StructProxy) return proxy_class() def load(self, attribute: T.bpy_struct, context: Context) -> StructProxy: """ Load the attribute Blender struct into this proxy Args:
attribute: the Blender struct to load into this proxy, (e.g an ObjectDisplay instance) key: the identifier of attribute in its parent (e.g. "display") context: the proxy and visit state """ self.clear_data() properties = context.synchronized_properties.properties(attribute) # includes properties from the bl_rna only, not the "view like" properties like MeshPolygon.edge_keys # that we do not want to load anyway properties = specifics.conditional_properties(attribute, properties) for name, bl_rna_property in properties: attr = getattr(attribute, name) attr_value = read_attribute(attr, name, bl_rna_property, attribute, context) self._data[name] = attr_value return self def save( self, attribute: T.bpy_struct, parent: Union[T.bpy_struct, T.bpy_prop_collection], key: Union[int, str], context: Context, ): """ Save this proxy into attribute Args: attribute: the bpy_struct to store this proxy into parent: (e.g an Object instance) key: (e.g. "display) context: the proxy and visit state """ if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)): attribute = _create_clear_animation_data(self, parent) if attribute is None: logger.info(f"save: attribute is None for {context.visit_state.display_path()}.{key}") return for k, v in self._data.items(): write_attribute(attribute, k, v, context) def apply( self, attribute: T.bpy_struct, parent: Union[T.bpy_struct, T.bpy_prop_collection], key: Union[int, str], delta: Delta, context: Context, to_blender: bool = True, ) -> Union[StructProxy, NonePtrProxy]: """ Apply delta to this proxy and optionally to the Blender attribute its manages. Args: attribute: the struct to update (e.g. a Material instance) parent: the attribute that contains attribute (e.g. bpy.data.materials) key: the key that identifies attribute in parent (e.g "Material") delta: the delta to apply context: proxy and visit state to_blender: update the managed Blender attribute in addition to this Proxy """ # WARNING parent must not be searched for key as it will fail in case of duplicate keys, with libraries update = delta.value if isinstance(delta, DeltaReplace): # The structure is replaced as a whole. # TODO explain when this occurs self.copy_data(update) if to_blender: self.save(attribute, parent, key, context) else: # the structure is updated if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)): # if animation_data is updated to None (cleared), the parent structure is updated to store # a NonePtrProxy if to_blender: attribute = _create_clear_animation_data(update, parent) if attribute is None: return NonePtrProxy() else: if isinstance(update, NonePtrProxy): return NonePtrProxy() if attribute: for k, member_delta in update._data.items(): current_value = self._data.get(k) try: self._data[k] = apply_attribute(attribute, k, current_value, member_delta, context, to_blender) except Exception as e: logger.warning(f"Struct.apply(). Processing {member_delta}") logger.warning(f"... for {attribute}.{k}") logger.warning(f"... Exception: {e!r}") logger.warning("... Update ignored") continue return self def diff( self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context ) -> Optional[Delta]: """ Computes the difference between the state of an item tracked by this proxy and its Blender state. As this proxy tracks a Struct or ID, the result will be a DeltaUpdate that contains a StructProxy or a DatablockProxy with an Delta item per added, deleted or updated property. 
One expects only DeltaUpdate, although DeltaAddition or DeltaDeletion may be produced when an addon
is loaded or unloaded while a room is joined. This situation is not really supported as there is no
handler to track addon changes.

Args:
    attribute: the struct to update (e.g. a Material instance)
    key: the key that identifies attribute in parent (e.g "Material")
    prop: the Property of struct as found in its enclosing object
    context: proxy and visit state
"""

# Create a proxy that will be populated with attributes differences.
diff = self.__class__()
diff.init(attribute)
delta = self._diff(attribute, key, prop, context, diff)
return delta

def _diff(
    self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context, diff: StructProxy
) -> Optional[Delta]:
    """
    Computes the difference between the state of an item tracked by this proxy and its Blender state
    and attaches the difference to diff.

    See diff()

    Args:
        attribute: the struct to update (e.g. a Material instance)
        key: the key that identifies attribute in parent (e.g "Material")
        prop: the Property of struct as found in its enclosing object
        context: proxy and visit state
        diff: the proxy that holds the difference and will be transmitted in a Delta

    Returns:
        a delta if any difference is found, None otherwise
    """
    if attribute is None:
        from mixer.blender_data.misc_proxies import NonePtrProxy

        return DeltaUpdate(NonePtrProxy())

    # PERF accessing the properties from the synchronized_properties is **far** cheaper than iterating
    # over _data and then getting the properties with
    #   member_property = struct.bl_rna.properties[k]
    # a line to which py-spy attributes 20% of the total diff!
    properties = context.synchronized_properties.properties(attribute)
    properties = specifics.conditional_properties(attribute, properties)
    for k, member_property in properties:
        try:
            member = getattr(attribute, k)
        except AttributeError:
            logger.info(f"diff: unknown attribute {k} in {attribute}")
            continue

        proxy_data = self._data.get(k)
        delta = diff_attribute(member, k, member_property, proxy_data, context)

        if delta is not None:
            diff._data[k] = delta

    # TODO detect media updates (reload(), and attach a media descriptor to diff)
    # difficult ?

    # if anything has changed, wrap the hollow proxy in a DeltaUpdate. This may be superfluous but
    # it is homogeneous with additions and deletions
    if len(diff._data):
        return DeltaUpdate(diff)

    return None
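# --- Editor's usage sketch (not part of the original file) ---
# A rough picture of the load / diff / apply cycle, assuming a Blender
# session with the mixer addon (the `context` and `prop` values come from
# mixer's synchronization machinery), so this is illustrative only:
#
#   display = bpy.context.object.display
#   proxy = StructProxy.make(display).load(display, context)
#   # ... the struct is edited on the Blender side ...
#   delta = proxy.diff(display, "display", prop, context)
#   if delta is not None:
#       proxy.apply(display, bpy.context.object, "display", delta, context)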
adress.controller.ts
import { Body, Controller, Delete, Get, Param, Post, Put, UseGuards } from '@nestjs/common'; import { JwtAuthGuard } from 'src/auth/jwt-auth.guard'; import { CreateAdressDto } from 'src/dtos/adress/adress.dto'; import { UpdateAdressDto } from 'src/dtos/adress/adress.update.dto'; import { AdressService } from 'src/services/adress/adress.service'; @Controller('user/person/:personId/adress/') export class AdressController { constructor( private readonly adressService: AdressService ){} @UseGuards(JwtAuthGuard) @Post() async create(@Param() params,@Body() createAdressDto: CreateAdressDto){ const adress = await this.adressService.create(params, createAdressDto); const adressUpdated = await this.adressService.updateFk(params, adress); return adressUpdated; } @UseGuards(JwtAuthGuard) @Get() findAll(@Param() params) { return this.adressService.findAllByPersonId(params); } @UseGuards(JwtAuthGuard) @Get(':id') findOne(@Param() params) { return this.adressService.findOne(params); } @UseGuards(JwtAuthGuard) @Put(':id') update(@Param() params, @Body() updateAdressDto: UpdateAdressDto) { return this.adressService.update(params, updateAdressDto); }
@Delete(':id') remove(@Param() params) { return this.adressService.remove(params); } }
@UseGuards(JwtAuthGuard)
riemannian.py
import tensorflow.compat.v1 as tf from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch from t3f import shapes from t3f import decompositions def project_sum(what, where, weights=None): """Project sum of `what` TTs on the tangent space of `where` TT. project_sum(what, x) = P_x(what) project_sum(batch_what, x) = P_x(\sum_i batch_what[i]) project_sum(batch_what, x, weights) = P_x(\sum_j weights[j] * batch_what[j]) This function implements the algorithm from the paper [1], theorem 3.1. [1] C. Lubich, I. Oseledets and B. Vandereycken, Time integration of Tensor Trains. Args: what: TensorTrain or TensorTrainBatch. In the case of batch returns projection of the sum of elements in the batch. where: TensorTrain, TT-tensor or TT-matrix on which tangent space to project weights: python list or tf.Tensor of numbers or None, weights of the sum Returns: a TensorTrain with the TT-ranks equal 2 * tangent_space_tens.get_tt_ranks() Complexity: O(d r_where^3 m) for orthogonalizing the TT-cores of where +O(batch_size d r_what r_where n (r_what + r_where)) d is the number of TT-cores (what.ndims()); r_what is the largest TT-rank of what max(what.get_tt_rank()) r_where is the largest TT-rank of where n is the size of the axis dimension of what and where e.g. for a tensor of size 4 x 4 x 4, n is 4;
for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12 """ # Always work with batch of TT objects for simplicity. what = shapes.expand_batch_dim(what) if weights is not None: weights = tf.convert_to_tensor(weights, dtype=where.dtype) if not isinstance(where, TensorTrain): raise ValueError('The first argument should be a TensorTrain object, got ' '"%s".' % where) if where.get_raw_shape() != what.get_raw_shape(): raise ValueError('The shapes of the tensor we want to project and of the ' 'tensor on which tangent space we want to project should ' 'match, got %s and %s.' % (where.get_raw_shape(), what.get_raw_shape())) dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or what.dtype.is_compatible_with(where.dtype)) if not dtypes_compatible: raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' % (where.dtype, what.dtype)) left_tangent_space_tens = decompositions.orthogonalize_tt_cores( where) right_tangent_space_tens = decompositions.orthogonalize_tt_cores( left_tangent_space_tens, left_to_right=False) ndims = where.ndims() dtype = where.dtype raw_shape = shapes.lazy_raw_shape(where) batch_size = shapes.lazy_batch_size(what) right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens) left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens) # For einsum notation. mode_str = 'ij' if where.is_tt_matrix() else 'i' right_rank_dim = where.right_tt_rank_dim left_rank_dim = where.left_tt_rank_dim if weights is not None: weights_shape = weights.get_shape() output_is_batch = len(weights_shape) > 1 and weights_shape[1] > 1 else: output_is_batch = False output_batch_str = 'o' if output_is_batch else '' if output_is_batch: right_rank_dim += 1 left_rank_dim += 1 output_batch_size = weights.get_shape()[1].value # Prepare rhs vectors. # rhs[core_idx] is of size # batch_size x tensor_tt_ranks[core_idx] x tangent_tt_ranks[core_idx] rhs = [None] * (ndims + 1) rhs[ndims] = tf.ones((batch_size, 1, 1), dtype=dtype) for core_idx in range(ndims - 1, 0, -1): tens_core = what.tt_cores[core_idx] right_tang_core = right_tangent_space_tens.tt_cores[core_idx] einsum_str = 'sa{0}b,sbd,c{0}d->sac'.format(mode_str) rhs[core_idx] = tf.einsum(einsum_str, tens_core, rhs[core_idx + 1], right_tang_core) # Prepare lhs vectors. # lhs[core_idx] is of size # batch_size x tangent_tt_ranks[core_idx] x tensor_tt_ranks[core_idx] lhs = [None] * (ndims + 1) lhs[0] = tf.ones((batch_size, 1, 1), dtype=dtype) for core_idx in range(ndims - 1): tens_core = what.tt_cores[core_idx] left_tang_core = left_tangent_space_tens.tt_cores[core_idx] einsum_str = 'sab,a{0}c,sb{0}d->scd'.format(mode_str) lhs[core_idx + 1] = tf.einsum(einsum_str, lhs[core_idx], left_tang_core, tens_core) # Left to right sweep. 
res_cores_list = [] for core_idx in range(ndims): tens_core = what.tt_cores[core_idx] left_tang_core = left_tangent_space_tens.tt_cores[core_idx] right_tang_core = right_tangent_space_tens.tt_cores[core_idx] if core_idx < ndims - 1: einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str) proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core) einsum_str = 'a{0}b,sbc->sa{0}c'.format(mode_str) proj_core -= tf.einsum(einsum_str, left_tang_core, lhs[core_idx + 1]) if weights is None: einsum_str = 'sa{0}b,sbc->a{0}c'.format(mode_str) proj_core = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1]) else: einsum_str = 'sa{0}b,sbc->sa{0}c'.format(mode_str, output_batch_str) proj_core_s = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1]) einsum_str = 's{1},sa{0}c->{1}a{0}c'.format(mode_str, output_batch_str) proj_core = tf.einsum(einsum_str, weights, proj_core_s) if core_idx == ndims - 1: if weights is None: einsum_str = 'sab,sb{0}c->a{0}c'.format(mode_str) proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core) else: einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str, output_batch_str) proj_core_s = tf.einsum(einsum_str, lhs[core_idx], tens_core) einsum_str = 's{1},sa{0}c->{1}a{0}c'.format(mode_str, output_batch_str) proj_core = tf.einsum(einsum_str, weights, proj_core_s) if output_is_batch: # Add batch dimension of size output_batch_size to left_tang_core and # right_tang_core extended_left_tang_core = tf.expand_dims(left_tang_core, 0) extended_right_tang_core = tf.expand_dims(right_tang_core, 0) if where.is_tt_matrix(): extended_left_tang_core = tf.tile(extended_left_tang_core, [output_batch_size, 1, 1, 1, 1]) extended_right_tang_core = tf.tile(extended_right_tang_core, [output_batch_size, 1, 1, 1, 1]) else: extended_left_tang_core = tf.tile(extended_left_tang_core, [output_batch_size, 1, 1, 1]) extended_right_tang_core = tf.tile(extended_right_tang_core, [output_batch_size, 1, 1, 1]) else: extended_left_tang_core = left_tang_core extended_right_tang_core = right_tang_core if core_idx == 0: res_core = tf.concat((proj_core, extended_left_tang_core), axis=right_rank_dim) elif core_idx == ndims - 1: res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim) else: rank_1 = right_tangent_tt_ranks[core_idx] rank_2 = left_tangent_tt_ranks[core_idx + 1] if where.is_tt_matrix(): mode_size_n = raw_shape[0][core_idx] mode_size_m = raw_shape[1][core_idx] shape = [rank_1, mode_size_n, mode_size_m, rank_2] else: mode_size = raw_shape[0][core_idx] shape = [rank_1, mode_size, rank_2] if output_is_batch: shape = [output_batch_size] + shape zeros = tf.zeros(shape, dtype) upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim) lower = tf.concat((proj_core, extended_left_tang_core), axis=right_rank_dim) res_core = tf.concat((upper, lower), axis=left_rank_dim) res_cores_list.append(res_core) # TODO: TT-ranks. if output_is_batch: res = TensorTrainBatch(res_cores_list, where.get_raw_shape(), batch_size=output_batch_size) else: res = TensorTrain(res_cores_list, where.get_raw_shape()) res.projection_on = where return res def project(what, where): """Project `what` TTs on the tangent space of `where` TT. project(what, x) = P_x(what) project(batch_what, x) = batch(P_x(batch_what[0]), ..., P_x(batch_what[N])) This function implements the algorithm from the paper [1], theorem 3.1. [1] C. Lubich, I. Oseledets and B. Vandereycken, Time integration of Tensor Trains. Args: what: TensorTrain or TensorTrainBatch. 
In the case of batch returns batch with projection of each individual tensor. where: TensorTrain, TT-tensor or TT-matrix on which tangent space to project Returns: a TensorTrain with the TT-ranks equal 2 * tangent_space_tens.get_tt_ranks() Complexity: O(d r_where^3 m) for orthogonalizing the TT-cores of where +O(batch_size d r_what r_where n (r_what + r_where)) d is the number of TT-cores (what.ndims()); r_what is the largest TT-rank of what max(what.get_tt_rank()) r_where is the largest TT-rank of where n is the size of the axis dimension of what and where e.g. for a tensor of size 4 x 4 x 4, n is 4; for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12 """ if not isinstance(where, TensorTrain): raise ValueError('The first argument should be a TensorTrain object, got ' '"%s".' % where) if where.get_raw_shape() != what.get_raw_shape(): raise ValueError('The shapes of the tensor we want to project and of the ' 'tensor on which tangent space we want to project should ' 'match, got %s and %s.' % (where.get_raw_shape(), what.get_raw_shape())) dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or what.dtype.is_compatible_with(where.dtype)) if not dtypes_compatible: raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' % (where.dtype, what.dtype)) left_tangent_space_tens = decompositions.orthogonalize_tt_cores( where) right_tangent_space_tens = decompositions.orthogonalize_tt_cores( left_tangent_space_tens, left_to_right=False) ndims = where.ndims() dtype = where.dtype raw_shape = shapes.lazy_raw_shape(where) right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens) left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens) # For einsum notation. mode_str = 'ij' if where.is_tt_matrix() else 'i' right_rank_dim = what.right_tt_rank_dim left_rank_dim = what.left_tt_rank_dim output_is_batch = isinstance(what, TensorTrainBatch) if output_is_batch: output_batch_size = what.batch_size # Always work with batch of TT objects for simplicity. what = shapes.expand_batch_dim(what) batch_size = shapes.lazy_batch_size(what) # Prepare rhs vectors. # rhs[core_idx] is of size # batch_size x tensor_tt_ranks[core_idx] x tangent_tt_ranks[core_idx] rhs = [None] * (ndims + 1) rhs[ndims] = tf.ones((batch_size, 1, 1), dtype=dtype) for core_idx in range(ndims - 1, 0, -1): tens_core = what.tt_cores[core_idx] right_tang_core = right_tangent_space_tens.tt_cores[core_idx] einsum_str = 'sa{0}b,sbd,c{0}d->sac'.format(mode_str) rhs[core_idx] = tf.einsum(einsum_str, tens_core, rhs[core_idx + 1], right_tang_core) # Prepare lhs vectors. # lhs[core_idx] is of size # batch_size x tangent_tt_ranks[core_idx] x tensor_tt_ranks[core_idx] lhs = [None] * (ndims + 1) lhs[0] = tf.ones((batch_size, 1, 1), dtype=dtype) for core_idx in range(ndims - 1): tens_core = what.tt_cores[core_idx] left_tang_core = left_tangent_space_tens.tt_cores[core_idx] einsum_str = 'sab,a{0}c,sb{0}d->scd'.format(mode_str) lhs[core_idx + 1] = tf.einsum(einsum_str, lhs[core_idx], left_tang_core, tens_core) # Left to right sweep. 
res_cores_list = [] for core_idx in range(ndims): tens_core = what.tt_cores[core_idx] left_tang_core = left_tangent_space_tens.tt_cores[core_idx] right_tang_core = right_tangent_space_tens.tt_cores[core_idx] if core_idx < ndims - 1: einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str) proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core) einsum_str = 'a{0}b,sbc->sa{0}c'.format(mode_str) proj_core -= tf.einsum(einsum_str, left_tang_core, lhs[core_idx + 1]) if output_is_batch: einsum_str = 'sa{0}b,sbc->sa{0}c'.format(mode_str) else: einsum_str = 'sa{0}b,sbc->a{0}c'.format(mode_str) proj_core = tf.einsum(einsum_str, proj_core, rhs[core_idx + 1]) if core_idx == ndims - 1: if output_is_batch: einsum_str = 'sab,sb{0}c->sa{0}c'.format(mode_str) else: einsum_str = 'sab,sb{0}c->a{0}c'.format(mode_str) proj_core = tf.einsum(einsum_str, lhs[core_idx], tens_core) if output_is_batch: # Add batch dimension of size output_batch_size to left_tang_core and # right_tang_core extended_left_tang_core = tf.expand_dims(left_tang_core, 0) extended_right_tang_core = tf.expand_dims(right_tang_core, 0) if where.is_tt_matrix(): extended_left_tang_core = tf.tile(extended_left_tang_core, [output_batch_size, 1, 1, 1, 1]) extended_right_tang_core = tf.tile(extended_right_tang_core, [output_batch_size, 1, 1, 1, 1]) else: extended_left_tang_core = tf.tile(extended_left_tang_core, [output_batch_size, 1, 1, 1]) extended_right_tang_core = tf.tile(extended_right_tang_core, [output_batch_size, 1, 1, 1]) else: extended_left_tang_core = left_tang_core extended_right_tang_core = right_tang_core if core_idx == 0: res_core = tf.concat((proj_core, extended_left_tang_core), axis=right_rank_dim) elif core_idx == ndims - 1: res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim) else: rank_1 = right_tangent_tt_ranks[core_idx] rank_2 = left_tangent_tt_ranks[core_idx + 1] if where.is_tt_matrix(): mode_size_n = raw_shape[0][core_idx] mode_size_m = raw_shape[1][core_idx] shape = [rank_1, mode_size_n, mode_size_m, rank_2] else: mode_size = raw_shape[0][core_idx] shape = [rank_1, mode_size, rank_2] if output_is_batch: shape = [output_batch_size] + shape zeros = tf.zeros(shape, dtype) upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim) lower = tf.concat((proj_core, extended_left_tang_core), axis=right_rank_dim) res_core = tf.concat((upper, lower), axis=left_rank_dim) res_cores_list.append(res_core) # TODO: TT-ranks. if output_is_batch: res = TensorTrainBatch(res_cores_list, where.get_raw_shape(), batch_size=output_batch_size) else: res = TensorTrain(res_cores_list, where.get_raw_shape()) res.projection_on = where return res def project_matmul(what, where, matrix): """Project `matrix` * `what` TTs on the tangent space of `where` TT. project(what, x) = P_x(what) project(batch_what, x) = batch(P_x(batch_what[0]), ..., P_x(batch_what[N])) This function implements the algorithm from the paper [1], theorem 3.1. [1] C. Lubich, I. Oseledets and B. Vandereycken, Time integration of Tensor Trains. Args: what: TensorTrain or TensorTrainBatch. In the case of batch returns batch with projection of each individual tensor. 
where: TensorTrain, TT-tensor or TT-matrix on which tangent space to project matrix: TensorTrain, TT-matrix to multiply by what Returns: a TensorTrain with the TT-ranks equal 2 * tangent_space_tens.get_tt_ranks() Complexity: O(d r_where^3 m) for orthogonalizing the TT-cores of where +O(batch_size d R r_what r_where (n r_what + n m R + m r_where)) d is the number of TT-cores (what.ndims()); r_what is the largest TT-rank of what max(what.get_tt_rank()) r_where is the largest TT-rank of where matrix is of TT-rank R and of raw-shape (m, m, ..., m) x (n, n, ..., n). """ if not isinstance(where, TensorTrain): raise ValueError('The first argument should be a TensorTrain object, got ' '"%s".' % where) if where.get_raw_shape() != what.get_raw_shape(): raise ValueError('The shapes of the tensor we want to project and of the ' 'tensor on which tangent space we want to project should ' 'match, got %s and %s.' % (where.get_raw_shape(), what.get_raw_shape())) dtypes_compatible = (where.dtype.is_compatible_with(what.dtype) or what.dtype.is_compatible_with(where.dtype)) if not dtypes_compatible: raise ValueError('Dtypes of the arguments should coincide, got %s and %s.' % (where.dtype, what.dtype)) left_tangent_space_tens = decompositions.orthogonalize_tt_cores( where) right_tangent_space_tens = decompositions.orthogonalize_tt_cores( left_tangent_space_tens, left_to_right=False) ndims = where.ndims() dtype = where.dtype raw_shape = shapes.lazy_raw_shape(where) batch_size = shapes.lazy_batch_size(what) right_tangent_tt_ranks = shapes.lazy_tt_ranks(right_tangent_space_tens) left_tangent_tt_ranks = shapes.lazy_tt_ranks(left_tangent_space_tens) # For einsum notation. right_rank_dim = what.right_tt_rank_dim left_rank_dim = what.left_tt_rank_dim output_is_batch = isinstance(what, TensorTrainBatch) if output_is_batch: output_batch_size = what.batch_size # Always work with batch of TT objects for simplicity. what = shapes.expand_batch_dim(what) # Prepare rhs vectors. # rhs[core_idx] is of size # batch_size x tensor_tt_ranks[core_idx] x matrix_tt_ranks[core_idx] x tangent_tt_ranks[core_idx] rhs = [None] * (ndims + 1) rhs[ndims] = tf.ones((batch_size, 1, 1, 1), dtype=dtype) for core_idx in range(ndims - 1, 0, -1): tens_core = what.tt_cores[core_idx] right_tang_core = right_tangent_space_tens.tt_cores[core_idx] matrix_core = matrix.tt_cores[core_idx] rhs[core_idx] = tf.einsum('bije,cikf,sdef,sajkd->sabc', matrix_core, right_tang_core, rhs[core_idx + 1], tens_core) # Prepare lhs vectors. # lhs[core_idx] is of size # batch_size x tangent_tt_ranks[core_idx] x matrix_tt_ranks[core_idx] x tensor_tt_ranks[core_idx] lhs = [None] * (ndims + 1) lhs[0] = tf.ones((batch_size, 1, 1, 1), dtype=dtype) for core_idx in range(ndims - 1): tens_core = what.tt_cores[core_idx] left_tang_core = left_tangent_space_tens.tt_cores[core_idx] matrix_core = matrix.tt_cores[core_idx] # TODO: brutforce order of indices in lhs?? lhs[core_idx + 1] = tf.einsum('bije,aikd,sabc,scjkf->sdef', matrix_core, left_tang_core, lhs[core_idx], tens_core) # Left to right sweep. 
res_cores_list = [] for core_idx in range(ndims): tens_core = what.tt_cores[core_idx] matrix_core = matrix.tt_cores[core_idx] left_tang_core = left_tangent_space_tens.tt_cores[core_idx] right_tang_core = right_tangent_space_tens.tt_cores[core_idx] if core_idx < ndims - 1: proj_core = tf.einsum('scjke,sabc,bijd->saikde', tens_core, lhs[core_idx], matrix_core) proj_core -= tf.einsum('aikb,sbcd->saikcd', left_tang_core, lhs[core_idx + 1]) proj_core = tf.einsum('saikcb,sbcd->saikd', proj_core, rhs[core_idx + 1]) if core_idx == ndims - 1: # d and e dimensions take 1 value, since its the last rank. # To make the result shape (?, ?, ?, 1), we are summing d and leaving e, # but we could have done the opposite -- sum e and leave d. proj_core = tf.einsum('sabc,bijd,scjke->saike', lhs[core_idx], matrix_core, tens_core) if output_is_batch: # Add batch dimension of size output_batch_size to left_tang_core and # right_tang_core extended_left_tang_core = tf.expand_dims(left_tang_core, 0) extended_right_tang_core = tf.expand_dims(right_tang_core, 0) extended_left_tang_core = tf.tile(extended_left_tang_core, [output_batch_size, 1, 1, 1, 1]) extended_right_tang_core = tf.tile(extended_right_tang_core, [output_batch_size, 1, 1, 1, 1]) else: extended_left_tang_core = left_tang_core extended_right_tang_core = right_tang_core if core_idx == 0: res_core = tf.concat((proj_core, extended_left_tang_core), axis=right_rank_dim) elif core_idx == ndims - 1: res_core = tf.concat((extended_right_tang_core, proj_core), axis=left_rank_dim) else: rank_1 = right_tangent_tt_ranks[core_idx] rank_2 = left_tangent_tt_ranks[core_idx + 1] mode_size_n = raw_shape[0][core_idx] mode_size_m = raw_shape[1][core_idx] shape = [rank_1, mode_size_n, mode_size_m, rank_2] if output_is_batch: shape = [output_batch_size] + shape zeros = tf.zeros(shape, dtype) upper = tf.concat((extended_right_tang_core, zeros), axis=right_rank_dim) lower = tf.concat((proj_core, extended_left_tang_core), axis=right_rank_dim) res_core = tf.concat((upper, lower), axis=left_rank_dim) res_cores_list.append(res_core) # TODO: TT-ranks. if output_is_batch: res = TensorTrainBatch(res_cores_list, where.get_raw_shape(), batch_size=output_batch_size) else: res = TensorTrain(res_cores_list, where.get_raw_shape()) res.projection_on = where return res def pairwise_flat_inner_projected(projected_tt_vectors_1, projected_tt_vectors_2): """Scalar products between two batches of TTs from the same tangent space. res[i, j] = t3f.flat_inner(projected_tt_vectors_1[i], projected_tt_vectors_1[j]). pairwise_flat_inner_projected(projected_tt_vectors_1, projected_tt_vectors_2) is equivalent to pairwise_flat_inner(projected_tt_vectors_1, projected_tt_vectors_2) , but works only on objects from the same tangent space and is much faster than general pairwise_flat_inner. Args: projected_tt_vectors_1: TensorTrainBatch of tensors projected on the same tangent space as projected_tt_vectors_2. projected_tt_vectors_2: TensorTrainBatch. Returns: tf.tensor with the scalar product matrix. Complexity: O(batch_size^2 d r^2 n), where d is the number of TT-cores (projected_tt_vectors_1.ndims()); r is the largest TT-rank max(projected_tt_vectors_1.get_tt_rank()) (i.e. 2 * {the TT-rank of the object we projected vectors onto}. and n is the size of the axis dimension, e.g. for a tensor of size 4 x 4 x 4, n is 4; for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12. 
""" if not hasattr(projected_tt_vectors_1, 'projection_on') or \ not hasattr(projected_tt_vectors_2, 'projection_on'): raise ValueError('Both arguments should be projections on the tangent ' 'space of some other TT-object. All projection* functions ' 'leave .projection_on field in the resulting TT-object ' 'which is not present in the arguments you\'ve provided') if projected_tt_vectors_1.projection_on != projected_tt_vectors_2.projection_on: raise ValueError('Both arguments should be projections on the tangent ' 'space of the same TT-object. The provided arguments are ' 'projections on different TT-objects (%s and %s). Or at ' 'least the pointers are different.' % (projected_tt_vectors_1.projection_on, projected_tt_vectors_2.projection_on)) # Always work with batches of objects for simplicity. projected_tt_vectors_1 = shapes.expand_batch_dim(projected_tt_vectors_1) projected_tt_vectors_2 = shapes.expand_batch_dim(projected_tt_vectors_2) ndims = projected_tt_vectors_1.ndims() tt_ranks = shapes.lazy_tt_ranks(projected_tt_vectors_1) if projected_tt_vectors_1.is_tt_matrix(): right_size = tt_ranks[1] // 2 curr_core_1 = projected_tt_vectors_1.tt_cores[0] curr_core_2 = projected_tt_vectors_2.tt_cores[0] curr_du_1 = curr_core_1[:, :, :, :, :right_size] curr_du_2 = curr_core_2[:, :, :, :, :right_size] res = tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2) for core_idx in range(1, ndims): left_size = tt_ranks[core_idx] // 2 right_size = tt_ranks[core_idx + 1] // 2 curr_core_1 = projected_tt_vectors_1.tt_cores[core_idx] curr_core_2 = projected_tt_vectors_2.tt_cores[core_idx] curr_du_1 = curr_core_1[:, left_size:, :, :, :right_size] curr_du_2 = curr_core_2[:, left_size:, :, :, :right_size] res += tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2) left_size = tt_ranks[-2] // 2 curr_core_1 = projected_tt_vectors_1.tt_cores[-1] curr_core_2 = projected_tt_vectors_2.tt_cores[-1] curr_du_1 = curr_core_1[:, left_size:, :, :, :] curr_du_2 = curr_core_2[:, left_size:, :, :, :] res += tf.einsum('paijb,qaijb->pq', curr_du_1, curr_du_2) else: # Working with TT-tensor, not TT-matrix. right_size = tt_ranks[1] // 2 curr_core_1 = projected_tt_vectors_1.tt_cores[0] curr_core_2 = projected_tt_vectors_2.tt_cores[0] curr_du_1 = curr_core_1[:, :, :, :right_size] curr_du_2 = curr_core_2[:, :, :, :right_size] res = tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2) for core_idx in range(1, ndims): left_size = tt_ranks[core_idx] // 2 right_size = tt_ranks[core_idx + 1] // 2 curr_core_1 = projected_tt_vectors_1.tt_cores[core_idx] curr_core_2 = projected_tt_vectors_2.tt_cores[core_idx] curr_du_1 = curr_core_1[:, left_size:, :, :right_size] curr_du_2 = curr_core_2[:, left_size:, :, :right_size] res += tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2) left_size = tt_ranks[-2] // 2 curr_core_1 = projected_tt_vectors_1.tt_cores[-1] curr_core_2 = projected_tt_vectors_2.tt_cores[-1] curr_du_1 = curr_core_1[:, left_size:, :, :] curr_du_2 = curr_core_2[:, left_size:, :, :] res += tf.einsum('paib,qaib->pq', curr_du_1, curr_du_2) return res def add_n_projected(tt_objects, coef=None): """Adds all input TT-objects that are projections on the same tangent space. add_projected((a, b)) is equivalent add(a, b) for a and b that are from the same tangent space, but doesn't increase the TT-ranks. Args: tt_objects: a list of TT-objects that are projections on the same tangent space. coef: a list of numbers or anything else convertable to tf.Tensor. If provided, computes weighted sum. 
      The size of this array should be
      len(tt_objects) x tt_objects[0].batch_size

  Returns:
    A TT-object representing the sum of the tt_objects (a weighted sum if
    coef is provided). The TT-ranks of the result equal the TT-ranks of the
    arguments.
  """
  for tt in tt_objects:
    if not hasattr(tt, 'projection_on'):
      raise ValueError('All tt_objects should be projections on the tangent '
                       'space of some other TT-object. All projection* functions '
                       'leave .projection_on field in the resulting TT-object '
                       'which is not present in the arguments you\'ve provided.')

  projection_on = tt_objects[0].projection_on
  for tt in tt_objects[1:]:
    if tt.projection_on != projection_on:
      raise ValueError('All tt_objects should be projections on the tangent '
                       'space of the same TT-object. The provided arguments are '
                       'projections on different TT-objects (%s and %s). Or at '
                       'least the pointers are different.' %
                       (tt.projection_on, projection_on))
  if coef is not None:
    coef = tf.convert_to_tensor(coef, dtype=tt_objects[0].dtype)
    if coef.get_shape().ndims > 1:
      # In the batch case we need to multiply each core by these coefficients
      # along the first axis. To do so, reshape the coefs to match the number
      # of dimensions of the TT-cores.
      some_core = tt_objects[0].tt_cores[0]
      dim_array = [1] * (some_core.get_shape().ndims + 1)
      dim_array[0] = coef.get_shape()[0].value
      dim_array[1] = coef.get_shape()[1].value
      coef = tf.reshape(coef, dim_array)

  ndims = tt_objects[0].ndims()
  tt_ranks = shapes.lazy_tt_ranks(tt_objects[0])
  left_rank_dim = tt_objects[0].left_tt_rank_dim
  right_rank_dim = tt_objects[0].right_tt_rank_dim
  res_cores = []

  def slice_tt_core(tt_core, left_idx, right_idx):
    num_tt_core_dims = len(tt_core.get_shape())
    idx = [slice(None)] * num_tt_core_dims
    idx[left_rank_dim] = left_idx
    idx[right_rank_dim] = right_idx
    return tt_core[idx]

  right_half_rank = tt_ranks[1] // 2
  left_chunks = []
  for obj_idx, tt in enumerate(tt_objects):
    curr_core = slice_tt_core(tt.tt_cores[0], slice(None),
                              slice(0, right_half_rank))
    if coef is not None:
      curr_core *= coef[obj_idx]
    left_chunks.append(curr_core)
  left_part = tf.add_n(left_chunks)
  first_obj_core = tt_objects[0].tt_cores[0]
  right_part = slice_tt_core(first_obj_core, slice(None),
                             slice(right_half_rank, None))
  first_core = tf.concat((left_part, right_part), axis=right_rank_dim)
  res_cores.append(first_core)

  for core_idx in range(1, ndims - 1):
    first_obj_core = tt_objects[0].tt_cores[core_idx]
    left_half_rank = tt_ranks[core_idx] // 2
    right_half_rank = tt_ranks[core_idx + 1] // 2

    # The non-delta blocks coincide for all the summands, so take them from
    # the first object.
    upper_part = slice_tt_core(first_obj_core, slice(0, left_half_rank),
                               slice(None))
    lower_right_part = slice_tt_core(first_obj_core,
                                     slice(left_half_rank, None),
                                     slice(right_half_rank, None))

    lower_left_chunks = []
    for obj_idx, tt in enumerate(tt_objects):
      curr_core = slice_tt_core(tt.tt_cores[core_idx],
                                slice(left_half_rank, None),
                                slice(0, right_half_rank))
      if coef is not None:
        curr_core *= coef[obj_idx]
      lower_left_chunks.append(curr_core)
    lower_left_part = tf.add_n(lower_left_chunks)
    lower_part = tf.concat((lower_left_part, lower_right_part),
                           axis=right_rank_dim)
    curr_core = tf.concat((upper_part, lower_part), axis=left_rank_dim)
    res_cores.append(curr_core)

  left_half_rank = tt_ranks[ndims - 1] // 2
  upper_part = slice_tt_core(tt_objects[0].tt_cores[-1],
                             slice(0, left_half_rank), slice(None))
  lower_chunks = []
  for obj_idx, tt in enumerate(tt_objects):
    curr_core = slice_tt_core(tt.tt_cores[-1], slice(left_half_rank, None),
                              slice(None))
    if coef is not None:
      curr_core *= coef[obj_idx]
    lower_chunks.append(curr_core)
  lower_part = tf.add_n(lower_chunks)
  last_core = tf.concat((upper_part, lower_part), axis=left_rank_dim)
  res_cores.append(last_core)

  raw_shape = tt_objects[0].get_raw_shape()
  static_tt_ranks = tt_objects[0].get_tt_ranks()
  if isinstance(tt_objects[0], TensorTrain):
    res = TensorTrain(res_cores, raw_shape, static_tt_ranks)
  elif isinstance(tt_objects[0], TensorTrainBatch):
    res = TensorTrainBatch(res_cores, raw_shape, static_tt_ranks,
                           tt_objects[0].batch_size)
  # Maintain the projection_on property.
  res.projection_on = tt_objects[0].projection_on
  return res


def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'):
  """Convert an element of the tangent space to deltas representation.

  Tangent space elements (outputs of t3f.project) look like:
    dP1 V2 ... Vd + U1 dP2 V3 ... Vd + ... + U1 ... Ud-1 dPd.

  This function takes as input an element of the tangent space and converts
  it to the list of deltas [dP1, ..., dPd].

  Args:
    tt: `TensorTrain` or `TensorTrainBatch` that is a result of t3f.project,
      t3f.project_matmul, or other similar functions.
    name: string, name of the Op.

  Returns:
    A list of delta-cores (tf.Tensors).
  """
  if not hasattr(tt, 'projection_on') or tt.projection_on is None:
    raise ValueError('tt argument is supposed to be a projection, but it '
                     'lacks the projection_on field')
  num_dims = tt.ndims()
  left_tt_rank_dim = tt.left_tt_rank_dim
  right_tt_rank_dim = tt.right_tt_rank_dim
  deltas = [None] * num_dims
  tt_ranks = shapes.lazy_tt_ranks(tt)
  for i in range(1, num_dims - 1):
    if int(tt_ranks[i] / 2) != tt_ranks[i] / 2:
      raise ValueError('tt argument is supposed to be a projection, but its '
                       'ranks are not even.')
  with tf.name_scope(name, values=tt.tt_cores):
    for i in range(1, num_dims - 1):
      r1, r2 = tt_ranks[i], tt_ranks[i + 1]
      curr_core = tt.tt_cores[i]
      slc = [slice(None)] * len(curr_core.shape)
      slc[left_tt_rank_dim] = slice(int(r1 / 2), None)
      slc[right_tt_rank_dim] = slice(0, int(r2 / 2))
      deltas[i] = curr_core[slc]
    slc = [slice(None)] * len(tt.tt_cores[0].shape)
    slc[right_tt_rank_dim] = slice(0, int(tt_ranks[1] / 2))
    deltas[0] = tt.tt_cores[0][slc]
    slc = [slice(None)] * len(tt.tt_cores[0].shape)
    slc[left_tt_rank_dim] = slice(int(tt_ranks[-2] / 2), None)
    deltas[num_dims - 1] = tt.tt_cores[num_dims - 1][slc]
  return deltas


def deltas_to_tangent_space(deltas, tt, left=None, right=None,
                            name='t3f_deltas_to_tangent_space'):
  """Converts the deltas representation of a tangent-space vector to a TT object.

  Takes as input a list of [dP1, ..., dPd] and returns
    dP1 V2 ... Vd + U1 dP2 V3 ... Vd + ... + U1 ... Ud-1 dPd.

  This function is hard to use correctly because deltas should obey the
  so-called gauge conditions. If they don't, the function will silently return
  an incorrect result. This is why this function is not imported in __init__.

  Args:
    deltas: a list of deltas (essentially TT-cores) obeying the gauge
      conditions.
    tt: `TensorTrain` object on whose tangent space the tensor represented by
      deltas is defined.
    left: t3f.orthogonalize_tt_cores(tt). If you have it already computed, you
      may pass it as an argument to avoid recomputing.
    right: t3f.orthogonalize_tt_cores(left, left_to_right=False). If you have
      it already computed, you may pass it as an argument to avoid recomputing.
    name: string, name of the Op.

  Returns:
    `TensorTrain` object constructed from deltas, that is from the tangent
    space at point `tt`.
  """
  cores = []
  dtype = tt.dtype
  num_dims = tt.ndims()
  # TODO: add a cache instead of manually passing precomputed stuff?
  input_tensors = list(tt.tt_cores) + list(deltas)
  if left is not None:
    input_tensors += list(left.tt_cores)
  if right is not None:
    input_tensors += list(right.tt_cores)
  with tf.name_scope(name, values=input_tensors):
    if left is None:
      left = decompositions.orthogonalize_tt_cores(tt)
    if right is None:
      right = decompositions.orthogonalize_tt_cores(left, left_to_right=False)
    left_tangent_tt_ranks = shapes.lazy_tt_ranks(left)
    right_tangent_tt_ranks = shapes.lazy_tt_ranks(right)
    raw_shape = shapes.lazy_raw_shape(left)
    right_rank_dim = left.right_tt_rank_dim
    left_rank_dim = left.left_tt_rank_dim
    is_batch_case = len(deltas[0].shape) > len(tt.tt_cores[0].shape)
    if is_batch_case:
      right_rank_dim += 1
      left_rank_dim += 1
      batch_size = deltas[0].shape.as_list()[0]
    for i in range(num_dims):
      left_tt_core = left.tt_cores[i]
      right_tt_core = right.tt_cores[i]
      if is_batch_case:
        tile = [1] * len(left_tt_core.shape)
        tile = [batch_size] + tile
        left_tt_core = tf.tile(left_tt_core[None, ...], tile)
        right_tt_core = tf.tile(right_tt_core[None, ...], tile)

      if i == 0:
        tangent_core = tf.concat((deltas[i], left_tt_core),
                                 axis=right_rank_dim)
      elif i == num_dims - 1:
        tangent_core = tf.concat((right_tt_core, deltas[i]),
                                 axis=left_rank_dim)
      else:
        rank_1 = right_tangent_tt_ranks[i]
        rank_2 = left_tangent_tt_ranks[i + 1]
        if tt.is_tt_matrix():
          mode_size_n = raw_shape[0][i]
          mode_size_m = raw_shape[1][i]
          shape = [rank_1, mode_size_n, mode_size_m, rank_2]
        else:
          mode_size_n = raw_shape[0][i]
          shape = [rank_1, mode_size_n, rank_2]
        if is_batch_case:
          shape = [batch_size] + shape
        zeros = tf.zeros(shape, dtype=dtype)
        upper = tf.concat((right_tt_core, zeros), axis=right_rank_dim)
        lower = tf.concat((deltas[i], left_tt_core), axis=right_rank_dim)
        tangent_core = tf.concat((upper, lower), axis=left_rank_dim)
      cores.append(tangent_core)
    if is_batch_case:
      tangent = TensorTrainBatch(cores, batch_size=batch_size)
    else:
      tangent = TensorTrain(cores)
    tangent.projection_on = tt
    return tangent
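# --- Illustrative sketch (not part of the file above) ---
# A minimal, hedged round-trip through the tangent-space helpers defined
# above. It assumes the t3f package these functions ship in (with
# t3f.random_tensor and t3f.project exposed); shapes and ranks are arbitrary.
#
# import t3f
# where = t3f.random_tensor((4, 4, 4), tt_rank=3)
# what = t3f.random_tensor((4, 4, 4), tt_rank=5)
# projected = t3f.project(what, where)         # result has TT-ranks 2 * 3
# deltas = tangent_space_to_deltas(projected)  # [dP1, dP2, dP3]
# same_element = deltas_to_tangent_space(deltas, where)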
bfs.rs
use std::collections::{HashMap, VecDeque}; use crate::state; use crate::types; pub struct
{
    pub turns: Vec<types::Turn>,
    pub nodes: usize,
    pub bench: std::time::Duration,
}

pub fn search(cube: &state::Cube) -> Path {
    if cube.is_solved() {
        return Path {
            turns: vec![],
            nodes: 0,
            bench: std::time::Duration::default(),
        }
    }

    let now = std::time::Instant::now();
    let mut queue: VecDeque<state::Cube> = VecDeque::new();
    let mut cache: HashMap<state::Cube, (state::Cube, types::Turn)> = HashMap::new();
    let mut nodes = 0;

    queue.push_back(*cube);
    // Seed the cache with a self-referencing entry for the start state; the
    // placeholder turn is never emitted because path reconstruction stops here.
    cache.insert(*cube, (*cube, (types::Face::U, types::Spin::CW).into()));

    while let Some(cube) = queue.pop_front() {
        nodes += 1;
        if cube.is_solved() {
            // Reconstruct the turn sequence by walking parent links back to
            // the start state, prepending each recorded turn.
            let mut back = &cache[&cube];
            let mut turns = Vec::new();
            turns.insert(0, back.1);
            while let Some(next) = cache.get(&back.0) {
                if next.0 == back.0 {
                    break
                }
                turns.insert(0, next.1);
                back = next;
            }
            return Path {
                turns,
                nodes,
                bench: std::time::Instant::now() - now,
            }
        }
        // Expand every face turn, skipping states that were already reached.
        for turn in types::Turn::all() {
            let mut next = cube.clone();
            next.rotate(turn);
            if cache.contains_key(&next) {
                continue
            }
            cache.insert(next, (cube, turn));
            queue.push_back(next);
        }
    }

    unreachable!()
}
Path
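// --- Illustrative sketch (not part of the file above) ---
// Hypothetical use of `search`; it assumes `state::Cube` offers a default
// solved state and the `rotate` method used above (the constructor name is a
// guess, not confirmed by this file).
//
// fn main() {
//     let mut cube = state::Cube::default();
//     cube.rotate((types::Face::R, types::Spin::CW).into());
//     let path = search(&cube);
//     // A single scramble turn should be undone by a one-turn solution.
//     assert_eq!(path.turns.len(), 1);
//     println!("explored {} nodes in {:?}", path.nodes, path.bench);
// }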
gyp_main.py
#!/usr/bin/env python

# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import sys
import subprocess

PY3 = bytes != str


# The IsCygwin() function below is copied from pylib/gyp/common.py.
def
():
  try:
    out = subprocess.Popen("uname",
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    stdout, _ = out.communicate()
    if PY3:
      stdout = stdout.decode("utf-8")
    return "CYGWIN" in str(stdout)
  except Exception:
    return False


def UnixifyPath(path):
  try:
    if not IsCygwin():
      return path
    out = subprocess.Popen(["cygpath", "-u", path],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    stdout, _ = out.communicate()
    if PY3:
      stdout = stdout.decode("utf-8")
    return str(stdout)
  except Exception:
    return path


# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system. Also convert to a Unix-style path on Cygwin
# systems; otherwise the 'gyp' library will not be found.
path = UnixifyPath(sys.argv[0])
sys.path.insert(0, os.path.join(os.path.dirname(path), 'pylib'))
import gyp

if __name__ == '__main__':
  sys.exit(gyp.script_main())
IsCygwin
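# --- Illustrative sketch (not part of the file above) ---
# What UnixifyPath does in practice (the paths below are hypothetical
# examples): on Cygwin, `cygpath -u` maps a Windows path to its POSIX form,
# e.g. "C:\\src\\gyp\\gyp_main.py" -> "/cygdrive/c/src/gyp/gyp_main.py";
# on any other platform the input path comes back unchanged.
#
# print(UnixifyPath(sys.argv[0]))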
tempfile.go
package tempfile import ( "errors" "os" "strconv" "sync" "time" ) // Creator maintains the state of a pseudo-random number generator // used to create temp files. type Creator struct { mu sync.Mutex idum uint32 // Pseudo-random number generator state. } // NewCreator returns a new Creator, for creating temp files. func
() *Creator {
	return &Creator{idum: uint32(time.Now().UnixNano())}
}

// Fast "quick and dirty" linear congruential (pseudo-random) number
// generator from Numerical Recipes. Excerpt here:
// https://www.unf.edu/~cwinton/html/cop4300/s09/class.notes/LCGinfo.pdf
// This is the same algorithm as used in the ioutil.TempFile Go standard
// library function.
func (c *Creator) ranqd1() string {
	c.mu.Lock()
	c.idum = c.idum*1664525 + 1013904223
	r := c.idum
	c.mu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

// flags opens the file read/write and insists on creating it anew.
const flags = os.O_RDWR | os.O_CREATE | os.O_EXCL

// EndMode is the mode of a complete, fully written temp file.
const EndMode = 0666

// wipMode marks a work-in-progress file via the setgid bit.
const wipMode = EndMode | os.ModeSetgid

var errNoTempfile = errors.New("failed to create a temp file")

// Create attempts to create a file whose name is of the form
// <base>-<randomstring> and with a ".v1" suffix if `legacy` is
// true. The file will be created with the setgid bit set, which
// indicates that it is not complete. The *os.File is returned
// along with the random string, and an error if something went
// wrong.
//
// Once the file has been successfully written by the caller, it
// should be chmod'ed to `EndMode` to mark it as complete.
func (c *Creator) Create(base string, legacy bool) (*os.File, string, error) {
	var err error
	var f *os.File
	var name string
	var random string
	for i := 0; i < 10000; i++ {
		random = c.ranqd1()
		if legacy {
			name = base + "-" + random + ".v1"
		} else {
			name = base + "-" + random
		}
		f, err = os.OpenFile(name, flags, wipMode)
		if err == nil {
			return f, random, nil
		}
		if os.IsExist(err) {
			// Tempfile collision. Try again.
			continue
		}
		// Unexpected error.
		return nil, "", err
	}
	return nil, "", errNoTempfile
}
NewCreator
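// --- Illustrative sketch (not part of the file above) ---
// Hypothetical caller of Creator.Create following the workflow the doc
// comment describes; the base path is an assumption.
//
//	c := NewCreator()
//	f, random, err := c.Create("/var/cache/app/object", false)
//	if err != nil {
//		// handle errNoTempfile or an unexpected OS error
//	}
//	// ... write the contents to f ...
//	f.Chmod(EndMode) // clear the setgid bit: the file is now complete
//	_ = random       // e.g. record the random suffix in an index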
generate-route.spec.ts
// tslint:disable:max-line-length import * as fs from 'fs-extra'; import { expect } from 'chai'; import * as path from 'path'; const ng = require('../helpers/ng'); const tmp = require('../helpers/tmp'); const root = process.cwd(); const testPath = path.join(root, 'tmp', 'foo', 'src', 'app'); function fileExpectations(expectation: boolean) { const dir = 'my-route';
expect(fs.pathExistsSync(path.join(testPath, dir, 'my-route.component.ts'))) .to.equal(expectation); } xdescribe('Acceptance: ng generate route', function () { beforeEach(function () { return tmp.setup('./tmp').then(function () { process.chdir('./tmp'); }).then(function () { return ng(['new', 'foo', '--skip-install']); }); }); afterEach(function () { this.timeout(10000); return tmp.teardown('./tmp'); }); it('ng generate route my-route', function () { return ng(['generate', 'route', 'my-route']).then(() => { fileExpectations(true); }); }); it('ng generate route +my-route', function () { return ng(['generate', 'route', '+my-route']).then(() => { fileExpectations(true); }); }); it('ng generate route +my-route/my-other', () => { return ng(['generate', 'route', '+my-route']) .then(() => ng(['generate', 'route', '+my-route/my-other', '--default'])) .then(() => ng(['generate', 'route', '+my-route/+my-other/my-third', '--default'])) .then(() => { expect(fs.pathExistsSync(path.join(testPath, '+my-route/my-route.component.ts'))) .to.equal(true); expect(fs.pathExistsSync(path.join(testPath, '+my-route/+my-other/my-other.component.ts'))) .to.equal(true); expect(fs.pathExistsSync(path.join(testPath, '+my-route/+my-other/+my-third/my-third.component.ts'))) .to.equal(true); const appContent = fs.readFileSync(path.join(testPath, 'foo.component.ts'), 'utf-8'); const myRouteContent = fs.readFileSync(path.join(testPath, '+my-route/my-route.component.ts'), 'utf-8'); const myOtherRouteContent = fs.readFileSync(path.join(testPath, '+my-route/+my-other/my-other.component.ts'), 'utf-8'); const myThirdRouteContent = fs.readFileSync(path.join(testPath, '+my-route/+my-other/+my-third/my-third.component.ts'), 'utf-8'); expect(appContent).to.match(/@Routes\(\[[\s\S]+\/\+my-route\/\.\.\.[\s\S]+\]\)/m); expect(myRouteContent).to.match(/@Routes\(\[[\s\S]+\/my-other\/\.\.\.[\s\S]+\]\)/m); expect(myOtherRouteContent).to.match(/@Routes\(\[[\s\S]+\/my-third[^\.][\s\S]+\]\)/m); expect(myThirdRouteContent).to.not.include('@Routes'); }); }); it('ng generate route details --path /details/:id', () => { return ng(['generate', 'route', 'details', '--path', '/details/:id']) .then(() => { const appContent = fs.readFileSync(path.join(testPath, 'foo.component.ts'), 'utf-8'); expect(appContent).to.match(/path: '\/details\/:id'/m); }); }); it('ng generate route my-route --dry-run does not modify files', () => { const parentComponentPath = path.join(testPath, 'foo.component.ts'); const parentComponentHtmlPath = path.join(testPath, 'foo.component.html'); const unmodifiedParentComponent = fs.readFileSync(parentComponentPath, 'utf8'); const unmodifiedParentComponentHtml = fs.readFileSync(parentComponentHtmlPath, 'utf8'); return ng(['generate', 'route', 'my-route', '--dry-run']).then(() => { const afterGenerateParentComponent = fs.readFileSync(parentComponentPath, 'utf8'); const afterGenerateParentHtml = fs.readFileSync(parentComponentHtmlPath, 'utf8'); expect(afterGenerateParentComponent).to.equal(unmodifiedParentComponent); expect(afterGenerateParentHtml).to.equal(unmodifiedParentComponentHtml); }); }); });
volume_endpoint.go
package server

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/golang/glog"
	"github.com/openebs/maya/types/v1"
	policies_v1 "github.com/openebs/maya/volume/policies/v1"
	"github.com/openebs/maya/volume/provisioners"
)

const (
	// NamespaceKey is used in request headers to get the
	// namespace
	NamespaceKey string = "namespace"
)

// VolumeSpecificRequest is an http handler implementation. It deals with HTTP
// requests w.r.t a single Volume.
//
// TODO
//    Should it return specific types rather than interface{} ?
func (s *HTTPServer) volumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	fmt.Println("[DEBUG] Processing", req.Method, "request")

	switch req.Method {
	case "PUT", "POST":
		return s.volumeAdd(resp, req)
	case "GET":
		return s.volumeSpecificGetRequest(resp, req)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}

// VolumeSpecificGetRequest deals with HTTP GET request w.r.t a single Volume
func (s *HTTPServer) volumeSpecificGetRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Extract info from path after trimming
	path := strings.TrimPrefix(req.URL.Path, "/latest/volumes")

	// Is the request valid?
	if path == req.URL.Path {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	switch {
	case strings.Contains(path, "/info/"):
		volName := strings.TrimPrefix(path, "/info/")
		return s.volumeRead(resp, req, volName)
	case strings.Contains(path, "/delete/"):
		volName := strings.TrimPrefix(path, "/delete/")
		return s.volumeDelete(resp, req, volName)
	case path == "/":
		return s.volumeList(resp, req)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}

// VolumeList is the http handler that lists Volumes
func (s *HTTPServer) volumeList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	glog.Infof("Processing Volume list request")

	// Get the namespace if provided
	ns := ""
	if req != nil {
		ns = req.Header.Get(NamespaceKey)
	}

	if ns == "" {
		// We override with a default if empty. This is simple enough and
		// works for most of the use cases.
		// Otherwise we would need logic to pick a default namespace
		// depending on the operation type.
		ns = v1.DefaultNamespaceForListOps
	}

	// Create a Volume
	vol := &v1.Volume{}
	vol.Namespace = ns

	// Pass through the policy enforcement logic
	policy, err := policies_v1.VolumeGenericPolicy()
	if err != nil {
		return nil, err
	}

	vol, err = policy.Enforce(vol)
	if err != nil {
		return nil, err
	}

	// Get the persistent volume provisioner instance
	pvp, err := provisioners.GetVolumeProvisioner(nil)
	if err != nil {
		return nil, err
	}

	// Set the volume provisioner profile to provisioner
	_, err = pvp.Profile(vol)
	if err != nil {
		return nil, err
	}

	lister, ok, err := pvp.Lister()
	if err != nil {
		return nil, err
	}

	if !ok {
		return nil, fmt.Errorf("Volume list is not supported by '%s:%s'", pvp.Label(), pvp.Name())
	}

	l, err := lister.List()
	if err != nil {
		return nil, err
	}

	glog.Infof("Processed Volume list request successfully")

	return l, nil
}

// VolumeRead is the http handler that fetches the details of a Volume
func (s *HTTPServer) volumeRead(resp http.ResponseWriter, req *http.Request, volName string) (*v1.Volume, error) {
	glog.Infof("Processing Volume read request")

	if volName == "" {
		return nil, CodedError(400, "Volume name is missing")
	}

	// Get the namespace if provided
	ns := ""
	if req != nil {
		ns = req.Header.Get(NamespaceKey)
	}

	// Create a Volume
	vol := &v1.Volume{}
	vol.Name = volName
	vol.Namespace = ns

	// Pass through the policy enforcement logic
	policy, err := policies_v1.VolumeGenericPolicy()
	if err != nil {
		return nil, err
	}

	vol, err = policy.Enforce(vol)
	if err != nil
// Get persistent volume provisioner instance
	pvp, err := provisioners.GetVolumeProvisioner(nil)
	if err != nil {
		return nil, err
	}

	// Set the volume provisioner profile to provisioner
	_, err = pvp.Profile(vol)
	if err != nil {
		return nil, err
	}

	reader, ok := pvp.Reader()
	if !ok {
		return nil, fmt.Errorf("Volume read is not supported by '%s:%s'", pvp.Label(), pvp.Name())
	}

	// TODO
	// vol should not be passed again !!
	details, err := reader.Read(vol)
	if err != nil {
		return nil, err
	}

	if details == nil {
		return nil, CodedError(404, fmt.Sprintf("Volume '%s' not found", volName))
	}

	glog.Infof("Processed Volume read request successfully for '" + volName + "'")

	return details, nil
}

// VolumeDelete is the http handler that deletes a Volume
func (s *HTTPServer) volumeDelete(resp http.ResponseWriter, req *http.Request, volName string) (interface{}, error) {
	glog.Infof("Processing Volume delete request")

	if volName == "" {
		return nil, CodedError(400, "Volume name is missing")
	}

	// Get the namespace if provided
	ns := ""
	if req != nil {
		ns = req.Header.Get(NamespaceKey)
	}

	// Create a Volume
	vol := &v1.Volume{}
	vol.Name = volName
	vol.Namespace = ns

	// Pass through the policy enforcement logic
	policy, err := policies_v1.VolumeGenericPolicy()
	if err != nil {
		return nil, err
	}

	vol, err = policy.Enforce(vol)
	if err != nil {
		return nil, err
	}

	// Get the persistent volume provisioner instance
	pvp, err := provisioners.GetVolumeProvisioner(nil)
	if err != nil {
		return nil, err
	}

	// Set the volume provisioner profile
	_, err = pvp.Profile(vol)
	if err != nil {
		return nil, err
	}

	remover, ok, err := pvp.Remover()
	if err != nil {
		return nil, err
	}

	if !ok {
		return nil, fmt.Errorf("Volume delete is not supported by '%s:%s'", pvp.Label(), pvp.Name())
	}

	removed, err := remover.Remove()
	if err != nil {
		return nil, err
	}

	// If there was no error & still no removal
	if !removed {
		return nil, CodedError(404, fmt.Sprintf("Volume '%s' not found", volName))
	}

	glog.Infof("Processed Volume delete request successfully for '" + volName + "'")

	return fmt.Sprintf("Volume '%s' deleted successfully", volName), nil
}

// VolumeAdd is the http handler that creates a Volume
func (s *HTTPServer) volumeAdd(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	glog.Infof("Processing Volume add request")

	vol := &v1.Volume{}

	// The yaml/json spec is decoded to vol struct
	if err := decodeBody(req, vol); err != nil {
		return nil, CodedError(400, err.Error())
	}

	// Name is expected to be available even in the minimalist specs
	if vol.Name == "" {
		return nil, CodedError(400, fmt.Sprintf("Volume name missing in '%v'", vol))
	}

	// Pass through the policy enforcement logic
	policy, err := policies_v1.VolumeAddPolicy()
	if err != nil {
		return nil, err
	}

	vol, err = policy.Enforce(vol)
	if err != nil {
		return nil, err
	}

	// Get persistent volume provisioner instance
	pvp, err := provisioners.GetVolumeProvisioner(nil)
	if err != nil {
		return nil, err
	}

	// Set the volume provisioner profile to provisioner
	_, err = pvp.Profile(vol)
	if err != nil {
		return nil, err
	}

	adder, ok := pvp.Adder()
	if !ok {
		return nil, fmt.Errorf("Volume add operation is not supported by '%s:%s'", pvp.Label(), pvp.Name())
	}

	// TODO
	// vol should not be passed again !!
	details, err := adder.Add(vol)
	if err != nil {
		return nil, err
	}

	glog.Infof("Processed Volume add request successfully for '" + vol.Name + "'")

	return details, nil
}
{ return nil, err }
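// --- Illustrative sketch (not part of the file above) ---
// Hypothetical client call against the GET routes defined above; the host,
// port and volume name are assumptions, while the path layout and the
// "namespace" header come from the handler code.
//
//	req, _ := http.NewRequest("GET",
//		"http://127.0.0.1:5656/latest/volumes/info/demo-vol", nil)
//	req.Header.Set(NamespaceKey, "default")
//	resp, err := http.DefaultClient.Do(req)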
patch-compiler_rustc__codegen__ssa_src_back_linker.rs
$NetBSD: patch-compiler_rustc__codegen__ssa_src_back_linker.rs,v 1.2 2021/09/10 15:09:32 jperkin Exp $ Do not use @rpath on Darwin. --- compiler/rustc_codegen_ssa/src/back/linker.rs.orig 2021-02-10 17:36:44.000000000 +0000 +++ compiler/rustc_codegen_ssa/src/back/linker.rs
// the right `-Wl,-install_name` with an `@rpath` in it. if self.sess.opts.cg.rpath || self.sess.opts.debugging_opts.osx_rpath_install_name { self.linker_arg("-install_name"); - let mut v = OsString::from("@rpath/"); + let mut v = OsString::from("@PREFIX@/lib/"); v.push(out_filename.file_name().unwrap()); self.linker_arg(&v); }
@@ -242,7 +242,7 @@ impl<'a> GccLinker<'a> {
dns.py
# -*- coding: utf-8 -*- """Module containing logic for dns based detectors.""" import socket import logging from .base import IPDetector, AF_INET, AF_INET6, AF_UNSPEC LOG = logging.getLogger(__name__) def resolve(hostname, family=AF_UNSPEC): """ Resolve hostname to one or more IP addresses through the operating system. Resolution is carried out for the given address family. If no address family is specified, only IPv4 and IPv6 addresses are returned. If multiple IP addresses are found, all are returned. :param family: AF_INET or AF_INET6 or AF_UNSPEC (default) :return: tuple of unique IP addresses """ af_ok = (AF_INET, AF_INET6) if family != AF_UNSPEC and family not in af_ok: raise ValueError("Invalid family '%s'" % family) ips = () try: addrinfo = socket.getaddrinfo(hostname, None, family) except socket.gaierror as exc: # EAI_NODATA and EAI_NONAME are expected if this name is not (yet) # present in DNS if exc.errno not in (socket.EAI_NODATA, socket.EAI_NONAME): LOG.debug("socket.getaddrinfo() raised an exception", exc_info=exc) else: if family == AF_UNSPEC: ips = tuple({item[4][0] for item in addrinfo if item[0] in af_ok}) else: ips = tuple({item[4][0] for item in addrinfo}) return ips class
(IPDetector):
    """Class to resolve a hostname using socket.getaddrinfo()."""

    configuration_key = "dns"

    def __init__(self, hostname=None, family=None, *args, **kwargs):
        """
        Initialize.

        :param hostname: host name to query from DNS
        :param family: IP address family (default: '' (ANY), also possible:
            'INET', 'INET6')
        """
        super(IPDetector_DNS, self).__init__(*args, family=family, **kwargs)

        self.opts_hostname = hostname

        if self.opts_hostname is None:
            raise ValueError(
                "IPDetector_DNS(): a hostname to be queried in DNS must be specified!")

    def can_detect_offline(self):
        """Return False, as this detector generates DNS traffic.

        :return: False
        """
        return False

    def detect(self):
        """
        Resolve the hostname to an IP address through the operating system.

        Depending on the 'family' option, either IPv4 or IPv6 resolution is
        carried out.

        If multiple IP addresses are found, the first one is returned.

        :return: IP address
        """
        theip = next(iter(resolve(self.opts_hostname, self.opts_family)), None)
        self.set_current_value(theip)
        return theip
IPDetector_DNS
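# --- Illustrative sketch (not part of the file above) ---
# Hypothetical use of the detector and the module-level resolve() helper;
# the hostname and the returned addresses are assumptions.
#
# detector = IPDetector_DNS(hostname="myhost.example.com", family="INET")
# print(detector.detect())  # e.g. '203.0.113.7', or None if unresolved
# print(resolve("localhost", family=AF_INET))  # typically ('127.0.0.1',)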
error.rs
use std::path::PathBuf; use thiserror::Error; /// Describes the potential error conditions that might arise from rsgit [`Repo`] operations.
/// /// [`Repo`]: trait.Repo.html #[derive(Debug, Error)] pub enum Error { #[error("work_dir doesn't exist `{0}`")] WorkDirDoesntExist(PathBuf), #[error("git_dir doesn't exist `{0}`")] GitDirDoesntExist(PathBuf), #[error("git_dir shouldn't exist `{0}`")] GitDirShouldntExist(PathBuf), #[error(transparent)] IoError(#[from] std::io::Error), #[error(transparent)] OtherError(#[from] Box<dyn std::error::Error>), } /// A specialized [`Result`] type for rsgit [`Repo`] operations. /// /// [`Repo`]: trait.Repo.html /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html pub type Result<T> = std::result::Result<T, Error>;
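// --- Illustrative sketch (not part of the file above) ---
// Hypothetical function using the `Result` alias; the validation logic is an
// assumption, not part of this crate.
//
// use std::path::Path;
//
// fn check_work_dir(p: &Path) -> Result<()> {
//     if !p.exists() {
//         return Err(Error::WorkDirDoesntExist(p.to_path_buf()));
//     }
//     Ok(())
// }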
client_test.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package github import ( "bytes" "context" "crypto/tls" "encoding/base64" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/http/httptest" "net/url" "reflect" "strconv" "strings" "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/diff" "k8s.io/test-infra/ghproxy/ghcache" ) type testTime struct { now time.Time slept time.Duration } func (tt *testTime) Sleep(d time.Duration) { tt.slept = d } func (tt *testTime) Until(t time.Time) time.Duration { return t.Sub(tt.now) } func getClient(url string) *client { getToken := func() []byte { return []byte("") } logger := logrus.New() logger.SetLevel(logrus.DebugLevel) return &client{ logger: logrus.NewEntry(logger), delegate: &delegate{ time: &testTime{}, getToken: getToken, censor: func(content []byte) []byte { return content }, client: &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, }, bases: []string{url}, maxRetries: defaultMaxRetries, max404Retries: defaultMax404Retries, initialDelay: defaultInitialDelay, maxSleepTime: defaultMaxSleepTime, }, } } func TestRequestRateLimit(t *testing.T) { tc := &testTime{now: time.Now()} ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if tc.slept == 0 { w.Header().Set("X-RateLimit-Remaining", "0") w.Header().Set("X-RateLimit-Reset", strconv.Itoa(int(tc.now.Add(time.Second).Unix()))) http.Error(w, "403 Forbidden", http.StatusForbidden) } })) defer ts.Close() c := getClient(ts.URL) c.time = tc resp, err := c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { t.Errorf("Expected status code 200, got %d", resp.StatusCode) } else if tc.slept < time.Second { t.Errorf("Expected to sleep for at least a second, got %v", tc.slept) } } func TestAbuseRateLimit(t *testing.T) { tc := &testTime{now: time.Now()} ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if tc.slept == 0 { w.Header().Set("Retry-After", "1") http.Error(w, "403 Forbidden", http.StatusForbidden) } })) defer ts.Close() c := getClient(ts.URL) c.time = tc resp, err := c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { t.Errorf("Expected status code 200, got %d", resp.StatusCode) } else if tc.slept < time.Second { t.Errorf("Expected to sleep for at least a second, got %v", tc.slept) } } func TestRetry404(t *testing.T) { tc := &testTime{now: time.Now()} ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if tc.slept == 0 { http.Error(w, "404 Not Found", http.StatusNotFound) } })) defer ts.Close() c := getClient(ts.URL) c.time = tc resp, err := c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { 
t.Errorf("Expected status code 200, got %d", resp.StatusCode) } } func TestRetryBase(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) defer ts.Close() c := getClient(ts.URL) c.initialDelay = time.Microsecond // One good endpoint: c.bases = []string{c.bases[0]} resp, err := c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { t.Errorf("Expected status code 200, got %d", resp.StatusCode) } // Bad endpoint followed by good endpoint: c.bases = []string{"not-a-valid-base", c.bases[0]} resp, err = c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { t.Errorf("Expected status code 200, got %d", resp.StatusCode) } // One bad endpoint: c.bases = []string{"not-a-valid-base"} resp, err = c.requestRetry(http.MethodGet, "/", "", nil) if err == nil { t.Error("Expected an error from a request to an invalid base, but succeeded!?") } } func TestBotName(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/user" { t.Errorf("Bad request path: %s", r.URL.Path) } fmt.Fprint(w, "{\"login\": \"wowza\"}") })) c := getClient(ts.URL) botName, err := c.BotName() if err != nil { t.Errorf("Didn't expect error: %v", err) } else if botName != "wowza" { t.Errorf("Wrong bot name. Got %s, expected wowza.", botName) } ts.Close() botName, err = c.BotName() if err != nil { t.Errorf("Didn't expect error: %v", err) } else if botName != "wowza" { t.Errorf("Wrong bot name. Got %s, expected wowza.", botName) } } func TestIsMember(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/orgs/k8s/members/person" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "204 No Content", http.StatusNoContent) })) defer ts.Close() c := getClient(ts.URL) mem, err := c.IsMember("k8s", "person") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if !mem { t.Errorf("Should be member.") } } func TestCreateComment(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/comments" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ic IssueComment if err := json.Unmarshal(b, &ic); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if ic.Body != "hello" { t.Errorf("Wrong body: %s", ic.Body) } http.Error(w, "201 Created", http.StatusCreated) })) defer ts.Close() c := getClient(ts.URL) if err := c.CreateComment("k8s", "kuber", 5, "hello"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestCreateCommentCensored(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/comments" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ic IssueComment if err := 
json.Unmarshal(b, &ic); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if ic.Body != "CENSORED" { t.Errorf("Wrong body: %s", ic.Body) } http.Error(w, "201 Created", http.StatusCreated) })) defer ts.Close() c := getClient(ts.URL) c.delegate.censor = func(content []byte) []byte { return bytes.ReplaceAll(content, []byte("hello"), []byte("CENSORED")) } if err := c.CreateComment("k8s", "kuber", 5, "hello"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestCreateCommentReaction(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/comments/5/reactions" { t.Errorf("Bad request path: %s", r.URL.Path) } if r.Header.Get("Accept") != "application/vnd.github.squirrel-girl-preview" { t.Errorf("Bad Accept header: %s", r.Header.Get("Accept")) } http.Error(w, "201 Created", http.StatusCreated) })) defer ts.Close() c := getClient(ts.URL) if err := c.CreateCommentReaction("k8s", "kuber", 5, "+1"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestDeleteComment(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/comments/123" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "204 No Content", http.StatusNoContent) })) defer ts.Close() c := getClient(ts.URL) if err := c.DeleteComment("k8s", "kuber", 123); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestGetPullRequest(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/pulls/12" { t.Errorf("Bad request path: %s", r.URL.Path) } pr := PullRequest{ User: User{Login: "bla"}, } b, err := json.Marshal(&pr) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) pr, err := c.GetPullRequest("k8s", "kuber", 12) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if pr.User.Login != "bla" { t.Errorf("Wrong user: %s", pr.User.Login) } } func TestGetPullRequestChanges(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/pulls/12/files" { t.Errorf("Bad request path: %s", r.URL.Path) } changes := []PullRequestChange{ {Filename: "foo.txt"}, } b, err := json.Marshal(&changes) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) cs, err := c.GetPullRequestChanges("k8s", "kuber", 12) if err != nil { t.Errorf("Didn't expect error: %v", err) } if len(cs) != 1 || cs[0].Filename != "foo.txt" { t.Errorf("Wrong result: %#v", cs) } } func TestGetRef(t *testing.T) { testCases := []struct { name string githubResponse []byte expectedSHA string expectedError string expectedErrorType error }{ { name: "single ref", githubResponse: []byte(`{"object": {"sha":"abcde"}}`), expectedSHA: "abcde", }, { name: "multiple refs, no match", githubResponse: []byte(` [ { "ref": "refs/heads/feature-a", "node_id": "MDM6UmVmcmVmcy9oZWFkcy9mZWF0dXJlLWE=", "url": 
"https://api.github.com/repos/octocat/Hello-World/git/refs/heads/feature-a", "object": { "type": "commit", "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" } }, { "ref": "refs/heads/feature-b", "node_id": "MDM6UmVmcmVmcy9oZWFkcy9mZWF0dXJlLWI=", "url": "https://api.github.com/repos/octocat/Hello-World/git/refs/heads/feature-b", "object": { "type": "commit", "sha": "612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac", "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac" } } ]`), expectedError: "query for org/repo ref \"heads/branch\" didn't match one but multiple refs: [refs/heads/feature-a refs/heads/feature-b]", expectedErrorType: GetRefTooManyResultsError{}, }, { name: "multiple refs with match", githubResponse: []byte(` [ { "ref": "refs/heads/branch", "node_id": "MDM6UmVmcmVmcy9oZWFkcy9mZWF0dXJlLWE=", "url": "https://api.github.com/repos/octocat/Hello-World/git/refs/heads/feature-a", "object": { "type": "commit", "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" } }, { "ref": "refs/heads/feature-b", "node_id": "MDM6UmVmcmVmcy9oZWFkcy9mZWF0dXJlLWI=", "url": "https://api.github.com/repos/octocat/Hello-World/git/refs/heads/feature-b", "object": { "type": "commit", "sha": "612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac", "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac" } } ]`), expectedSHA: "aa218f56b14c9653891f9e74264a383fa43fefbd", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } expectedPath := "/repos/org/repo/git/refs/heads/branch" if r.URL.Path != expectedPath { t.Errorf("expected path %s, got path %s", expectedPath, r.URL.Path) } w.Write(tc.githubResponse) })) defer ts.Close() c := getClient(ts.URL) var errMsg string sha, err := c.GetRef("org", "repo", "heads/branch") if err != nil { errMsg = err.Error() } if errMsg != tc.expectedError { t.Fatalf("expected error %q, got error %q", tc.expectedError, err) } if !errors.Is(err, tc.expectedErrorType) { t.Errorf("expected error of type %T, got %T", tc.expectedErrorType, err) } if sha != tc.expectedSHA { t.Errorf("expected sha %q, got sha %q", tc.expectedSHA, sha) } }) } } func TestDeleteRef(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/git/refs/heads/my-feature" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "204 No Content", http.StatusNoContent) })) defer ts.Close() c := getClient(ts.URL) if err := c.DeleteRef("k8s", "kuber", "heads/my-feature"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestGetSingleCommit(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/octocat/Hello-World/commits/6dcb09b5b57875f334f61aebed695e2e4193db5e" { t.Errorf("Bad request path: %s", r.URL.Path) } fmt.Fprint(w, `{ "commit": { "tree": { "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e" 
} } }`) })) defer ts.Close() c := getClient(ts.URL) commit, err := c.GetSingleCommit("octocat", "Hello-World", "6dcb09b5b57875f334f61aebed695e2e4193db5e") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if commit.Commit.Tree.SHA != "6dcb09b5b57875f334f61aebed695e2e4193db5e" { t.Errorf("Wrong tree-hash: %s", commit.Commit.Tree.SHA) } } func TestCreateStatus(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/statuses/abcdef" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var s Status if err := json.Unmarshal(b, &s); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if s.Context != "c" { t.Errorf("Wrong context: %s", s.Context) } http.Error(w, "201 Created", http.StatusCreated) })) defer ts.Close() c := getClient(ts.URL) if err := c.CreateStatus("k8s", "kuber", "abcdef", Status{ Context: "c", }); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestListIssues(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path == "/repos/k8s/kuber/issues" { ics := []Issue{{Number: 1}} b, err := json.Marshal(ics) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) fmt.Fprint(w, string(b)) } else if r.URL.Path == "/someotherpath" { ics := []Issue{{Number: 2}} b, err := json.Marshal(ics) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) ics, err := c.ListOpenIssues("k8s", "kuber") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(ics) != 2 { t.Errorf("Expected two issues, found %d: %v", len(ics), ics) } else if ics[0].Number != 1 || ics[1].Number != 2 { t.Errorf("Wrong issue IDs: %v", ics) } } func TestListIssueComments(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path == "/repos/k8s/kuber/issues/15/comments" { ics := []IssueComment{{ID: 1}} b, err := json.Marshal(ics) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) fmt.Fprint(w, string(b)) } else if r.URL.Path == "/someotherpath" { ics := []IssueComment{{ID: 2}} b, err := json.Marshal(ics) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) ics, err := c.ListIssueComments("k8s", "kuber", 15) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(ics) != 2 { t.Errorf("Expected two issues, found %d: %v", len(ics), ics) } else if ics[0].ID != 1 || ics[1].ID != 2 { t.Errorf("Wrong issue IDs: %v", ics) } } func TestAddLabel(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/labels" 
{ t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ls []string if err := json.Unmarshal(b, &ls); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ls) != 1 { t.Errorf("Wrong length labels: %v", ls) } else if ls[0] != "yay" { t.Errorf("Wrong label: %s", ls[0]) } })) defer ts.Close() c := getClient(ts.URL) if err := c.AddLabel("k8s", "kuber", 5, "yay"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestRemoveLabel(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/labels/yay" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "204 No Content", http.StatusNoContent) })) defer ts.Close() c := getClient(ts.URL) if err := c.RemoveLabel("k8s", "kuber", 5, "yay"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestRemoveLabelFailsOnOtherThan404(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/labels/yay" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "403 Forbidden", http.StatusForbidden) })) defer ts.Close() c := getClient(ts.URL) err := c.RemoveLabel("k8s", "kuber", 5, "yay") if err == nil { t.Errorf("Expected error but got none") } } func TestRemoveLabelNotFound(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, `{"message": "Label does not exist"}`, 404) })) defer ts.Close() c := getClient(ts.URL) err := c.RemoveLabel("any", "old", 3, "label") if err != nil { t.Fatalf("RemoveLabel expected no error, got one: %v", err) } } func TestNewNotFoundIsNotFound(t *testing.T) { if !IsNotFound(NewNotFound()) { t.Error("NewNotFound didn't return an error that was considered a NotFound") } } func TestIsNotFound(t *testing.T) { testCases := []struct { name string code int body string isNotFound bool }{ { name: "should be not found when status code is 404", code: 404, body: `{"message":"not found","errors":[{"resource":"fake resource","field":"fake field","code":"404","message":"status code 404"}]}`, isNotFound: true, }, { name: "should not be not found when status code is 200", code: 200, body: `{"message": "ok"}`, isNotFound: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, tc.body, tc.code) })) defer ts.Close() c := getClient(ts.URL) code, _, err := c.requestRaw(&request{ method: http.MethodGet, path: fmt.Sprintf("/repos/%s/%s/branches/%s/protection", "org", "repo", "branch"), exitCodes: []int{200}, }) if code != tc.code { t.Fatalf("Expected code to be %d, but got %d", tc.code, code) } isNotFound := IsNotFound(err) if isNotFound != tc.isNotFound { t.Fatalf("Expected isNotFound to be %t, but got %t", tc.isNotFound, isNotFound) } }) } } func TestIsNotFound_nested(t *testing.T) { t.Parallel() testCases := []struct { name string err error expectMatch bool }{ { name: "direct match", err: requestError{ClientError: ClientError{Errors: []clientErrorSubError{{Message: "status code 404"}}}}, expectMatch: true, }, { name: "direct, no match", err: requestError{ClientError: 
ClientError{Errors: []clientErrorSubError{{Message: "status code 403"}}}}, expectMatch: false, }, { name: "nested match", err: fmt.Errorf("wrapping: %w", requestError{ClientError: ClientError{Errors: []clientErrorSubError{{Message: "status code 404"}}}}), expectMatch: true, }, { name: "nested, no match", err: fmt.Errorf("wrapping: %w", requestError{ClientError: ClientError{Errors: []clientErrorSubError{{Message: "status code 403"}}}}), expectMatch: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if result := IsNotFound(tc.err); result != tc.expectMatch { t.Errorf("expected match: %t, got match: %t", tc.expectMatch, result) } }) } } func TestAssignIssue(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/assignees" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string][]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if len(ps["assignees"]) == 3 { if ps["assignees"][0] != "george" || ps["assignees"][1] != "jungle" || ps["assignees"][2] != "not-in-the-org" { t.Errorf("Wrong assignees: %v", ps) } } else if len(ps["assignees"]) == 2 { if ps["assignees"][0] != "george" || ps["assignees"][1] != "jungle" { t.Errorf("Wrong assignees: %v", ps) } } else { t.Errorf("Wrong assignees length: %v", ps) } w.WriteHeader(http.StatusCreated) json.NewEncoder(w).Encode(Issue{ Assignees: []User{{Login: "george"}, {Login: "jungle"}, {Login: "ignore-other"}}, }) })) defer ts.Close() c := getClient(ts.URL) if err := c.AssignIssue("k8s", "kuber", 5, []string{"george", "jungle"}); err != nil { t.Errorf("Unexpected error: %v", err) } if err := c.AssignIssue("k8s", "kuber", 5, []string{"george", "jungle", "not-in-the-org"}); err == nil { t.Errorf("Expected an error") } else if merr, ok := err.(MissingUsers); ok { if len(merr.Users) != 1 || merr.Users[0] != "not-in-the-org" { t.Errorf("Expected [not-in-the-org], not %v", merr.Users) } } else { t.Errorf("Expected MissingUsers error") } } func TestUnassignIssue(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5/assignees" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string][]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if len(ps["assignees"]) == 3 { if ps["assignees"][0] != "george" || ps["assignees"][1] != "jungle" || ps["assignees"][2] != "perma-assignee" { t.Errorf("Wrong assignees: %v", ps) } } else if len(ps["assignees"]) == 2 { if ps["assignees"][0] != "george" || ps["assignees"][1] != "jungle" { t.Errorf("Wrong assignees: %v", ps) } } else { t.Errorf("Wrong assignees length: %v", ps) } json.NewEncoder(w).Encode(Issue{ Assignees: []User{{Login: "perma-assignee"}, {Login: "ignore-other"}}, }) })) defer ts.Close() c := getClient(ts.URL) if err := c.UnassignIssue("k8s", "kuber", 5, []string{"george", "jungle"}); err != nil 
{ t.Errorf("Unexpected error: %v", err) } if err := c.UnassignIssue("k8s", "kuber", 5, []string{"george", "jungle", "perma-assignee"}); err == nil { t.Errorf("Expected an error") } else if merr, ok := err.(ExtraUsers); ok { if len(merr.Users) != 1 || merr.Users[0] != "perma-assignee" { t.Errorf("Expected [perma-assignee], not %v", merr.Users) } } else { t.Errorf("Expected ExtraUsers error") } } func TestReadPaginatedResults(t *testing.T) { type response struct { labels []Label next string } cases := []struct { name string baseSuffix string initialPath string responses map[string]response expectedLabels []Label }{ { name: "regular pagination", initialPath: "/label/foo", responses: map[string]response{ "/label/foo": { labels: []Label{{Name: "foo"}}, next: `<blorp>; rel="first", <https://%s/label/bar>; rel="next"`, }, "/label/bar": { labels: []Label{{Name: "bar"}}, }, }, expectedLabels: []Label{{Name: "foo"}, {Name: "bar"}}, }, { name: "pagination with /api/v3 base suffix", initialPath: "/label/foo", baseSuffix: "/api/v3", responses: map[string]response{ "/api/v3/label/foo": { labels: []Label{{Name: "foo"}}, next: `<blorp>; rel="first", <https://%s/api/v3/label/bar>; rel="next"`, }, "/api/v3/label/bar": { labels: []Label{{Name: "bar"}}, }, }, expectedLabels: []Label{{Name: "foo"}, {Name: "bar"}}, }, } for _, tc := range cases { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if response, ok := tc.responses[r.URL.Path]; ok { b, err := json.Marshal(response.labels) if err != nil { t.Fatalf("Didn't expect error: %v", err) } if response.next != "" { w.Header().Set("Link", fmt.Sprintf(response.next, r.Host)) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) c.bases[0] = c.bases[0] + tc.baseSuffix var labels []Label err := c.readPaginatedResults( tc.initialPath, "", func() interface{} { return &[]Label{} }, func(obj interface{}) { labels = append(labels, *(obj.(*[]Label))...) 
}, ) if err != nil { t.Errorf("%s: didn't expect error: %v", tc.name, err) } else { if !reflect.DeepEqual(labels, tc.expectedLabels) { t.Errorf("%s: expected %s, got %s", tc.name, tc.expectedLabels, labels) } } } } func TestListPullRequestComments(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path == "/repos/k8s/kuber/pulls/15/comments" { prcs := []ReviewComment{{ID: 1}} b, err := json.Marshal(prcs) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) fmt.Fprint(w, string(b)) } else if r.URL.Path == "/someotherpath" { prcs := []ReviewComment{{ID: 2}} b, err := json.Marshal(prcs) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) prcs, err := c.ListPullRequestComments("k8s", "kuber", 15) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(prcs) != 2 { t.Errorf("Expected two comments, found %d: %v", len(prcs), prcs) } else if prcs[0].ID != 1 || prcs[1].ID != 2 { t.Errorf("Wrong issue IDs: %v", prcs) } } func TestListReviews(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path == "/repos/k8s/kuber/pulls/15/reviews" { reviews := []Review{{ID: 1}} b, err := json.Marshal(reviews) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) fmt.Fprint(w, string(b)) } else if r.URL.Path == "/someotherpath" { reviews := []Review{{ID: 2}} b, err := json.Marshal(reviews) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) reviews, err := c.ListReviews("k8s", "kuber", 15) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(reviews) != 2 { t.Errorf("Expected two reviews, found %d: %v", len(reviews), reviews) } else if reviews[0].ID != 1 || reviews[1].ID != 2 { t.Errorf("Wrong review IDs: %v", reviews) } } func TestPrepareReviewersBody(t *testing.T) { var tests = []struct { name string logins []string expectedBody map[string][]string }{ { name: "one reviewer", logins: []string{"george"}, expectedBody: map[string][]string{"reviewers": {"george"}}, }, { name: "three reviewers", logins: []string{"george", "jungle", "chimp"}, expectedBody: map[string][]string{"reviewers": {"george", "jungle", "chimp"}}, }, { name: "one team", logins: []string{"kubernetes/sig-testing-misc"}, expectedBody: map[string][]string{"team_reviewers": {"sig-testing-misc"}}, }, { name: "two teams", logins: []string{"kubernetes/sig-testing-misc", "kubernetes/sig-testing-bugs"}, expectedBody: map[string][]string{"team_reviewers": {"sig-testing-misc", "sig-testing-bugs"}}, }, { name: "one team not in org", logins: []string{"kubernetes/sig-testing-misc", "other-org/sig-testing-bugs"}, expectedBody: map[string][]string{"team_reviewers": {"sig-testing-misc"}}, }, { name: "mixed single", logins: []string{"george", "kubernetes/sig-testing-misc"}, expectedBody: map[string][]string{"reviewers": {"george"}, "team_reviewers": {"sig-testing-misc"}}, }, { 
name: "mixed multiple", logins: []string{"george", "kubernetes/sig-testing-misc", "kubernetes/sig-testing-bugs", "jungle", "chimp"}, expectedBody: map[string][]string{"reviewers": {"george", "jungle", "chimp"}, "team_reviewers": {"sig-testing-misc", "sig-testing-bugs"}}, }, } for _, test := range tests { body, _ := prepareReviewersBody(test.logins, "kubernetes") if !reflect.DeepEqual(body, test.expectedBody) { t.Errorf("%s: got %s instead of %s", test.name, body, test.expectedBody) } } } func TestRequestReview(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/pulls/5/requested_reviewers" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string][]string if err := json.Unmarshal(b, &ps); err != nil { t.Fatalf("Could not unmarshal request: %v", err) } if len(ps) < 1 || len(ps) > 2 { t.Fatalf("Wrong length patch: %v", ps) } if sets.NewString(ps["reviewers"]...).Has("not-a-collaborator") { w.WriteHeader(http.StatusUnprocessableEntity) return } requestedReviewers := []User{} for _, reviewers := range ps { for _, reviewer := range reviewers { requestedReviewers = append(requestedReviewers, User{Login: reviewer}) } } w.WriteHeader(http.StatusCreated) json.NewEncoder(w).Encode(PullRequest{ RequestedReviewers: requestedReviewers, }) })) defer ts.Close() c := getClient(ts.URL) if err := c.RequestReview("k8s", "kuber", 5, []string{"george", "jungle"}); err != nil { t.Errorf("Unexpected error: %v", err) } if err := c.RequestReview("k8s", "kuber", 5, []string{"george", "jungle", "k8s/team1"}); err != nil { t.Errorf("Unexpected error: %v", err) } if err := c.RequestReview("k8s", "kuber", 5, []string{"george", "jungle", "not-a-collaborator"}); err == nil { t.Errorf("Expected an error") } else if merr, ok := err.(MissingUsers); ok { if len(merr.Users) != 1 || merr.Users[0] != "not-a-collaborator" { t.Errorf("Expected [not-a-collaborator], not %v", merr.Users) } } else { t.Errorf("Expected MissingUsers error") } if err := c.RequestReview("k8s", "kuber", 5, []string{"george", "jungle", "notk8s/team1"}); err == nil { t.Errorf("Expected an error") } else if merr, ok := err.(MissingUsers); ok { if len(merr.Users) != 1 || merr.Users[0] != "notk8s/team1" { t.Errorf("Expected [notk8s/team1], not %v", merr.Users) } } else { t.Errorf("Expected MissingUsers error") } } func TestUnrequestReview(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/pulls/5/requested_reviewers" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string][]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if len(ps["reviewers"]) == 3 { if ps["reviewers"][0] != "george" || ps["reviewers"][1] != "jungle" || ps["reviewers"][2] != "perma-reviewer" { t.Errorf("Wrong reviewers: %v", ps) } } else if len(ps["reviewers"]) == 2 { if ps["reviewers"][0] != "george" || ps["reviewers"][1] != "jungle" { t.Errorf("Wrong reviewers: %v", ps) } } else { t.Errorf("Wrong reviewers length: %v", ps) } 
json.NewEncoder(w).Encode(PullRequest{ RequestedReviewers: []User{{Login: "perma-reviewer"}, {Login: "ignore-other"}}, }) })) defer ts.Close() c := getClient(ts.URL) if err := c.UnrequestReview("k8s", "kuber", 5, []string{"george", "jungle"}); err != nil { t.Errorf("Unexpected error: %v", err) } if err := c.UnrequestReview("k8s", "kuber", 5, []string{"george", "jungle", "perma-reviewer"}); err == nil { t.Errorf("Expected an error") } else if merr, ok := err.(ExtraUsers); ok { if len(merr.Users) != 1 || merr.Users[0] != "perma-reviewer" { t.Errorf("Expected [perma-reviewer], not %v", merr.Users) } } else { t.Errorf("Expected ExtraUsers error") } } func TestCloseIssue(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if ps["state"] != "closed" { t.Errorf("Wrong state: %s", ps["state"]) } })) defer ts.Close() c := getClient(ts.URL) if err := c.CloseIssue("k8s", "kuber", 5); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestReopenIssue(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if ps["state"] != "open" { t.Errorf("Wrong state: %s", ps["state"]) } })) defer ts.Close() c := getClient(ts.URL) if err := c.ReopenIssue("k8s", "kuber", 5); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestClosePR(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/pulls/5" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if ps["state"] != "closed" { t.Errorf("Wrong state: %s", ps["state"]) } })) defer ts.Close() c := getClient(ts.URL) if err := c.ClosePR("k8s", "kuber", 5); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestReopenPR(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/pulls/5" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var ps map[string]string if err := json.Unmarshal(b, &ps); err != nil { t.Errorf("Could not unmarshal 
request: %v", err) } else if len(ps) != 1 { t.Errorf("Wrong length patch: %v", ps) } else if ps["state"] != "open" { t.Errorf("Wrong state: %s", ps["state"]) } })) defer ts.Close() c := getClient(ts.URL) if err := c.ReopenPR("k8s", "kuber", 5); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestFindIssues(t *testing.T) { cases := []struct { name string sort bool order bool }{ { name: "simple query", }, { name: "sort no order", sort: true, }, { name: "sort and order", sort: true, order: true, }, } issueNum := 5 ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/search/issues" { t.Errorf("Bad request path: %s", r.URL.Path) } issueList := IssuesSearchResult{ Total: 1, Issues: []Issue{ { Number: issueNum, Title: r.URL.RawQuery, }, }, } b, err := json.Marshal(&issueList) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) for _, tc := range cases { var result []Issue var err error sort := "" if tc.sort { sort = "sort-strategy" } if result, err = c.FindIssues("commit_hash", sort, tc.order); err != nil { t.Errorf("%s: didn't expect error: %v", tc.name, err) } if len(result) != 1 { t.Errorf("%s: unexpected number of results: %v", tc.name, len(result)) } if result[0].Number != issueNum { t.Errorf("%s: expected issue number %+v, got %+v", tc.name, issueNum, result[0].Number) } if tc.sort && !strings.Contains(result[0].Title, "sort="+sort) { t.Errorf("%s: missing sort=%s from query: %s", tc.name, sort, result[0].Title) } if tc.order && !strings.Contains(result[0].Title, "order=asc") { t.Errorf("%s: missing order=asc from query: %s", tc.name, result[0].Title) } } } func TestGetFile(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/contents/foo.txt" { t.Errorf("Bad request path: %s", r.URL.Path) } if r.URL.RawQuery != "" { t.Errorf("Bad request query: %s", r.URL.RawQuery) } c := &Content{ Content: base64.StdEncoding.EncodeToString([]byte("abcde")), } b, err := json.Marshal(&c) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) if content, err := c.GetFile("k8s", "kuber", "foo.txt", ""); err != nil { t.Errorf("Didn't expect error: %v", err) } else if string(content) != "abcde" { t.Errorf("Wrong content -- expect: abcde, got: %s", string(content)) } } func TestGetFileRef(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/contents/foo/bar.txt" { t.Errorf("Bad request path: %s", r.URL) } if r.URL.RawQuery != "ref=12345" { t.Errorf("Bad request query: %s", r.URL.RawQuery) } c := &Content{ Content: base64.StdEncoding.EncodeToString([]byte("abcde")), } b, err := json.Marshal(&c) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) if content, err := c.GetFile("k8s", "kuber", "foo/bar.txt", "12345"); err != nil { t.Errorf("Didn't expect error: %v", err) } else if string(content) != "abcde" { t.Errorf("Wrong content -- expect: abcde, got: %s", string(content)) } } // TestGetLabels tests both GetRepoLabels and 
GetIssueLabels. func TestGetLabels
(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } var labels []Label switch r.URL.Path { case "/repos/k8s/kuber/issues/5/labels": labels = []Label{{Name: "issue-label"}} w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) case "/repos/k8s/kuber/labels": labels = []Label{{Name: "repo-label"}} w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) case "/someotherpath": labels = []Label{{Name: "label2"}} default: t.Errorf("Bad request path: %s", r.URL.Path) return } b, err := json.Marshal(labels) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) labels, err := c.GetIssueLabels("k8s", "kuber", 5) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(labels) != 2 { t.Errorf("Expected two labels, found %d: %v", len(labels), labels) } else if labels[0].Name != "issue-label" || labels[1].Name != "label2" { t.Errorf("Wrong label names: %v", labels) } labels, err = c.GetRepoLabels("k8s", "kuber") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(labels) != 2 { t.Errorf("Expected two labels, found %d: %v", len(labels), labels) } else if labels[0].Name != "repo-label" || labels[1].Name != "label2" { t.Errorf("Wrong label names: %v", labels) } } func simpleTestServer(t *testing.T, path string, v interface{}) *httptest.Server { return httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == path { b, err := json.Marshal(v) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Fatalf("Bad request path: %s", r.URL.Path) } })) } func TestListTeams(t *testing.T) { ts := simpleTestServer(t, "/orgs/foo/teams", []Team{{ID: 1}}) defer ts.Close() c := getClient(ts.URL) teams, err := c.ListTeams("foo") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(teams) != 1 { t.Errorf("Expected one team, found %d: %v", len(teams), teams) } else if teams[0].ID != 1 { t.Errorf("Wrong team names: %v", teams) } } func TestCreateTeam(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/orgs/foo/teams" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var team Team switch err := json.Unmarshal(b, &team); { case err != nil: t.Errorf("Could not unmarshal request: %v", err) case team.Name == "": t.Errorf("client should reject empty names") case team.Name != "frobber": t.Errorf("Bad name: %s", team.Name) } team.Name = "hello" team.Description = "world" team.Privacy = "special" b, err = json.Marshal(team) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.WriteHeader(http.StatusCreated) // 201 fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) if _, err := c.CreateTeam("foo", Team{Name: ""}); err == nil { t.Errorf("client should reject empty name") } switch team, err := c.CreateTeam("foo", Team{Name: "frobber"}); { case err != nil: t.Errorf("unexpected error: %v", err) case team.Name != "hello": t.Errorf("bad name: %s", team.Name) case team.Description != "world": t.Errorf("bad description: %s", 
team.Description) case team.Privacy != "special": t.Errorf("bad privacy: %s", team.Privacy) } } func TestEditTeam(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/teams/63" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var team Team switch err := json.Unmarshal(b, &team); { case err != nil: t.Errorf("Could not unmarshal request: %v", err) case team.Name == "": t.Errorf("Bad name: %s", team.Name) } team.Name = "hello" team.Description = "world" team.Privacy = "special" b, err = json.Marshal(team) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.WriteHeader(http.StatusCreated) // 201 fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) if _, err := c.EditTeam(Team{ID: 0, Name: "frobber"}); err == nil { t.Errorf("client should reject id 0") } switch team, err := c.EditTeam(Team{ID: 63, Name: "frobber"}); { case err != nil: t.Errorf("unexpected error: %v", err) case team.Name != "hello": t.Errorf("bad name: %s", team.Name) case team.Description != "world": t.Errorf("bad description: %s", team.Description) case team.Privacy != "special": t.Errorf("bad privacy: %s", team.Privacy) } } func TestListTeamMembers(t *testing.T) { ts := simpleTestServer(t, "/teams/1/members", []TeamMember{{Login: "foo"}}) defer ts.Close() c := getClient(ts.URL) teamMembers, err := c.ListTeamMembers(1, RoleAll) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(teamMembers) != 1 { t.Errorf("Expected one team member, found %d: %v", len(teamMembers), teamMembers) } else if teamMembers[0].Login != "foo" { t.Errorf("Wrong team names: %v", teamMembers) } } func TestIsCollaborator(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/collaborators/person" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "204 No Content", http.StatusNoContent) })) defer ts.Close() c := getClient(ts.URL) mem, err := c.IsCollaborator("k8s", "kuber", "person") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if !mem { t.Errorf("Should be member.") } } func TestListCollaborators(t *testing.T) { ts := simpleTestServer(t, "/repos/org/repo/collaborators", []User{ {Login: "foo", Permissions: RepoPermissions{Pull: true}}, {Login: "bar", Permissions: RepoPermissions{Push: true}}, }) defer ts.Close() c := getClient(ts.URL) users, err := c.ListCollaborators("org", "repo") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(users) != 2 { t.Errorf("Expected two users, found %d: %v", len(users), users) return } if users[0].Login != "foo" { t.Errorf("Wrong user login for index 0: %v", users[0]) } if !reflect.DeepEqual(users[0].Permissions, RepoPermissions{Pull: true}) { t.Errorf("Wrong permissions for index 0: %v", users[0]) } if users[1].Login != "bar" { t.Errorf("Wrong user login for index 1: %v", users[1]) } if !reflect.DeepEqual(users[1].Permissions, RepoPermissions{Push: true}) { t.Errorf("Wrong permissions for index 1: %v", users[1]) } } func TestListRepoTeams(t *testing.T) { expectedTeams := []Team{ {ID: 1, Slug: "foo", Permission: RepoPull}, {ID: 2, Slug: "bar", Permission: RepoPush}, {ID: 3, Slug: "foobar", Permission: RepoAdmin}, } ts := 
simpleTestServer(t, "/repos/org/repo/teams", expectedTeams) defer ts.Close() c := getClient(ts.URL) teams, err := c.ListRepoTeams("org", "repo") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(teams) != 3 { t.Errorf("Expected three teams, found %d: %v", len(teams), teams) return } if !reflect.DeepEqual(teams, expectedTeams) { t.Errorf("Wrong list of teams, expected: %v, got: %v", expectedTeams, teams) } } func TestListIssueEvents(t *testing.T) { ts := simpleTestServer( t, "/repos/org/repo/issues/1/events", []ListedIssueEvent{ {Event: IssueActionLabeled}, {Event: IssueActionClosed}, }, ) defer ts.Close() c := getClient(ts.URL) events, err := c.ListIssueEvents("org", "repo", 1) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(events) != 2 { t.Errorf("Expected two events, found %d: %v", len(events), events) return } if events[0].Event != IssueActionLabeled { t.Errorf("Wrong event for index 0: %v", events[0]) } if events[1].Event != IssueActionClosed { t.Errorf("Wrong event for index 1: %v", events[1]) } } func TestThrottle(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/org/repo/issues/1/events" { b, err := json.Marshal([]ListedIssueEvent{{Event: IssueActionClosed}}) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else if r.URL.Path == "/repos/org/repo/issues/2/events" { w.Header().Set(ghcache.CacheModeHeader, string(ghcache.ModeRevalidated)) b, err := json.Marshal([]ListedIssueEvent{{Event: IssueActionOpened}}) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Fatalf("Bad request path: %s", r.URL.Path) } })) c := getClient(ts.URL) c.Throttle(1, 2) if c.client != &c.throttle { t.Errorf("Bad client %v, expecting %v", c.client, &c.throttle) } if len(c.throttle.throttle) != 2 { t.Fatalf("Expected two items in throttle channel, found %d", len(c.throttle.throttle)) } if cap(c.throttle.throttle) != 2 { t.Fatalf("Expected throttle channel capacity of two, found %d", cap(c.throttle.throttle)) } check := func(events []ListedIssueEvent, err error, expectedAction IssueEventAction) { if err != nil { t.Errorf("Unexpected error: %v", err) } if len(events) != 1 || events[0].Event != expectedAction { t.Errorf("Expected one %q event, found: %v", string(expectedAction), events) } if len(c.throttle.throttle) != 1 { t.Errorf("Expected one item in throttle channel, found %d", len(c.throttle.throttle)) } } events, err := c.ListIssueEvents("org", "repo", 1) check(events, err, IssueActionClosed) // The following 2 calls should be refunded. events, err = c.ListIssueEvents("org", "repo", 2) check(events, err, IssueActionOpened) events, err = c.ListIssueEvents("org", "repo", 2) check(events, err, IssueActionOpened) // Check that calls are delayed while throttled. 
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) go func() { if _, err := c.ListIssueEvents("org", "repo", 1); err != nil { t.Errorf("Unexpected error: %v", err) } if _, err := c.ListIssueEvents("org", "repo", 1); err != nil { t.Errorf("Unexpected error: %v", err) } cancel() }() slowed := false for ctx.Err() == nil { // Wait for the client to get throttled if atomic.LoadInt32(&c.throttle.slow) == 0 { continue } // Throttled, now add to the channel slowed = true select { case c.throttle.throttle <- time.Now(): // Add items to the channel case <-ctx.Done(): } } if !slowed { t.Errorf("Never throttled") } if err := ctx.Err(); err != context.Canceled { t.Errorf("Expected context cancellation did not happen: %v", err) } } func TestGetBranches(t *testing.T) { ts := simpleTestServer(t, "/repos/org/repo/branches", []Branch{ {Name: "master", Protected: false}, {Name: "release-3.7", Protected: true}, }) defer ts.Close() c := getClient(ts.URL) branches, err := c.GetBranches("org", "repo", true) if err != nil { t.Errorf("Unexpected error: %v", err) } else if len(branches) != 2 { t.Errorf("Expected two branches, found %d, %v", len(branches), branches) return } switch { case branches[0].Name != "master": t.Errorf("Wrong branch name for index 0: %v", branches[0]) case branches[1].Name != "release-3.7": t.Errorf("Wrong branch name for index 1: %v", branches[1]) case branches[1].Protected == false: t.Errorf("Wrong branch protection for index 1: %v", branches[1]) } } func TestGetBranchProtection(t *testing.T) { contexts := []string{"foo-pr-test", "other"} pushers := []Team{{Slug: "movers"}, {Slug: "awesome-team"}, {Slug: "shakers"}} ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/org/repo/branches/master/protection" { t.Errorf("Bad request path: %s", r.URL.Path) } bp := BranchProtection{ RequiredStatusChecks: &RequiredStatusChecks{ Contexts: contexts, }, Restrictions: &Restrictions{ Teams: pushers, }, } b, err := json.Marshal(&bp) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) bp, err := c.GetBranchProtection("org", "repo", "master") if err != nil { t.Errorf("Didn't expect error: %v", err) } switch { case bp.Restrictions == nil: t.Errorf("RestrictionsRequest unset") case bp.Restrictions.Teams == nil: t.Errorf("Teams unset") case len(bp.Restrictions.Teams) != len(pushers): t.Errorf("Bad teams: expected %v, got: %v", pushers, bp.Restrictions.Teams) case bp.RequiredStatusChecks == nil: t.Errorf("RequiredStatusChecks unset") case len(bp.RequiredStatusChecks.Contexts) != len(contexts): t.Errorf("Bad contexts: expected: %v, got: %v", contexts, bp.RequiredStatusChecks.Contexts) default: mc := map[string]bool{} for _, k := range bp.RequiredStatusChecks.Contexts { mc[k] = true } var missing []string for _, k := range contexts { if mc[k] != true { missing = append(missing, k) } } if n := len(missing); n > 0 { t.Errorf("missing %d required contexts: %v", n, missing) } mp := map[string]bool{} for _, k := range bp.Restrictions.Teams { mp[k.Slug] = true } missing = nil for _, k := range pushers { if mp[k.Slug] != true { missing = append(missing, k.Slug) } } if n := len(missing); n > 0 { t.Errorf("missing %d pushers: %v", n, missing) } } } // GetBranchProtection should return nil if the github API call // returns 404 with "Branch not protected" message func 
TestGetBranchProtection404BranchNotProtected(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/org/repo/branches/master/protection" { t.Errorf("Bad request path: %s", r.URL.Path) } ge := &githubError{ Message: "Branch not protected", } b, err := json.Marshal(&ge) if err != nil { t.Fatalf("Didn't expect error: %v", err) } http.Error(w, string(b), http.StatusNotFound) })) defer ts.Close() c := getClient(ts.URL) bp, err := c.GetBranchProtection("org", "repo", "master") if err != nil { t.Errorf("Unexpected error: %v", err) } if bp != nil { t.Errorf("Expected nil as BranchProtection object, got: %v", *bp) } } // GetBranchProtection should fail on any 404 which is NOT due to // branch not being protected. func TestGetBranchProtectionFailsOnOther404(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/org/repo/branches/master/protection" { t.Errorf("Bad request path: %s", r.URL.Path) } ge := &githubError{ Message: "Not Found", } b, err := json.Marshal(&ge) if err != nil { t.Fatalf("Didn't expect error: %v", err) } http.Error(w, string(b), http.StatusNotFound) })) defer ts.Close() c := getClient(ts.URL) _, err := c.GetBranchProtection("org", "repo", "master") if err == nil { t.Errorf("Expected error, got nil") } } func TestRemoveBranchProtection(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodDelete { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/org/repo/branches/master/protection" { t.Errorf("Bad request path: %s", r.URL.Path) } http.Error(w, "204 No Content", http.StatusNoContent) })) defer ts.Close() c := getClient(ts.URL) if err := c.RemoveBranchProtection("org", "repo", "master"); err != nil { t.Errorf("Unexpected error: %v", err) } } func TestUpdateBranchProtection(t *testing.T) { cases := []struct { name string // TODO(fejta): expand beyond contexts/pushers contexts []string pushers []string err bool }{ { name: "both", contexts: []string{"foo-pr-test", "other"}, pushers: []string{"movers", "awesome-team", "shakers"}, err: false, }, } for _, tc := range cases { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPut { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/org/repo/branches/master/protection" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var bpr BranchProtectionRequest if err := json.Unmarshal(b, &bpr); err != nil { t.Errorf("Could not unmarshal request: %v", err) } switch { case bpr.Restrictions != nil && bpr.Restrictions.Teams == nil: t.Errorf("Teams unset") case len(bpr.RequiredStatusChecks.Contexts) != len(tc.contexts): t.Errorf("Bad contexts: %v", bpr.RequiredStatusChecks.Contexts) case len(*bpr.Restrictions.Teams) != len(tc.pushers): t.Errorf("Bad teams: %v", *bpr.Restrictions.Teams) default: mc := map[string]bool{} for _, k := range tc.contexts { mc[k] = true } var missing []string for _, k := range bpr.RequiredStatusChecks.Contexts { if mc[k] != true { missing = append(missing, k) } } if n := len(missing); n > 0 { t.Errorf("%s: missing %d required contexts: %v", tc.name, n, missing) } mp := 
map[string]bool{} for _, k := range tc.pushers { mp[k] = true } missing = nil for _, k := range *bpr.Restrictions.Teams { if mp[k] != true { missing = append(missing, k) } } if n := len(missing); n > 0 { t.Errorf("%s: missing %d pushers: %v", tc.name, n, missing) } } http.Error(w, "200 OK", http.StatusOK) })) defer ts.Close() c := getClient(ts.URL) err := c.UpdateBranchProtection("org", "repo", "master", BranchProtectionRequest{ RequiredStatusChecks: &RequiredStatusChecks{ Contexts: tc.contexts, }, Restrictions: &RestrictionsRequest{ Teams: &tc.pushers, }, }) if tc.err && err == nil { t.Errorf("%s: expected error failed to occur", tc.name) } if !tc.err && err != nil { t.Errorf("%s: received unexpected error: %v", tc.name, err) } } } func TestClearMilestone(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var issue Issue if err := json.Unmarshal(b, &issue); err != nil { t.Errorf("Could not unmarshal request: %v", err) } else if issue.Milestone.Title != "" { t.Errorf("Milestone title not empty: %v", issue.Milestone.Title) } })) defer ts.Close() c := getClient(ts.URL) if err := c.ClearMilestone("k8s", "kuber", 5); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestSetMilestone(t *testing.T) { newMilestone := 42 ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/issues/5" { t.Errorf("Bad request path: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var issue struct { Milestone *int `json:"milestone,omitempty"` } if err := json.Unmarshal(b, &issue); err != nil { t.Fatalf("Could not unmarshal request: %v", err) } if issue.Milestone == nil { t.Fatal("Milestone was not set.") } if *issue.Milestone != newMilestone { t.Errorf("Expected milestone to be set to %d, but got %d.", newMilestone, *issue.Milestone) } })) defer ts.Close() c := getClient(ts.URL) if err := c.SetMilestone("k8s", "kuber", 5, newMilestone); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestListMilestones(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/milestones" { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) if _, err := c.ListMilestones("k8s", "kuber"); err != nil { t.Errorf("Didn't expect error: %v", err) } } func TestListPRCommits(t *testing.T) { ts := simpleTestServer(t, "/repos/theorg/therepo/pulls/3/commits", []RepositoryCommit{ {SHA: "sha"}, {SHA: "sha2"}, }) defer ts.Close() c := getClient(ts.URL) if commits, err := c.ListPRCommits("theorg", "therepo", 3); err != nil { t.Errorf("Didn't expect error: %v", err) } else { if len(commits) != 2 { t.Errorf("Expected 2 commits to be returned, but got %d", len(commits)) } } } func TestCombinedStatus(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path == 
"/repos/k8s/kuber/commits/SHA/status" { statuses := CombinedStatus{ SHA: "SHA", Statuses: []Status{{Context: "foo"}}, } b, err := json.Marshal(statuses) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) fmt.Fprint(w, string(b)) } else if r.URL.Path == "/someotherpath" { statuses := CombinedStatus{ SHA: "SHA", Statuses: []Status{{Context: "bar"}}, } b, err := json.Marshal(statuses) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) combined, err := c.GetCombinedStatus("k8s", "kuber", "SHA") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if combined.SHA != "SHA" { t.Errorf("Expected SHA 'SHA', found %s", combined.SHA) } else if len(combined.Statuses) != 2 { t.Errorf("Expected two statuses, found %d: %v", len(combined.Statuses), combined.Statuses) } else if combined.Statuses[0].Context != "foo" || combined.Statuses[1].Context != "bar" { t.Errorf("Wrong review IDs: %v", combined.Statuses) } } func TestCreateRepo(t *testing.T) { org := "org" usersRepoName := "users-repository" orgsRepoName := "orgs-repository" repoDesc := "description of users-repository" testCases := []struct { description string isUser bool repo RepoCreateRequest statusCode int expectError bool expectRepo *FullRepo }{ { description: "create repo as user", isUser: true, repo: RepoCreateRequest{ RepoRequest: RepoRequest{ Name: &usersRepoName, Description: &repoDesc, }, }, statusCode: http.StatusCreated, expectRepo: &FullRepo{ Repo: Repo{ Name: "users-repository", Description: "CREATED", }, }, }, { description: "create repo as org", isUser: false, repo: RepoCreateRequest{ RepoRequest: RepoRequest{ Name: &orgsRepoName, Description: &repoDesc, }, }, statusCode: http.StatusCreated, expectRepo: &FullRepo{ Repo: Repo{ Name: "orgs-repository", Description: "CREATED", }, }, }, { description: "errors are handled", isUser: false, repo: RepoCreateRequest{ RepoRequest: RepoRequest{ Name: &orgsRepoName, Description: &repoDesc, }, }, statusCode: http.StatusForbidden, expectError: true, }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if tc.isUser && r.URL.Path != "/user/repos" { t.Errorf("Bad request path to create user-owned repo: %s", r.URL.Path) } else if !tc.isUser && r.URL.Path != "/orgs/org/repos" { t.Errorf("Bad request path to create org-owned repo: %s", r.URL.Path) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var repo Repo switch err := json.Unmarshal(b, &repo); { case err != nil: t.Errorf("Could not unmarshal request: %v", err) case repo.Name == "": t.Errorf("client should reject empty names") } repo.Description = "CREATED" b, err = json.Marshal(repo) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.WriteHeader(tc.statusCode) // 201 fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) switch repo, err := c.CreateRepo(org, tc.isUser, tc.repo); { case err != nil && !tc.expectError: t.Errorf("unexpected error: %v", err) case err == nil && tc.expectError: t.Errorf("expected error, but got none") case err == nil && !reflect.DeepEqual(repo, tc.expectRepo): t.Errorf("%s: repo differs from 
expected:\n%s", tc.description, diff.ObjectReflectDiff(tc.expectRepo, repo)) } }) } } func TestUpdateRepo(t *testing.T) { org := "org" repoName := "repository" yes := true testCases := []struct { description string repo RepoUpdateRequest statusCode int expectError bool expectRepo *FullRepo }{ { description: "Update repository", repo: RepoUpdateRequest{ RepoRequest: RepoRequest{ Name: &repoName, }, Archived: &yes, }, statusCode: http.StatusOK, expectRepo: &FullRepo{ Repo: Repo{ Name: "repository", Description: "UPDATED", Archived: true, }, }, }, { description: "errors are handled", repo: RepoUpdateRequest{ RepoRequest: RepoRequest{ Name: &repoName, }, Archived: &yes, }, statusCode: http.StatusForbidden, expectError: true, }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPatch { t.Errorf("Bad method: %s (expected %s)", r.Method, http.MethodPatch) } expectedPath := "/repos/org/repository" if r.URL.Path != expectedPath { t.Errorf("Bad request path to create user-owned repo: %s (expected %s)", r.URL.Path, expectedPath) } b, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("Could not read request body: %v", err) } var repo Repo switch err := json.Unmarshal(b, &repo); { case err != nil: t.Errorf("Could not unmarshal request: %v", err) case repo.Name == "": t.Errorf("client should reject empty names") } repo.Description = "UPDATED" b, err = json.Marshal(repo) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.WriteHeader(tc.statusCode) // 200 fmt.Fprint(w, string(b)) })) defer ts.Close() c := getClient(ts.URL) switch repo, err := c.UpdateRepo(org, repoName, tc.repo); { case err != nil && !tc.expectError: t.Errorf("unexpected error: %v", err) case err == nil && tc.expectError: t.Errorf("expected error, but got none") case err == nil && !reflect.DeepEqual(repo, tc.expectRepo): t.Errorf("%s: repo differs from expected:\n%s", tc.description, diff.ObjectReflectDiff(tc.expectRepo, repo)) } }) } } type fakeHttpClient struct { received []*http.Request } func (fhc *fakeHttpClient) Do(req *http.Request) (*http.Response, error) { if fhc.received == nil { fhc.received = []*http.Request{} } fhc.received = append(fhc.received, req) return &http.Response{}, nil } func TestAuthHeaderGetsSet(t *testing.T) { t.Parallel() testCases := []struct { name string mod func(*client) expectedHeader http.Header }{ { name: "Empty token, no auth header", mod: func(c *client) { c.getToken = func() []byte { return []byte{} } }, }, { name: "Token, auth header", mod: func(c *client) { c.getToken = func() []byte { return []byte("sup") } }, expectedHeader: http.Header{"Authorization": []string{"Bearer sup"}}, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fake := &fakeHttpClient{} c := &client{delegate: &delegate{client: fake}, logger: logrus.NewEntry(logrus.New())} tc.mod(c) if _, err := c.doRequest("POST", "/hello", "", nil); err != nil { t.Fatalf("unexpected error: %v", err) } if tc.expectedHeader == nil { tc.expectedHeader = http.Header{} } tc.expectedHeader["Accept"] = []string{"application/vnd.github.v3+json"} // Bazel injects some stuff in here, exclude it from comparison so both bazel test // and go test yield the same result. 
delete(fake.received[0].Header, "User-Agent") if diff := cmp.Diff(tc.expectedHeader, fake.received[0].Header); diff != "" { t.Errorf("expected header differs from actual: %s", diff) } }) } } func TestListTeamRepos(t *testing.T) { ts := simpleTestServer(t, "/teams/1/repos", []Repo{ { Name: "repo-bar", Permissions: RepoPermissions{Pull: true}, }, { Name: "repo-invalid-permission-level", }, }, ) defer ts.Close() c := getClient(ts.URL) repos, err := c.ListTeamRepos(1) if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(repos) != 1 { t.Errorf("Expected one repo, found %d: %v", len(repos), repos) } else if repos[0].Name != "repo-bar" { t.Errorf("Wrong repos: %v", repos) } } func TestCreateFork(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path != "/repos/k8s/kuber/forks" { t.Errorf("Bad request path: %s", r.URL.Path) } w.WriteHeader(202) w.Write([]byte(`{"name":"other"}`)) })) defer ts.Close() c := getClient(ts.URL) if name, err := c.CreateFork("k8s", "kuber"); err != nil { t.Errorf("Unexpected error: %v", err) } else { if name != "other" { t.Errorf("Unexpected fork name: %v", name) } } } func TestToCurl(t *testing.T) { testCases := []struct { name string request *http.Request expected string }{ { name: "Authorization Header with bearer type gets masked", request: &http.Request{Method: http.MethodGet, URL: &url.URL{Scheme: "https", Host: "api.github.com"}, Header: http.Header{"Authorization": []string{"Bearer secret-token"}}}, expected: `curl -k -v -XGET -H "Authorization: Bearer <masked>" 'https://api.github.com'`, }, { name: "Authorization Header with unknown type gets masked", request: &http.Request{Method: http.MethodGet, URL: &url.URL{Scheme: "https", Host: "api.github.com"}, Header: http.Header{"Authorization": []string{"Definitely-not-valid secret-token"}}}, expected: `curl -k -v -XGET -H "Authorization: <masked>" 'https://api.github.com'`, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if result := toCurl(tc.request); result != tc.expected { t.Errorf("result %s differs from expected %s", result, tc.expected) } }) } }
file.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = void 0; function helpers() { const data = _interopRequireWildcard(require("babylonia/helpers")); helpers = function () { return data; }; return data; } function _traverse() { const data = _interopRequireWildcard(require("babylonia/traverse")); _traverse = function () { return data; }; return data; } function _codeFrame() { const data = require("babylonia/code-frame"); _codeFrame = function () { return data; }; return data; } function t() { const data = _interopRequireWildcard(require("babylonia/types")); t = function () { return data; }; return data; } function _semver() { const data = _interopRequireDefault(require("semver")); _semver = function () { return data; }; return data; } function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = Object.defineProperty && Object.getOwnPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : {}; if (desc.get || desc.set) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } } newObj.default = obj; return newObj; } } const errorVisitor = { enter(path, state) { const loc = path.node.loc; if (loc) { state.loc = loc; path.stop(); } } }; class File { constructor(options, { code, ast, inputMap }) { this._map = new Map(); this.declarations = {}; this.path = null; this.ast = {}; this.metadata = {}; this.code = ""; this.inputMap = null; this.hub = { file: this, getCode: () => this.code, getScope: () => this.scope, addHelper: this.addHelper.bind(this), buildError: this.buildCodeFrameError.bind(this) }; this.opts = options; this.code = code; this.ast = ast; this.inputMap = inputMap; this.path = _traverse().NodePath.get({ hub: this.hub, parentPath: null, parent: this.ast, container: this.ast, key: "program" }).setContext(); this.scope = this.path.scope; } get shebang() { const { interpreter } = this.path.node; return interpreter ? interpreter.value : ""; } set shebang(value) { if (value) { this.path.get("interpreter").replaceWith(t().interpreterDirective(value)); } else { this.path.get("interpreter").remove(); } } set(key, val) { if (key === "helpersNamespace") { throw new Error("Babel 7.0.0-beta.56 has dropped support for the 'helpersNamespace' utility." + "If you are using babylonia/plugin-external-helpers you will need to use a newer " + "version than the one you currently have installed. " + "If you have your own implementation, you'll want to explore using 'helperGenerator' " + "alongside 'file.availableHelper()'."); } this._map.set(key, val); } get(key) { return this._map.get(key); } has(key) { return this._map.has(key); } getModuleName() { const { filename, filenameRelative = filename, moduleId, moduleIds = !!moduleId, getModuleId, sourceRoot: sourceRootTmp, moduleRoot = sourceRootTmp, sourceRoot = moduleRoot } = this.opts; if (!moduleIds) return null; if (moduleId != null && !getModuleId) { return moduleId; } let moduleName = moduleRoot != null ? moduleRoot + "/" : ""; if (filenameRelative) { const sourceRootReplacer = sourceRoot != null ? 
new RegExp("^" + sourceRoot + "/?") : ""; moduleName += filenameRelative.replace(sourceRootReplacer, "").replace(/\.(\w*?)$/, ""); } moduleName = moduleName.replace(/\\/g, "/"); if (getModuleId) { return getModuleId(moduleName) || moduleName; } else { return moduleName; } } addImport() { throw new Error("This API has been removed. If you're looking for this " + "functionality in Babel 7, you should import the " + "'babylonia/helper-module-imports' module and use the functions exposed " + " from that module, such as 'addNamed' or 'addDefault'."); } availableHelper(name, versionRange) { let minVersion; try { minVersion = helpers().minVersion(name); } catch (err) { if (err.code !== "BABEL_HELPER_UNKNOWN") throw err; return false; } if (typeof versionRange !== "string") return true; if (_semver().default.valid(versionRange)) versionRange = `^${versionRange}`; return !_semver().default.intersects(`<${minVersion}`, versionRange) && !_semver().default.intersects(`>=8.0.0`, versionRange); } addHelper(name) { const declar = this.declarations[name]; if (declar) return t().cloneNode(declar); const generator = this.get("helperGenerator"); if (generator) { const res = generator(name); if (res) return res; } const uid = this.declarations[name] = this.scope.generateUidIdentifier(name); const dependencies = {}; for (const dep of helpers().getDependencies(name)) { dependencies[dep] = this.addHelper(dep); } const { nodes, globals
globals.forEach(name => { if (this.path.scope.hasBinding(name, true)) { this.path.scope.rename(name); } }); nodes.forEach(node => { node._compact = true; }); this.path.unshiftContainer("body", nodes); this.path.get("body").forEach(path => { if (nodes.indexOf(path.node) === -1) return; if (path.isVariableDeclaration()) this.scope.registerDeclaration(path); }); return uid; } addTemplateObject() { throw new Error("This function has been moved into the template literal transform itself."); } buildCodeFrameError(node, msg, Error = SyntaxError) { let loc = node && (node.loc || node._loc); msg = `${this.opts.filename}: ${msg}`; if (!loc && node) { const state = { loc: null }; (0, _traverse().default)(node, errorVisitor, this.scope, state); loc = state.loc; let txt = "This is an error on an internal node. Probably an internal error."; if (loc) txt += " Location has been estimated."; msg += ` (${txt})`; } if (loc) { const { highlightCode = true } = this.opts; msg += "\n" + (0, _codeFrame().codeFrameColumns)(this.code, { start: { line: loc.start.line, column: loc.start.column + 1 } }, { highlightCode }); } return new Error(msg); } } exports.default = File;
error.rs
use crate::serde::{DeserializerError, SerializerError}; use serde_json::Error as SerdeJsonError; use pest::error::Error as PestError; use crate::filters::Rule; use std::{error, fmt, io}; pub use bincode::Error as BincodeError; pub use fst::Error as FstError; pub use heed::Error as HeedError; pub use pest::error as pest_error; pub type MResult<T> = Result<T, Error>; #[derive(Debug)] pub enum Error { Io(io::Error), IndexAlreadyExists, MissingPrimaryKey, SchemaMissing, WordIndexMissing, MissingDocumentId, MaxFieldsLimitExceeded, Schema(meilisearch_schema::Error), Zlmdb(heed::Error), Fst(fst::Error), SerdeJson(SerdeJsonError), Bincode(bincode::Error), Serializer(SerializerError), Deserializer(DeserializerError), UnsupportedOperation(UnsupportedOperation), FilterParseError(PestError<Rule>), FacetError(FacetError), } impl From<io::Error> for Error { fn from(error: io::Error) -> Error { Error::Io(error) } } impl From<PestError<Rule>> for Error { fn from(error: PestError<Rule>) -> Error { Error::FilterParseError(error.renamed_rules(|r| { let s = match r { Rule::or => "OR", Rule::and => "AND", Rule::not => "NOT", Rule::string => "string", Rule::word => "word", Rule::greater => "field > value", Rule::less => "field < value", Rule::eq => "field = value", Rule::leq => "field <= value", Rule::geq => "field >= value", Rule::key => "key", _ => "other", }; s.to_string() })) } } impl From<FacetError> for Error { fn from(error: FacetError) -> Error { Error::FacetError(error) } } impl From<meilisearch_schema::Error> for Error { fn from(error: meilisearch_schema::Error) -> Error { Error::Schema(error) } } impl From<HeedError> for Error { fn from(error: HeedError) -> Error { Error::Zlmdb(error) } } impl From<FstError> for Error { fn
from
(error: FstError) -> Error { Error::Fst(error) } } impl From<SerdeJsonError> for Error { fn from(error: SerdeJsonError) -> Error { Error::SerdeJson(error) } } impl From<BincodeError> for Error { fn from(error: BincodeError) -> Error { Error::Bincode(error) } } impl From<SerializerError> for Error { fn from(error: SerializerError) -> Error { Error::Serializer(error) } } impl From<DeserializerError> for Error { fn from(error: DeserializerError) -> Error { Error::Deserializer(error) } } impl From<UnsupportedOperation> for Error { fn from(op: UnsupportedOperation) -> Error { Error::UnsupportedOperation(op) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; match self { Io(e) => write!(f, "{}", e), IndexAlreadyExists => write!(f, "index already exists"), MissingPrimaryKey => write!(f, "schema cannot be built without a primary key"), SchemaMissing => write!(f, "this index does not have a schema"), WordIndexMissing => write!(f, "this index does not have a word index"), MissingDocumentId => write!(f, "document id is missing"), MaxFieldsLimitExceeded => write!(f, "maximum number of fields in a document exceeded"), Schema(e) => write!(f, "schema error; {}", e), Zlmdb(e) => write!(f, "heed error; {}", e), Fst(e) => write!(f, "fst error; {}", e), SerdeJson(e) => write!(f, "serde json error; {}", e), Bincode(e) => write!(f, "bincode error; {}", e), Serializer(e) => write!(f, "serializer error; {}", e), Deserializer(e) => write!(f, "deserializer error; {}", e), UnsupportedOperation(op) => write!(f, "unsupported operation; {}", op), FilterParseError(e) => write!(f, "error parsing filter; {}", e), FacetError(e) => write!(f, "error processing facet filter: {}", e), } } } impl error::Error for Error {} #[derive(Debug)] pub enum UnsupportedOperation { SchemaAlreadyExists, CannotUpdateSchemaPrimaryKey, CannotReorderSchemaAttribute, CanOnlyIntroduceNewSchemaAttributesAtEnd, CannotRemoveSchemaAttribute, } impl fmt::Display for UnsupportedOperation { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::UnsupportedOperation::*; match self { SchemaAlreadyExists => write!(f, "Cannot update an index which already has a schema"), CannotUpdateSchemaPrimaryKey => write!(f, "Cannot update the primary key of a schema"), CannotReorderSchemaAttribute => write!(f, "Cannot reorder the attributes of a schema"), CanOnlyIntroduceNewSchemaAttributesAtEnd => { write!(f, "Can only introduce new attributes at the end of a schema") } CannotRemoveSchemaAttribute => write!(f, "Cannot remove attributes from a schema"), } } } #[derive(Debug)] pub enum FacetError { EmptyArray, ParsingError(String), UnexpectedToken { expected: &'static [&'static str], found: String }, InvalidFormat(String), AttributeNotFound(String), AttributeNotSet { expected: Vec<String>, found: String }, InvalidDocumentAttribute(String), } impl FacetError { pub fn unexpected_token(expected: &'static [&'static str], found: impl ToString) -> FacetError { FacetError::UnexpectedToken{ expected, found: found.to_string() } } pub fn attribute_not_set(expected: Vec<String>, found: impl ToString) -> FacetError { FacetError::AttributeNotSet{ expected, found: found.to_string() } } } impl fmt::Display for FacetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use FacetError::*; match self { EmptyArray => write!(f, "empty array in facet filter is unspecified behavior"), ParsingError(msg) => write!(f, "parsing error: {}", msg), UnexpectedToken { expected, found } => write!(f, "unexpected token 
{}, expected {}", found, expected.join(" or ")), InvalidFormat(found) => write!(f, "invalid facet: {}, facets should be \"facetName:facetValue\"", found), AttributeNotFound(attr) => write!(f, "unknown {:?} attribute", attr), AttributeNotSet { found, expected } => write!(f, "`{}` is not set as a faceted attribute. available facet attributes: {}", found, expected.join(", ")), InvalidDocumentAttribute(attr) => write!(f, "invalid document attribute {}, accepted types: String and [String]", attr), } } }
proxier_test.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package iptables import ( "testing" "fmt" "net" "strings" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/exec" utiliptables "k8s.io/kubernetes/pkg/util/iptables" iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing" ) func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) { chainLines := utiliptables.GetChainLines(table, save) for chain, line := range chainLines { if expected, exists := expectedLines[chain]; exists { if expected != line { t.Errorf("getChainLines expected chain line not present. For chain: %s Expected: %s Got: %s", chain, expected, line) } } else { t.Errorf("getChainLines expected chain not present: %s", chain) } } } func
(t *testing.T) { testFn := func(byteArray []byte, expected []string) { index := 0 readIndex := 0 for ; readIndex < len(byteArray); index++ { line, n := utiliptables.ReadLine(readIndex, byteArray) readIndex = n if expected[index] != line { t.Errorf("expected:%q, actual:%q", expected[index], line) } } if readIndex < len(byteArray) { t.Errorf("Byte buffer was only partially read. Buffer length is:%d, readIndex is:%d", len(byteArray), readIndex) } if index < len(expected) { t.Errorf("Not all expected strings were compared. expected arr length:%d, matched count:%d", len(expected), index) } } byteArray1 := []byte("\n Line 1 \n\n\n L ine4 \nLine 5 \n \n") expected1 := []string{"", "Line 1", "", "", "L ine4", "Line 5", ""} testFn(byteArray1, expected1) byteArray1 = []byte("") expected1 = []string{} testFn(byteArray1, expected1) byteArray1 = []byte("\n\n") expected1 = []string{"", ""} testFn(byteArray1, expected1) } func TestGetChainLines(t *testing.T) { iptables_save := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014 *nat :PREROUTING ACCEPT [2136997:197881818] :POSTROUTING ACCEPT [4284525:258542680] :OUTPUT ACCEPT [5901660:357267963] -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER COMMIT # Completed on Wed Oct 29 14:56:01 2014` expected := map[utiliptables.Chain]string{ utiliptables.ChainPrerouting: ":PREROUTING ACCEPT [2136997:197881818]", utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [4284525:258542680]", utiliptables.ChainOutput: ":OUTPUT ACCEPT [5901660:357267963]", } checkAllLines(t, utiliptables.TableNAT, []byte(iptables_save), expected) } func TestGetChainLinesMultipleTables(t *testing.T) { iptables_save := `# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015 *nat :PREROUTING ACCEPT [2:138] :INPUT ACCEPT [0:0] :OUTPUT ACCEPT [0:0] :POSTROUTING ACCEPT [0:0] :DOCKER - [0:0] :KUBE-NODEPORT-CONTAINER - [0:0] :KUBE-NODEPORT-HOST - [0:0] :KUBE-PORTALS-CONTAINER - [0:0] :KUBE-PORTALS-HOST - [0:0] :KUBE-SVC-1111111111111111 - [0:0] :KUBE-SVC-2222222222222222 - [0:0] :KUBE-SVC-3333333333333333 - [0:0] :KUBE-SVC-4444444444444444 - [0:0] :KUBE-SVC-5555555555555555 - [0:0] :KUBE-SVC-6666666666666666 - [0:0] -A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER -A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER -A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER -A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST -A POSTROUTING -s 10.246.1.0/24 ! 
-o cbr0 -j MASQUERADE -A POSTROUTING -s 10.0.2.15/32 -d 10.0.2.15/32 -m comment --comment "handle pod connecting to self" -j MASQUERADE -A KUBE-PORTALS-CONTAINER -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555 -A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666 -A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222 -A KUBE-PORTALS-HOST -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555 -A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666 -A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222 -A KUBE-SVC-1111111111111111 -p udp -m comment --comment "kube-system/kube-dns:dns" -m recent --set --name KUBE-SVC-1111111111111111 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53 -A KUBE-SVC-2222222222222222 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SVC-3333333333333333 -A KUBE-SVC-3333333333333333 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m recent --set --name KUBE-SVC-3333333333333333 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53 -A KUBE-SVC-4444444444444444 -p tcp -m comment --comment "default/kubernetes:" -m recent --set --name KUBE-SVC-4444444444444444 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.245.1.2:443 -A KUBE-SVC-5555555555555555 -m comment --comment "default/kubernetes:" -j KUBE-SVC-4444444444444444 -A KUBE-SVC-6666666666666666 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SVC-1111111111111111 COMMIT # Completed on Fri Aug 7 14:47:37 2015 # Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015 *filter :INPUT ACCEPT [17514:83115836] :FORWARD ACCEPT [0:0] :OUTPUT ACCEPT [8909:688225] :DOCKER - [0:0] -A FORWARD -o cbr0 -j DOCKER -A FORWARD -o cbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -A FORWARD -i cbr0 ! 
-o cbr0 -j ACCEPT -A FORWARD -i cbr0 -o cbr0 -j ACCEPT COMMIT ` expected := map[utiliptables.Chain]string{ utiliptables.ChainPrerouting: ":PREROUTING ACCEPT [2:138]", utiliptables.Chain("INPUT"): ":INPUT ACCEPT [0:0]", utiliptables.Chain("OUTPUT"): ":OUTPUT ACCEPT [0:0]", utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [0:0]", utiliptables.Chain("DOCKER"): ":DOCKER - [0:0]", utiliptables.Chain("KUBE-NODEPORT-CONTAINER"): ":KUBE-NODEPORT-CONTAINER - [0:0]", utiliptables.Chain("KUBE-NODEPORT-HOST"): ":KUBE-NODEPORT-HOST - [0:0]", utiliptables.Chain("KUBE-PORTALS-CONTAINER"): ":KUBE-PORTALS-CONTAINER - [0:0]", utiliptables.Chain("KUBE-PORTALS-HOST"): ":KUBE-PORTALS-HOST - [0:0]", utiliptables.Chain("KUBE-SVC-1111111111111111"): ":KUBE-SVC-1111111111111111 - [0:0]", utiliptables.Chain("KUBE-SVC-2222222222222222"): ":KUBE-SVC-2222222222222222 - [0:0]", utiliptables.Chain("KUBE-SVC-3333333333333333"): ":KUBE-SVC-3333333333333333 - [0:0]", utiliptables.Chain("KUBE-SVC-4444444444444444"): ":KUBE-SVC-4444444444444444 - [0:0]", utiliptables.Chain("KUBE-SVC-5555555555555555"): ":KUBE-SVC-5555555555555555 - [0:0]", utiliptables.Chain("KUBE-SVC-6666666666666666"): ":KUBE-SVC-6666666666666666 - [0:0]", } checkAllLines(t, utiliptables.TableNAT, []byte(iptables_save), expected) } func TestGetRemovedEndpoints(t *testing.T) { testCases := []struct { currentEndpoints []string newEndpoints []string removedEndpoints []string }{ { currentEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, newEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, removedEndpoints: []string{}, }, { currentEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80", "10.0.2.3:80"}, newEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, removedEndpoints: []string{"10.0.2.3:80"}, }, { currentEndpoints: []string{}, newEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, removedEndpoints: []string{}, }, { currentEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, newEndpoints: []string{}, removedEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, }, { currentEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80", "10.0.2.2:443"}, newEndpoints: []string{"10.0.2.1:80", "10.0.2.2:80"}, removedEndpoints: []string{"10.0.2.2:443"}, }, } for i := range testCases { res := getRemovedEndpoints(testCases[i].currentEndpoints, testCases[i].newEndpoints) if !slicesEquiv(res, testCases[i].removedEndpoints) { t.Errorf("Expected: %v, but getRemovedEndpoints returned: %v", testCases[i].removedEndpoints, res) } } } func TestExecConntrackTool(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, func() ([]byte, error) { return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted.") }, }, } fexec := exec.FakeExec{ CommandScript: []exec.FakeCommandAction{ func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, }, LookPathFunc: func(cmd string) (string, error) { return cmd, nil }, } fakeProxier := Proxier{exec: &fexec} testCases := [][]string{ {"-L", "-p", "udp"}, {"-D", "-p", "udp", "-d", "10.0.240.1"}, {"-D", "-p", "udp", "--orig-dst", "10.240.0.2", "--dst-nat", "10.0.10.2"}, } expectErr := []bool{false, false, true} for i := range testCases { err := fakeProxier.execConntrackTool(testCases[i]...) if expectErr[i] { if err == nil { t.Errorf("expected err, got %v", err) } } else { if err != nil { t.Errorf("expected success, got %v", err) } } execCmd := strings.Join(fcmd.CombinedOutputLog[i], " ") expectCmd := fmt.Sprintf("%s %s", "conntrack", strings.Join(testCases[i], " ")) if execCmd != expectCmd { t.Errorf("expected command: %s, but got: %s", expectCmd, execCmd) } } } func newFakeServiceInfo(service proxy.ServicePortName, ip net.IP, port int, protocol api.Protocol, onlyNodeLocalEndpoints bool) *serviceInfo { return &serviceInfo{ sessionAffinityType: api.ServiceAffinityNone, // default stickyMaxAgeMinutes: 180, // TODO: parameterize this in the API. clusterIP: ip, port: port, protocol: protocol, onlyNodeLocalEndpoints: onlyNodeLocalEndpoints, } } func TestDeleteEndpointConnections(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, func() ([]byte, error) { return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted.") }, }, } fexec := exec.FakeExec{ CommandScript: []exec.FakeCommandAction{ func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, }, LookPathFunc: func(cmd string) (string, error) { return cmd, nil }, } serviceMap := make(map[proxy.ServicePortName]*serviceInfo) svc1 := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: "svc1"}, Port: "80"} svc2 := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: "svc2"}, Port: "80"} serviceMap[svc1] = newFakeServiceInfo(svc1, net.IPv4(10, 20, 30, 40), 80, api.ProtocolUDP, false) serviceMap[svc2] = newFakeServiceInfo(svc2, net.IPv4(10, 20, 30, 41), 80, api.ProtocolTCP, false) fakeProxier := Proxier{exec: &fexec, serviceMap: serviceMap} testCases := []endpointServicePair{ { endpoint: "10.240.0.3:80", servicePortName: svc1, }, { endpoint: "10.240.0.4:80", servicePortName: svc1, }, { endpoint: "10.240.0.5:80", servicePortName: svc2, }, } expectCommandExecCount := 0 for i := range testCases { input := map[endpointServicePair]bool{testCases[i]: true} fakeProxier.deleteEndpointConnections(input) svcInfo := fakeProxier.serviceMap[testCases[i].servicePortName] if svcInfo.protocol == api.ProtocolUDP { svcIp := svcInfo.clusterIP.String() endpointIp := strings.Split(testCases[i].endpoint, ":")[0] expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p udp", svcIp, endpointIp) execCommand := strings.Join(fcmd.CombinedOutputLog[expectCommandExecCount], " ") if expectCommand != execCommand { t.Errorf("Expected command: %s, but executed %s", expectCommand, execCommand) } expectCommandExecCount += 1 } if expectCommandExecCount != fexec.CommandCalls { t.Errorf("Expected command executed %d times, but got %d", expectCommandExecCount, fexec.CommandCalls) } } } func TestDeleteServiceConnections(t *testing.T) { fcmd := exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ func() 
([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, func() ([]byte, error) { return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted.") }, }, } fexec := exec.FakeExec{ CommandScript: []exec.FakeCommandAction{ func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, }, LookPathFunc: func(cmd string) (string, error) { return cmd, nil }, } fakeProxier := Proxier{exec: &fexec} testCases := [][]string{ { "10.240.0.3", "10.240.0.5", }, { "10.240.0.4", }, } svcCount := 0 for i := range testCases { fakeProxier.deleteServiceConnections(testCases[i]) for _, ip := range testCases[i] { expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s -p udp", ip) execCommand := strings.Join(fcmd.CombinedOutputLog[svcCount], " ") if expectCommand != execCommand { t.Errorf("Expected command: %s, but executed %s", expectCommand, execCommand) } svcCount += 1 } if svcCount != fexec.CommandCalls { t.Errorf("Expected command executed %d times, but got %d", svcCount, fexec.CommandCalls) } } } type fakeClosable struct { closed bool } func (c *fakeClosable) Close() error { c.closed = true return nil } func TestRevertPorts(t *testing.T) { testCases := []struct { replacementPorts []localPort existingPorts []localPort expectToBeClose []bool }{ { replacementPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, }, existingPorts: []localPort{}, expectToBeClose: []bool{true, true, true}, }, { replacementPorts: []localPort{}, existingPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, }, expectToBeClose: []bool{}, }, { replacementPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, }, existingPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, }, expectToBeClose: []bool{false, false, false}, }, { replacementPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, }, existingPorts: []localPort{ {port: 5001}, {port: 5003}, }, expectToBeClose: []bool{false, true, false}, }, { replacementPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, }, existingPorts: []localPort{ {port: 5001}, {port: 5002}, {port: 5003}, {port: 5004}, }, expectToBeClose: []bool{false, false, false}, }, } for i, tc := range testCases { replacementPortsMap := make(map[localPort]closeable) for _, lp := range tc.replacementPorts { replacementPortsMap[lp] = &fakeClosable{} } existingPortsMap := make(map[localPort]closeable) for _, lp := range tc.existingPorts { existingPortsMap[lp] = &fakeClosable{} } revertPorts(replacementPortsMap, existingPortsMap) for j, expectation := range tc.expectToBeClose { if replacementPortsMap[tc.replacementPorts[j]].(*fakeClosable).closed != expectation { t.Errorf("Expect replacement localport %v to be %v in test case %v", tc.replacementPorts[j], expectation, i) } } for _, lp := range tc.existingPorts { if existingPortsMap[lp].(*fakeClosable).closed == true { t.Errorf("Expect existing localport %v to be false in test case %v", lp, i) } } } } // fakePortOpener implements portOpener. type fakePortOpener struct { openPorts []*localPort } // OpenLocalPort fakes out the listen() and bind() used by syncProxyRules // to lock a local port. 
func (f *fakePortOpener) OpenLocalPort(lp *localPort) (closeable, error) { f.openPorts = append(f.openPorts, lp) return nil, nil } func NewFakeProxier(ipt utiliptables.Interface) *Proxier { // TODO: Call NewProxier after refactoring out the goroutine // invocation into a Run() method. return &Proxier{ exec: &exec.FakeExec{}, serviceMap: make(map[proxy.ServicePortName]*serviceInfo), iptables: ipt, endpointsMap: make(map[proxy.ServicePortName][]*endpointsInfo), clusterCIDR: "10.0.0.0/24", haveReceivedEndpointsUpdate: true, haveReceivedServiceUpdate: true, hostname: "test-hostname", portsMap: make(map[localPort]closeable), portMapper: &fakePortOpener{[]*localPort{}}, } } func hasJump(rules []iptablestest.Rule, destChain, destIP, destPort string) bool { match := false for _, r := range rules { if r[iptablestest.Jump] == destChain { match = true if destIP != "" { if strings.Contains(r[iptablestest.Destination], destIP) && (strings.Contains(r[iptablestest.DPort], destPort) || r[iptablestest.DPort] == "") { return true } match = false } if destPort != "" { if strings.Contains(r[iptablestest.DPort], destPort) && (strings.Contains(r[iptablestest.Destination], destIP) || r[iptablestest.Destination] == "") { return true } match = false } } } return match } func TestHasJump(t *testing.T) { testCases := map[string]struct { rules []iptablestest.Rule destChain string destIP string destPort string expected bool }{ "case 1": { // Match the 1st rule(both dest IP and dest Port) rules: []iptablestest.Rule{ {"-d ": "10.20.30.41/32", "--dport ": "80", "-p ": "tcp", "-j ": "REJECT"}, {"--dport ": "3001", "-p ": "tcp", "-j ": "KUBE-MARK-MASQ"}, }, destChain: "REJECT", destIP: "10.20.30.41", destPort: "80", expected: true, }, "case 2": { // Match the 2nd rule(dest Port) rules: []iptablestest.Rule{ {"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"}, {"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"}, }, destChain: "REJECT", destIP: "", destPort: "3001", expected: true, }, "case 3": { // Match both dest IP and dest Port rules: []iptablestest.Rule{ {"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"}, }, destChain: "KUBE-XLB-GF53O3C2HZEXL2XN", destIP: "1.2.3.4", destPort: "80", expected: true, }, "case 4": { // Match dest IP but doesn't match dest Port rules: []iptablestest.Rule{ {"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"}, }, destChain: "KUBE-XLB-GF53O3C2HZEXL2XN", destIP: "1.2.3.4", destPort: "8080", expected: false, }, "case 5": { // Match dest Port but doesn't match dest IP rules: []iptablestest.Rule{ {"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"}, }, destChain: "KUBE-XLB-GF53O3C2HZEXL2XN", destIP: "10.20.30.40", destPort: "80", expected: false, }, "case 6": { // Match the 2nd rule(dest IP) rules: []iptablestest.Rule{ {"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"}, {"-d ": "1.2.3.4/32", "-p ": "tcp", "-j ": "REJECT"}, {"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"}, }, destChain: "REJECT", destIP: "1.2.3.4", destPort: "8080", expected: true, }, "case 7": { // Match the 2nd rule(dest Port) rules: []iptablestest.Rule{ {"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"}, {"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"}, }, destChain: "REJECT", destIP: "1.2.3.4", destPort: "3001", expected: true, }, "case 8": { // Match the 1st rule(dest IP) rules: []iptablestest.Rule{ {"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"}, {"--dport ": "3001", "-p ": "tcp", "-j 
": "REJECT"}, }, destChain: "REJECT", destIP: "10.20.30.41", destPort: "8080", expected: true, }, "case 9": { rules: []iptablestest.Rule{ {"-j ": "KUBE-SEP-LWSOSDSHMKPJHHJV"}, }, destChain: "KUBE-SEP-LWSOSDSHMKPJHHJV", destIP: "", destPort: "", expected: true, }, "case 10": { rules: []iptablestest.Rule{ {"-j ": "KUBE-SEP-FOO"}, }, destChain: "KUBE-SEP-BAR", destIP: "", destPort: "", expected: false, }, } for k, tc := range testCases { if got := hasJump(tc.rules, tc.destChain, tc.destIP, tc.destPort); got != tc.expected { t.Errorf("%v: expected %v, got %v", k, tc.expected, got) } } } func hasDNAT(rules []iptablestest.Rule, endpoint string) bool { for _, r := range rules { if r[iptablestest.ToDest] == endpoint { return true } } return false } func errorf(msg string, rules []iptablestest.Rule, t *testing.T) { for _, r := range rules { t.Logf("%v", r) } t.Errorf("%v", msg) } func TestClusterIPReject(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) svcName := "svc1" svcIP := net.IPv4(10, 20, 30, 41) svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "80"} fp.serviceMap[svc] = newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, false) fp.syncProxyRules() svcChain := string(servicePortChainName(svc, strings.ToLower(string(api.ProtocolTCP)))) svcRules := ipt.GetRules(svcChain) if len(svcRules) != 0 { errorf(fmt.Sprintf("Unexpected rule for chain %v service %v without endpoints", svcChain, svcName), svcRules, t) } kubeSvcRules := ipt.GetRules(string(kubeServicesChain)) if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP.String(), "80") { errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcName), kubeSvcRules, t) } } func TestClusterIPEndpointsJump(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) svcName := "svc1" svcIP := net.IPv4(10, 20, 30, 41) svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "80"} fp.serviceMap[svc] = newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, true) ep := "10.180.0.1:80" fp.endpointsMap[svc] = []*endpointsInfo{{ep, false}} fp.syncProxyRules() svcChain := string(servicePortChainName(svc, strings.ToLower(string(api.ProtocolTCP)))) epChain := string(servicePortEndpointChainName(svc, strings.ToLower(string(api.ProtocolTCP)), ep)) kubeSvcRules := ipt.GetRules(string(kubeServicesChain)) if !hasJump(kubeSvcRules, svcChain, svcIP.String(), "80") { errorf(fmt.Sprintf("Failed to find jump from KUBE-SERVICES to %v chain", svcChain), kubeSvcRules, t) } svcRules := ipt.GetRules(svcChain) if !hasJump(svcRules, epChain, "", "") { errorf(fmt.Sprintf("Failed to jump to ep chain %v", epChain), svcRules, t) } epRules := ipt.GetRules(epChain) if !hasDNAT(epRules, ep) { errorf(fmt.Sprintf("Endpoint chain %v lacks DNAT to %v", epChain, ep), epRules, t) } } func typeLoadBalancer(svcInfo *serviceInfo) *serviceInfo { svcInfo.nodePort = 3001 svcInfo.loadBalancerStatus = api.LoadBalancerStatus{ Ingress: []api.LoadBalancerIngress{{IP: "1.2.3.4"}}, } return svcInfo } func TestLoadBalancer(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) svcName := "svc1" svcIP := net.IPv4(10, 20, 30, 41) svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "80"} svcInfo := newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, false) fp.serviceMap[svc] = typeLoadBalancer(svcInfo) ep1 := "10.180.0.1:80" fp.endpointsMap[svc] = 
[]*endpointsInfo{{ep1, false}} fp.syncProxyRules() proto := strings.ToLower(string(api.ProtocolTCP)) fwChain := string(serviceFirewallChainName(svc, proto)) svcChain := string(servicePortChainName(svc, strings.ToLower(string(api.ProtocolTCP)))) //lbChain := string(serviceLBChainName(svc, proto)) kubeSvcRules := ipt.GetRules(string(kubeServicesChain)) if !hasJump(kubeSvcRules, fwChain, svcInfo.loadBalancerStatus.Ingress[0].IP, "80") { errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t) } fwRules := ipt.GetRules(fwChain) if !hasJump(fwRules, svcChain, "", "") || !hasJump(fwRules, string(KubeMarkMasqChain), "", "") { errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, svcChain), fwRules, t) } } func TestNodePort(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) svcName := "svc1" svcIP := net.IPv4(10, 20, 30, 41) svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "80"} svcInfo := newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, false) svcInfo.nodePort = 3001 fp.serviceMap[svc] = svcInfo ep1 := "10.180.0.1:80" fp.endpointsMap[svc] = []*endpointsInfo{{ep1, false}} fp.syncProxyRules() proto := strings.ToLower(string(api.ProtocolTCP)) svcChain := string(servicePortChainName(svc, strings.ToLower(proto))) kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain)) if !hasJump(kubeNodePortRules, svcChain, "", fmt.Sprintf("%v", svcInfo.nodePort)) { errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeNodePortRules, t) } } func TestOnlyLocalLoadBalancing(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) svcName := "svc1" svcIP := net.IPv4(10, 20, 30, 41) svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "80"} svcInfo := newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, true) fp.serviceMap[svc] = typeLoadBalancer(svcInfo) nonLocalEp := "10.180.0.1:80" localEp := "10.180.2.1:80" fp.endpointsMap[svc] = []*endpointsInfo{{nonLocalEp, false}, {localEp, true}} fp.syncProxyRules() proto := strings.ToLower(string(api.ProtocolTCP)) fwChain := string(serviceFirewallChainName(svc, proto)) lbChain := string(serviceLBChainName(svc, proto)) nonLocalEpChain := string(servicePortEndpointChainName(svc, strings.ToLower(string(api.ProtocolTCP)), nonLocalEp)) localEpChain := string(servicePortEndpointChainName(svc, strings.ToLower(string(api.ProtocolTCP)), localEp)) kubeSvcRules := ipt.GetRules(string(kubeServicesChain)) if !hasJump(kubeSvcRules, fwChain, svcInfo.loadBalancerStatus.Ingress[0].IP, "") { errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t) } fwRules := ipt.GetRules(fwChain) if !hasJump(fwRules, lbChain, "", "") { errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, lbChain), fwRules, t) } if hasJump(fwRules, string(KubeMarkMasqChain), "", "") { errorf(fmt.Sprintf("Found jump from fw chain %v to MASQUERADE", fwChain), fwRules, t) } lbRules := ipt.GetRules(lbChain) if hasJump(lbRules, nonLocalEpChain, "", "") { errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, nonLocalEp), lbRules, t) } if !hasJump(lbRules, localEpChain, "", "") { errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, localEp), lbRules, t) } } func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) // set 
cluster CIDR to empty before test fp.clusterCIDR = "" onlyLocalNodePorts(t, fp, ipt) } func TestOnlyLocalNodePorts(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) onlyLocalNodePorts(t, fp, ipt) } func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) { shouldLBTOSVCRuleExist := len(fp.clusterCIDR) > 0 svcName := "svc1" svcIP := net.IPv4(10, 20, 30, 41) svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "80"} svcInfo := newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, true) svcInfo.nodePort = 3001 fp.serviceMap[svc] = svcInfo nonLocalEp := "10.180.0.1:80" localEp := "10.180.2.1:80" fp.endpointsMap[svc] = []*endpointsInfo{{nonLocalEp, false}, {localEp, true}} fp.syncProxyRules() proto := strings.ToLower(string(api.ProtocolTCP)) lbChain := string(serviceLBChainName(svc, proto)) nonLocalEpChain := string(servicePortEndpointChainName(svc, strings.ToLower(string(api.ProtocolTCP)), nonLocalEp)) localEpChain := string(servicePortEndpointChainName(svc, strings.ToLower(string(api.ProtocolTCP)), localEp)) kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain)) if !hasJump(kubeNodePortRules, lbChain, "", fmt.Sprintf("%v", svcInfo.nodePort)) { errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t) } svcChain := string(servicePortChainName(svc, strings.ToLower(string(api.ProtocolTCP)))) lbRules := ipt.GetRules(lbChain) if hasJump(lbRules, nonLocalEpChain, "", "") { errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, nonLocalEp), lbRules, t) } if hasJump(lbRules, svcChain, "", "") != shouldLBTOSVCRuleExist { prefix := "Did not find " if !shouldLBTOSVCRuleExist { prefix = "Found " } errorf(fmt.Sprintf("%s jump from lb chain %v to svc %v", prefix, lbChain, svcChain), lbRules, t) } if !hasJump(lbRules, localEpChain, "", "") { errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, localEp), lbRules, t) } } // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
TestReadLinesFromByteBuffer
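// Sketch (assumption, not defined in this file): TestGetRemovedEndpoints above
// exercises helpers from proxier.go. From the test cases, they are expected to
// behave roughly like this (using k8s.io/kubernetes/pkg/util/sets):
//
//	func getRemovedEndpoints(curEndpoints, newEndpoints []string) []string {
//		return sets.NewString(curEndpoints...).Difference(sets.NewString(newEndpoints...)).List()
//	}
//
//	func slicesEquiv(lhs, rhs []string) bool {
//		return len(lhs) == len(rhs) && sets.NewString(lhs...).IsSuperset(sets.NewString(rhs...))
//	}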
polygonVertices16F.d.ts
export const polygonVertices16F: string;
main.rs
use std::sync::Arc; use log::Level; use stylist::{yew::Global, StyleSource, YieldStyle}; use yew::{html, Component, Context, Html}; use yew_agent::{ utils::store::{Bridgeable, ReadOnly, StoreWrapper}, Bridge, }; mod theme; use theme::{Theme, ThemeKind, ThemeStore}; pub(crate) enum InsideMsg { SetTheme(ThemeKind), ThemeUpdated(ReadOnly<ThemeStore>), } pub(crate) struct Inside { theme_kind: ThemeKind, theme: Option<Arc<Theme>>, theme_store: Box<dyn Bridge<StoreWrapper<ThemeStore>>>, } impl Component for Inside { type Message = InsideMsg; type Properties = (); fn create(ctx: &Context<Self>) -> Self { let callback = ctx.link().callback(InsideMsg::ThemeUpdated); Self { theme_kind: ThemeKind::Light, theme: None, theme_store: ThemeStore::bridge(callback), } } fn update(&mut self, _: &Context<Self>, msg: Self::Message) -> bool { match msg { InsideMsg::ThemeUpdated(m) => { let m = m.borrow(); self.theme_kind = m.kind.clone(); self.theme = Some(m.current()); } InsideMsg::SetTheme(m) => { self.theme_store.send(theme::Action::SetTheme(m)); } } true } fn changed(&mut self, _: &Context<Self>) -> bool { false } fn view(&self, ctx: &Context<Self>) -> Html { let theme_str = match self.theme_kind { ThemeKind::Light => "Dark Theme", ThemeKind::Dark => "Light Theme", }; let other_theme = match self.theme_kind { ThemeKind::Light => ThemeKind::Dark, ThemeKind::Dark => ThemeKind::Light, }; let switch_theme = ctx .link() .callback(move |_| InsideMsg::SetTheme(other_theme.clone())); html! { <div class={self.style()}> <button onclick={switch_theme}>{"Switch to "}{theme_str}</button> </div> } } } impl YieldStyle for Inside { fn style_from(&self) -> StyleSource<'static> { r#" button { color: white; height: 50px; width: 300px; font-size: 20px; background-color: rgb(88, 164, 255); border-radius: 5px; border: none; } "# .into() } } pub(crate) enum AppMsg { ThemeUpdated(ReadOnly<ThemeStore>), } pub(crate) struct App { theme: Option<Arc<Theme>>, theme_kind: ThemeKind, _theme_store: Box<dyn Bridge<StoreWrapper<ThemeStore>>>, } impl Component for App { type Message = AppMsg; type Properties = (); fn create(ctx: &Context<Self>) -> Self { let callback = ctx.link().callback(AppMsg::ThemeUpdated); Self { theme: None, theme_kind: ThemeKind::Light, _theme_store: ThemeStore::bridge(callback), } } fn update(&mut self, _: &Context<Self>, msg: Self::Message) -> bool { match msg { AppMsg::ThemeUpdated(m) => { let m = m.borrow(); self.theme = Some(m.current()); self.theme_kind = m.kind.clone(); } } true } fn changed(&mut self, _: &Context<Self>) -> bool { false } fn view(&self, _: &Context<Self>) -> Html { if self.theme.is_none() { return Html::default(); } let theme = self.theme.clone().unwrap(); let theme_str = match self.theme_kind { ThemeKind::Light => "light theme", ThemeKind::Dark => "dark theme", }; html! { <> // Global Styles can be applied with <Global /> component. <Global css={format!( r#" html, body {{ font-family: sans-serif; padding: 0; margin: 0; display: flex; justify-content: center; align-items: center; min-height: 100vh; flex-direction: column; background-color: {bg}; color: {ft_color}; }} "#, bg = theme.background_color, ft_color = theme.font_color, )} /> <h1>{"Yew Theming w/ Agent"}</h1> <div class={self.style()}> {"You are now using the "}{theme_str}{"!"} <Inside /> </div> </> } // let toggle_theme = self // .link // .callback(move |_| AppMsg::SetTheme(other_theme.clone())); // html! 
{ // <div class=self.style()> // <Inside /> // <button onclick=toggle_theme>{"Toggle Theme"}</button> // </div> // } } } impl YieldStyle for App { fn style_from(&self) -> StyleSource<'static> { if let Some(ref m) = self.theme { format!( r#" box-shadow: 0 0 5px 1px rgba(0, 0, 0, 0.7); height: 500px; width: 500px; border-radius: 5px; display: flex; justify-content: space-around; align-items: center; padding: 15px; box-sizing: border-box; flex-direction: column; background-color: {bg}; "#, bg = m.paper_color ) .into() } else { "".into() } } } fn
() { console_log::init_with_level(Level::Trace).expect("Failed to initialise Log!"); yew::start_app::<App>(); }
main
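// Sketch (assumption): the `theme` module imported above is not shown in this
// file. From its usage here it is expected to look roughly like this:
//
//     pub(crate) enum Action { SetTheme(ThemeKind) }
//
//     #[derive(Clone, PartialEq)]
//     pub(crate) enum ThemeKind { Light, Dark }
//
//     pub(crate) struct Theme {
//         pub font_color: String,
//         pub background_color: String,
//         pub paper_color: String,
//     }
//
//     // ThemeStore implements yew_agent's Store, keeps a `kind: ThemeKind`,
//     // and exposes `current(&self) -> Arc<Theme>` for the active palette.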
ghttp_unit_session_test.go
// Copyright 2018 gf Author(https://github.com/zhongdalu/gf). All Rights Reserved. // // This Source Code Form is subject to the terms of the MIT License. // If a copy of the MIT was not distributed with this file, // You can obtain one at https://github.com/zhongdalu/gf. // Session tests package ghttp_test import ( "fmt" "github.com/zhongdalu/gf/g" "github.com/zhongdalu/gf/g/net/ghttp" "github.com/zhongdalu/gf/g/test/gtest" "testing" "time" ) func Test
testing.T) { p := ports.PopRand() s := g.Server(p) s.BindHandler("/set", func(r *ghttp.Request) { r.Session.Set(r.Get("k"), r.Get("v")) }) s.BindHandler("/get", func(r *ghttp.Request) { r.Response.Write(r.Session.Get(r.Get("k"))) }) s.BindHandler("/remove", func(r *ghttp.Request) { r.Session.Remove(r.Get("k")) }) s.BindHandler("/clear", func(r *ghttp.Request) { r.Session.Clear() }) s.SetPort(p) s.SetDumpRouteMap(false) s.Start() defer s.Shutdown() // wait for the server to finish starting time.Sleep(time.Second) gtest.Case(t, func() { client := ghttp.NewClient() client.SetBrowserMode(true) client.SetPrefix(fmt.Sprintf("http://127.0.0.1:%d", p)) r1, e1 := client.Get("/set?k=key1&v=100") if r1 != nil { defer r1.Close() } gtest.Assert(e1, nil) gtest.Assert(r1.ReadAllString(), "") gtest.Assert(client.GetContent("/set?k=key2&v=200"), "") gtest.Assert(client.GetContent("/get?k=key1"), "100") gtest.Assert(client.GetContent("/get?k=key2"), "200") gtest.Assert(client.GetContent("/get?k=key3"), "") gtest.Assert(client.GetContent("/remove?k=key1"), "") gtest.Assert(client.GetContent("/remove?k=key3"), "") gtest.Assert(client.GetContent("/remove?k=key4"), "") gtest.Assert(client.GetContent("/get?k=key1"), "") gtest.Assert(client.GetContent("/get?k=key2"), "200") gtest.Assert(client.GetContent("/clear"), "") gtest.Assert(client.GetContent("/get?k=key2"), "") }) }
_Session(t *
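// Note on why the assertions above share state (an assumption about the
// client): SetBrowserMode(true) makes the test client retain and resend the
// session cookie, so the consecutive requests to /set, /get, /remove and
// /clear all reach the same server-side session.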
tasks.py
from __future__ import absolute_import, unicode_literals # from time import sleep import binascii import os from celery import shared_task from django.conf import settings # Django from django.core.cache import cache from django.core.mail import send_mail from django.template.loader import render_to_string # local Django from apps.mail.models import Mail # from apps.utils.basetaskcelery import VerifyTaskBase # from djheavy.celery import app # @app.task(base=VerifyTaskBase) @shared_task def example_add(x: int, y: int): """ ... """ return x + y @shared_task def simulate_send_emails(text: str): """ ... """ Mail.objects.create(name=text) # print("task db", Mail.objects.count()) if settings.ACTIVE_EMAIL: # pragma: no cover subject = "Thank you for registering to our site" message = " it means a world to us " email_from = settings.EMAIL_HOST_USER recipient_list = [text] send_mail(subject, message, email_from, recipient_list) dict_task = { "sended_to": text, } return dict_task @shared_task def send_email_activation(username: str, email: str, domain: str): """ ... """ token: str = binascii.hexlify(os.urandom(20)).decode() if settings.ACTIVE_EMAIL: # pragma: no cover subject = "Thank you for registering to our site" message = render_to_string( "activate_account.html", {"username": username, "domain": domain, "token": token,}, )
recipient_list = [email] send_mail(subject, message, email_from, recipient_list) cache.set(token, f"{username}_{email}_{token}", 60) return token
email_from = settings.EMAIL_HOST_USER
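# Usage sketch (assumed caller, not part of this module): a registration view
# would typically enqueue the task and rely on the cached token for activation:
#
#     send_email_activation.delay(user.username, user.email, request.get_host())
#
# The activation endpoint can then check cache.get(token) before enabling the
# account; entries expire after the 60 seconds configured above.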
VirtualGridList-GT-28445-specs.js
const Page = require('../VirtualGridListPage'); describe('Change ItemSize', function () { beforeEach(function () { Page.open(); }); it('should change minWidth and minHeight [GT-28445]', function () { // Step 3: Knobs > VirtualGridList > ItemSize.minWidth > 800 Page.inputMinWidth.moveTo(); Page.spotlightSelect(); Page.backSpace(); Page.backSpace(); Page.backSpace(); Page.numPad(8); Page.numPad(0); Page.numPad(0); Page.spotlightDown(); // Step 3 Verify: The width of items grows bigger. expect(Page.getItemSize().width).to.be.above(400); // Step 4: Knobs > VirtualGridList > ItemSize.minHeight > 700 Page.inputMinHeight.moveTo(); Page.spotlightSelect(); Page.backSpace(); Page.backSpace(); Page.backSpace();
Page.numPad(0); Page.numPad(0); Page.backKey(); // Step 4 Verify: The height of items grows bigger. expect(Page.getItemSize().height).to.be.above(350); }); });
Page.numPad(7);
serial.rs
//! CDC-ACM serial port example using cortex-m-rtfm. #![no_main] #![no_std] #![allow(non_snake_case)] #![allow(dead_code)] #![allow(unused_imports)] use core::{ panic::PanicInfo, sync::atomic::{self, Ordering}, str::from_utf8_unchecked, ptr::{ read_volatile, write_volatile, }, convert::TryFrom, mem, ops::RangeInclusive, }; use cortex_m::{ interrupt, asm::*, }; use embedded_hal::digital::v2::OutputPin; use rtfm::app; use stm32f1xx_hal::{ prelude::*, time::Hertz, }; use stm32f1xx_hal::{ usb::{ Peripheral, UsbBus, UsbBusType, }, pac::FLASH, }; use usb_device::{ bus, device::{ UsbDevice, UsbDeviceBuilder, UsbVidPid, }, UsbError, }; use usbd_serial::{CdcAcmClass, SerialPort, USB_CLASS_CDC}; use itm_logger::*; use usb_bootloader::hardware_extra::*; // VID and PID are from dapboot bluepill bootloader const USB_VID: u16 = 0x1209; const USB_PID: u16 = 0xDB42; const USB_CLASS_MISCELLANEOUS: u8 = 0xEF; #[cfg(feature = "itm")] use cortex_m::{iprintln, peripheral::ITM}; #[app(device = stm32f1xx_hal::stm32, peripherals = true)] const APP: () = { struct Resources { usb_dev: UsbDevice<'static, UsbBusType>, serial: SerialPort<'static, UsbBusType>, } #[init] fn init(cx: init::Context) -> init::LateResources
#[task(binds = USB_HP_CAN_TX, resources = [usb_dev, serial])] fn usb_tx(mut cx: usb_tx::Context) { usb_poll(&mut cx.resources.usb_dev, &mut cx.resources.serial); } #[task(binds = USB_LP_CAN_RX0, resources = [usb_dev, serial])] fn usb_rx0(mut cx: usb_rx0::Context) { usb_poll(&mut cx.resources.usb_dev, &mut cx.resources.serial); } }; fn usb_poll<B: bus::UsbBus>( usb_dev: &mut UsbDevice<'static, B>, serial: &mut SerialPort<'static, B>, ) { if !usb_dev.poll(&mut [serial]) { return; } let mut buf = [0; 64]; match serial.read(&mut buf) { Ok(count) => { let _ = serial.write(&buf[..count]); }, Err(UsbError::WouldBlock) => {}, Err(e) => info!("Err: {:?}", e), } } #[panic_handler] fn panic( #[cfg_attr(not(feature = "itm"), allow(unused_variables))] info: &PanicInfo ) -> ! { interrupt::disable(); #[cfg(feature = "itm")] { let itm = unsafe { &mut *ITM::ptr() }; let stim = &mut itm.stim[0]; iprintln!(stim, "{}", info); } loop { // add some side effect to prevent this from turning into a UDF instruction // see rust-lang/rust#28728 for details atomic::compiler_fence(Ordering::SeqCst) } }
{ static mut USB_BUS: Option<bus::UsbBusAllocator<UsbBusType>> = None; #[cfg(feature = "itm")] { update_tpiu_baudrate(8_000_000, ITM_BAUD_RATE).expect("Failed to reset TPIU baudrate"); logger_init(); } info!("ITM reset ok."); let mut flash = cx.device.FLASH.constrain(); let mut rcc = cx.device.RCC.constrain(); let clocks = rcc .cfgr .use_hse(8.mhz()) .sysclk(48.mhz()) .pclk1(24.mhz()) .freeze(&mut flash.acr); #[cfg(feature = "itm")] { let sysclk: Hertz = clocks.sysclk().into(); update_tpiu_baudrate(sysclk.0, ITM_BAUD_RATE).expect("Failed to reset TPIU baudrate"); } assert!(clocks.usbclk_valid()); let flash_kib = FlashSize::get().kibi_bytes(); info!("Flash: {} KiB", flash_kib); let mut gpioa = cx.device.GPIOA.split(&mut rcc.apb2); // The BluePill board has a pull-up resistor on the D+ line. // Pull the D+ pin down to send a RESET condition to the USB bus. // This forced reset is needed only for development; without it the host // will not reset your device when you upload new firmware. let mut usb_dp = gpioa.pa12.into_push_pull_output(&mut gpioa.crh); usb_dp.set_low().unwrap(); delay(clocks.sysclk().0 / 100); let usb_dm = gpioa.pa11; let usb_dp = usb_dp.into_floating_input(&mut gpioa.crh); let usb = Peripheral { usb: cx.device.USB, pin_dm: usb_dm, pin_dp: usb_dp, }; *USB_BUS = Some(UsbBus::new(usb)); let serial = SerialPort::new(USB_BUS.as_ref().unwrap()); let serial_number = get_serial_number(); info!("Serial number: {}", serial_number); let usb_dev = UsbDeviceBuilder::new(USB_BUS.as_ref().unwrap(), UsbVidPid(USB_VID, USB_PID)) .manufacturer("Fake company") .product("Serial port") .serial_number(serial_number) .self_powered(true) .device_class(USB_CLASS_CDC) .build(); init::LateResources { usb_dev, serial } }
test_notify_slack.py
from datetime import datetime, timezone from http import HTTPStatus from json import dumps, load from logging import getLogger from os import environ from unittest.mock import MagicMock, patch from mypy_boto3_events import EventBridgeClient from mypy_boto3_lambda import LambdaClient from mypy_boto3_sns.type_defs import MessageAttributeValueTypeDef from pytest import mark from pytest_subtests import SubTests from backend.api_keys import EVENT_KEY from backend.api_responses import STATUS_CODE_KEY from backend.aws_message_attributes import DATA_TYPE_STRING from backend.notify_status_update.task import ( EVENT_DETAIL_KEY, MESSAGE_ATTRIBUTE_DATASET_KEY, MESSAGE_ATTRIBUTE_STATUS_KEY, SLACK_URL_ENV_NAME, STEP_FUNCTION_ARN_KEY, STEP_FUNCTION_STARTDATE_KEY, STEP_FUNCTION_STOPDATE_KEY, WEBHOOK_MESSAGE_BLOCKS_KEY, lambda_handler, publish_sns_message, ) from backend.resources import ResourceName from backend.step_function import Outcome from backend.step_function_keys import ( ASSET_UPLOAD_KEY, DATASET_ID_KEY, DATASET_PREFIX_KEY, ERRORS_KEY, INPUT_KEY, JOB_STATUS_FAILED, JOB_STATUS_RUNNING, JOB_STATUS_SUCCEEDED, METADATA_UPLOAD_KEY, NEW_VERSION_S3_LOCATION, OUTPUT_KEY, STATUS_KEY, STEP_FUNCTION_KEY, UPDATE_DATASET_KEY, UPLOAD_STATUS_KEY, VALIDATION_KEY, VERSION_ID_KEY, ) from .aws_utils import any_arn_formatted_string, any_lambda_context, any_s3_url from .general_generators import any_https_url from .stac_generators import any_dataset_id, any_dataset_prefix, any_dataset_version_id STEP_FUNCTION_START_MILLISECOND_TIMESTAMP = round( datetime( 2001, 2, 3, hour=4, minute=5, second=6, microsecond=789876, tzinfo=timezone.utc ).timestamp() * 1000 ) STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP = STEP_FUNCTION_START_MILLISECOND_TIMESTAMP + 10 @patch("backend.notify_status_update.task.WebhookClient.send") @patch("backend.notify_status_update.task.get_import_status_given_arn") def should_notify_slack_with_finished_details_when_url_set( step_func_status_mock: MagicMock, webhook_client_mock: MagicMock ) -> None: # Given webhook_client_mock.return_value.status_code = HTTPStatus.OK step_func_status_mock.return_value = { STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_SUCCEEDED}, VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, } mock_slack_url = any_https_url() with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): # When notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_SUCCEEDED, STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(), INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), VERSION_ID_KEY: any_dataset_version_id(), } ), OUTPUT_KEY: dumps( { UPLOAD_STATUS_KEY: { VALIDATION_KEY: "", ASSET_UPLOAD_KEY: "", METADATA_UPLOAD_KEY: "", }, UPDATE_DATASET_KEY: {NEW_VERSION_S3_LOCATION: any_s3_url()}, } ), STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP, STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP, } } lambda_handler(notify_status_update_input, any_lambda_context()) # Then assert there are 15 slack_sdk message 'blocks' sent to the webhook url webhook_client_mock.assert_called_once() assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 15 @patch("backend.notify_status_update.task.WebhookClient.send") def 
should_not_notify_slack_when_step_function_running(webhook_client_mock: MagicMock) -> None: # Given webhook_client_mock.return_value.status_code = HTTPStatus.OK mock_slack_url = any_https_url() with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): # When notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_RUNNING, STEP_FUNCTION_STOPDATE_KEY: None, } } lambda_handler(notify_status_update_input, any_lambda_context()) # Then webhook_client_mock.assert_not_called() @patch("backend.notify_status_update.task.WebhookClient.send") @patch("backend.notify_status_update.task.get_import_status_given_arn") def should_notify_slack_when_step_function_failed( step_func_status_mock: MagicMock, webhook_client_mock: MagicMock ) -> None: # Given webhook_client_mock.return_value.status_code = HTTPStatus.OK mock_slack_url = any_https_url() step_func_status_mock.return_value = { STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_FAILED}, VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, } with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): # When notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_FAILED, STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(), INPUT_KEY: dumps(
DATASET_PREFIX_KEY: any_dataset_prefix(), VERSION_ID_KEY: any_dataset_version_id(), } ), STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP, STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP, }, OUTPUT_KEY: None, } lambda_handler(notify_status_update_input, any_lambda_context()) # Then assert there are 13 slack_sdk message 'blocks' sent to the webhook url webhook_client_mock.assert_called_once() assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 13 @patch("backend.notify_status_update.task.WebhookClient.send") def should_log_and_not_post_to_slack_when_url_not_set( webhook_client_mock: MagicMock, subtests: SubTests ) -> None: # Given logger = getLogger("backend.notify_status_update.task") with patch("backend.notify_status_update.task.publish_sns_message"), patch.object( logger, "debug" ) as logger_mock: # When lambda_handler({}, any_lambda_context()) # Then with subtests.test("no slack message"): assert not webhook_client_mock.called with subtests.test("log created"): expected_log = dumps({EVENT_KEY: {}}) logger_mock.assert_any_call(expected_log) @patch("backend.notify_status_update.task.get_param") def should_publish_sns_message(get_param_mock: MagicMock) -> None: # Given get_param_mock.return_value = topic_arn = any_arn_formatted_string() dataset_prefix = any_dataset_prefix() publish_sns_message_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_SUCCEEDED, INPUT_KEY: dumps( { DATASET_PREFIX_KEY: dataset_prefix, } ), } } expected_sns_call = { "TopicArn": topic_arn, "Message": dumps(publish_sns_message_input), "MessageAttributes": { MESSAGE_ATTRIBUTE_DATASET_KEY: MessageAttributeValueTypeDef( DataType=DATA_TYPE_STRING, StringValue=dataset_prefix ), MESSAGE_ATTRIBUTE_STATUS_KEY: MessageAttributeValueTypeDef( DataType=DATA_TYPE_STRING, StringValue=JOB_STATUS_SUCCEEDED ), }, } # When with patch("backend.notify_status_update.task.SNS_CLIENT.publish") as sns_client_mock: publish_sns_message(publish_sns_message_input) # Then assert sns_client_mock.call_args[1] == expected_sns_call @mark.infrastructure def should_launch_notify_slack_endpoint_lambda_function( lambda_client: LambdaClient, events_client: EventBridgeClient ) -> None: notify_status_lambda_arn = events_client.list_targets_by_rule( Rule=ResourceName.CLOUDWATCH_RULE_NAME.value )["Targets"][0]["Arn"] # When body = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_FAILED, INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), } ), }, OUTPUT_KEY: None, } resp = load( lambda_client.invoke( FunctionName=notify_status_lambda_arn, Payload=dumps(body).encode(), )["Payload"] ) assert resp.get(STATUS_CODE_KEY) == HTTPStatus.OK, resp
{ DATASET_ID_KEY: any_dataset_id(),
mnist.py
from datetime import datetime import cv2 import re import base64 from flask import Flask, render_template, request, jsonify from flask_cors import CORS import numpy as np from io import BytesIO from PIL import Image, ImageOps import os,sys import requests from graphpipe import remote from matplotlib import pylab as plt app = Flask(__name__) CORS(app) # Handles POSTs from the Ajax front-end @app.route('/', methods=['GET', 'POST']) def index(): if request.method == 'POST': ans,t1,t2,t3 = get_answer(request) return jsonify({'ans': ans, 't1': t1, 't2': t2, 't3': t3}) else: return render_template('index.html') def result(img):
def get_answer(req): img_str = re.search(r'base64,(.*)', req.form['img']).group(1) nparr = np.frombuffer(base64.b64decode(img_str), np.uint8) img_src = cv2.imdecode(nparr, cv2.IMREAD_COLOR) img_negaposi = 255 - img_src img_gray = cv2.cvtColor(img_negaposi, cv2.COLOR_BGR2GRAY) img_resize = cv2.resize(img_gray,(28,28)) cv2.imwrite(f"images/{datetime.now().strftime('%s')}.jpg",img_resize) ans,t1,t2,t3 = result(img_resize) return int(ans),t1,t2,t3 if __name__ == "__main__": app.run(debug=False, host='0.0.0.0', port=8001)
img = img.reshape(1, 784) img = img.astype(np.float32) img = np.multiply(img, 1.0 / 255.0) pred = remote.execute("http://localhost:9001", img) r = np.argmax(pred, axis=1) pp = pred*100 top1 = str(np.argsort(-pp)[0][0])+ " (" +str(int(np.sort(-pp)[0][0]*-1))+"%)" top2 = str(np.argsort(-pp)[0][1])+ " (" +str(int(np.sort(-pp)[0][1]*-1))+"%)" top3 = str(np.argsort(-pp)[0][2])+ " (" +str(int(np.sort(-pp)[0][2]*-1))+"%)" # return int(r) return r,top1,top2,top3
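# Request contract sketch (assumed front-end behaviour): index.html is expected
# to POST a form field `img` holding a canvas data URL, e.g.
#
#     img=data:image/png;base64,iVBORw0KGgo...
#
# get_answer() strips everything up to "base64," with the regex above, decodes
# and resizes the drawing to 28x28, and result() sends it to the graphpipe
# model server on port 9001 to get the top-3 digit predictions.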
daily_default_setting.py
# -*- coding: utf-8 -*- # Copyright (c) 2019, steve and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class DailyDefaultSetting(Document):
pass
leef_output_test.go
package tests import (
func marshalLeef(msgs []map[string]interface{}) (string, error) { var ret string for _, msg := range msgs { msg["cb_server"] = "cbserver" marshaled, err := leef.Encode(msg) if err != nil { return "", err } ret += marshaled + "\n" } return ret, nil } func TestLeefOutput(t *testing.T) { t.Log("Generating LEEF output to leef_output...") processTestEvents(t, "leef_output", marshalLeef) }
"testing" )
datecontrol.js
goog.declareModuleId('os.ui.datetime.DateControlUI'); import './wheeldate.js'; import {ROOT} from '../../os.js'; import Duration from '../../time/duration.js'; import * as time from '../../time/time.js'; import TimelineController from '../../time/timelinecontroller.js'; import TimelineEventType from '../../time/timelineeventtype.js'; import Module from '../module.js'; import {apply} from '../ui.js'; const Disposable = goog.require('goog.Disposable'); const Delay = goog.require('goog.async.Delay'); const dispose = goog.require('goog.dispose'); const log = goog.require('goog.log'); const {default: TimelineControllerEvent} = goog.requireType('os.time.TimelineControllerEvent'); /** * The date-control directive. * * @return {angular.Directive} */ export const directive = () => ({ restrict: 'AE', replace: true, scope: true, templateUrl: ROOT + 'views/datetime/datecontrol.html', controller: Controller, controllerAs: 'dateControl' }); /** * The element tag for the directive. * @type {string} */ export const directiveTag = 'date-control'; /** * Add the directive to the module. */ Module.directive('dateControl', [directive]); /** * Controller for the date-control directive. * @unrestricted */ export class Controller extends Disposable { /** * Constructor. * @param {!angular.Scope} $scope The Angular scope. * @ngInject */ constructor($scope) { super(); /** * The Angular scope. * @type {?angular.Scope} * @private */ this.scope_ = $scope; /** * The timeline controller. * @type {?TimelineController} * @private */ this.tlc_ = TimelineController.getInstance(); this.tlc_.listen(TimelineEventType.RESET, this.onTimelineReset_, false, this); /** * Timer to debounce updates to the timeline controller. * @type {Delay} * @private */ this.updateControllerDelay_ = new Delay(this.updateController_, 10, this); /** * The start date. * @type {Date} */ this['startDate'] = time.toLocalDate(new Date(this.tlc_.getStart())); /** * The end date. Inclusive for custom duration. * @type {Date} */ this['endDate'] = this.getUIEndDate(); /** * The current duration choice. * @type {string} */ this['duration'] = this.getDuration(); /** * Available duration choices in the UI. * @type {!Array<string>} */ this['durations'] = [ Duration.DAY, Duration.WEEK, Duration.MONTH, Duration.LAST24HOURS, Duration.LAST48HOURS, Duration.LAST7DAYS, Duration.LAST14DAYS, Duration.LAST30DAYS, Duration.CUSTOM ]; /** * If the control is disabled (not impacting the timeline controller). * @type {boolean} */ this['disabled'] = false; /** * If the duration is relative to the current date. * @type {boolean} */ this['relativeDuration'] = time.isRelativeDuration(this['duration']); // take over updating the timeline controller this.assumeControl(); // watch for changes to start/end dates $scope.$watch('dateControl.startDate', this.onStartDateChanged_.bind(this)); $scope.$watch('dateControl.endDate', this.onEndDateChanged_.bind(this)); $scope.$on('startDate.userSelected', this.onStartDateSelected_.bind(this)); $scope.$on('$destroy', this.dispose.bind(this)); } /** * @inheritDoc */ disposeInternal() { super.disposeInternal(); dispose(this.updateControllerDelay_); this.updateControllerDelay_ = null; this.tlc_.unlisten(TimelineEventType.RESET, this.onTimelineReset_, false, this); this.tlc_ = null; this.scope_ = null; } /** * Change handler for the start date control. * * @param {?Date} newVal The new value. * @param {?Date} oldVal The old value. 
* @private */ onStartDateChanged_(newVal, oldVal) { if (newVal && oldVal && newVal.getTime() != oldVal.getTime()) { if (!this['relativeDuration'] && this['duration'] != Duration.CUSTOM) { this['startDate'] = time.floor(newVal, this['duration'], true); } else { this['startDate'] = newVal; } if (this['duration'] === Duration.CUSTOM) { // if the start date is after the end date for custom duration, make them the same (end is inclusive) if (this['startDate'] > this['endDate']) { this['endDate'] = new Date(this['startDate']); } } else if (!this['relativeDuration']) { // for all other durations (that aren't relative), set the end date from the start date this['endDate'] = time.offset(this['startDate'], this['duration'], 1, true); } if (!this['disabled']) { this.startControllerUpdate_(); } log.fine(logger, 'start changed: ' + this['startDate'].toUTCString() + ' to ' + this['endDate'].toUTCString()); } } /** * Selection handler for start date control. * * @param {angular.Scope.Event} event * @private */ onStartDateSelected_(event) { event.stopPropagation(); this.scope_.$broadcast('endDate.open'); } /** * Change handler for the end date control. * * @param {?Date} newVal The new value. * @param {?Date} oldVal The old value. * @private */ onEndDateChanged_(newVal, oldVal) { // this can only be changed by the user for custom duration, so let the controller handle it otherwise if (this['duration'] === Duration.CUSTOM && newVal && oldVal && newVal.getTime() != oldVal.getTime()) { this['endDate'] = newVal; if (this['startDate'] > this['endDate']) { // if start is after end, make them the same (end is inclusive) this['startDate'] = new Date(this['endDate']); } if (!this['disabled']) { this.startControllerUpdate_(); } log.fine(logger, 'end changed: ' + this['startDate'].toUTCString() + ' to ' + this['endDate'].toUTCString()); } } /** * Change handler for duration chooser. * * @export */ onDurationChanged() { if (!this['disabled']) { // If switching from a relative duration, make the new start relative to now. if (this['relativeDuration']) { this['startDate'] = new Date(); } // We only want to round times that are neither custom nor relative if (!this['relativeDuration'] && this['duration'] != Duration.CUSTOM) { this['startDate'] = time.floor(this['startDate'], this['duration'], true); } // Check if the NEW duration is relative this['relativeDuration'] = time.isRelativeDuration(this['duration']); switch (this['duration']) { case Duration.LAST24HOURS: this.setRelativeDateRange(1); break; case Duration.LAST48HOURS: this.setRelativeDateRange(2); break; case Duration.LAST7DAYS: this.setRelativeDateRange(7); break; case Duration.LAST14DAYS: this.setRelativeDateRange(14); break; case Duration.LAST30DAYS: this.setRelativeDateRange(30); break; case Duration.CUSTOM: // for custom duration, make dates the same (end is inclusive) this['endDate'] = new Date(this['startDate']); break; default: // for all other durations, set the end date from the start date this['endDate'] = time.offset(this['startDate'], this['duration'], 1, true); break; } this.startControllerUpdate_(); log.fine(logger, 'duration changed: ' + this['startDate'].toUTCString() + ' to ' + this['endDate'].toUTCString()); } } /** * Set the duration to a date range relative to now. * * @param {number} days * @private */ setRelativeDateRange(days) { this['startDate'] = new Date(); this['endDate'] = new Date(); this['startDate'].setUTCDate(this['startDate'].getUTCDate() - days); } /** * Start the delay to update the timeline controller. 
* * @private */ startControllerUpdate_() { if (this.updateControllerDelay_) { this.updateControllerDelay_.start(); } } /** * Updates the start/end date on the timeline controller. * * @private */ updateController_() { if (this.tlc_) { // convert local dates to utc before setting timeline controller values var controllerStart = this.getControllerStartDate(); var controllerEnd = this.getControllerEndDate(); var startTime = controllerStart.getTime(); var endTime = controllerEnd.getTime(); this.tlc_.setSuppressShowEvents(true); this.tlc_.setRange(this.tlc_.buildRange(startTime, endTime)); this.tlc_.setDuration(this['duration']); this.tlc_.setOffset(this.tlc_.getSmallestAnimateRangeLength()); this.tlc_.setSuppressShowEvents(false); this.tlc_.setCurrent(endTime); } } /** * Shifts the start/end dates by the current duration. * * @param {number} direction The shift direction: positive for forward, negative for backward. * @export */ shiftDate(direction) { var modifier = 1; if (!this['disabled']) { if (this['duration'] === Duration.CUSTOM) { // For custom durations, multiply the offset by the difference in days between the start and end dates const startDate = this['startDate'].getTime(); const endDate = this['endDate'].getTime() + time.millisecondsInDay; modifier = (endDate - startDate) / time.millisecondsInDay; } this['startDate'] = time.offset(this['startDate'], this['duration'], direction * modifier, true); this['endDate'] = time.offset(this['endDate'], this['duration'], direction * modifier, true); } } /** * Enables the date control to update the timeline controller. */ assumeControl() { if (this.tlc_) { this.tlc_.stop(); this.tlc_.clearSliceRanges(); this.updateController_(); } this['disabled'] = false; } /** * Suspends the date control from updating the timeline controller. */ releaseControl() { this['disabled'] = true; } /** * Updates the control from the timeline controller. */ update() { if (this.tlc_) { this['duration'] = this.getDuration(); this['relativeDuration'] = time.isRelativeDuration(this['duration']); this['startDate'] = this.getUIStartDate(); this['endDate'] = this.getUIEndDate(); apply(this.scope_); } } /** * Get the duration from the timeline controller. * * @return {string} The duration, or `Duration.DAY` if the timeline controller is not available. * @protected */ getDuration() { return this.tlc_ ? this.tlc_.getDuration() : Duration.DAY; } /** * Get the start date from the UI to set in the timeline controller. * * @return {Date} The start date. * @protected */ getControllerStartDate() { // Dates relative to "now" should not be translated to UTC because they did not come from jQueryUI. return this['relativeDuration'] ? this['startDate'] : time.toUTCDate(this['startDate']); } /** * Get the end date from the UI to set in the timeline controller. * * @return {Date} The end date. * @protected */ getControllerEndDate() { // Dates relative to "now" should not be translated to UTC because they did not come from jQueryUI. var endDate = this['relativeDuration'] ? this['endDate'] : time.toUTCDate(this['endDate']); if (this['duration'] === Duration.CUSTOM) { endDate.setDate(endDate.getDate() + 1); } return endDate; } /** * Get the start date from the timeline controller to display in the UI.
* * @return {Date} The start date. * @protected */ getUIStartDate() { if (this.tlc_) { // Dates relative to "now" should not be translated from UTC because they will not be used by jQueryUI. var tlcStartDate = new Date(this.tlc_.getStart()); return this['relativeDuration'] ? tlcStartDate : time.toLocalDate(tlcStartDate); } return null; } /** * Get the end date from the timeline controller to display in the UI. * * @return {Date} The end date. * @protected */ getUIEndDate() { if (this.tlc_) { // Dates relative to "now" should not be translated from UTC because they will not be used by jQueryUI. var tlcEndDate = new Date(this.tlc_.getEnd()); var endDate = this['relativeDuration'] ? tlcEndDate : time.toLocalDate(tlcEndDate); if (this['duration'] === Duration.CUSTOM) { endDate.setDate(endDate.getDate() - 1); } return endDate; } return null; } /** * Handler for timeline reset. * * @param {TimelineControllerEvent} event * @private */ onTimelineReset_(event) { this.update(); } } /** * The logger. * @type {goog.log.Logger} */ const logger = log.getLogger('os.ui.datetime.DateControlUI');
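// A minimal usage sketch (illustrative only): the directive is registered above as 'dateControl'
// with restrict 'AE', so an Angular template compiled within this module can embed the control as
// either an element or an attribute. Once linked, the controller drives the global
// TimelineController singleton until releaseControl() is called.
//
//   <date-control></date-control>
//   <div date-control></div>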
tile_list.go
package cache import ( "bufio" "context" "fmt" "io" "os" "strings" "github.com/go-spatial/cobra" "github.com/go-spatial/geom/slippy" gdcmd "github.com/go-spatial/tegola/internal/cmd" "github.com/go-spatial/tegola/internal/log" "github.com/go-spatial/tegola/provider" ) var ( // the maximum zoom to cache to maxZoom uint // the minimum zoom to cache from minZoom uint // the zoom range zooms []uint // input string format tileListFormat string ) var tileListFile *os.File var format Format = defaultTileNameFormat var explicit bool var TileListCmd = &cobra.Command{ Use: "tile-list filename|-", Short: "operate on a list of tile names separated by new lines", Example: "tile-list my-tile-list.txt", PreRunE: tileListValidate, RunE: tileListCommand, } func init() { setupMinMaxZoomFlags(TileListCmd, 0, 0) setupTileNameFormat(TileListCmd) } func tileListValidate(cmd *cobra.Command, args []string) (err error) { explicit = IsMinMaxZoomExplicit(cmd) if !explicit { // get the zoom ranges. if err = minMaxZoomValidate(cmd, args); err != nil { return err } } if len(args) == 0 { return fmt.Errorf("filename must be provided") } fname := strings.TrimSpace(args[0]) // - is used to indicate the use of stdin. if fname != "-" { // we have been provided a file name // let's set that up if tileListFile, err = os.Open(fname); err != nil { return err } } return tileNameFormatValidate(cmd, args) } func tileListCommand(cmd *cobra.Command, args []string) (err error)
// generateTilesForTileList will return a channel where all the tiles in the list will be published. // If explicit is false and zooms is not empty, it will also include each tile's ancestors and descendants within the provided zooms. func generateTilesForTileList(ctx context.Context, tilelist io.Reader, explicit bool, zooms []uint, format Format) *TileChannel { tce := &TileChannel{ channel: make(chan *slippy.Tile), } go func() { defer tce.Close() var ( err error lineNumber int tile *slippy.Tile ) scanner := bufio.NewScanner(tilelist) for scanner.Scan() { lineNumber++ txt := scanner.Text() tile, err = format.ParseTile(txt) if err != nil { tce.setError(fmt.Errorf("failed to parse tile (%v) from line [%v]: %w", txt, lineNumber, err)) return } if explicit || len(zooms) == 0 { select { case tce.channel <- tile: case <-ctx.Done(): // we have been cancelled return } continue } for _, zoom := range zooms { // range will include the original tile. err = tile.RangeFamilyAt(zoom, func(tile *slippy.Tile) error { select { case tce.channel <- tile: case <-ctx.Done(): // we have been cancelled return context.Canceled } return nil }) // gracefully stop if cancelled if err != nil { return } } } }() return tce }
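// An illustrative consumer sketch for the channel above (it assumes TileChannel exposes the
// Channel() and Err() accessors used by doWork, which are defined elsewhere in this package):
//
//	tc := generateTilesForTileList(ctx, tileListFile, explicit, zooms, format)
//	for tile := range tc.Channel() {
//		// process the tile ...
//	}
//	if err := tc.Err(); err != nil {
//		// a parse failure aborts the stream with a descriptive error
//	}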
{ ctx, cancel := context.WithCancel(context.Background()) defer gdcmd.New().Complete() gdcmd.OnComplete(provider.Cleanup) go func() { select { case <-ctx.Done(): return case <-gdcmd.Cancelled(): cancel() } }() var in io.Reader = os.Stdin if tileListFile != nil { in = tileListFile defer tileListFile.Close() } log.Info("zoom list: ", zooms) tilechannel := generateTilesForTileList(ctx, in, explicit, zooms, format) // start up workers here return doWork(ctx, tilechannel, seedPurgeMaps, cacheConcurrency, seedPurgeWorker) }
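// Hypothetical invocations, based on the Use and Example strings above (the parent command
// name is an assumption; the zoom and format flags are registered by setupMinMaxZoomFlags and
// setupTileNameFormat):
//
//	tegola cache tile-list my-tile-list.txt
//	cat my-tile-list.txt | tegola cache tile-list -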
test_ui_node.py
""" Ory APIs Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501 The version of the OpenAPI document: v0.0.1-alpha.3 Contact: [email protected] Generated by: https://openapi-generator.tech """ import sys import unittest import ory_client from ory_client.model.meta import Meta from ory_client.model.ui_node_attributes import UiNodeAttributes from ory_client.model.ui_texts import UiTexts globals()['Meta'] = Meta globals()['UiNodeAttributes'] = UiNodeAttributes globals()['UiTexts'] = UiTexts from ory_client.model.ui_node import UiNode class TestUiNode(unittest.TestCase): """UiNode unit test stubs""" def setUp(self): pass def
(self): pass def testUiNode(self): """Test UiNode""" # FIXME: construct object with mandatory attributes with example values # model = UiNode() # noqa: E501 pass if __name__ == '__main__': unittest.main()
tearDown
handy.rs
use crate::sign; use crate::key; use webpki; use crate::server; use crate::error::TLSError; use crate::server::ClientHello; use std::collections; use std::sync::{Arc, SgxMutex as Mutex}; use std::prelude::v1::*; /// Something which never stores sessions. pub struct NoServerSessionStorage {} impl server::StoresServerSessions for NoServerSessionStorage { fn put(&self, _id: Vec<u8>, _sec: Vec<u8>) -> bool { false } fn get(&self, _id: &[u8]) -> Option<Vec<u8>> { None } fn take(&self, _id: &[u8]) -> Option<Vec<u8>> { None } } /// An implementor of `StoresServerSessions` that stores everything /// in memory. It enforces a limit on the number of stored sessions /// to bound memory usage. pub struct ServerSessionMemoryCache { cache: Mutex<collections::HashMap<Vec<u8>, Vec<u8>>>, max_entries: usize, } impl ServerSessionMemoryCache { /// Make a new ServerSessionMemoryCache. `size` is the maximum /// number of stored sessions. pub fn new(size: usize) -> Arc<ServerSessionMemoryCache> { debug_assert!(size > 0); Arc::new(ServerSessionMemoryCache { cache: Mutex::new(collections::HashMap::new()), max_entries: size, }) } fn limit_size(&self) { let mut cache = self.cache.lock().unwrap(); while cache.len() > self.max_entries { let k = cache.keys().next().unwrap().clone(); cache.remove(&k); } } } impl server::StoresServerSessions for ServerSessionMemoryCache { fn put(&self, key: Vec<u8>, value: Vec<u8>) -> bool { self.cache.lock() .unwrap() .insert(key, value); self.limit_size(); true } fn get(&self, key: &[u8]) -> Option<Vec<u8>> { self.cache.lock() .unwrap() .get(key).cloned() } fn take(&self, key: &[u8]) -> Option<Vec<u8>> { self.cache.lock() .unwrap() .remove(key) } } /// Something which never produces tickets. pub struct NeverProducesTickets {} impl server::ProducesTickets for NeverProducesTickets { fn enabled(&self) -> bool { false } fn get_lifetime(&self) -> u32 { 0 } fn encrypt(&self, _bytes: &[u8]) -> Option<Vec<u8>> { None } fn decrypt(&self, _bytes: &[u8]) -> Option<Vec<u8>> { None } } /// Something which never resolves a certificate.
pub struct FailResolveChain {} impl server::ResolvesServerCert for FailResolveChain { fn resolve(&self, _client_hello: ClientHello) -> Option<sign::CertifiedKey> { None } } /// Something which always resolves to the same cert chain. pub struct AlwaysResolvesChain(sign::CertifiedKey); impl AlwaysResolvesChain { /// Creates an `AlwaysResolvesChain`, auto-detecting the underlying private /// key type and encoding. pub fn new(chain: Vec<key::Certificate>, priv_key: &key::PrivateKey) -> Result<AlwaysResolvesChain, TLSError> { let key = sign::any_supported_type(priv_key) .map_err(|_| TLSError::General("invalid private key".into()))?; Ok(AlwaysResolvesChain(sign::CertifiedKey::new(chain, Arc::new(key)))) } /// Creates an `AlwaysResolvesChain`, auto-detecting the underlying private /// key type and encoding. /// /// If non-empty, the given OCSP response and SCTs are attached. pub fn new_with_extras(chain: Vec<key::Certificate>, priv_key: &key::PrivateKey, ocsp: Vec<u8>, scts: Vec<u8>) -> Result<AlwaysResolvesChain, TLSError> { let mut r = AlwaysResolvesChain::new(chain, priv_key)?; if !ocsp.is_empty() { r.0.ocsp = Some(ocsp); } if !scts.is_empty() { r.0.sct_list = Some(scts); } Ok(r) } } impl server::ResolvesServerCert for AlwaysResolvesChain { fn resolve(&self, _client_hello: ClientHello) -> Option<sign::CertifiedKey> { Some(self.0.clone()) } } /// Something that resolves to different cert chains/keys based /// on client-supplied server name (via SNI). pub struct ResolvesServerCertUsingSNI { by_name: collections::HashMap<String, sign::CertifiedKey>, } impl ResolvesServerCertUsingSNI { /// Create a new and empty (i.e., knows no certificates) resolver. pub fn new() -> ResolvesServerCertUsingSNI { ResolvesServerCertUsingSNI { by_name: collections::HashMap::new() } } /// Add a new `sign::CertifiedKey` to be used for the given SNI `name`. /// /// This function fails if `name` is not a valid DNS name, or if /// it's not valid for the supplied certificate, or if the certificate /// chain is syntactically faulty.
pub fn add(&mut self, name: &str, ck: sign::CertifiedKey) -> Result<(), TLSError> { let checked_name = webpki::DNSNameRef::try_from_ascii_str(name) .map_err(|_| TLSError::General("Bad DNS name".into()))?; ck.cross_check_end_entity_cert(Some(checked_name))?; self.by_name.insert(name.into(), ck); Ok(()) } } impl server::ResolvesServerCert for ResolvesServerCertUsingSNI { fn resolve(&self, client_hello: ClientHello) -> Option<sign::CertifiedKey> { if let Some(name) = client_hello.server_name() { self.by_name.get(name.into()) .cloned() } else { // This kind of resolver requires SNI None } } } #[cfg(test)] mod test { use super::*; use crate::StoresServerSessions; use crate::server::ProducesTickets; use crate::server::ResolvesServerCert; #[test] fn test_noserversessionstorage_drops_put() { let c = NoServerSessionStorage {}; assert_eq!(c.put(vec![0x01], vec![0x02]), false); } #[test] fn test_noserversessionstorage_denies_gets() { let c = NoServerSessionStorage {}; c.put(vec![0x01], vec![0x02]); assert_eq!(c.get(&[]), None); assert_eq!(c.get(&[0x01]), None); assert_eq!(c.get(&[0x02]), None); } #[test] fn test_noserversessionstorage_denies_takes() { let c = NoServerSessionStorage {}; assert_eq!(c.take(&[]), None); assert_eq!(c.take(&[0x01]), None); assert_eq!(c.take(&[0x02]), None); } #[test] fn test_serversessionmemorycache_accepts_put() { let c = ServerSessionMemoryCache::new(4); assert_eq!(c.put(vec![0x01], vec![0x02]), true); } #[test] fn test_serversessionmemorycache_persists_put() { let c = ServerSessionMemoryCache::new(4); assert_eq!(c.put(vec![0x01], vec![0x02]), true); assert_eq!(c.get(&[0x01]), Some(vec![0x02])); assert_eq!(c.get(&[0x01]), Some(vec![0x02])); } #[test] fn test_serversessionmemorycache_overwrites_put() { let c = ServerSessionMemoryCache::new(4); assert_eq!(c.put(vec![0x01], vec![0x02]), true); assert_eq!(c.put(vec![0x01], vec![0x04]), true); assert_eq!(c.get(&[0x01]), Some(vec![0x04])); } #[test] fn test_serversessionmemorycache_drops_to_maintain_size_invariant() { let c = ServerSessionMemoryCache::new(4); assert_eq!(c.put(vec![0x01], vec![0x02]), true); assert_eq!(c.put(vec![0x03], vec![0x04]), true); assert_eq!(c.put(vec![0x05], vec![0x06]), true); assert_eq!(c.put(vec![0x07], vec![0x08]), true); assert_eq!(c.put(vec![0x09], vec![0x0a]), true); let mut count = 0; if c.get(&[0x01]).is_some() { count += 1; } if c.get(&[0x03]).is_some() { count += 1; } if c.get(&[0x05]).is_some() { count += 1; } if c.get(&[0x07]).is_some() { count += 1; } if c.get(&[0x09]).is_some() { count += 1; } assert_eq!(count, 4); } #[test] fn test_neverproducestickets_does_nothing() { let npt = NeverProducesTickets {}; assert_eq!(false, npt.enabled()); assert_eq!(0, npt.get_lifetime()); assert_eq!(None, npt.encrypt(&[])); assert_eq!(None, npt.decrypt(&[])); } #[test] fn test_failresolvechain_does_nothing() { let frc = FailResolveChain {}; assert!(frc.resolve(ClientHello::new(None, &[], None)).is_none()); } #[test] fn test_resolvesservercertusingsni_requires_sni() { let rscsni = ResolvesServerCertUsingSNI::new(); assert!(rscsni.resolve(ClientHello::new(None, &[], None)).is_none()); } #[test] fn test_resolvesservercertusingsni_handles_unknown_name() { let rscsni = ResolvesServerCertUsingSNI::new(); let name = webpki::DNSNameRef::try_from_ascii_str("hello.com").unwrap(); assert!(rscsni.resolve(ClientHello::new(Some(name), &[], None)).is_none()); } }
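// A minimal usage sketch for ResolvesServerCertUsingSNI (cert_chain and private_key are
// placeholders; the CertifiedKey construction mirrors AlwaysResolvesChain::new above):
//
//     let key = sign::any_supported_type(&private_key)
//         .map_err(|_| TLSError::General("invalid private key".into()))?;
//     let ck = sign::CertifiedKey::new(cert_chain, Arc::new(key));
//     let mut resolver = ResolvesServerCertUsingSNI::new();
//     resolver.add("example.com", ck)?;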
workbook.rs
//! File Xl Workbook //! //! This module provides the functionality necessary to interact with an Excel workbook (i.e., the //! entire file). use std::collections::HashMap; use std::io::{BufReader, Read, Seek}; use lazy_static::lazy_static; use quick_xml::{events::Event, Reader}; use zip::ZipArchive; use super::{util, DateSystem, SheetReader, Worksheet}; use crate::{FabrixError, FabrixResult}; /// The main struct of this module. /// /// The Workbook is the primary object you will use in this module. The public interface allows you /// to see the path of the workbook as well as its date system. /// /// # Example usage: /// /// use xl::Workbook; /// let mut wb = Workbook::open("tests/data/Book1.xlsx").unwrap(); /// #[derive(Debug)] pub struct Workbook<READER: Read + Seek> { xls: ZipArchive<READER>, encoding: String, date_system: DateSystem, strings: Vec<String>, styles: Vec<String>, } /// A `SheetMap` is an object containing all the sheets in a given workbook. The only way to obtain /// a `SheetMap` is from an `xl::Workbook` object. /// /// # Example usage: /// /// use xl::{Workbook, Worksheet}; /// /// let mut wb = Workbook::open("tests/data/Book1.xlsx").unwrap(); /// let sheets = wb.sheets(); #[derive(Debug)] pub struct SheetMap { sheets_by_name: HashMap<String, u8>, sheets_by_num: Vec<Option<Worksheet>>, } impl SheetMap { /// After you obtain a `SheetMap`, `by_name` gives you a list of sheets in the `SheetMap` /// ordered by their position in the workbook. /// /// Example usage: /// /// use xl::{Workbook, Worksheet}; /// /// let mut wb = Workbook::open("tests/data/Book1.xlsx").unwrap(); /// let sheets = wb.sheets(); /// let sheet_names = sheets.by_name(); /// assert_eq!(sheet_names[2], "Time"); /// /// Note that the returned array is **ZERO** based rather than **ONE** based like `get`. The /// reason for this is that we want `get` to act like VBA, but here we are only looking for a /// list of names so the `Option` type seemed like overkill. (We have `get` act like VBA /// because I expect people who will use this library will be very used to that "style" and may /// expect the same thing in this library. If it becomes an issue, we can change it later). pub fn by_name(&self) -> Vec<&str> { self.sheets_by_num .iter() .filter(|&s| s.is_some()) .map(|s| &s.as_ref().unwrap().name[..]) .collect() } } /// Struct to let you refer to sheets by name or by position (1-based). pub enum SheetNameOrNum<'a> { Name(&'a str), Pos(usize), } /// Trait to make it easy to use `get` when trying to get a sheet. You will probably not use this /// trait directly. pub trait SheetAccessTrait { fn go(&self) -> SheetNameOrNum; } impl SheetAccessTrait for &str { fn go(&self) -> SheetNameOrNum { SheetNameOrNum::Name(*self) } } impl SheetAccessTrait for &String { fn go(&self) -> SheetNameOrNum { SheetNameOrNum::Name(*self) } } impl SheetAccessTrait for usize { fn go(&self) -> SheetNameOrNum { SheetNameOrNum::Pos(*self) } } impl SheetMap { /// An easy way to obtain a reference to a `Worksheet` within this `Workbook`. Note that we /// return an `Option` because the sheet you want may not exist in the workbook. Also note that /// when you try to `get` a worksheet by number (i.e., by its position within the workbook), /// the tabs use **1-based indexing** rather than 0-based indexing (like the rest of Rust and /// most of the programming world). This was an intentional design choice to make things /// consistent with VBA.
It's possible it may change in the future, but it seems intuitive /// enough if you are familiar with VBA and Excel programming, so it may not. /// /// # Example usage /// /// use xl::{Workbook, Worksheet}; /// /// let mut wb = Workbook::open("tests/data/Book1.xlsx").unwrap(); /// let sheets = wb.sheets(); /// /// // by sheet name /// let time_sheet = sheets.get("Time"); /// assert!(time_sheet.is_some()); /// /// // unknown sheet name /// let unknown_sheet = sheets.get("not in this workbook"); /// assert!(unknown_sheet.is_none()); /// /// // by position /// let unknown_sheet = sheets.get(1); /// assert_eq!(unknown_sheet.unwrap().name, "Sheet1"); pub fn get<T: SheetAccessTrait>(&self, sheet: T) -> Option<&Worksheet> { let sheet = sheet.go(); match sheet { SheetNameOrNum::Name(n) => match self.sheets_by_name.get(n) { Some(p) => self.sheets_by_num.get(*p as usize)?.as_ref(), None => None, }, SheetNameOrNum::Pos(n) => self.sheets_by_num.get(n)?.as_ref(), } } pub fn is_empty(&self) -> bool { self.sheets_by_num.is_empty() } /// The number of active sheets in the workbook. /// /// # Example usage /// /// use xl::{Workbook, Worksheet}; /// /// let mut wb = Workbook::open("tests/data/Book1.xlsx").unwrap(); /// let sheets = wb.sheets(); /// assert_eq!(sheets.len(), 4); pub fn len(&self) -> u8 { (self.sheets_by_num.len() - 1) as u8 } } impl<READER> Workbook<READER> where READER: Read + Seek, { /// xlsx zips contain an xml file that has a mapping of "ids" to "targets." The ids are used /// to uniquely identify sheets within the file. The targets have information on where the /// sheets can be found within the zip. This function returns a hashmap of id -> target so that /// you can quickly determine the name of the sheet xml file within the zip. fn rels(&mut self) -> FabrixResult<HashMap<String, String>> { let mut map = HashMap::new(); match self.xls.by_name("xl/_rels/workbook.xml.rels") { Ok(rels) => { // Looking for tree structure like: // Relationships // Relationship(id = "abc", target = "def") // Relationship(id = "ghi", target = "lkm") // etc. // Each relationship contains an id that is used to reference // the sheet and a target which tells us where we can find the // sheet in the zip file. // // Uncomment the following line to print out a copy of what // the xml looks like (will probably not be too big). // let _ = std::io::copy(&mut rels, &mut std::io::stdout()); let reader = BufReader::new(rels); let mut reader = Reader::from_reader(reader); reader.trim_text(true); let mut buf = Vec::new(); loop { match reader.read_event(&mut buf) { Ok(Event::Empty(ref e)) if e.name() == b"Relationship" => { let mut id = String::new(); let mut target = String::new(); e.attributes().for_each(|a| { let a = a.unwrap(); if a.key == b"Id" { id = util::attr_value(&a); } if a.key == b"Target" { target = util::attr_value(&a); } }); map.insert(id, target); } // exits the loop when reaching end of file Ok(Event::Eof) => break, Err(e) => { return Err(FabrixError::new_common_error(format!( "Error at position {}: {:?}", reader.buffer_position(), e ))) } // There are several other `Event`s we do not consider here _ => (), } buf.clear(); } Ok(map) } Err(_) => Ok(map), } } /// Return `SheetMap` of all sheets in this workbook. See `SheetMap` class and associated /// methods for more detailed documentation. 
pub fn sheets(&mut self) -> FabrixResult<SheetMap> { let rels = self.rels()?; let num_sheets = rels .iter() .filter(|(_, v)| v.starts_with("worksheet")) .count(); let mut sheets = SheetMap { sheets_by_name: HashMap::new(), sheets_by_num: Vec::with_capacity(num_sheets + 1), }; // never a "0" sheet (consistent with VBA) sheets.sheets_by_num.push(None); match self.xls.by_name("xl/workbook.xml") { Ok(wb) => { // let _ = std::io::copy(&mut wb, &mut std::io::stdout()); let reader = BufReader::new(wb); let mut reader = Reader::from_reader(reader); reader.trim_text(true); let mut buf = Vec::new(); let mut current_sheet_num: u8 = 0; loop { match reader.read_event(&mut buf) { Ok(Event::Empty(ref e)) if e.name() == b"sheet" => { current_sheet_num += 1; let mut name = String::new(); let mut id = String::new(); let mut num = 0; e.attributes().for_each(|a| { let a = a.unwrap(); if a.key == b"r:id" { id = util::attr_value(&a); } if a.key == b"name" { name = util::attr_value(&a); } if a.key == b"sheetId" { if let Ok(r) = util::attr_value(&a).parse() { num = r; } } }); sheets .sheets_by_name .insert(name.clone(), current_sheet_num); let target = { let s = rels.get(&id).unwrap(); if let Some(stripped) = s.strip_prefix('/') { stripped.to_string() } else { "xl/".to_owned() + s } }; let ws = Worksheet::new(name, current_sheet_num, id, target, num); sheets.sheets_by_num.push(Some(ws)); } Ok(Event::Eof) => break, Err(e) => { return Err(FabrixError::new_common_error(format!( "Error at position {}: {:?}", reader.buffer_position(), e ))) } _ => (), } buf.clear(); } Ok(sheets) } Err(e) => Err(e.into()), } } /// Open an existing workbook (xlsx file) from any `Read + Seek` source. Returns a `Result` in /// case there is an error opening the workbook. /// /// # Example usage: /// /// use std::fs::File; /// use xl::Workbook; /// /// let file = File::open("tests/data/Book1.xlsx").unwrap(); /// let mut wb = Workbook::new(file); /// assert!(wb.is_ok()); /// /// // a non-existent file fails at `File::open`, before `Workbook::new` is ever reached /// /// // non-xlsx file /// let file = File::open("src/main.rs").unwrap(); /// let mut wb = Workbook::new(file); /// assert!(wb.is_err()); pub fn new(file: READER) -> FabrixResult<Self> { match ZipArchive::new(file) { Ok(mut xls) => { let strings = strings(&mut xls)?; let styles = find_styles(&mut xls)?; let date_system = get_date_system(&mut xls)?; Ok(Workbook { xls, encoding: "utf8".to_owned(), date_system, strings, styles, }) } Err(e) => Err(e.into()), } } /// Simple method to print out all the inner files of the xlsx zip. pub fn contents(&mut self) { for i in 0..self.xls.len() { let file = self.xls.by_index(i).unwrap(); let outpath = match file.enclosed_name() { Some(path) => path.to_owned(), None => continue, }; if (&*file.name()).ends_with('/') { println!("File {}: \"{}\"", i, outpath.display()); } else { println!( "File {}: \"{}\" ({} bytes)", i, outpath.display(), file.size() ); } } } /// Create a SheetReader for the given worksheet. A `SheetReader` is a struct in the /// `xl::Worksheet` class that can be used to iterate over rows, etc. See documentation in the /// `xl::Worksheet` module for more information.
pub(crate) fn sheet_reader<'a>( &'a mut self, zip_target: &str, ) -> FabrixResult<SheetReader<'a>> { let target = match self.xls.by_name(zip_target) { Ok(ws) => ws, Err(_) => { return Err(FabrixError::new_common_error(format!( "Could not find worksheet: {}", zip_target ))) } }; // let _ = std::io::copy(&mut target, &mut std::io::stdout()); let reader = BufReader::new(target); let mut reader = Reader::from_reader(reader); reader.trim_text(true); Ok(SheetReader::new( reader, &self.strings, &self.styles, &self.date_system, )) } pub fn encoding(&self) -> &str { &self.encoding } } fn strings<READER: Read + Seek>(zip_file: &mut ZipArchive<READER>) -> FabrixResult<Vec<String>> { let mut strings = Vec::new(); match zip_file.by_name("xl/sharedStrings.xml") { Ok(strings_file) => { let reader = BufReader::new(strings_file); let mut reader = Reader::from_reader(reader); reader.trim_text(true); let mut buf = Vec::new(); let mut this_string = String::new(); let mut preserve_space = false; loop { match reader.read_event(&mut buf) { Ok(Event::Start(ref e)) if e.name() == b"t" => { if let Some(att) = util::get(e.attributes(), b"xml:space") { if att == "preserve" { preserve_space = true; } else { preserve_space = false; } } else { preserve_space = false; } } Ok(Event::Text(ref e)) => { this_string.push_str(&e.unescape_and_decode(&reader).unwrap()[..]) } Ok(Event::Empty(ref e)) if e.name() == b"t" => strings.push("".to_owned()), Ok(Event::End(ref e)) if e.name() == b"t" => { if preserve_space { strings.push(this_string.to_owned()); } else { strings.push(this_string.trim().to_owned()); } this_string = String::new(); } Ok(Event::Eof) => break, Err(e) => { return Err(FabrixError::new_common_error(format!( "Error at position {}: {:?}", reader.buffer_position(), e, ))) } _ => (), } buf.clear(); }
} Err(_) => Ok(strings), } } /// Find the cell styles used in this workbook. Reads `xl/styles.xml` and returns the number /// format code for each `xf` entry in the `cellXfs` collection (one entry per cell style), /// falling back to an empty list when the styles part is missing. fn find_styles<READER: Read + Seek>(xlsx: &mut ZipArchive<READER>) -> FabrixResult<Vec<String>> { let mut styles = Vec::new(); let mut number_formats = standard_styles(); let styles_xml = match xlsx.by_name("xl/styles.xml") { Ok(s) => s, Err(_) => return Ok(styles), }; // let _ = std::io::copy(&mut styles_xml, &mut std::io::stdout()); let reader = BufReader::new(styles_xml); let mut reader = Reader::from_reader(reader); reader.trim_text(true); let mut buf = Vec::new(); let mut record_styles = false; loop { match reader.read_event(&mut buf) { Ok(Event::Empty(ref e)) if e.name() == b"numFmt" => { let id = util::get(e.attributes(), b"numFmtId").unwrap(); let code = util::get(e.attributes(), b"formatCode").unwrap(); number_formats.insert(id, code); } Ok(Event::Start(ref e)) if e.name() == b"cellXfs" => { // Section 2.1.589 Part 1 Section 18.3.1.4, c (Cell) // Item g. states that Office specifies that @s indexes into the cellXfs collection // in the style part. See https://tinyurl.com/yju9a6ox for more information. record_styles = true; } Ok(Event::End(ref e)) if e.name() == b"cellXfs" => record_styles = false, Ok(Event::Start(ref e)) | Ok(Event::Empty(ref e)) if record_styles && e.name() == b"xf" => { let id = util::get(e.attributes(), b"numFmtId").unwrap(); if number_formats.contains_key(&id) { styles.push(number_formats.get(&id).unwrap().to_string()); } } Ok(Event::Eof) => break, Err(e) => { return Err(FabrixError::new_common_error(format!( "Error at position {}: {:?}", reader.buffer_position(), e ))) } _ => (), } buf.clear(); } Ok(styles) } lazy_static!
{ static ref STANDARD_STYLES: HashMap<&'static str, &'static str> = vec![ ("0", "General"), ("1", "0"), ("2", "0.00"), ("3", "#,##0"), ("4", "#,##0.00"), ("9", "0%"), ("10", "0.00%"), ("11", "0.00E+00"), ("12", "# ?/?"), ("13", "# ??/??"), ("14", "mm-dd-yy"), ("15", "d-mmm-yy"), ("16", "d-mmm"), ("17", "mmm-yy"), ("18", "h:mm AM/PM"), ("19", "h:mm:ss AM/PM"), ("20", "h:mm"), ("21", "h:mm:ss"), ("22", "m/d/yy h:mm"), ("37", "#,##0 ;(#,##0)"), ("38", "#,##0 ;[Red](#,##0)"), ("39", "#,##0.00;(#,##0.00)"), ("40", "#,##0.00;[Red](#,##0.00)"), ("45", "mm:ss"), ("46", "[h]:mm:ss"), ("47", "mmss.0"), ("48", "##0.0E+0"), ("49", "@"), ] .into_iter() .collect(); } /// Return hashmap of standard styles (ISO/IEC 29500:2011 in Part 1, section 18.8.30) fn standard_styles() -> HashMap<String, String> { let mut styles = HashMap::new(); for (id, code) in STANDARD_STYLES.iter() { styles.insert(id.to_string(), code.to_string()); } styles } fn get_date_system<READER: Read + Seek>(xlsx: &mut ZipArchive<READER>) -> FabrixResult<DateSystem> { match xlsx.by_name("xl/workbook.xml") { Ok(wb) => { let reader = BufReader::new(wb); let mut reader = Reader::from_reader(reader); reader.trim_text(true); let mut buf = Vec::new(); loop { match reader.read_event(&mut buf) { Ok(Event::Empty(ref e)) if e.name() == b"workbookPr" => { if let Some(system) = util::get(e.attributes(), b"date1904") { if system == "1" { break Ok(DateSystem::V1904); } } break Ok(DateSystem::V1900); } Ok(Event::Eof) => break Ok(DateSystem::V1900), Err(e) => { return Err(FabrixError::new_common_error(format!( "Error at position {}: {:?}", reader.buffer_position(), e ))) } _ => (), } buf.clear(); } } Err(_) => Err(FabrixError::new_common_error( "Could not find xl/workbook.xml", )), } }
Ok(strings)
device.py
# -*- coding: utf-8 -*- # # osc2rtmidi/device.py # """MIDI device abstraction classes.""" import logging import time from rtmidi.midiutil import open_midioutput __all__ = ("RtMidiDevice",) log = logging.getLogger(__name__) class RtMidiDevice(object): """Provides a common API for different MIDI driver implementations.""" def __init__(self, name="RtMidiDevice", port=None, portname=None): self.name = name self.port = port self.portname = portname self._output = None def __str__(self): return self.portname def open_output(self): self._output, self.portname = open_midioutput(self.port, interactive=False, client_name=self.name, use_virtual=True) def close_output(self): if self._output is not None:
self._output.close_port() def send(self, events): if self._output: for ev in events: self._output.send_message(ev) def send_sysex(self, msg): if self._output: self._output.send_message([ord(c) for c in msg]) @classmethod def time(cls): return time.time() / 1000.
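# Illustrative usage (the port number and note bytes are placeholder values):
#
#   dev = RtMidiDevice(name="example", port=0)
#   dev.open_output()
#   dev.send([[0x90, 60, 100], [0x80, 60, 0]])  # note-on then note-off for middle C
#   dev.close_output()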
azmutils.py
from math import sqrt from math import pi import json import tf from geometry_msgs.msg import Quaternion
o = 0 for i in range(len(a)): o += (a[i]-b[i])**2 return sqrt(o) def quaternion_from_euler(roll, pitch, yaw): ''' From HSR's utils.py ''' q = tf.transformations.quaternion_from_euler(roll / 180.0 * pi, pitch / 180.0 * pi, yaw / 180.0 * pi, 'rxyz') return Quaternion(q[0], q[1], q[2], q[3]) def euler_from_quaternion(q): q = tf.transformations.euler_from_quaternion([q.x, q.y, q.z, q.w], 'rxyz') return (q[0]/pi * 180, q[1]/pi * 180, q[2]/pi * 180) def str_to_obj(string): """ Converts JSON string to data structure Args: string (str): valid JSON string Raises: ValueError: if input isn't a valid JSON string Returns: Data structure: the parsed Python object """ try: return json.loads(string) except ValueError as e: raise ValueError("ValueError occurred when loading JSON string: {}, the input was: {}".format(e, string)) def obj_to_str(obj): return json.dumps(obj)
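# Illustrative usage of the helpers in this module (values are hypothetical):
#
#   dynamic_euclid_dist([0, 0], [3, 4])  # -> 5.0, for any shared dimensionality
#   str_to_obj('{"a": 1}')               # -> {'a': 1}
#   obj_to_str({'a': 1})                 # -> '{"a": 1}'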
def dynamic_euclid_dist(a, b):
tls_test.go
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package mtls import ( "crypto/x509" "errors" "fmt" "io/ioutil" "net" "net/http" "strings" "testing" "time" "github.com/alipay/sofa-mosn/pkg/api/v2" "github.com/alipay/sofa-mosn/pkg/log" "github.com/alipay/sofa-mosn/pkg/mtls/certtool" "github.com/alipay/sofa-mosn/pkg/types" "golang.org/x/net/http2" ) type MockListener struct { net.Listener Mng types.TLSContextManager } func
(t *testing.T, addr string, cltMng types.TLSContextManager) (*http.Response, error) { c, err := net.Dial("tcp", addr) if err != nil { return nil, fmt.Errorf("request server error %v", err) } var conn net.Conn var req *http.Request conn = c if cltMng != nil { req, _ = http.NewRequest("GET", "https://"+addr, nil) conn = cltMng.Conn(c) tlsConn, _ := conn.(*TLSConn) if err := tlsConn.Handshake(); err != nil { return nil, fmt.Errorf("request tls handshake error %v", err) } } else { req, _ = http.NewRequest("GET", "http://"+addr, nil) } transport := &http2.Transport{} h2Conn, err := transport.NewClientConn(conn) return h2Conn.RoundTrip(req) } func (ln MockListener) Accept() (net.Conn, error) { conn, err := ln.Listener.Accept() if err != nil { return conn, err } return ln.Mng.Conn(conn), nil } type MockServer struct { Mng types.TLSContextManager Addr string server *http2.Server t *testing.T listener *MockListener } func (s *MockServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "mock server") } func (s *MockServer) GoListenAndServe(t *testing.T) { ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Errorf("listen failed %v", err) return } s.Addr = ln.Addr().String() s.listener = &MockListener{ln, s.Mng} go s.serve(s.listener) } func (s *MockServer) serve(ln *MockListener) { for { c, e := ln.Accept() if e != nil { return } go func() { mux := http.NewServeMux() mux.HandleFunc("/", s.ServeHTTP) server := &http2.Server{} s.server = server if tlsConn, ok := c.(*TLSConn); ok { tlsConn.SetALPN(http2.NextProtoTLS) if err := tlsConn.Handshake(); err != nil { s.t.Logf("Handshake failed, %v", err) return } } server.ServeConn(c, &http2.ServeConnOpts{ Handler: mux, }) }() } } func (s *MockServer) Close() { s.listener.Close() } type certInfo struct { CommonName string Curve string DNS string } func (c *certInfo) CreateCertConfig() (*v2.TLSConfig, error) { priv, err := certtool.GeneratePrivateKey(c.Curve) if err != nil { return nil, fmt.Errorf("generate key failed %v", err) } var dns []string if c.DNS != "" { dns = append(dns, c.DNS) } tmpl, err := certtool.CreateTemplate(c.CommonName, false, dns) if err != nil { return nil, fmt.Errorf("generate certificate template failed %v", err) } cert, err := certtool.SignCertificate(tmpl, priv) if err != nil { return nil, fmt.Errorf("sign certificate failed %v", err) } return &v2.TLSConfig{ Status: true, CACert: certtool.GetRootCA().CertPem, CertChain: cert.CertPem, PrivateKey: cert.KeyPem, }, nil } // TestServerContextManagerWithMultipleCert tests the contextManager's core logic: // it makes three certificates with different DNS names and common names, then // verifies the context manager can find the correct certificate for each client func TestServerContextManagerWithMultipleCert(t *testing.T) { var filterChains []v2.FilterChain testCases := []struct { Info *certInfo Addr string }{ {Info: &certInfo{"Cert1", "RSA", "www.example.com"}, Addr: "www.example.com"}, {Info: &certInfo{"Cert2", "RSA", "*.example.com"}, Addr: "test.example.com"}, {Info: &certInfo{"Cert3", "P256", "*.com"}, Addr: "www.foo.com"}, } for i, tc := range testCases { cfg, err := tc.Info.CreateCertConfig() if err != nil { t.Errorf("#%d %v", i, err) return } fc := v2.FilterChain{ TLS: *cfg, } filterChains = append(filterChains, fc) } lc := &v2.Listener{ ListenerConfig: v2.ListenerConfig{ FilterChains: filterChains, }, } ctxMng, err := NewTLSServerContextManager(lc, nil, log.StartLogger) if err != nil { t.Errorf("create context manager failed %v", err) return } server := MockServer{ Mng: ctxMng,
t: t, } server.GoListenAndServe(t) defer server.Close() time.Sleep(time.Second) // wait for the server to start // request with a different "servername" for each case; the context manager just finds // a certificate to respond with, and that certificate may not match the client for i, tc := range testCases { cfg := &v2.TLSConfig{ Status: true, ServerName: tc.Addr, InsecureSkip: true, } cltMng, err := NewTLSClientContextManager(cfg, nil) if err != nil { t.Errorf("create client context manager failed %v", err) continue } resp, err := MockClient(t, server.Addr, cltMng) if err != nil { t.Errorf("#%d request server error %v", i, err) continue } serverCN := resp.TLS.PeerCertificates[0].Subject.CommonName if serverCN != tc.Info.CommonName { t.Errorf("#%d expected request server config %s, but got %s", i, tc.Info.CommonName, serverCN) } ioutil.ReadAll(resp.Body) resp.Body.Close() } // request an unknown server name; the server falls back to the first certificate cfg := &v2.TLSConfig{ Status: true, ServerName: "www.example.net", InsecureSkip: true, } cltMng, err := NewTLSClientContextManager(cfg, nil) if err != nil { t.Errorf("create client context manager failed %v", err) return } resp, err := MockClient(t, server.Addr, cltMng) if err != nil { t.Errorf("request server error %v", err) return } defer resp.Body.Close() serverCN := resp.TLS.PeerCertificates[0].Subject.CommonName expected := testCases[0].Info.CommonName if serverCN != expected { t.Errorf("expected request server config %s, but got %s", expected, serverCN) } ioutil.ReadAll(resp.Body) } // TestVerifyClient tests that a client must present a certificate to the server func TestVerifyClient(t *testing.T) { info := &certInfo{ CommonName: "test", Curve: "P256", } cfg, err := info.CreateCertConfig() if err != nil { t.Error(err) return } cfg.VerifyClient = true filterChains := []v2.FilterChain{ { TLS: *cfg, }, } lc := &v2.Listener{} lc.FilterChains = filterChains ctxMng, err := NewTLSServerContextManager(lc, nil, log.StartLogger) if err != nil { t.Errorf("create context manager failed %v", err) return } server := MockServer{ Mng: ctxMng, t: t, } server.GoListenAndServe(t) defer server.Close() time.Sleep(time.Second) // wait for the server to start clientConfigs := []*v2.TLSConfig{ // Verify Server { Status: true, CACert: cfg.CACert, CertChain: cfg.CertChain, PrivateKey: cfg.PrivateKey, ServerName: "127.0.0.1", }, // Skip Verify Server { Status: true, CertChain: cfg.CertChain, PrivateKey: cfg.PrivateKey, InsecureSkip: true, }, } for i, cfg := range clientConfigs { cltMng, err := NewTLSClientContextManager(cfg, nil) if err != nil { t.Errorf("#%d create client context manager failed %v", i, err) continue } resp, err := MockClient(t, server.Addr, cltMng) if err != nil { t.Errorf("request server error %v", err) continue } ioutil.ReadAll(resp.Body) resp.Body.Close() } cfg = &v2.TLSConfig{ Status: true, ServerName: "127.0.0.1", InsecureSkip: true, } cltMng, err := NewTLSClientContextManager(cfg, nil) if err != nil { t.Errorf("create client context manager failed %v", err) return } resp, err := MockClient(t, server.Addr, cltMng) // expected bad certificate if err == nil { ioutil.ReadAll(resp.Body) resp.Body.Close() t.Errorf("server should verify client certificate") return } } // TestInspector tests that the context manager supports both TLS and non-TLS connections func TestInspector(t *testing.T) { info := &certInfo{ CommonName: "test", Curve: "P256", DNS: "test", } cfg, err := info.CreateCertConfig() if err != nil { t.Error(err) return } cfg.VerifyClient = true filterChains := []v2.FilterChain{ { TLS: *cfg, }, } lc := &v2.Listener{ ListenerConfig:
v2.ListenerConfig{ Inspector: true, FilterChains: filterChains, }, } ctxMng, err := NewTLSServerContextManager(lc, nil, log.StartLogger) if err != nil { t.Errorf("create context manager failed %v", err) return } server := MockServer{ Mng: ctxMng, t: t, } server.GoListenAndServe(t) defer server.Close() time.Sleep(time.Second) // wait for the server to start cltMng, err := NewTLSClientContextManager(&v2.TLSConfig{ Status: true, CACert: cfg.CACert, CertChain: cfg.CertChain, PrivateKey: cfg.PrivateKey, ServerName: "test", }, nil) if err != nil { t.Errorf("create client context manager failed %v", err) return } // non-tls resp, err := MockClient(t, server.Addr, nil) if err != nil { t.Errorf("request server error %v", err) return } ioutil.ReadAll(resp.Body) resp.Body.Close() // tls resp, err = MockClient(t, server.Addr, cltMng) if err != nil { t.Errorf("request server error %v", err) return } ioutil.ReadAll(resp.Body) resp.Body.Close() } // testConfigHooks implements ConfigHooks for tests: // it defines VerifyPeerCertificate to verify the common name instead of the SAN, ignoring key usage type testConfigHooks struct { defaultConfigHooks Name string Root *x509.CertPool PassCommonName string } // overrides the default hook func (hook *testConfigHooks) VerifyPeerCertificate() func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { return hook.verifyPeerCertificate } func (hook *testConfigHooks) GetX509Pool(caIndex string) (*x509.CertPool, error) { return hook.Root, nil } // verifiedChains is always nil func (hook *testConfigHooks) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { var certs []*x509.Certificate for _, asn1Data := range rawCerts { cert, err := x509.ParseCertificate(asn1Data) if err != nil { return err } certs = append(certs, cert) } intermediates := x509.NewCertPool() for _, cert := range certs[1:] { intermediates.AddCert(cert) } opts := x509.VerifyOptions{ Roots: hook.Root, Intermediates: intermediates, } leaf := certs[0] _, err := leaf.Verify(opts) if err != nil { return err } if leaf.Subject.CommonName != hook.PassCommonName { return errors.New("common name mismatch") } return nil } func pass(resp *http.Response, err error) bool { if err != nil { return false } ioutil.ReadAll(resp.Body) resp.Body.Close() return true } func fail(resp *http.Response, err error) bool { if err != nil { return true } ioutil.ReadAll(resp.Body) resp.Body.Close() return false } const testType = "test" type testConfigHooksFactory struct{} func (f *testConfigHooksFactory) CreateConfigHooks(config map[string]interface{}) ConfigHooks { c := make(map[string]string) for k, v := range config { if s, ok := v.(string); ok { c[strings.ToLower(k)] = s } } root := certtool.GetRootCA() pool := x509.NewCertPool() pool.AppendCertsFromPEM([]byte(root.CertPem)) return &testConfigHooks{ defaultConfigHooks: defaultConfigHooks{}, Name: c["name"], PassCommonName: c["cn"], Root: pool, } } // TestTLSExtensionsVerifyClient tests that the server only allows requests from clients whose certificate common name is "client" func TestTLSExtensionsVerifyClient(t *testing.T) { // Server extendVerify := map[string]interface{}{ "name": "server", "cn": "client", } serverInfo := &certInfo{ CommonName: extendVerify["name"].(string), Curve: "RSA", } serverConfig, err := serverInfo.CreateCertConfig() if err != nil { t.Errorf("create server certificate error %v", err) return } serverConfig.VerifyClient = true serverConfig.Type = testType serverConfig.ExtendVerify = extendVerify filterChains := []v2.FilterChain{ { TLS: *serverConfig, }, } lc := &v2.Listener{} lc.FilterChains = filterChains
ctxMng, err := NewTLSServerContextManager(lc, nil, log.StartLogger) if err != nil { t.Errorf("create context manager failed %v", err) return } server := MockServer{ Mng: ctxMng, t: t, } server.GoListenAndServe(t) defer server.Close() time.Sleep(time.Second) // wait for the server to start testCases := []struct { Info *certInfo Pass func(resp *http.Response, err error) bool }{ { Info: &certInfo{ CommonName: extendVerify["cn"].(string), Curve: serverInfo.Curve, }, Pass: pass, }, { Info: &certInfo{ CommonName: "invalid client", Curve: serverInfo.Curve, }, Pass: fail, }, } for i, tc := range testCases { cfg, err := tc.Info.CreateCertConfig() if err != nil { t.Errorf("#%d create client certificate error %v", i, err) continue } cfg.ServerName = "127.0.0.1" cltMng, err := NewTLSClientContextManager(cfg, nil) if err != nil { t.Errorf("#%d create client context manager failed %v", i, err) continue } resp, err := MockClient(t, server.Addr, cltMng) if !tc.Pass(resp, err) { t.Errorf("#%d verify failed", i) } } } // TestTestTLSExtensionsVerifyServer tests that the client only accepts a server response when the server certificate's common name is "server" func TestTestTLSExtensionsVerifyServer(t *testing.T) { extendVerify := map[string]interface{}{ "name": "client", "cn": "server", } clientInfo := &certInfo{ CommonName: extendVerify["name"].(string), Curve: "RSA", } testCases := []struct { Info *certInfo Pass func(resp *http.Response, err error) bool }{ { Info: &certInfo{ CommonName: extendVerify["cn"].(string), Curve: clientInfo.Curve, DNS: "www.pass.com", }, Pass: pass, }, { Info: &certInfo{ CommonName: "invalid server", Curve: clientInfo.Curve, DNS: "www.fail.com", }, Pass: fail, }, } var filterChains []v2.FilterChain for i, tc := range testCases { cfg, err := tc.Info.CreateCertConfig() if err != nil { t.Errorf("#%d %v", i, err) return } fc := v2.FilterChain{ TLS: *cfg, } filterChains = append(filterChains, fc) } lc := &v2.Listener{} lc.FilterChains = filterChains ctxMng, err := NewTLSServerContextManager(lc, nil, log.StartLogger) if err != nil { t.Errorf("create context manager failed %v", err) return } server := MockServer{ Mng: ctxMng, t: t, } server.GoListenAndServe(t) defer server.Close() time.Sleep(time.Second) // wait for the server to start clientConfig, err := clientInfo.CreateCertConfig() if err != nil { t.Errorf("create client certificate error %v", err) return } clientConfig.Type = testType clientConfig.ExtendVerify = extendVerify for i, tc := range testCases { clientConfig.ServerName = tc.Info.DNS cltMng, err := NewTLSClientContextManager(clientConfig, nil) if err != nil { t.Errorf("create client context manager failed %v", err) return } resp, err := MockClient(t, server.Addr, cltMng) if !tc.Pass(resp, err) { t.Errorf("#%d verify failed", i) } } // insecure skip bypasses verification even when the extension hook is registered skipConfig := &v2.TLSConfig{ Status: true, Type: clientConfig.Type, CACert: clientConfig.CACert, CertChain: clientConfig.CertChain, PrivateKey: clientConfig.PrivateKey, InsecureSkip: true, } for i, tc := range testCases { skipConfig.ServerName = tc.Info.DNS skipMng, err := NewTLSClientContextManager(skipConfig, nil) if err != nil { t.Errorf("create client context manager failed %v", err) return } resp, err := MockClient(t, server.Addr, skipMng) // verification is skipped, so every case must pass if !pass(resp, err) { t.Errorf("#%d skip verify failed", i) } } } func TestFallback(t *testing.T) { cfg := v2.TLSConfig{ Status: true, CertChain: "invalid_certificate", PrivateKey: "invalid_key", Fallback: true, } filterChains := []v2.FilterChain{ { TLS: cfg, }, } lc :=
&v2.Listener{} lc.FilterChains = filterChains serverMgr, err := NewTLSServerContextManager(lc, nil, log.StartLogger) if err != nil { t.Errorf("create context manager failed %v", err) return } if serverMgr.Enabled() { t.Error("tls manager is not fallback") return } clientMgr, err := NewTLSClientContextManager(&cfg, nil) if err != nil { t.Errorf("create client context manager failed %v", err) return } if clientMgr.Enabled() { t.Error("tls manager is not fallback") return } } func TestMain(m *testing.M) { Register(testType, &testConfigHooksFactory{}) m.Run() }
MockClient
turbine.py
from django import forms from wopr.utils import makeTurbineList, makeSiteList from wopr.models import TSiteconfig from wopr.widgets import XDSoftDateTimePickerInput class TurbineSelectionForm(forms.Form): # Default values CHOICES = list(range(1, 100)) start_time = forms.DateTimeField(input_formats=['%d/%m/%Y %H:%M'], widget=XDSoftDateTimePickerInput(attrs={'autocomplete':'off'})) end_time = forms.DateTimeField(input_formats=['%d/%m/%Y %H:%M'], widget=XDSoftDateTimePickerInput(attrs={'autocomplete':'off'})) view_turbines_from = forms.ChoiceField(choices=CHOICES) view_turbines_till = forms.ChoiceField(choices=CHOICES) # Constructor with parameters def __init__(self, *args,**kwargs): self.siteid = kwargs.pop('siteid') self.CHOICES = makeTurbineList(self.siteid) super(TurbineSelectionForm, self).__init__(*args, **kwargs) self.fields['view_turbines_from'] = forms.ChoiceField(choices=self.CHOICES) self.fields['view_turbines_till'] = forms.ChoiceField(choices=self.CHOICES, initial=str(len(self.CHOICES))) class CompareTurbinePowerForm(forms.Form): site_id_1 = forms.CharField(widget=forms.Select(choices=[])) turbine_id_1 = forms.CharField(widget=forms.Select(choices={})) start_time_1 = forms.DateTimeField(input_formats=['%d/%m/%Y %H:%M'], widget=XDSoftDateTimePickerInput(attrs={'autocomplete':'off'})) end_time_1 = forms.DateTimeField(input_formats=['%d/%m/%Y %H:%M'], widget=XDSoftDateTimePickerInput(attrs={'autocomplete':'off'})) site_id_2 = forms.CharField(widget=forms.Select(choices=[])) turbine_id_2 = forms.CharField(widget=forms.Select(choices={})) start_time_2 = forms.DateTimeField(input_formats=['%d/%m/%Y %H:%M'], widget=XDSoftDateTimePickerInput(attrs={'autocomplete':'off'})) end_time_2 = forms.DateTimeField(input_formats=['%d/%m/%Y %H:%M'], widget=XDSoftDateTimePickerInput(attrs={'autocomplete':'off'})) def
(self): cleaned_data = super().clean() start_time_1 = cleaned_data.get("start_time_1") end_time_1 = cleaned_data.get("end_time_1") start_time_2 = cleaned_data.get("start_time_2") end_time_2 = cleaned_data.get("end_time_2") if start_time_1 and end_time_1 and start_time_2 and end_time_2: if (end_time_1 <= start_time_1) or (end_time_2 <= start_time_2): raise forms.ValidationError(" - ERROR - Please ensure the time ranges are valid.") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['turbine_id_1'].queryset = TSiteconfig.objects.none().values("turbine") self.fields['site_id_1'].choices = makeSiteList() self.fields['site_id_2'].choices = makeSiteList() if 'site_id_1' in self.data: try: site = int(self.data.get('site_id_1')) # need site id too... self.fields['turbine_id_1'].queryset = TSiteconfig.objects.filter(siteid=site).values('turbine') except (ValueError, TypeError): pass # invalid input from the client; ignore and fall back to an empty turbine queryset if 'site_id_2' in self.data: try: site = int(self.data.get('site_id_2')) # need site id too... self.fields['turbine_id_2'].queryset = TSiteconfig.objects.filter(siteid=site).values('turbine') except (ValueError, TypeError): pass # invalid input from the client; ignore and fall back to an empty turbine queryset
clean
user.py
# type: ignore # MIT License # # Copyright (c) 2018-2019 Red Hat, Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from ogr.services import gitlab as ogr_gitlab from ogr.services.base import BaseGitUser class GitlabUser(BaseGitUser): service: "ogr_gitlab.GitlabService" def __init__(self, service: "ogr_gitlab.GitlabService") -> None: super().__init__(service=service)
return f'Gitlab(username="{self.get_username()}")' @property def _gitlab_user(self): return self.service.gitlab_instance.user def get_username(self) -> str: return self._gitlab_user.username def get_email(self) -> str: return self._gitlab_user.email
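# Illustrative usage (assumes an authenticated GitlabService instance):
#
#   user = GitlabUser(service=service)
#   print(user)  # Gitlab(username="...")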
def __str__(self) -> str:
authRouter.js
const express = require('express') const router = express.Router()
module.exports = router
const { authControllers } = require('../controllers') router.post('/login', authControllers.login)
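// An illustrative mounting sketch (the app setup and the mount path are assumptions, not part
// of this file):
//
//   const authRouter = require('./routes/authRouter')
//   app.use('/auth', authRouter)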
procedure_document.py
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
    json_view,
    context_unpack,
    opresource,
    get_file
)
from openprocurement.auctions.core.validation import (
    validate_file_update,
    validate_file_upload,
    validate_patch_document_data
)
from openprocurement.auctions.core.views.mixins import AuctionDocumentResource
from openprocurement.auctions.core.interfaces import (
    IManager
)


@opresource(name='geb:Auction Documents',
            collection_path='/auctions/{auction_id}/documents',
            path='/auctions/{auction_id}/documents/{document_id}',
            auctionsprocurementMethodType="geb",
            description="Auction related binary files (PDFs, etc.)")
class AuctionDocumentResource(AuctionDocumentResource):

    @json_view(permission='upload_auction_documents',
               validators=(validate_file_upload,))
    def collection_post(self):
        """Auction Document Upload"""
        save = None
        manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)
        applicant = self.request.validated.get('document', self.request.validated.get('file'))
        document = manager.create(applicant)
        if document:
            save = manager.save()
        if save:
            msg = 'Created auction document {}'.format(document.id)
            extra = context_unpack(self.request,
                                   {'MESSAGE_ID': 'auction_document_create'},
                                   {'document_id': document['id']})
            self.LOGGER.info(msg, extra=extra)
            self.request.response.status = 201
            route = self.request.matched_route.name.replace("collection_", "")
            locations = self.request.current_route_url(_route_name=route, document_id=document.id, _query={})
            self.request.response.headers['Location'] = locations
            return {'data': document.serialize("view")}

    @json_view(permission='view_auction')
    def get(self):
        """Auction Document Read"""
        # TODO rm black box
        document = self.request.validated['document']
        offline = bool(document.get('documentType') == 'x_dgfAssetFamiliarization')
        if self.request.params.get('download') and not offline:
            return get_file(self.request)
        document_data = document.serialize("view")
        document_data['previousVersions'] = [
            i.serialize("view") for i in self.request.validated['documents']
            if i.url != document.url or (offline and i.dateModified != document.dateModified)
        ]
        return {'data': document_data}

    @json_view(content_type="application/json",
               permission='upload_auction_documents',
               validators=(validate_patch_document_data,))
    def patch(self):
        """Auction Document Update"""
        save = None
        manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)
        manager.change()
        save = manager.save()
        if save:
            extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_patch'})
            msg = 'Updated auction document {}'.format(self.request.context.id)
            self.LOGGER.info(msg, extra=extra)
            return {'data': self.request.context.serialize("view")}

    @json_view(permission='upload_auction_documents',
               validators=(validate_file_update,))
    def put(self):
        save = None
        manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)
        document = manager.put()
        save = manager.save()
        if save:
            extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_put'})
            msg = 'Updated auction document {}'.format(document.id)
            self.LOGGER.info(msg, extra=extra)
            return {'data': document.serialize("view")}
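# A hedged client-side sketch of the routes registered above, using the
# `requests` library. The host, API version, auction id and credentials are
# placeholders, and the multipart field name "file" is an assumption about
# what validate_file_upload expects.
if __name__ == "__main__":
    import requests

    BASE = "https://auctions.example.com/api/0.1"     # hypothetical endpoint
    auction = "11111111111111111111111111111111"      # hypothetical auction id

    # collection_post: upload a new document, expect 201 + Location header
    resp = requests.post(
        "{}/auctions/{}/documents".format(BASE, auction),
        files={"file": ("contract.pdf", b"%PDF-1.4 ...", "application/pdf")},
        auth=("broker", ""),                          # placeholder credentials
    )
    print(resp.status_code, resp.headers.get("Location"))

    # get: document metadata (pass a ?download=... query for the binary itself)
    doc_id = resp.json()["data"]["id"]
    meta = requests.get("{}/auctions/{}/documents/{}".format(BASE, auction, doc_id))
    print(meta.json()["data"])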
models.py
from django.db import models
from django.db.models import Q

from core.models import (
    choices,
    DateTime,
    EventField,
    GivName,
    SurName,
    link_inc,
    link_dec,
)
from person_app.person.models import Person


class Birth(EventField):
    _person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    gender = models.CharField(
        max_length=1,
        choices=choices.GENDER_CHOICES,
        default='U',
    )
    _givname = models.ForeignKey(
        GivName,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    _surname = models.ForeignKey(
        SurName,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )

    @property
    def person(self):
        return self._person

    @person.setter
    def person(self, value):
        if isinstance(value, Person):
            self._person = value
        elif Person.objects.filter(pk=value).exists():
            self._person = Person.objects.get(pk=value)

    @property
    def givname(self):
        if self._givname:
            return self._givname.givname
        else:
            return ''

    @givname.setter
    def givname(self, value):
        if isinstance(value, str):
            value, created = GivName.objects.get_or_create(
                givname=value,
            )
        if self._givname:
            if self._givname is not value:
                if self._givname.link > 1:
                    self._givname.link -= 1
                    self._givname.save()
                elif self._givname.note:
                    self._givname.link = 0
                    self._givname.save()
                else:
                    self._givname.delete()
            else:
                return
        link_inc(value)
        self._givname = value

    @property
    def surname(self):
        if self._surname:
            if self.gender == 'F':
                surname = self._surname.surname[1]
            else:
                surname = self._surname.surname[0]
        else:
            surname = ''
        return '%s' % surname

    @surname.setter
    def surname(self, value):
        if isinstance(value, str):
            if self.gender == 'F':
                query = SurName.objects.filter(_surname_female=value)
            else:
                query = SurName.objects.filter(_surname_male=value)
            if query.exists():
                value = (
                    SurName.objects.get(_surname_female=value)
                    if self.gender == 'F'
                    else SurName.objects.get(_surname_male=value)
                )
            else:
                value = (
                    SurName.objects.create(surname={'surname_female': value})
                    if self.gender == 'F'
                    else SurName.objects.create(surname={'surname_male': value})
                )
        if self._surname:
            if self._surname is not value:
                link_dec(self._surname)
            else:
                return
        link_inc(value)
        self._surname = value

    def save(self, *args, **kwargs):
        # if self.pk is None and self.person is None:
        if self._person is None:
            self._person = Person.objects.create()
        super().save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        link_dec(self._givname)
        link_dec(self._surname)
        link_dec(self._datetime)
        super().delete(*args, **kwargs)
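# A minimal sketch of how the properties above are meant to be used
# (values are hypothetical; requires a configured Django environment):
#
#     birth = Birth(gender='F')
#     birth.givname = 'Maria'      # get_or_create GivName, link_inc on assignment
#     birth.surname = 'Ivanova'    # resolved via _surname_female since gender == 'F'
#     birth.save()                 # auto-creates a Person when none was assigned
#     assert birth.person is not None
#     assert birth.givname == 'Maria'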
scripts.js
/* ======================================================================== * Bootstrap: transition.js v3.3.1 * http://getbootstrap.com/javascript/#transitions * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/) // ============================================================ function transitionEnd() { var el = document.createElement('bootstrap') var transEndEventNames = { WebkitTransition : 'webkitTransitionEnd', MozTransition : 'transitionend', OTransition : 'oTransitionEnd otransitionend', transition : 'transitionend' } for (var name in transEndEventNames) { if (el.style[name] !== undefined) { return { end: transEndEventNames[name] } } } return false // explicit for ie8 ( ._.) } // http://blog.alexmaccaw.com/css-transitions $.fn.emulateTransitionEnd = function (duration) { var called = false var $el = this $(this).one('bsTransitionEnd', function () { called = true }) var callback = function () { if (!called) $($el).trigger($.support.transition.end) } setTimeout(callback, duration) return this } $(function () { $.support.transition = transitionEnd() if (!$.support.transition) return $.event.special.bsTransitionEnd = { bindType: $.support.transition.end, delegateType: $.support.transition.end, handle: function (e) { if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments) } } }) }(jQuery); ;/* ======================================================================== * Bootstrap: alert.js v3.3.1 * http://getbootstrap.com/javascript/#alerts * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // ALERT CLASS DEFINITION // ====================== var dismiss = '[data-dismiss="alert"]' var Alert = function (el) { $(el).on('click', dismiss, this.close) } Alert.VERSION = '3.3.1' Alert.TRANSITION_DURATION = 150 Alert.prototype.close = function (e) { var $this = $(this) var selector = $this.attr('data-target') if (!selector) { selector = $this.attr('href') selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 } var $parent = $(selector) if (e) e.preventDefault() if (!$parent.length) { $parent = $this.closest('.alert') } $parent.trigger(e = $.Event('close.bs.alert')) if (e.isDefaultPrevented()) return $parent.removeClass('in') function removeElement() { // detach from parent, fire event then clean up data $parent.detach().trigger('closed.bs.alert').remove() } $.support.transition && $parent.hasClass('fade') ? 
$parent .one('bsTransitionEnd', removeElement) .emulateTransitionEnd(Alert.TRANSITION_DURATION) : removeElement() } // ALERT PLUGIN DEFINITION // ======================= function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.alert') if (!data) $this.data('bs.alert', (data = new Alert(this))) if (typeof option == 'string') data[option].call($this) }) } var old = $.fn.alert $.fn.alert = Plugin $.fn.alert.Constructor = Alert // ALERT NO CONFLICT // ================= $.fn.alert.noConflict = function () { $.fn.alert = old return this } // ALERT DATA-API // ============== $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close) }(jQuery); ;/* ======================================================================== * Bootstrap: button.js v3.3.1 * http://getbootstrap.com/javascript/#buttons * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // BUTTON PUBLIC CLASS DEFINITION // ============================== var Button = function (element, options) { this.$element = $(element) this.options = $.extend({}, Button.DEFAULTS, options) this.isLoading = false } Button.VERSION = '3.3.1' Button.DEFAULTS = { loadingText: 'loading...' } Button.prototype.setState = function (state) { var d = 'disabled' var $el = this.$element var val = $el.is('input') ? 'val' : 'html' var data = $el.data() state = state + 'Text' if (data.resetText == null) $el.data('resetText', $el[val]()) // push to event loop to allow forms to submit setTimeout($.proxy(function () { $el[val](data[state] == null ? 
this.options[state] : data[state]) if (state == 'loadingText') { this.isLoading = true $el.addClass(d).attr(d, d) } else if (this.isLoading) { this.isLoading = false $el.removeClass(d).removeAttr(d) } }, this), 0) } Button.prototype.toggle = function () { var changed = true var $parent = this.$element.closest('[data-toggle="buttons"]') if ($parent.length) { var $input = this.$element.find('input') if ($input.prop('type') == 'radio') { if ($input.prop('checked') && this.$element.hasClass('active')) changed = false else $parent.find('.active').removeClass('active') } if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change') } else { this.$element.attr('aria-pressed', !this.$element.hasClass('active')) } if (changed) this.$element.toggleClass('active') } // BUTTON PLUGIN DEFINITION // ======================== function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.button') var options = typeof option == 'object' && option if (!data) $this.data('bs.button', (data = new Button(this, options))) if (option == 'toggle') data.toggle() else if (option) data.setState(option) }) } var old = $.fn.button $.fn.button = Plugin $.fn.button.Constructor = Button // BUTTON NO CONFLICT // ================== $.fn.button.noConflict = function () { $.fn.button = old return this } // BUTTON DATA-API // =============== $(document) .on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) { var $btn = $(e.target) if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn') Plugin.call($btn, 'toggle') e.preventDefault() }) .on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^="button"]', function (e) { $(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type)) }) }(jQuery); ;/* ======================================================================== * Bootstrap: carousel.js v3.3.1 * http://getbootstrap.com/javascript/#carousel * ======================================================================== * Copyright 2011-2014 Twitter, Inc. 
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // CAROUSEL CLASS DEFINITION // ========================= var Carousel = function (element, options) { this.$element = $(element) this.$indicators = this.$element.find('.carousel-indicators') this.options = options this.paused = this.sliding = this.interval = this.$active = this.$items = null this.options.keyboard && this.$element.on('keydown.bs.carousel', $.proxy(this.keydown, this)) this.options.pause == 'hover' && !('ontouchstart' in document.documentElement) && this.$element .on('mouseenter.bs.carousel', $.proxy(this.pause, this)) .on('mouseleave.bs.carousel', $.proxy(this.cycle, this)) } Carousel.VERSION = '3.3.1' Carousel.TRANSITION_DURATION = 600 Carousel.DEFAULTS = { interval: 5000, pause: 'hover', wrap: true, keyboard: true } Carousel.prototype.keydown = function (e) { if (/input|textarea/i.test(e.target.tagName)) return switch (e.which) { case 37: this.prev(); break case 39: this.next(); break default: return } e.preventDefault() } Carousel.prototype.cycle = function (e) { e || (this.paused = false) this.interval && clearInterval(this.interval) this.options.interval && !this.paused && (this.interval = setInterval($.proxy(this.next, this), this.options.interval)) return this } Carousel.prototype.getItemIndex = function (item) { this.$items = item.parent().children('.item') return this.$items.index(item || this.$active) } Carousel.prototype.getItemForDirection = function (direction, active) { var delta = direction == 'prev' ? -1 : 1 var activeIndex = this.getItemIndex(active) var itemIndex = (activeIndex + delta) % this.$items.length return this.$items.eq(itemIndex) } Carousel.prototype.to = function (pos) { var that = this var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active')) if (pos > (this.$items.length - 1) || pos < 0) return if (this.sliding) return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, "slid" if (activeIndex == pos) return this.pause().cycle() return this.slide(pos > activeIndex ? 'next' : 'prev', this.$items.eq(pos)) } Carousel.prototype.pause = function (e) { e || (this.paused = true) if (this.$element.find('.next, .prev').length && $.support.transition) { this.$element.trigger($.support.transition.end) this.cycle(true) } this.interval = clearInterval(this.interval) return this } Carousel.prototype.next = function () { if (this.sliding) return return this.slide('next') } Carousel.prototype.prev = function () { if (this.sliding) return return this.slide('prev') } Carousel.prototype.slide = function (type, next) { var $active = this.$element.find('.item.active') var $next = next || this.getItemForDirection(type, $active) var isCycling = this.interval var direction = type == 'next' ? 'left' : 'right' var fallback = type == 'next' ? 
'first' : 'last' var that = this if (!$next.length) { if (!this.options.wrap) return $next = this.$element.find('.item')[fallback]() } if ($next.hasClass('active')) return (this.sliding = false) var relatedTarget = $next[0] var slideEvent = $.Event('slide.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) this.$element.trigger(slideEvent) if (slideEvent.isDefaultPrevented()) return this.sliding = true isCycling && this.pause() if (this.$indicators.length) { this.$indicators.find('.active').removeClass('active') var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)]) $nextIndicator && $nextIndicator.addClass('active') } var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, "slid" if ($.support.transition && this.$element.hasClass('slide')) { $next.addClass(type) $next[0].offsetWidth // force reflow $active.addClass(direction) $next.addClass(direction) $active .one('bsTransitionEnd', function () { $next.removeClass([type, direction].join(' ')).addClass('active') $active.removeClass(['active', direction].join(' ')) that.sliding = false setTimeout(function () { that.$element.trigger(slidEvent) }, 0) }) .emulateTransitionEnd(Carousel.TRANSITION_DURATION) } else { $active.removeClass('active') $next.addClass('active') this.sliding = false this.$element.trigger(slidEvent) } isCycling && this.cycle() return this } // CAROUSEL PLUGIN DEFINITION // ========================== function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.carousel') var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option) var action = typeof option == 'string' ? option : options.slide if (!data) $this.data('bs.carousel', (data = new Carousel(this, options))) if (typeof option == 'number') data.to(option) else if (action) data[action]() else if (options.interval) data.pause().cycle() }) } var old = $.fn.carousel $.fn.carousel = Plugin $.fn.carousel.Constructor = Carousel // CAROUSEL NO CONFLICT // ==================== $.fn.carousel.noConflict = function () { $.fn.carousel = old return this } // CAROUSEL DATA-API // ================= var clickHandler = function (e) { var href var $this = $(this) var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) // strip for ie7 if (!$target.hasClass('carousel')) return var options = $.extend({}, $target.data(), $this.data()) var slideIndex = $this.attr('data-slide-to') if (slideIndex) options.interval = false Plugin.call($target, options) if (slideIndex) { $target.data('bs.carousel').to(slideIndex) } e.preventDefault() } $(document) .on('click.bs.carousel.data-api', '[data-slide]', clickHandler) .on('click.bs.carousel.data-api', '[data-slide-to]', clickHandler) $(window).on('load', function () { $('[data-ride="carousel"]').each(function () { var $carousel = $(this) Plugin.call($carousel, $carousel.data()) }) }) }(jQuery); ;/* ======================================================================== * Bootstrap: collapse.js v3.3.1 * http://getbootstrap.com/javascript/#collapse * ======================================================================== * Copyright 2011-2014 Twitter, Inc. 
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // COLLAPSE PUBLIC CLASS DEFINITION // ================================ var Collapse = function (element, options) { this.$element = $(element) this.options = $.extend({}, Collapse.DEFAULTS, options) this.$trigger = $(this.options.trigger).filter('[href="#' + element.id + '"], [data-target="#' + element.id + '"]') this.transitioning = null if (this.options.parent) { this.$parent = this.getParent() } else { this.addAriaAndCollapsedClass(this.$element, this.$trigger) } if (this.options.toggle) this.toggle() } Collapse.VERSION = '3.3.1' Collapse.TRANSITION_DURATION = 350 Collapse.DEFAULTS = { toggle: true, trigger: '[data-toggle="collapse"]' } Collapse.prototype.dimension = function () { var hasWidth = this.$element.hasClass('width') return hasWidth ? 'width' : 'height' } Collapse.prototype.show = function () { if (this.transitioning || this.$element.hasClass('in')) return var activesData var actives = this.$parent && this.$parent.find('> .panel').children('.in, .collapsing') if (actives && actives.length) { activesData = actives.data('bs.collapse') if (activesData && activesData.transitioning) return } var startEvent = $.Event('show.bs.collapse') this.$element.trigger(startEvent) if (startEvent.isDefaultPrevented()) return if (actives && actives.length) { Plugin.call(actives, 'hide') activesData || actives.data('bs.collapse', null) } var dimension = this.dimension() this.$element .removeClass('collapse') .addClass('collapsing')[dimension](0) .attr('aria-expanded', true) this.$trigger .removeClass('collapsed') .attr('aria-expanded', true) this.transitioning = 1 var complete = function () { this.$element .removeClass('collapsing') .addClass('collapse in')[dimension]('') this.transitioning = 0 this.$element .trigger('shown.bs.collapse') } if (!$.support.transition) return complete.call(this) var scrollSize = $.camelCase(['scroll', dimension].join('-')) this.$element .one('bsTransitionEnd', $.proxy(complete, this)) .emulateTransitionEnd(Collapse.TRANSITION_DURATION)[dimension](this.$element[0][scrollSize]) } Collapse.prototype.hide = function () { if (this.transitioning || !this.$element.hasClass('in')) return var startEvent = $.Event('hide.bs.collapse') this.$element.trigger(startEvent) if (startEvent.isDefaultPrevented()) return var dimension = this.dimension() this.$element[dimension](this.$element[dimension]())[0].offsetHeight this.$element .addClass('collapsing') .removeClass('collapse in') .attr('aria-expanded', false) this.$trigger .addClass('collapsed') .attr('aria-expanded', false) this.transitioning = 1 var complete = function () { this.transitioning = 0 this.$element .removeClass('collapsing') .addClass('collapse') .trigger('hidden.bs.collapse') } if (!$.support.transition) return complete.call(this) this.$element [dimension](0) .one('bsTransitionEnd', $.proxy(complete, this)) .emulateTransitionEnd(Collapse.TRANSITION_DURATION) } Collapse.prototype.toggle = function () { this[this.$element.hasClass('in') ? 
'hide' : 'show']() } Collapse.prototype.getParent = function () { return $(this.options.parent) .find('[data-toggle="collapse"][data-parent="' + this.options.parent + '"]') .each($.proxy(function (i, element) { var $element = $(element) this.addAriaAndCollapsedClass(getTargetFromTrigger($element), $element) }, this)) .end() } Collapse.prototype.addAriaAndCollapsedClass = function ($element, $trigger) { var isOpen = $element.hasClass('in') $element.attr('aria-expanded', isOpen) $trigger .toggleClass('collapsed', !isOpen) .attr('aria-expanded', isOpen) } function getTargetFromTrigger($trigger) { var href var target = $trigger.attr('data-target') || (href = $trigger.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7 return $(target) } // COLLAPSE PLUGIN DEFINITION // ========================== function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.collapse') var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option) if (!data && options.toggle && option == 'show') options.toggle = false if (!data) $this.data('bs.collapse', (data = new Collapse(this, options))) if (typeof option == 'string') data[option]() }) } var old = $.fn.collapse $.fn.collapse = Plugin $.fn.collapse.Constructor = Collapse // COLLAPSE NO CONFLICT // ==================== $.fn.collapse.noConflict = function () { $.fn.collapse = old return this } // COLLAPSE DATA-API // ================= $(document).on('click.bs.collapse.data-api', '[data-toggle="collapse"]', function (e) { var $this = $(this) if (!$this.attr('data-target')) e.preventDefault() var $target = getTargetFromTrigger($this) var data = $target.data('bs.collapse') var option = data ? 'toggle' : $.extend({}, $this.data(), { trigger: this }) Plugin.call($target, option) }) }(jQuery); ;/* ======================================================================== * Bootstrap: dropdown.js v3.3.1 * http://getbootstrap.com/javascript/#dropdowns * ======================================================================== * Copyright 2011-2014 Twitter, Inc. 
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // DROPDOWN CLASS DEFINITION // ========================= var backdrop = '.dropdown-backdrop' var toggle = '[data-toggle="dropdown"]' var Dropdown = function (element) { $(element).on('click.bs.dropdown', this.toggle) } Dropdown.VERSION = '3.3.1' Dropdown.prototype.toggle = function (e) { var $this = $(this) if ($this.is('.disabled, :disabled')) return var $parent = getParent($this) var isActive = $parent.hasClass('open') clearMenus() if (!isActive) { if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) { // if mobile we use a backdrop because click events don't delegate $('<div class="dropdown-backdrop"/>').insertAfter($(this)).on('click', clearMenus) } var relatedTarget = { relatedTarget: this } $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget)) if (e.isDefaultPrevented()) return $this .trigger('focus') .attr('aria-expanded', 'true') $parent .toggleClass('open') .trigger('shown.bs.dropdown', relatedTarget) } return false } Dropdown.prototype.keydown = function (e) { if (!/(38|40|27|32)/.test(e.which) || /input|textarea/i.test(e.target.tagName)) return var $this = $(this) e.preventDefault() e.stopPropagation() if ($this.is('.disabled, :disabled')) return var $parent = getParent($this) var isActive = $parent.hasClass('open') if ((!isActive && e.which != 27) || (isActive && e.which == 27)) { if (e.which == 27) $parent.find(toggle).trigger('focus') return $this.trigger('click') } var desc = ' li:not(.divider):visible a' var $items = $parent.find('[role="menu"]' + desc + ', [role="listbox"]' + desc) if (!$items.length) return var index = $items.index(e.target) if (e.which == 38 && index > 0) index-- // up if (e.which == 40 && index < $items.length - 1) index++ // down if (!~index) index = 0 $items.eq(index).trigger('focus') } function clearMenus(e) { if (e && e.which === 3) return $(backdrop).remove() $(toggle).each(function () { var $this = $(this) var $parent = getParent($this) var relatedTarget = { relatedTarget: this } if (!$parent.hasClass('open')) return $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget)) if (e.isDefaultPrevented()) return $this.attr('aria-expanded', 'false') $parent.removeClass('open').trigger('hidden.bs.dropdown', relatedTarget) }) } function getParent($this) { var selector = $this.attr('data-target') if (!selector) { selector = $this.attr('href') selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 } var $parent = selector && $(selector) return $parent && $parent.length ? 
$parent : $this.parent() } // DROPDOWN PLUGIN DEFINITION // ========================== function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.dropdown') if (!data) $this.data('bs.dropdown', (data = new Dropdown(this))) if (typeof option == 'string') data[option].call($this) }) } var old = $.fn.dropdown $.fn.dropdown = Plugin $.fn.dropdown.Constructor = Dropdown // DROPDOWN NO CONFLICT // ==================== $.fn.dropdown.noConflict = function () { $.fn.dropdown = old return this } // APPLY TO STANDARD DROPDOWN ELEMENTS // =================================== $(document) .on('click.bs.dropdown.data-api', clearMenus) .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() }) .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle) .on('keydown.bs.dropdown.data-api', toggle, Dropdown.prototype.keydown) .on('keydown.bs.dropdown.data-api', '[role="menu"]', Dropdown.prototype.keydown) .on('keydown.bs.dropdown.data-api', '[role="listbox"]', Dropdown.prototype.keydown) }(jQuery); ;/* ======================================================================== * Bootstrap: modal.js v3.3.1
* ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // MODAL CLASS DEFINITION // ====================== var Modal = function (element, options) { this.options = options this.$body = $(document.body) this.$element = $(element) this.$backdrop = this.isShown = null this.scrollbarWidth = 0 if (this.options.remote) { this.$element .find('.modal-content') .load(this.options.remote, $.proxy(function () { this.$element.trigger('loaded.bs.modal') }, this)) } } Modal.VERSION = '3.3.1' Modal.TRANSITION_DURATION = 300 Modal.BACKDROP_TRANSITION_DURATION = 150 Modal.DEFAULTS = { backdrop: true, keyboard: true, show: true } Modal.prototype.toggle = function (_relatedTarget) { return this.isShown ? this.hide() : this.show(_relatedTarget) } Modal.prototype.show = function (_relatedTarget) { var that = this var e = $.Event('show.bs.modal', { relatedTarget: _relatedTarget }) this.$element.trigger(e) if (this.isShown || e.isDefaultPrevented()) return this.isShown = true this.checkScrollbar() this.setScrollbar() this.$body.addClass('modal-open') this.escape() this.resize() this.$element.on('click.dismiss.bs.modal', '[data-dismiss="modal"]', $.proxy(this.hide, this)) this.backdrop(function () { var transition = $.support.transition && that.$element.hasClass('fade') if (!that.$element.parent().length) { that.$element.appendTo(that.$body) // don't move modals dom position } that.$element .show() .scrollTop(0) if (that.options.backdrop) that.adjustBackdrop() that.adjustDialog() if (transition) { that.$element[0].offsetWidth // force reflow } that.$element .addClass('in') .attr('aria-hidden', false) that.enforceFocus() var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget }) transition ? that.$element.find('.modal-dialog') // wait for modal to slide in .one('bsTransitionEnd', function () { that.$element.trigger('focus').trigger(e) }) .emulateTransitionEnd(Modal.TRANSITION_DURATION) : that.$element.trigger('focus').trigger(e) }) } Modal.prototype.hide = function (e) { if (e) e.preventDefault() e = $.Event('hide.bs.modal') this.$element.trigger(e) if (!this.isShown || e.isDefaultPrevented()) return this.isShown = false this.escape() this.resize() $(document).off('focusin.bs.modal') this.$element .removeClass('in') .attr('aria-hidden', true) .off('click.dismiss.bs.modal') $.support.transition && this.$element.hasClass('fade') ? 
this.$element .one('bsTransitionEnd', $.proxy(this.hideModal, this)) .emulateTransitionEnd(Modal.TRANSITION_DURATION) : this.hideModal() } Modal.prototype.enforceFocus = function () { $(document) .off('focusin.bs.modal') // guard against infinite focus loop .on('focusin.bs.modal', $.proxy(function (e) { if (this.$element[0] !== e.target && !this.$element.has(e.target).length) { this.$element.trigger('focus') } }, this)) } Modal.prototype.escape = function () { if (this.isShown && this.options.keyboard) { this.$element.on('keydown.dismiss.bs.modal', $.proxy(function (e) { e.which == 27 && this.hide() }, this)) } else if (!this.isShown) { this.$element.off('keydown.dismiss.bs.modal') } } Modal.prototype.resize = function () { if (this.isShown) { $(window).on('resize.bs.modal', $.proxy(this.handleUpdate, this)) } else { $(window).off('resize.bs.modal') } } Modal.prototype.hideModal = function () { var that = this this.$element.hide() this.backdrop(function () { that.$body.removeClass('modal-open') that.resetAdjustments() that.resetScrollbar() that.$element.trigger('hidden.bs.modal') }) } Modal.prototype.removeBackdrop = function () { this.$backdrop && this.$backdrop.remove() this.$backdrop = null } Modal.prototype.backdrop = function (callback) { var that = this var animate = this.$element.hasClass('fade') ? 'fade' : '' if (this.isShown && this.options.backdrop) { var doAnimate = $.support.transition && animate this.$backdrop = $('<div class="modal-backdrop ' + animate + '" />') .prependTo(this.$element) .on('click.dismiss.bs.modal', $.proxy(function (e) { if (e.target !== e.currentTarget) return this.options.backdrop == 'static' ? this.$element[0].focus.call(this.$element[0]) : this.hide.call(this) }, this)) if (doAnimate) this.$backdrop[0].offsetWidth // force reflow this.$backdrop.addClass('in') if (!callback) return doAnimate ? this.$backdrop .one('bsTransitionEnd', callback) .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) : callback() } else if (!this.isShown && this.$backdrop) { this.$backdrop.removeClass('in') var callbackRemove = function () { that.removeBackdrop() callback && callback() } $.support.transition && this.$element.hasClass('fade') ? this.$backdrop .one('bsTransitionEnd', callbackRemove) .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) : callbackRemove() } else if (callback) { callback() } } // these following methods are used to handle overflowing modals Modal.prototype.handleUpdate = function () { if (this.options.backdrop) this.adjustBackdrop() this.adjustDialog() } Modal.prototype.adjustBackdrop = function () { this.$backdrop .css('height', 0) .css('height', this.$element[0].scrollHeight) } Modal.prototype.adjustDialog = function () { var modalIsOverflowing = this.$element[0].scrollHeight > document.documentElement.clientHeight this.$element.css({ paddingLeft: !this.bodyIsOverflowing && modalIsOverflowing ? this.scrollbarWidth : '', paddingRight: this.bodyIsOverflowing && !modalIsOverflowing ? 
this.scrollbarWidth : '' }) } Modal.prototype.resetAdjustments = function () { this.$element.css({ paddingLeft: '', paddingRight: '' }) } Modal.prototype.checkScrollbar = function () { this.bodyIsOverflowing = document.body.scrollHeight > document.documentElement.clientHeight this.scrollbarWidth = this.measureScrollbar() } Modal.prototype.setScrollbar = function () { var bodyPad = parseInt((this.$body.css('padding-right') || 0), 10) if (this.bodyIsOverflowing) this.$body.css('padding-right', bodyPad + this.scrollbarWidth) } Modal.prototype.resetScrollbar = function () { this.$body.css('padding-right', '') } Modal.prototype.measureScrollbar = function () { // thx walsh var scrollDiv = document.createElement('div') scrollDiv.className = 'modal-scrollbar-measure' this.$body.append(scrollDiv) var scrollbarWidth = scrollDiv.offsetWidth - scrollDiv.clientWidth this.$body[0].removeChild(scrollDiv) return scrollbarWidth } // MODAL PLUGIN DEFINITION // ======================= function Plugin(option, _relatedTarget) { return this.each(function () { var $this = $(this) var data = $this.data('bs.modal') var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option) if (!data) $this.data('bs.modal', (data = new Modal(this, options))) if (typeof option == 'string') data[option](_relatedTarget) else if (options.show) data.show(_relatedTarget) }) } var old = $.fn.modal $.fn.modal = Plugin $.fn.modal.Constructor = Modal // MODAL NO CONFLICT // ================= $.fn.modal.noConflict = function () { $.fn.modal = old return this } // MODAL DATA-API // ============== $(document).on('click.bs.modal.data-api', '[data-toggle="modal"]', function (e) { var $this = $(this) var href = $this.attr('href') var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\s]+$)/, ''))) // strip for ie7 var option = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data()) if ($this.is('a')) e.preventDefault() $target.one('show.bs.modal', function (showEvent) { if (showEvent.isDefaultPrevented()) return // only register focus restorer if modal will actually get shown $target.one('hidden.bs.modal', function () { $this.is(':visible') && $this.trigger('focus') }) }) Plugin.call($target, option, this) }) }(jQuery); ;/* ======================================================================== * Bootstrap: tooltip.js v3.3.1 * http://getbootstrap.com/javascript/#tooltip * Inspired by the original jQuery.tipsy by Jason Frame * ======================================================================== * Copyright 2011-2014 Twitter, Inc. 
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // TOOLTIP PUBLIC CLASS DEFINITION // =============================== var Tooltip = function (element, options) { this.type = this.options = this.enabled = this.timeout = this.hoverState = this.$element = null this.init('tooltip', element, options) } Tooltip.VERSION = '3.3.1' Tooltip.TRANSITION_DURATION = 150 Tooltip.DEFAULTS = { animation: true, placement: 'top', selector: false, template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>', trigger: 'hover focus', title: '', delay: 0, html: false, container: false, viewport: { selector: 'body', padding: 0 } } Tooltip.prototype.init = function (type, element, options) { this.enabled = true this.type = type this.$element = $(element) this.options = this.getOptions(options) this.$viewport = this.options.viewport && $(this.options.viewport.selector || this.options.viewport) var triggers = this.options.trigger.split(' ') for (var i = triggers.length; i--;) { var trigger = triggers[i] if (trigger == 'click') { this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this)) } else if (trigger != 'manual') { var eventIn = trigger == 'hover' ? 'mouseenter' : 'focusin' var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout' this.$element.on(eventIn + '.' + this.type, this.options.selector, $.proxy(this.enter, this)) this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this)) } } this.options.selector ? (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) : this.fixTitle() } Tooltip.prototype.getDefaults = function () { return Tooltip.DEFAULTS } Tooltip.prototype.getOptions = function (options) { options = $.extend({}, this.getDefaults(), this.$element.data(), options) if (options.delay && typeof options.delay == 'number') { options.delay = { show: options.delay, hide: options.delay } } return options } Tooltip.prototype.getDelegateOptions = function () { var options = {} var defaults = this.getDefaults() this._options && $.each(this._options, function (key, value) { if (defaults[key] != value) options[key] = value }) return options } Tooltip.prototype.enter = function (obj) { var self = obj instanceof this.constructor ? obj : $(obj.currentTarget).data('bs.' + this.type) if (self && self.$tip && self.$tip.is(':visible')) { self.hoverState = 'in' return } if (!self) { self = new this.constructor(obj.currentTarget, this.getDelegateOptions()) $(obj.currentTarget).data('bs.' + this.type, self) } clearTimeout(self.timeout) self.hoverState = 'in' if (!self.options.delay || !self.options.delay.show) return self.show() self.timeout = setTimeout(function () { if (self.hoverState == 'in') self.show() }, self.options.delay.show) } Tooltip.prototype.leave = function (obj) { var self = obj instanceof this.constructor ? obj : $(obj.currentTarget).data('bs.' + this.type) if (!self) { self = new this.constructor(obj.currentTarget, this.getDelegateOptions()) $(obj.currentTarget).data('bs.' + this.type, self) } clearTimeout(self.timeout) self.hoverState = 'out' if (!self.options.delay || !self.options.delay.hide) return self.hide() self.timeout = setTimeout(function () { if (self.hoverState == 'out') self.hide() }, self.options.delay.hide) } Tooltip.prototype.show = function () { var e = $.Event('show.bs.' 
+ this.type) if (this.hasContent() && this.enabled) { this.$element.trigger(e) var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0]) if (e.isDefaultPrevented() || !inDom) return var that = this var $tip = this.tip() var tipId = this.getUID(this.type) this.setContent() $tip.attr('id', tipId) this.$element.attr('aria-describedby', tipId) if (this.options.animation) $tip.addClass('fade') var placement = typeof this.options.placement == 'function' ? this.options.placement.call(this, $tip[0], this.$element[0]) : this.options.placement var autoToken = /\s?auto?\s?/i var autoPlace = autoToken.test(placement) if (autoPlace) placement = placement.replace(autoToken, '') || 'top' $tip .detach() .css({ top: 0, left: 0, display: 'block' }) .addClass(placement) .data('bs.' + this.type, this) this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element) var pos = this.getPosition() var actualWidth = $tip[0].offsetWidth var actualHeight = $tip[0].offsetHeight if (autoPlace) { var orgPlacement = placement var $container = this.options.container ? $(this.options.container) : this.$element.parent() var containerDim = this.getPosition($container) placement = placement == 'bottom' && pos.bottom + actualHeight > containerDim.bottom ? 'top' : placement == 'top' && pos.top - actualHeight < containerDim.top ? 'bottom' : placement == 'right' && pos.right + actualWidth > containerDim.width ? 'left' : placement == 'left' && pos.left - actualWidth < containerDim.left ? 'right' : placement $tip .removeClass(orgPlacement) .addClass(placement) } var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight) this.applyPlacement(calculatedOffset, placement) var complete = function () { var prevHoverState = that.hoverState that.$element.trigger('shown.bs.' + that.type) that.hoverState = null if (prevHoverState == 'out') that.leave(that) } $.support.transition && this.$tip.hasClass('fade') ? $tip .one('bsTransitionEnd', complete) .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) : complete() } } Tooltip.prototype.applyPlacement = function (offset, placement) { var $tip = this.tip() var width = $tip[0].offsetWidth var height = $tip[0].offsetHeight // manually read margins because getBoundingClientRect includes difference var marginTop = parseInt($tip.css('margin-top'), 10) var marginLeft = parseInt($tip.css('margin-left'), 10) // we must check for NaN for ie 8/9 if (isNaN(marginTop)) marginTop = 0 if (isNaN(marginLeft)) marginLeft = 0 offset.top = offset.top + marginTop offset.left = offset.left + marginLeft // $.fn.offset doesn't round pixel values // so we use setOffset directly with our own function B-0 $.offset.setOffset($tip[0], $.extend({ using: function (props) { $tip.css({ top: Math.round(props.top), left: Math.round(props.left) }) } }, offset), 0) $tip.addClass('in') // check to see if placing tip in new offset caused the tip to resize itself var actualWidth = $tip[0].offsetWidth var actualHeight = $tip[0].offsetHeight if (placement == 'top' && actualHeight != height) { offset.top = offset.top + height - actualHeight } var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight) if (delta.left) offset.left += delta.left else offset.top += delta.top var isVertical = /top|bottom/.test(placement) var arrowDelta = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight var arrowOffsetPosition = isVertical ? 
'offsetWidth' : 'offsetHeight' $tip.offset(offset) this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical) } Tooltip.prototype.replaceArrow = function (delta, dimension, isHorizontal) { this.arrow() .css(isHorizontal ? 'left' : 'top', 50 * (1 - delta / dimension) + '%') .css(isHorizontal ? 'top' : 'left', '') } Tooltip.prototype.setContent = function () { var $tip = this.tip() var title = this.getTitle() $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title) $tip.removeClass('fade in top bottom left right') } Tooltip.prototype.hide = function (callback) { var that = this var $tip = this.tip() var e = $.Event('hide.bs.' + this.type) function complete() { if (that.hoverState != 'in') $tip.detach() that.$element .removeAttr('aria-describedby') .trigger('hidden.bs.' + that.type) callback && callback() } this.$element.trigger(e) if (e.isDefaultPrevented()) return $tip.removeClass('in') $.support.transition && this.$tip.hasClass('fade') ? $tip .one('bsTransitionEnd', complete) .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) : complete() this.hoverState = null return this } Tooltip.prototype.fixTitle = function () { var $e = this.$element if ($e.attr('title') || typeof ($e.attr('data-original-title')) != 'string') { $e.attr('data-original-title', $e.attr('title') || '').attr('title', '') } } Tooltip.prototype.hasContent = function () { return this.getTitle() } Tooltip.prototype.getPosition = function ($element) { $element = $element || this.$element var el = $element[0] var isBody = el.tagName == 'BODY' var elRect = el.getBoundingClientRect() if (elRect.width == null) { // width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093 elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top }) } var elOffset = isBody ? { top: 0, left: 0 } : $element.offset() var scroll = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() } var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null return $.extend({}, elRect, scroll, outerDims, elOffset) } Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) { return placement == 'bottom' ? { top: pos.top + pos.height, left: pos.left + pos.width / 2 - actualWidth / 2 } : placement == 'top' ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } : placement == 'left' ? 
{ top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } : /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width } } Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) { var delta = { top: 0, left: 0 } if (!this.$viewport) return delta var viewportPadding = this.options.viewport && this.options.viewport.padding || 0 var viewportDimensions = this.getPosition(this.$viewport) if (/right|left/.test(placement)) { var topEdgeOffset = pos.top - viewportPadding - viewportDimensions.scroll var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight if (topEdgeOffset < viewportDimensions.top) { // top overflow delta.top = viewportDimensions.top - topEdgeOffset } else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset } } else { var leftEdgeOffset = pos.left - viewportPadding var rightEdgeOffset = pos.left + viewportPadding + actualWidth if (leftEdgeOffset < viewportDimensions.left) { // left overflow delta.left = viewportDimensions.left - leftEdgeOffset } else if (rightEdgeOffset > viewportDimensions.width) { // right overflow delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset } } return delta } Tooltip.prototype.getTitle = function () { var title var $e = this.$element var o = this.options title = $e.attr('data-original-title') || (typeof o.title == 'function' ? o.title.call($e[0]) : o.title) return title } Tooltip.prototype.getUID = function (prefix) { do prefix += ~~(Math.random() * 1000000) while (document.getElementById(prefix)) return prefix } Tooltip.prototype.tip = function () { return (this.$tip = this.$tip || $(this.options.template)) } Tooltip.prototype.arrow = function () { return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow')) } Tooltip.prototype.enable = function () { this.enabled = true } Tooltip.prototype.disable = function () { this.enabled = false } Tooltip.prototype.toggleEnabled = function () { this.enabled = !this.enabled } Tooltip.prototype.toggle = function (e) { var self = this if (e) { self = $(e.currentTarget).data('bs.' + this.type) if (!self) { self = new this.constructor(e.currentTarget, this.getDelegateOptions()) $(e.currentTarget).data('bs.' + this.type, self) } } self.tip().hasClass('in') ? self.leave(self) : self.enter(self) } Tooltip.prototype.destroy = function () { var that = this clearTimeout(this.timeout) this.hide(function () { that.$element.off('.' + that.type).removeData('bs.' 
+ that.type) }) } // TOOLTIP PLUGIN DEFINITION // ========================= function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.tooltip') var options = typeof option == 'object' && option var selector = options && options.selector if (!data && option == 'destroy') return if (selector) { if (!data) $this.data('bs.tooltip', (data = {})) if (!data[selector]) data[selector] = new Tooltip(this, options) } else { if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options))) } if (typeof option == 'string') data[option]() }) } var old = $.fn.tooltip $.fn.tooltip = Plugin $.fn.tooltip.Constructor = Tooltip // TOOLTIP NO CONFLICT // =================== $.fn.tooltip.noConflict = function () { $.fn.tooltip = old return this } }(jQuery); ;/* ======================================================================== * Bootstrap: popover.js v3.3.1 * http://getbootstrap.com/javascript/#popovers * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // POPOVER PUBLIC CLASS DEFINITION // =============================== var Popover = function (element, options) { this.init('popover', element, options) } if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js') Popover.VERSION = '3.3.1' Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, { placement: 'right', trigger: 'click', content: '', template: '<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>' }) // NOTE: POPOVER EXTENDS tooltip.js // ================================ Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype) Popover.prototype.constructor = Popover Popover.prototype.getDefaults = function () { return Popover.DEFAULTS } Popover.prototype.setContent = function () { var $tip = this.tip() var title = this.getTitle() var content = this.getContent() $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title) $tip.find('.popover-content').children().detach().end()[ // we use append for html objects to maintain js events this.options.html ? (typeof content == 'string' ? 'html' : 'append') : 'text' ](content) $tip.removeClass('fade top bottom left right in') // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do // this manually by checking the contents. if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide() } Popover.prototype.hasContent = function () { return this.getTitle() || this.getContent() } Popover.prototype.getContent = function () { var $e = this.$element var o = this.options return $e.attr('data-content') || (typeof o.content == 'function' ? 
o.content.call($e[0]) : o.content) } Popover.prototype.arrow = function () { return (this.$arrow = this.$arrow || this.tip().find('.arrow')) } Popover.prototype.tip = function () { if (!this.$tip) this.$tip = $(this.options.template) return this.$tip } // POPOVER PLUGIN DEFINITION // ========================= function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.popover') var options = typeof option == 'object' && option var selector = options && options.selector if (!data && option == 'destroy') return if (selector) { if (!data) $this.data('bs.popover', (data = {})) if (!data[selector]) data[selector] = new Popover(this, options) } else { if (!data) $this.data('bs.popover', (data = new Popover(this, options))) } if (typeof option == 'string') data[option]() }) } var old = $.fn.popover $.fn.popover = Plugin $.fn.popover.Constructor = Popover // POPOVER NO CONFLICT // =================== $.fn.popover.noConflict = function () { $.fn.popover = old return this } }(jQuery); ;/* ======================================================================== * Bootstrap: scrollspy.js v3.3.1 * http://getbootstrap.com/javascript/#scrollspy * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // SCROLLSPY CLASS DEFINITION // ========================== function ScrollSpy(element, options) { var process = $.proxy(this.process, this) this.$body = $('body') this.$scrollElement = $(element).is('body') ? $(window) : $(element) this.options = $.extend({}, ScrollSpy.DEFAULTS, options) this.selector = (this.options.target || '') + ' .nav li > a' this.offsets = [] this.targets = [] this.activeTarget = null this.scrollHeight = 0 this.$scrollElement.on('scroll.bs.scrollspy', process) this.refresh() this.process() } ScrollSpy.VERSION = '3.3.1' ScrollSpy.DEFAULTS = { offset: 10 } ScrollSpy.prototype.getScrollHeight = function () { return this.$scrollElement[0].scrollHeight || Math.max(this.$body[0].scrollHeight, document.documentElement.scrollHeight) } ScrollSpy.prototype.refresh = function () { var offsetMethod = 'offset' var offsetBase = 0 if (!$.isWindow(this.$scrollElement[0])) { offsetMethod = 'position' offsetBase = this.$scrollElement.scrollTop() } this.offsets = [] this.targets = [] this.scrollHeight = this.getScrollHeight() var self = this this.$body .find(this.selector) .map(function () { var $el = $(this) var href = $el.data('target') || $el.attr('href') var $href = /^#./.test(href) && $(href) return ($href && $href.length && $href.is(':visible') && [[$href[offsetMethod]().top + offsetBase, href]]) || null }) .sort(function (a, b) { return a[0] - b[0] }) .each(function () { self.offsets.push(this[0]) self.targets.push(this[1]) }) } ScrollSpy.prototype.process = function () { var scrollTop = this.$scrollElement.scrollTop() + this.options.offset var scrollHeight = this.getScrollHeight() var maxScroll = this.options.offset + scrollHeight - this.$scrollElement.height() var offsets = this.offsets var targets = this.targets var activeTarget = this.activeTarget var i if (this.scrollHeight != scrollHeight) { this.refresh() } if (scrollTop >= maxScroll) { return activeTarget != (i = targets[targets.length - 1]) && this.activate(i) } if (activeTarget && scrollTop < offsets[0]) { this.activeTarget = null return this.clear() } for (i 
= offsets.length; i--;) { activeTarget != targets[i] && scrollTop >= offsets[i] && (!offsets[i + 1] || scrollTop <= offsets[i + 1]) && this.activate(targets[i]) } } ScrollSpy.prototype.activate = function (target) { this.activeTarget = target this.clear() var selector = this.selector + '[data-target="' + target + '"],' + this.selector + '[href="' + target + '"]' var active = $(selector) .parents('li') .addClass('active') if (active.parent('.dropdown-menu').length) { active = active .closest('li.dropdown') .addClass('active') } active.trigger('activate.bs.scrollspy') } ScrollSpy.prototype.clear = function () { $(this.selector) .parentsUntil(this.options.target, '.active') .removeClass('active') } // SCROLLSPY PLUGIN DEFINITION // =========================== function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.scrollspy') var options = typeof option == 'object' && option if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options))) if (typeof option == 'string') data[option]() }) } var old = $.fn.scrollspy $.fn.scrollspy = Plugin $.fn.scrollspy.Constructor = ScrollSpy // SCROLLSPY NO CONFLICT // ===================== $.fn.scrollspy.noConflict = function () { $.fn.scrollspy = old return this } // SCROLLSPY DATA-API // ================== $(window).on('load.bs.scrollspy.data-api', function () { $('[data-spy="scroll"]').each(function () { var $spy = $(this) Plugin.call($spy, $spy.data()) }) }) }(jQuery); ;/* ======================================================================== * Bootstrap: tab.js v3.3.1 * http://getbootstrap.com/javascript/#tabs * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // TAB CLASS DEFINITION // ==================== var Tab = function (element) { this.element = $(element) } Tab.VERSION = '3.3.1' Tab.TRANSITION_DURATION = 150 Tab.prototype.show = function () { var $this = this.element var $ul = $this.closest('ul:not(.dropdown-menu)') var selector = $this.data('target') if (!selector) { selector = $this.attr('href') selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 } if ($this.parent('li').hasClass('active')) return var $previous = $ul.find('.active:last a') var hideEvent = $.Event('hide.bs.tab', { relatedTarget: $this[0] }) var showEvent = $.Event('show.bs.tab', { relatedTarget: $previous[0] }) $previous.trigger(hideEvent) $this.trigger(showEvent) if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return var $target = $(selector) this.activate($this.closest('li'), $ul) this.activate($target, $target.parent(), function () { $previous.trigger({ type: 'hidden.bs.tab', relatedTarget: $this[0] }) $this.trigger({ type: 'shown.bs.tab', relatedTarget: $previous[0] }) }) } Tab.prototype.activate = function (element, container, callback) { var $active = container.find('> .active') var transition = callback && $.support.transition && (($active.length && $active.hasClass('fade')) || !!container.find('> .fade').length) function next() { $active .removeClass('active') .find('> .dropdown-menu > .active') .removeClass('active') .end() .find('[data-toggle="tab"]') .attr('aria-expanded', false) element .addClass('active') .find('[data-toggle="tab"]') .attr('aria-expanded', true) if (transition) { element[0].offsetWidth // reflow for 
transition element.addClass('in') } else { element.removeClass('fade') } if (element.parent('.dropdown-menu')) { element .closest('li.dropdown') .addClass('active') .end() .find('[data-toggle="tab"]') .attr('aria-expanded', true) } callback && callback() } $active.length && transition ? $active .one('bsTransitionEnd', next) .emulateTransitionEnd(Tab.TRANSITION_DURATION) : next() $active.removeClass('in') } // TAB PLUGIN DEFINITION // ===================== function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.tab') if (!data) $this.data('bs.tab', (data = new Tab(this))) if (typeof option == 'string') data[option]() }) } var old = $.fn.tab $.fn.tab = Plugin $.fn.tab.Constructor = Tab // TAB NO CONFLICT // =============== $.fn.tab.noConflict = function () { $.fn.tab = old return this } // TAB DATA-API // ============ var clickHandler = function (e) { e.preventDefault() Plugin.call($(this), 'show') } $(document) .on('click.bs.tab.data-api', '[data-toggle="tab"]', clickHandler) .on('click.bs.tab.data-api', '[data-toggle="pill"]', clickHandler) }(jQuery); ;/* ======================================================================== * Bootstrap: affix.js v3.3.1 * http://getbootstrap.com/javascript/#affix * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // AFFIX CLASS DEFINITION // ====================== var Affix = function (element, options) { this.options = $.extend({}, Affix.DEFAULTS, options) this.$target = $(this.options.target) .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this)) .on('click.bs.affix.data-api', $.proxy(this.checkPositionWithEventLoop, this)) this.$element = $(element) this.affixed = this.unpin = this.pinnedOffset = null this.checkPosition() } Affix.VERSION = '3.3.1' Affix.RESET = 'affix affix-top affix-bottom' Affix.DEFAULTS = { offset: 0, target: window } Affix.prototype.getState = function (scrollHeight, height, offsetTop, offsetBottom) { var scrollTop = this.$target.scrollTop() var position = this.$element.offset() var targetHeight = this.$target.height() if (offsetTop != null && this.affixed == 'top') return scrollTop < offsetTop ? 'top' : false if (this.affixed == 'bottom') { if (offsetTop != null) return (scrollTop + this.unpin <= position.top) ? false : 'bottom' return (scrollTop + targetHeight <= scrollHeight - offsetBottom) ? false : 'bottom' } var initializing = this.affixed == null var colliderTop = initializing ? scrollTop : position.top var colliderHeight = initializing ? 
targetHeight : height if (offsetTop != null && colliderTop <= offsetTop) return 'top' if (offsetBottom != null && (colliderTop + colliderHeight >= scrollHeight - offsetBottom)) return 'bottom' return false } Affix.prototype.getPinnedOffset = function () { if (this.pinnedOffset) return this.pinnedOffset this.$element.removeClass(Affix.RESET).addClass('affix') var scrollTop = this.$target.scrollTop() var position = this.$element.offset() return (this.pinnedOffset = position.top - scrollTop) } Affix.prototype.checkPositionWithEventLoop = function () { setTimeout($.proxy(this.checkPosition, this), 1) } Affix.prototype.checkPosition = function () { if (!this.$element.is(':visible')) return var height = this.$element.height() var offset = this.options.offset var offsetTop = offset.top var offsetBottom = offset.bottom var scrollHeight = $('body').height() if (typeof offset != 'object') offsetBottom = offsetTop = offset if (typeof offsetTop == 'function') offsetTop = offset.top(this.$element) if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element) var affix = this.getState(scrollHeight, height, offsetTop, offsetBottom) if (this.affixed != affix) { if (this.unpin != null) this.$element.css('top', '') var affixType = 'affix' + (affix ? '-' + affix : '') var e = $.Event(affixType + '.bs.affix') this.$element.trigger(e) if (e.isDefaultPrevented()) return this.affixed = affix this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null this.$element .removeClass(Affix.RESET) .addClass(affixType) .trigger(affixType.replace('affix', 'affixed') + '.bs.affix') } if (affix == 'bottom') { this.$element.offset({ top: scrollHeight - height - offsetBottom }) } } // AFFIX PLUGIN DEFINITION // ======================= function Plugin(option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.affix') var options = typeof option == 'object' && option if (!data) $this.data('bs.affix', (data = new Affix(this, options))) if (typeof option == 'string') data[option]() }) } var old = $.fn.affix $.fn.affix = Plugin $.fn.affix.Constructor = Affix // AFFIX NO CONFLICT // ================= $.fn.affix.noConflict = function () { $.fn.affix = old return this } // AFFIX DATA-API // ============== $(window).on('load', function () { $('[data-spy="affix"]').each(function () { var $spy = $(this) var data = $spy.data() data.offset = data.offset || {} if (data.offsetBottom != null) data.offset.bottom = data.offsetBottom if (data.offsetTop != null) data.offset.top = data.offsetTop Plugin.call($spy, data) }) }) }(jQuery); ;/* ======================================================================== * DOM-based Routing * Based on http://goo.gl/EUTi53 by Paul Irish * * Only fires on body classes that match. If a body class contains a dash, * replace the dash with an underscore when adding it to the object below. * * .noConflict() * The routing is enclosed within an anonymous function so that you can * always reference jQuery with $, even when in .noConflict() mode. * * Google CDN, Latest jQuery * To use the default WordPress version of jQuery, go to lib/config.php and * remove or comment out: add_theme_support('jquery-cdn'); * ======================================================================== */ (function($) { // Use this variable to set up the common and page specific functions. If you // rename this variable, you will also need to rename the namespace below. 
var Roots = { // All pages common: { init: function() { // JavaScript to be fired on all pages } }, // Home page home: { init: function() { // JavaScript to be fired on the home page } }, // About us page, note the change from about-us to about_us. about_us: { init: function() { // JavaScript to be fired on the about us page } } }; // The routing fires all common scripts, followed by the page specific scripts. // Add additional events for more control over timing e.g. a finalize event var UTIL = { fire: function(func, funcname, args) { var namespace = Roots; funcname = (funcname === undefined) ? 'init' : funcname; if (func !== '' && namespace[func] && typeof namespace[func][funcname] === 'function') { namespace[func][funcname](args); } }, loadEvents: function() { UTIL.fire('common'); $.each(document.body.className.replace(/-/g, '_').split(/\s+/),function(i,classnm) { UTIL.fire(classnm); }); } }; $(document).ready(UTIL.loadEvents); })(jQuery); // Fully reference jQuery after this point.
* http://getbootstrap.com/javascript/#modals
app.go
package app import ( "io" "os" "path/filepath" "github.com/spf13/viper" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/cli" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" dbm "github.com/tendermint/tm-db" bam "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/simapp" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/version" "github.com/cosmos/cosmos-sdk/x/auth" authvesting "github.com/cosmos/cosmos-sdk/x/auth/vesting" "github.com/cosmos/cosmos-sdk/x/bank" "github.com/cosmos/cosmos-sdk/x/crisis" distr "github.com/cosmos/cosmos-sdk/x/distribution" "github.com/cosmos/cosmos-sdk/x/evidence" "github.com/cosmos/cosmos-sdk/x/genutil" "github.com/cosmos/cosmos-sdk/x/gov" "github.com/cosmos/cosmos-sdk/x/mint" "github.com/cosmos/cosmos-sdk/x/params" paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" "github.com/cosmos/cosmos-sdk/x/slashing" "github.com/cosmos/cosmos-sdk/x/staking" "github.com/cosmos/cosmos-sdk/x/supply" "github.com/cosmos/cosmos-sdk/x/upgrade" upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" "github.com/CosmWasm/wasmd/x/wasm" ) const appName = "WasmApp" var ( // DefaultCLIHome is the default home directory for wasmcli DefaultCLIHome = os.ExpandEnv("$HOME/.wasmcli") // DefaultNodeHome is the default home directory for wasmd DefaultNodeHome = os.ExpandEnv("$HOME/.wasmd") // ModuleBasics is the module BasicManager, in charge of setting up basic, // non-dependent module elements, such as codec registration // and genesis verification. ModuleBasics = module.NewBasicManager( genutil.AppModuleBasic{}, auth.AppModuleBasic{}, bank.AppModuleBasic{}, staking.AppModuleBasic{}, mint.AppModuleBasic{}, distr.AppModuleBasic{}, gov.NewAppModuleBasic(paramsclient.ProposalHandler, distr.ProposalHandler, upgradeclient.ProposalHandler), params.AppModuleBasic{}, wasm.AppModuleBasic{}, crisis.AppModuleBasic{}, slashing.AppModuleBasic{}, supply.AppModuleBasic{}, upgrade.AppModuleBasic{}, evidence.AppModuleBasic{}, ) // maccPerms defines the module account permissions maccPerms = map[string][]string{ auth.FeeCollectorName: nil, distr.ModuleName: nil, mint.ModuleName: {supply.Minter}, staking.BondedPoolName: {supply.Burner, supply.Staking}, staking.NotBondedPoolName: {supply.Burner, supply.Staking}, gov.ModuleName: {supply.Burner}, } ) // MakeCodec creates the application codec. The codec is sealed before it is // returned. func MakeCodec() *codec.Codec { var cdc = codec.New() ModuleBasics.RegisterCodec(cdc) sdk.RegisterCodec(cdc) codec.RegisterCrypto(cdc) codec.RegisterEvidences(cdc) authvesting.RegisterCodec(cdc) return cdc.Seal() } // Verify app interface at compile time var _ simapp.App = (*WasmApp)(nil) // WasmApp is an extended ABCI application type WasmApp struct { *bam.BaseApp cdc *codec.Codec invCheckPeriod uint // keys to access the substores keys map[string]*sdk.KVStoreKey
// keepers accountKeeper auth.AccountKeeper bankKeeper bank.Keeper supplyKeeper supply.Keeper stakingKeeper staking.Keeper slashingKeeper slashing.Keeper mintKeeper mint.Keeper distrKeeper distr.Keeper govKeeper gov.Keeper crisisKeeper crisis.Keeper paramsKeeper params.Keeper evidenceKeeper *evidence.Keeper upgradeKeeper upgrade.Keeper wasmKeeper wasm.Keeper // the module manager mm *module.Manager // simulation manager sm *module.SimulationManager } // WasmWrapper allows us to use namespacing in the config file // This is only used for parsing in the app, x/wasm expects WasmConfig type WasmWrapper struct { Wasm wasm.WasmConfig `mapstructure:"wasm"` } // NewWasmApp returns a reference to an initialized WasmApp. func NewWasmApp( logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, invCheckPeriod uint, skipUpgradeHeights map[int64]bool, baseAppOptions ...func(*bam.BaseApp), ) *WasmApp { cdc := MakeCodec() debug := viper.GetBool(cli.TraceFlag) baseAppOptions = append(baseAppOptions, bam.SetDebug(debug)) bApp := bam.NewBaseApp(appName, logger, db, auth.DefaultTxDecoder(cdc), baseAppOptions...) bApp.SetCommitMultiStoreTracer(traceStore) bApp.SetAppVersion(version.Version) keys := sdk.NewKVStoreKeys( bam.MainStoreKey, auth.StoreKey, staking.StoreKey, supply.StoreKey, mint.StoreKey, distr.StoreKey, slashing.StoreKey, gov.StoreKey, params.StoreKey, evidence.StoreKey, upgrade.StoreKey, wasm.StoreKey, ) tKeys := sdk.NewTransientStoreKeys(staking.TStoreKey, params.TStoreKey) app := &WasmApp{ BaseApp: bApp, cdc: cdc, invCheckPeriod: invCheckPeriod, keys: keys, tKeys: tKeys, subspaces: make(map[string]params.Subspace), } // init params keeper and subspaces app.paramsKeeper = params.NewKeeper(app.cdc, keys[params.StoreKey], tKeys[params.TStoreKey]) app.subspaces[auth.ModuleName] = app.paramsKeeper.Subspace(auth.DefaultParamspace) app.subspaces[bank.ModuleName] = app.paramsKeeper.Subspace(bank.DefaultParamspace) app.subspaces[staking.ModuleName] = app.paramsKeeper.Subspace(staking.DefaultParamspace) app.subspaces[mint.ModuleName] = app.paramsKeeper.Subspace(mint.DefaultParamspace) app.subspaces[distr.ModuleName] = app.paramsKeeper.Subspace(distr.DefaultParamspace) app.subspaces[slashing.ModuleName] = app.paramsKeeper.Subspace(slashing.DefaultParamspace) app.subspaces[gov.ModuleName] = app.paramsKeeper.Subspace(gov.DefaultParamspace).WithKeyTable(gov.ParamKeyTable()) app.subspaces[crisis.ModuleName] = app.paramsKeeper.Subspace(crisis.DefaultParamspace) app.subspaces[evidence.ModuleName] = app.paramsKeeper.Subspace(evidence.DefaultParamspace) // add keepers app.accountKeeper = auth.NewAccountKeeper( app.cdc, keys[auth.StoreKey], app.subspaces[auth.ModuleName], auth.ProtoBaseAccount, ) app.bankKeeper = bank.NewBaseKeeper( app.accountKeeper, app.subspaces[bank.ModuleName], app.ModuleAccountAddrs(), ) app.supplyKeeper = supply.NewKeeper( app.cdc, keys[supply.StoreKey], app.accountKeeper, app.bankKeeper, maccPerms, ) stakingKeeper := staking.NewKeeper( app.cdc, keys[staking.StoreKey], app.supplyKeeper, app.subspaces[staking.ModuleName], ) app.mintKeeper = mint.NewKeeper( app.cdc, keys[mint.StoreKey], app.subspaces[mint.ModuleName], &stakingKeeper, app.supplyKeeper, auth.FeeCollectorName, ) app.distrKeeper = distr.NewKeeper( app.cdc, keys[distr.StoreKey], app.subspaces[distr.ModuleName], &stakingKeeper, app.supplyKeeper, auth.FeeCollectorName, app.ModuleAccountAddrs(), ) app.slashingKeeper = slashing.NewKeeper( app.cdc, keys[slashing.StoreKey], &stakingKeeper, app.subspaces[slashing.ModuleName], 
) app.crisisKeeper = crisis.NewKeeper( app.subspaces[crisis.ModuleName], invCheckPeriod, app.supplyKeeper, auth.FeeCollectorName, ) app.upgradeKeeper = upgrade.NewKeeper(skipUpgradeHeights, keys[upgrade.StoreKey], app.cdc) // create evidence keeper with evidence router evidenceKeeper := evidence.NewKeeper( app.cdc, keys[evidence.StoreKey], app.subspaces[evidence.ModuleName], &stakingKeeper, app.slashingKeeper, ) evidenceRouter := evidence.NewRouter() // TODO: register evidence routes evidenceKeeper.SetRouter(evidenceRouter) app.evidenceKeeper = evidenceKeeper // register the proposal types govRouter := gov.NewRouter() govRouter.AddRoute(gov.RouterKey, gov.ProposalHandler). AddRoute(params.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)). AddRoute(distr.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)). AddRoute(upgrade.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper)) app.govKeeper = gov.NewKeeper( app.cdc, keys[gov.StoreKey], app.subspaces[gov.ModuleName], app.supplyKeeper, &stakingKeeper, govRouter, ) // register the staking hooks // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks app.stakingKeeper = *stakingKeeper.SetHooks( staking.NewMultiStakingHooks(app.distrKeeper.Hooks(), app.slashingKeeper.Hooks()), ) // just re-use the full router - do we want to limit this more? var wasmRouter = bApp.Router() // better way to get this dir??? homeDir := viper.GetString(cli.HomeFlag) wasmDir := filepath.Join(homeDir, "wasm") wasmWrap := WasmWrapper{Wasm: wasm.DefaultWasmConfig()} err := viper.Unmarshal(&wasmWrap) if err != nil { panic("error while reading wasm config: " + err.Error()) } wasmConfig := wasmWrap.Wasm // The last arguments can contain custom message handlers, and custom query handlers, // if we want to allow any custom callbacks supportedFeatures := "staking" app.wasmKeeper = wasm.NewKeeper(app.cdc, keys[wasm.StoreKey], app.accountKeeper, app.bankKeeper, app.stakingKeeper, wasmRouter, wasmDir, wasmConfig, supportedFeatures, nil, nil) // NOTE: Any module instantiated in the module manager that is later modified // must be passed by reference here. app.mm = module.NewManager( genutil.NewAppModule(app.accountKeeper, app.stakingKeeper, app.BaseApp.DeliverTx), auth.NewAppModule(app.accountKeeper), bank.NewAppModule(app.bankKeeper, app.accountKeeper), crisis.NewAppModule(&app.crisisKeeper), supply.NewAppModule(app.supplyKeeper, app.accountKeeper), gov.NewAppModule(app.govKeeper, app.accountKeeper, app.supplyKeeper), mint.NewAppModule(app.mintKeeper), slashing.NewAppModule(app.slashingKeeper, app.accountKeeper, app.stakingKeeper), distr.NewAppModule(app.distrKeeper, app.accountKeeper, app.supplyKeeper, app.stakingKeeper), staking.NewAppModule(app.stakingKeeper, app.accountKeeper, app.supplyKeeper), evidence.NewAppModule(*app.evidenceKeeper), wasm.NewAppModule(app.wasmKeeper), upgrade.NewAppModule(app.upgradeKeeper), ) // During begin block slashing happens after distr.BeginBlocker so that // there is nothing left over in the validator fee pool, so as to keep the // CanWithdrawInvariant invariant. app.mm.SetOrderBeginBlockers(upgrade.ModuleName, mint.ModuleName, distr.ModuleName, slashing.ModuleName) app.mm.SetOrderEndBlockers(crisis.ModuleName, gov.ModuleName, staking.ModuleName) // NOTE: The genutils module must occur after staking so that pools are // properly initialized with tokens from genesis accounts.
app.mm.SetOrderInitGenesis( distr.ModuleName, staking.ModuleName, auth.ModuleName, bank.ModuleName, slashing.ModuleName, gov.ModuleName, mint.ModuleName, supply.ModuleName, crisis.ModuleName, genutil.ModuleName, evidence.ModuleName, wasm.ModuleName, ) app.mm.RegisterInvariants(&app.crisisKeeper) app.mm.RegisterRoutes(app.Router(), app.QueryRouter()) // create the simulation manager and define the order of the modules for deterministic simulations // // NOTE: This is not required for apps that don't use the simulator for fuzz testing // transactions. app.sm = module.NewSimulationManager( auth.NewAppModule(app.accountKeeper), bank.NewAppModule(app.bankKeeper, app.accountKeeper), supply.NewAppModule(app.supplyKeeper, app.accountKeeper), gov.NewAppModule(app.govKeeper, app.accountKeeper, app.supplyKeeper), mint.NewAppModule(app.mintKeeper), distr.NewAppModule(app.distrKeeper, app.accountKeeper, app.supplyKeeper, app.stakingKeeper), staking.NewAppModule(app.stakingKeeper, app.accountKeeper, app.supplyKeeper), slashing.NewAppModule(app.slashingKeeper, app.accountKeeper, app.stakingKeeper), ) app.sm.RegisterStoreDecoders() // initialize stores app.MountKVStores(keys) app.MountTransientStores(tKeys) // initialize BaseApp app.SetInitChainer(app.InitChainer) app.SetBeginBlocker(app.BeginBlocker) app.SetAnteHandler(auth.NewAnteHandler(app.accountKeeper, app.supplyKeeper, auth.DefaultSigVerificationGasConsumer)) app.SetEndBlocker(app.EndBlocker) if loadLatest { err := app.LoadLatestVersion(app.keys[bam.MainStoreKey]) if err != nil { tmos.Exit(err.Error()) } } return app } // Name returns the name of the App func (app *WasmApp) Name() string { return app.BaseApp.Name() } // application updates every begin block func (app *WasmApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { return app.mm.BeginBlock(ctx, req) } // EndBlocker application updates every end block func (app *WasmApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { return app.mm.EndBlock(ctx, req) } // InitChainer application update at chain initialization func (app *WasmApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { var genesisState simapp.GenesisState app.cdc.MustUnmarshalJSON(req.AppStateBytes, &genesisState) return app.mm.InitGenesis(ctx, genesisState) } // LoadHeight loads a particular height func (app *WasmApp) LoadHeight(height int64) error { return app.LoadVersion(height, app.keys[bam.MainStoreKey]) } // ModuleAccountAddrs returns all the app's module account addresses. func (app *WasmApp) ModuleAccountAddrs() map[string]bool { modAccAddrs := make(map[string]bool) for acc := range maccPerms { modAccAddrs[supply.NewModuleAddress(acc).String()] = true } return modAccAddrs } // Codec returns the application's sealed codec. func (app *WasmApp) Codec() *codec.Codec { return app.cdc } // SimulationManager implements the SimulationApp interface func (app *WasmApp) SimulationManager() *module.SimulationManager { return app.sm } // GetMaccPerms returns a mapping of the application's module account permissions. func GetMaccPerms() map[string][]string { modAccPerms := make(map[string][]string) for k, v := range maccPerms { modAccPerms[k] = v } return modAccPerms }
tKeys map[string]*sdk.TransientStoreKey // subspaces subspaces map[string]params.Subspace
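A minimal usage sketch for the app.go row above (in Go, matching the file): how NewWasmApp would typically be constructed for local experiments, per the signature shown in the prefix. The logger/DB wiring here is illustrative rather than taken from the repository; only constructors known from tendermint and tm-db are used.

package main

import (
	"fmt"
	"os"

	"github.com/tendermint/tendermint/libs/log"
	dbm "github.com/tendermint/tm-db"

	app "github.com/CosmWasm/wasmd/app"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	db := dbm.NewMemDB() // in-memory store; fine for tests and experiments

	// Arguments mirror the signature above: traceStore=nil, loadLatest=true,
	// invCheckPeriod=0, and no skipped upgrade heights.
	wapp := app.NewWasmApp(logger, db, nil, true, 0, map[int64]bool{})
	fmt.Println(wapp.Name()) // prints "WasmApp"
}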
from_test.go
package clause_test import ( "fmt" "testing" "github.com/vine-io/vine/lib/dao/clause" ) func TestFrom(t *testing.T) { results := []struct { Clauses []clause.Interface Result string Vars []interface{} }{ { []clause.Interface{clause.Select{}, clause.From{}}, "SELECT * FROM `users`", nil, }, { []clause.Interface{ clause.Select{}, clause.From{ Tables: []clause.Table{{Name: "users"}}, Joins: []clause.Join{ { Type: clause.InnerJoin, Table: clause.Table{Name: "articles"}, ON: clause.Where{ []clause.Expression{clause.Eq{clause.Column{Table: "articles", Name: "id"}, clause.PrimaryColumn}}, }, }, }, }, }, "SELECT * FROM `users` INNER JOIN `articles` ON `articles`.`id` = `users`.`id`", nil, }, { []clause.Interface{ clause.Select{}, clause.From{ Tables: []clause.Table{{Name: "users"}}, Joins: []clause.Join{ { Type: clause.RightJoin, Table: clause.Table{Name: "profiles"}, ON: clause.Where{ []clause.Expression{clause.Eq{clause.Column{Table: "profiles", Name: "email"}, clause.Column{Table: clause.CurrentTable, Name: "email"}}}, }, }, }, }, clause.From{ Joins: []clause.Join{ { Type: clause.InnerJoin, Table: clause.Table{Name: "articles"}, ON: clause.Where{ []clause.Expression{clause.Eq{clause.Column{Table: "articles", Name: "id"}, clause.PrimaryColumn}}, }, }, { Type: clause.LeftJoin, Table: clause.Table{Name: "companies"}, Using: []string{"company_name"}, }, }, }, }, "SELECT * FROM `users` INNER JOIN `articles` ON `articles`.`id` = `users`.`id` LEFT JOIN `companies` USING (`company_name`)", nil, }, } for idx, result := range results { t.Run(fmt.Sprintf("case #%v", idx), func(t *testing.T) {
} }
checkBuildClauses(t, result.Clauses, result.Result, result.Vars) })
base_algorithm.py
import abc import pickle import time from collections import OrderedDict from copy import deepcopy import gtimer as gt import numpy as np from rlkit.core import logger, eval_util from rlkit.data_management.env_replay_buffer import EnvReplayBuffer from rlkit.data_management.path_builder import PathBuilder from rlkit.policies.base import ExplorationPolicy from rlkit.torch.sac.policies import MakeDeterministic from rlkit.samplers import PathSampler from rlkit.envs.wrapped_absorbing_env import WrappedAbsorbingEnv from gym.spaces import Dict class BaseAlgorithm(metaclass=abc.ABCMeta): """ base algorithm for single task setting can be used for RL or Learning from Demonstrations """ def __init__( self, env, exploration_policy: ExplorationPolicy, training_env=None, eval_policy=None, eval_sampler=None, num_epochs=100, num_steps_per_epoch=10000, num_steps_between_train_calls=1000, num_steps_per_eval=1000, max_path_length=1000, min_steps_before_training=0, replay_buffer=None, replay_buffer_size=10000, freq_saving=1, save_replay_buffer=False, save_environment=False, save_algorithm=False, save_best=False, save_best_starting_from_epoch=0, best_key='AverageReturn', # higher is better no_terminal=False, wrap_absorbing=False, render=False, render_kwargs={}, freq_log_visuals=1, eval_deterministic=False ): self.env = env self.training_env = training_env or pickle.loads(pickle.dumps(env)) self.exploration_policy = exploration_policy self.num_epochs = num_epochs self.num_env_steps_per_epoch = num_steps_per_epoch self.num_steps_between_train_calls = num_steps_between_train_calls self.num_steps_per_eval = num_steps_per_eval self.max_path_length = max_path_length self.min_steps_before_training = min_steps_before_training self.render = render self.save_replay_buffer = save_replay_buffer self.save_algorithm = save_algorithm self.save_environment = save_environment self.save_best = save_best self.save_best_starting_from_epoch = save_best_starting_from_epoch self.best_key = best_key self.best_statistic_so_far = float('-Inf')
eval_policy = exploration_policy eval_policy = MakeDeterministic(eval_policy) eval_sampler = PathSampler( env, eval_policy, num_steps_per_eval, max_path_length, no_terminal=no_terminal, render=render, render_kwargs=render_kwargs ) self.eval_policy = eval_policy self.eval_sampler = eval_sampler self.action_space = env.action_space self.obs_space = env.observation_space self.replay_buffer_size = replay_buffer_size if replay_buffer is None: assert max_path_length < replay_buffer_size replay_buffer = EnvReplayBuffer( self.replay_buffer_size, self.env, random_seed=np.random.randint(10000) ) else: assert max_path_length < replay_buffer._max_replay_buffer_size self.replay_buffer = replay_buffer self._n_env_steps_total = 0 self._n_train_steps_total = 0 self._n_rollouts_total = 0 self._do_train_time = 0 self._epoch_start_time = None self._algo_start_time = None self._old_table_keys = None self._current_path_builder = PathBuilder() self._exploration_paths = [] if wrap_absorbing: # needs to be properly handled both here and in replay buffer raise NotImplementedError() self.wrap_absorbing = wrap_absorbing self.freq_saving = freq_saving self.no_terminal = no_terminal self.eval_statistics = None self.freq_log_visuals = freq_log_visuals def train(self, start_epoch=0): self.pretrain() if start_epoch == 0: params = self.get_epoch_snapshot(-1) logger.save_itr_params(-1, params) self.training_mode(False) self._n_env_steps_total = start_epoch * self.num_env_steps_per_epoch gt.reset() gt.set_def_unique(False) self.start_training(start_epoch=start_epoch) def pretrain(self): """ Do anything before the main training phase. """ pass def start_training(self, start_epoch=0): self._current_path_builder = PathBuilder() observation = self._start_new_rollout() for epoch in gt.timed_for( range(start_epoch, self.num_epochs), save_itrs=True, ): self._start_epoch(epoch) for steps_this_epoch in range(self.num_env_steps_per_epoch): action, agent_info = self._get_action_and_info(observation) if self.render: self.training_env.render() next_ob, raw_reward, terminal, env_info = ( self.training_env.step(action) ) if self.no_terminal: terminal = False self._n_env_steps_total += 1 reward = np.array([raw_reward]) terminal = np.array([terminal]) self._handle_step( observation, action, reward, next_ob, np.array([False]) if self.no_terminal else terminal, absorbing=np.array([0., 0.]), agent_info=agent_info, env_info=env_info, ) if terminal[0]: if self.wrap_absorbing: raise NotImplementedError() ''' If we wrap absorbing states, two additional transitions must be added: (s_T, s_abs) and (s_abs, s_abs). In Disc Actor Critic paper they make s_abs be a vector of 0s with last dim set to 1. Here we are going to add the following: ([next_ob,0], random_action, [next_ob, 1]) and ([next_ob,1], random_action, [next_ob, 1]) This way we can handle varying types of terminal states. 
''' # next_ob is the absorbing state # for now just taking the previous action self._handle_step( next_ob, action, # env.action_space.sample(), # the reward doesn't matter reward, next_ob, np.array([False]), absorbing=np.array([0.0, 1.0]), agent_info=agent_info, env_info=env_info ) self._handle_step( next_ob, action, # env.action_space.sample(), # the reward doesn't matter reward, next_ob, np.array([False]), absorbing=np.array([1.0, 1.0]), agent_info=agent_info, env_info=env_info ) self._handle_rollout_ending() observation = self._start_new_rollout() elif len(self._current_path_builder) >= self.max_path_length: self._handle_rollout_ending() observation = self._start_new_rollout() else: observation = next_ob if self._n_env_steps_total % self.num_steps_between_train_calls == 0: gt.stamp('sample') self._try_to_train(epoch) gt.stamp('train') gt.stamp('sample') self._try_to_eval(epoch) gt.stamp('eval') self._end_epoch() def _try_to_train(self, epoch): if self._can_train(): self.training_mode(True) self._do_training(epoch) self._n_train_steps_total += 1 self.training_mode(False) def _try_to_eval(self, epoch): if self._can_evaluate(): # save if it's time to save if (epoch % self.freq_saving == 0) or (epoch + 1 >= self.num_epochs): # if epoch + 1 >= self.num_epochs: # epoch = 'final' logger.save_extra_data(self.get_extra_data_to_save(epoch)) params = self.get_epoch_snapshot(epoch) logger.save_itr_params(epoch, params) self.evaluate(epoch) logger.record_tabular( "Number of train calls total", self._n_train_steps_total, ) logger.record_tabular( "Number of env steps total", self._n_env_steps_total, ) logger.record_tabular( "Number of rollouts total", self._n_rollouts_total, ) times_itrs = gt.get_times().stamps.itrs train_time = times_itrs['train'][-1] sample_time = times_itrs['sample'][-1] eval_time = times_itrs['eval'][-1] if epoch > 0 else 0 epoch_time = train_time + sample_time + eval_time total_time = gt.get_times().total logger.record_tabular('Train Time (s)', train_time) logger.record_tabular('(Previous) Eval Time (s)', eval_time) logger.record_tabular('Sample Time (s)', sample_time) logger.record_tabular('Epoch Time (s)', epoch_time) logger.record_tabular('Total Train Time (s)', total_time) logger.record_tabular("Epoch", epoch) logger.dump_tabular(with_prefix=False, with_timestamp=False) else: logger.log("Skipping eval for now.") def _can_evaluate(self): """ One annoying thing about the logger table is that the keys at each iteration need to be the exact same. So unless you can compute everything, skip evaluation. A common example for why you might want to skip evaluation is that at the beginning of training, you may not have enough data for a validation and training set. :return: """ return ( len(self._exploration_paths) > 0 and self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training ) def _can_train(self): return self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training def _get_action_and_info(self, observation): """ Get an action to take in the environment. 
:param observation: :return: """ self.exploration_policy.set_num_steps_total(self._n_env_steps_total) return self.exploration_policy.get_action( observation, ) def _start_epoch(self, epoch): self._epoch_start_time = time.time() self._exploration_paths = [] self._do_train_time = 0 logger.push_prefix('Iteration #%d | ' % epoch) def _end_epoch(self): self.eval_statistics = None logger.log("Epoch Duration: {0}".format( time.time() - self._epoch_start_time )) logger.log("Started Training: {0}".format(self._can_train())) logger.pop_prefix() def _start_new_rollout(self): self.exploration_policy.reset() return self.training_env.reset() def _handle_path(self, path): """ Naive implementation: just loop through each transition. :param path: :return: """ for ( ob, action, reward, next_ob, terminal, agent_info, env_info ) in zip( path["observations"], path["actions"], path["rewards"], path["next_observations"], path["terminals"], path["agent_infos"], path["env_infos"], ): self._handle_step( ob, action, reward, next_ob, terminal, agent_info=agent_info, env_info=env_info, ) self._handle_rollout_ending() def _handle_step( self, observation, action, reward, next_observation, terminal, absorbing, agent_info, env_info, ): """ Implement anything that needs to happen after every step :return: """ self._current_path_builder.add_all( observations=observation, actions=action, rewards=reward, next_observations=next_observation, terminals=terminal, absorbing=absorbing, agent_infos=agent_info, env_infos=env_info, ) self.replay_buffer.add_sample( observation=observation, action=action, reward=reward, terminal=terminal, next_observation=next_observation, absorbing=absorbing, agent_info=agent_info, env_info=env_info, ) def _handle_rollout_ending(self): """ Implement anything that needs to happen after every rollout. """ self.replay_buffer.terminate_episode() self._n_rollouts_total += 1 if len(self._current_path_builder) > 0: self._exploration_paths.append( self._current_path_builder ) self._current_path_builder = PathBuilder() def get_epoch_snapshot(self, epoch): """ Probably will be overridden by each algorithm """ data_to_save = dict( epoch=epoch, exploration_policy=self.exploration_policy, ) if self.save_environment: data_to_save['env'] = self.training_env return data_to_save # @abc.abstractmethod # def load_snapshot(self, snapshot): # """ # Should be implemented on a per algorithm basis # taking into consideration the particular # get_epoch_snapshot implementation for the algorithm # """ # pass def get_extra_data_to_save(self, epoch): """ Save things that shouldn't be saved every snapshot but rather overwritten every time. :param epoch: :return: """ if self.render: self.training_env.render(close=True) data_to_save = dict( epoch=epoch, ) if self.save_environment: data_to_save['env'] = self.training_env if self.save_replay_buffer: data_to_save['replay_buffer'] = self.replay_buffer if self.save_algorithm: data_to_save['algorithm'] = self return data_to_save @abc.abstractmethod def training_mode(self, mode): """ Set training mode to `mode`. :param mode: If True, training will happen (e.g. set the dropout probabilities to not all ones). """ pass @abc.abstractmethod def _do_training(self): """ Perform some update, e.g. perform one gradient step. :return: """ pass def evaluate(self, epoch): """ Evaluate the policy, e.g. save/print progress. 
:param epoch: :return: """ statistics = OrderedDict() try: statistics.update(self.eval_statistics) self.eval_statistics = None except: print('No Stats to Eval') logger.log("Collecting samples for evaluation") test_paths = self.eval_sampler.obtain_samples() statistics.update(eval_util.get_generic_path_information( test_paths, stat_prefix="Test", )) statistics.update(eval_util.get_generic_path_information( self._exploration_paths, stat_prefix="Exploration", )) if hasattr(self.env, "log_diagnostics"): self.env.log_diagnostics(test_paths) if hasattr(self.env, "log_statistics"): statistics.update(self.env.log_statistics(test_paths)) if epoch % self.freq_log_visuals == 0: if hasattr(self.env, "log_visuals"): self.env.log_visuals(test_paths, epoch, logger.get_snapshot_dir()) average_returns = eval_util.get_average_returns(test_paths) statistics['AverageReturn'] = average_returns for key, value in statistics.items(): logger.record_tabular(key, value) best_statistic = statistics[self.best_key] if best_statistic > self.best_statistic_so_far: self.best_statistic_so_far = best_statistic if self.save_best and epoch >= self.save_best_starting_from_epoch: data_to_save = { 'epoch': epoch, 'statistics': statistics } data_to_save.update(self.get_epoch_snapshot(epoch)) logger.save_extra_data(data_to_save, 'best.pkl') print('\n\nSAVED BEST\n\n')
if eval_sampler is None: if eval_policy is None:
aa2ua_cube.py
#!/usr/bin/env python """ Maps point charges obtained by GPAW and HORTON onto the original GROMACS topology initially modified by insertHbyList.py """ ## jlh 2018/04/02 import ast import h5py import ase.io from ase.io.cube import read_cube_data import parmed as pmd from parmed import gromacs from insertHbyList import insertHbyList import argparse def
(): parser = argparse.ArgumentParser(\ description='Converts an all-atom cube file into united-atom' ' representation based on certain replacement rules') #parser.add_argument('-c', '--charge',metavar='INTEGER_CHARGE', # type=int,nargs='?', const=1, default=0) #parser.add_argument('infile', nargs='?') parser.add_argument('infile_pdb', nargs='?', metavar='infile.pdb', default='system.pdb', help="Original .pdb file, before insertion of implicit hydrogens.") parser.add_argument('infile_top', nargs='?', metavar='infile.top', default='system.top', help="Original GROMACS .top file") parser.add_argument('infile_cube', nargs='?', metavar='infile.cube', default='esp.cube', help="ESP description (or other scalar field) in all-atom cube file.") parser.add_argument('outfile_cube', nargs='?', metavar='outfile.cube', default='esp_fitted_system.top', help="Output cube file, truncated to the atoms " "present in the original united-atom description.") parser.add_argument('-i','--insertion-rules', default="{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}", help="A string representation of a Python dictionary, describing how " "many implicit hydrogens have been inserted at which atom. Example: " "{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}") args = parser.parse_args() #implicitHbondingPartners={'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2} print('Using replacement rules "{}"...'.format(args.insertion_rules)) implicitHbondingPartners = ast.literal_eval(args.insertion_rules) aa2ua_cube(args.infile_pdb, args.infile_top, args.infile_cube, args.outfile_cube,implicitHbondingPartners=implicitHbondingPartners) def aa2ua_cube(infile_pdb, infile_top, infile_cube, outfile_cube,implicitHbondingPartners= {'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}): #infile_pdb = args.infile_pdb #infile_top = args.infile_top #infile_cube = args.infile_cube #outfile_cube = args.outfile_cube ase_struct=ase.io.read(infile_pdb) pmd_struct = pmd.load_file(infile_pdb) pmd_top = gromacs.GromacsTopologyFile(infile_top,parametrize=False) # throws some warnings on angle types, does not matter for bonding info pmd_top.strip(':SOL,CL') # strip water and electrolyte from system pmd_top.box = pmd_struct.box # Needed because .prmtop contains box info pmd_top.positions = pmd_struct.positions new_ase_struct, new_pmd_struct, names, residues = insertHbyList( ase_struct,pmd_top,implicitHbondingPartners,1.0) surplus_atoms = len(new_ase_struct) - len(ase_struct) print("{} atoms are going to be truncated from file " "{}...".format(surplus_atoms,infile_cube)) # hdf5 = h5py.File(infile_h5,'r') cube_data, cube_atoms = read_cube_data(infile_cube) ase.io.write(outfile_cube, cube_atoms[0:len(ase_struct)], data=cube_data) # ATTENTION: this script just truncates atoms based on total count difference # in UA and AA representations if __name__ == '__main__': main()
main
bpchar.go
/* * Copyright (c) 2019 Ready Stock * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing * permissions and limitations under the License. */ package types import ( "database/sql/driver" ) // BPChar is fixed-length, blank padded char type // character(n), char(n) type BPChar Text // Set converts from src to dst. func (dst *BPChar) Set(src interface{}) error { return (*Text)(dst).Set(src) } // Get returns underlying value func (dst *BPChar) Get() interface{} { return (*Text)(dst).Get() } // AssignTo assigns from src to dst. func (src *BPChar) AssignTo(dst interface{}) error { if src.Status == Present
return (*Text)(src).AssignTo(dst) } func (dst *BPChar) DecodeText(ci *ConnInfo, src []byte) error { return (*Text)(dst).DecodeText(ci, src) } func (dst *BPChar) DecodeBinary(ci *ConnInfo, src []byte) error { return (*Text)(dst).DecodeBinary(ci, src) } func (src *BPChar) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) { return (*Text)(src).EncodeText(ci, buf) } func (src *BPChar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) { return (*Text)(src).EncodeBinary(ci, buf) } // Scan implements the database/sql Scanner interface. func (dst *BPChar) Scan(src interface{}) error { return (*Text)(dst).Scan(src) } // Value implements the database/sql/driver Valuer interface. func (src *BPChar) Value() (driver.Value, error) { return (*Text)(src).Value() } func (src *BPChar) MarshalJSON() ([]byte, error) { return (*Text)(src).MarshalJSON() } func (dst *BPChar) UnmarshalJSON(b []byte) error { return (*Text)(dst).UnmarshalJSON(b) }
{ switch v := dst.(type) { case *rune: runes := []rune(src.String) if len(runes) == 1 { *v = runes[0] return nil } } }
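A hedged usage sketch for the BPChar row above: a Set/AssignTo round-trip through the single-rune special case shown in the middle fragment. The import path is hypothetical, and it assumes the underlying Text.Set accepts Go strings (as in the pgtype package this code mirrors).

package main

import (
	"fmt"

	"example.com/db/types" // import path hypothetical
)

func main() {
	var bc types.BPChar
	if err := bc.Set("å"); err != nil { // delegates to Text.Set
		panic(err)
	}

	// Exercises the *rune branch above: a one-rune string is assigned
	// directly instead of falling through to Text.AssignTo.
	var r rune
	if err := bc.AssignTo(&r); err != nil {
		panic(err)
	}
	fmt.Printf("%c\n", r) // å
}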
main.go
package main
"github.com/DeNA/unity-meta-check/tool/unity-meta-check-github-pr-comment/cmd" "github.com/DeNA/unity-meta-check/util/cli" "os" ) func main() { main := cmd.NewMain() exitStatus := main(os.Args[1:], cli.GetProcessInout(), cli.NewEnv()) os.Exit(int(exitStatus)) }
import (
base.js
"use strict"; require("core-js/modules/es.symbol.js"); require("core-js/modules/es.symbol.description.js"); require("core-js/modules/es.symbol.iterator.js"); require("core-js/modules/es.array.slice.js"); require("core-js/modules/es.function.name.js"); require("core-js/modules/es.array.from.js"); exports.__esModule = true; exports.BasePlugin = exports.PLUGIN_KEY = void 0; require("core-js/modules/es.array.iterator.js"); require("core-js/modules/es.map.js"); require("core-js/modules/es.object.to-string.js"); require("core-js/modules/es.string.iterator.js"); require("core-js/modules/web.dom-collections.iterator.js"); require("core-js/modules/es.weak-map.js"); require("core-js/modules/web.dom-collections.for-each.js"); require("core-js/modules/es.regexp.exec.js"); require("core-js/modules/es.string.split.js"); require("core-js/modules/es.array.concat.js"); require("core-js/modules/es.array.join.js"); require("core-js/modules/es.array.index-of.js"); require("core-js/modules/es.array.splice.js"); var _object = require("../../helpers/object"); var _array = require("../../helpers/array"); var _registry = require("../registry"); var _registry2 = require("../../cellTypes/registry"); var _registry3 = require("../../editors/registry"); var _registry4 = require("../../renderers/registry"); var _registry5 = require("../../validators/registry"); function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _unsupportedIterableToArray(arr, i) || _nonIterableRest(); } function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); } function _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); } function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; } function _iterableToArrayLimit(arr, i) { var _i = arr == null ? null : typeof Symbol !== "undefined" && arr[Symbol.iterator] || arr["@@iterator"]; if (_i == null) return; var _arr = []; var _n = true; var _d = false; var _s, _e; try { for (_i = _i.call(arr); !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function
(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } var DEPS_TYPE_CHECKERS = new Map([['plugin', _registry.hasPlugin], ['cell-type', _registry2.hasCellType], ['editor', _registry3.hasEditor], ['renderer', _registry4.hasRenderer], ['validator', _registry5.hasValidator]]); var PLUGIN_KEY = 'base'; exports.PLUGIN_KEY = PLUGIN_KEY; var privatePool = new WeakMap(); var missingDependeciesMsgs = []; var initializedPlugins = null; /** * @util */ var BasePlugin = /*#__PURE__*/function () { /** * @param {object} hotInstance Handsontable instance. */ function BasePlugin(hotInstance) { var _this = this; _classCallCheck(this, BasePlugin); /** * Handsontable instance. * * @type {Core} */ (0, _object.defineGetter)(this, 'hot', hotInstance, { writable: false }); privatePool.set(this, { hooks: {} }); initializedPlugins = null; this.pluginName = null; this.pluginsInitializedCallbacks = []; this.isPluginsReady = false; this.enabled = false; this.initialized = false; this.hot.addHook('afterPluginsInitialized', function () { return _this.onAfterPluginsInitialized(); }); this.hot.addHook('afterUpdateSettings', function (newSettings) { return _this.onUpdateSettings(newSettings); }); this.hot.addHook('beforeInit', function () { return _this.init(); }); } _createClass(BasePlugin, [{ key: "init", value: function init() { var _this2 = this; this.pluginName = this.hot.getPluginName(this); var pluginDeps = this.constructor.PLUGIN_DEPS; var dependecies = Array.isArray(pluginDeps) ? pluginDeps : []; if (dependecies.length > 0) { var missingDependencies = []; dependecies.forEach(function (dependency) { var _dependency$split = dependency.split(':'), _dependency$split2 = _slicedToArray(_dependency$split, 2), type = _dependency$split2[0], moduleName = _dependency$split2[1]; if (!DEPS_TYPE_CHECKERS.has(type)) { throw new Error("Unknown plugin dependency type \"".concat(type, "\" was found.")); } if (!DEPS_TYPE_CHECKERS.get(type)(moduleName)) { missingDependencies.push(" - ".concat(moduleName, " (").concat(type, ")")); } }); if (missingDependencies.length > 0) { var errorMsg = ["The ".concat(this.pluginName, " plugin requires the following modules:\n"), "".concat(missingDependencies.join('\n'), "\n")].join(''); missingDependeciesMsgs.push(errorMsg); } } if (!initializedPlugins) { initializedPlugins = (0, _registry.getPluginsNames)(); } // Workaround for the UndoRedo plugin which, currently doesn't follow the plugin architecture. // Without this line the `callOnPluginsReady` callback won't be triggered after all plugin // initialization. 
if (initializedPlugins.indexOf('UndoRedo') >= 0) { initializedPlugins.splice(initializedPlugins.indexOf('UndoRedo'), 1); } if (initializedPlugins.indexOf(this.pluginName) >= 0) { initializedPlugins.splice(initializedPlugins.indexOf(this.pluginName), 1); } this.hot.addHookOnce('afterPluginsInitialized', function () { if (_this2.isEnabled && _this2.isEnabled()) { _this2.enablePlugin(); } }); var isAllPluginsAreInitialized = initializedPlugins.length === 0; if (isAllPluginsAreInitialized) { if (missingDependeciesMsgs.length > 0) { var _errorMsg = ["".concat(missingDependeciesMsgs.join('\n'), "\n"), 'You have to import and register them manually.'].join(''); throw new Error(_errorMsg); } this.hot.runHooks('afterPluginsInitialized'); } this.initialized = true; } /** * Enable plugin for this Handsontable instance. */ }, { key: "enablePlugin", value: function enablePlugin() { this.enabled = true; } /** * Disable plugin for this Handsontable instance. */ }, { key: "disablePlugin", value: function disablePlugin() { if (this.eventManager) { this.eventManager.clear(); } this.clearHooks(); this.enabled = false; } /** * Add listener to plugin hooks system. * * @param {string} name The hook name. * @param {Function} callback The listener function to add. */ }, { key: "addHook", value: function addHook(name, callback) { privatePool.get(this).hooks[name] = privatePool.get(this).hooks[name] || []; var hooks = privatePool.get(this).hooks[name]; this.hot.addHook(name, callback); hooks.push(callback); privatePool.get(this).hooks[name] = hooks; } /** * Remove all hooks listeners by hook name. * * @param {string} name The hook name. */ }, { key: "removeHooks", value: function removeHooks(name) { var _this3 = this; (0, _array.arrayEach)(privatePool.get(this).hooks[name] || [], function (callback) { _this3.hot.removeHook(name, callback); }); } /** * Clear all hooks. */ }, { key: "clearHooks", value: function clearHooks() { var _this4 = this; var hooks = privatePool.get(this).hooks; (0, _object.objectEach)(hooks, function (callbacks, name) { return _this4.removeHooks(name); }); hooks.length = 0; } /** * Register function which will be immediately called after all plugins initialized. * * @param {Function} callback The listener function to call. */ }, { key: "callOnPluginsReady", value: function callOnPluginsReady(callback) { if (this.isPluginsReady) { callback(); } else { this.pluginsInitializedCallbacks.push(callback); } } /** * On after plugins initialized listener. * * @private */ }, { key: "onAfterPluginsInitialized", value: function onAfterPluginsInitialized() { (0, _array.arrayEach)(this.pluginsInitializedCallbacks, function (callback) { return callback(); }); this.pluginsInitializedCallbacks.length = 0; this.isPluginsReady = true; } /** * On update settings listener. * * @private * @param {object} newSettings New set of settings passed to the `updateSettings` method. */ }, { key: "onUpdateSettings", value: function onUpdateSettings(newSettings) { if (this.isEnabled) { if (this.enabled && !this.isEnabled()) { this.disablePlugin(); } if (!this.enabled && this.isEnabled()) { this.enablePlugin(); } if (this.enabled && this.isEnabled()) { this.updatePlugin(newSettings); } } } /** * Updates the plugin to use the latest options you have specified. * * @private */ }, { key: "updatePlugin", value: function updatePlugin() {} /** * Destroy plugin. 
*/ }, { key: "destroy", value: function destroy() { var _this5 = this; if (this.eventManager) { this.eventManager.destroy(); } this.clearHooks(); (0, _object.objectEach)(this, function (value, property) { if (property !== 'hot') { _this5[property] = null; } }); delete this.t; delete this.hot; } }], [{ key: "PLUGIN_KEY", get: function get() { return PLUGIN_KEY; } }]); return BasePlugin; }(); exports.BasePlugin = BasePlugin;
_defineProperties
syscontent.go
package router import ( "github.com/gin-gonic/gin" "go-admin/app/admin/apis/syscontent" "go-admin/app/admin/middleware" jwt "go-admin/pkg/jwtauth" ) // Routes that require authentication func registerSysConte
roup, authMiddleware *jwt.GinJWTMiddleware) { r := v1.Group("/syscontent").Use(authMiddleware.MiddlewareFunc()).Use(middleware.AuthCheckRole()) { r.GET("/:id", syscontent.GetSysContent) r.POST("", syscontent.InsertSysContent) r.PUT("", syscontent.UpdateSysContent) r.DELETE("/:id", syscontent.DeleteSysContent) } l := v1.Group("").Use(authMiddleware.MiddlewareFunc()).Use(middleware.AuthCheckRole()) { l.GET("/syscontentList", syscontent.GetSysContentList) } }
ntRouter(v1 *gin.RouterG
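For context, a sketch of the call site that would mount the routes registered above. The wrapper function name and group path are hypothetical; the parameter types match the reassembled signature registerSysContentRouter(v1 *gin.RouterGroup, authMiddleware *jwt.GinJWTMiddleware).

package router

import (
	"github.com/gin-gonic/gin"

	jwt "go-admin/pkg/jwtauth"
)

// initSysContentRoutes is a hypothetical wrapper showing how the registration
// function above would be wired into the engine during router setup.
func initSysContentRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware) {
	v1 := r.Group("/api/v1") // group path assumed
	registerSysContentRouter(v1, authMiddleware)
}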
lib.rs
//! The Rust Linkage Model and Symbol Names //! ======================================= //! //! The semantic model of Rust linkage is, broadly, that "there's no global //! namespace" between crates. Our aim is to preserve the illusion of this //! model despite the fact that it's not *quite* possible to implement on //! modern linkers. We initially didn't use system linkers at all, but have //! been convinced of their utility. //! //! There are a few issues to handle: //! //! - Linkers operate on a flat namespace, so we have to flatten names. //! We do this using the C++ namespace-mangling technique. Foo::bar //! symbols and such. //! //! - Symbols for distinct items with the same *name* need to get different //! linkage-names. Examples of this are monomorphizations of functions or //! items within anonymous scopes that end up having the same path. //! //! - Symbols in different crates but with same names "within" the crate need //! to get different linkage-names. //! //! - Symbol names should be deterministic: Two consecutive runs of the //! compiler over the same code base should produce the same symbol names for //! the same items. //! //! - Symbol names should not depend on any global properties of the code base, //! so that small modifications to the code base do not result in all symbols //! changing. In previous versions of the compiler, symbol names incorporated //! the SVH (Stable Version Hash) of the crate. This scheme turned out to be //! infeasible when used in conjunction with incremental compilation because //! small code changes would invalidate all symbols generated previously. //! //! - Even symbols from different versions of the same crate should be able to //! live next to each other without conflict. //! //! In order to fulfill the above requirements the following scheme is used by //! the compiler: //! //! The main tool for avoiding naming conflicts is the incorporation of a 64-bit //! hash value into every exported symbol name. Anything that makes a difference //! to the symbol being named, but does not show up in the regular path needs to //! be fed into this hash: //! //! - Different monomorphizations of the same item have the same path but differ //! in their concrete type parameters, so these parameters are part of the //! data being digested for the symbol hash. //! //! - Rust allows items to be defined in anonymous scopes, such as in //! `fn foo() { { fn bar() {} } { fn bar() {} } }`. Both `bar` functions have //! the path `foo::bar`, since the anonymous scopes do not contribute to the //! path of an item. The compiler already handles this case via so-called //! disambiguating `DefPaths` which use indices to distinguish items with the //! same name. The DefPaths of the functions above are thus `foo[0]::bar[0]` //! and `foo[0]::bar[1]`. In order to incorporate this disambiguation //! information into the symbol name too, these indices are fed into the //! symbol hash, so that the above two symbols would end up with different //! hash values. //! //! The two measures described above suffice to avoid intra-crate conflicts. In //! order to also avoid inter-crate conflicts two more measures are taken: //! //! - The name of the crate containing the symbol is prepended to the symbol //! name, i.e., symbols are "crate qualified". For example, a function `foo` in //! module `bar` in crate `baz` would get a symbol name like //! `baz::bar::foo::{hash}` instead of just `bar::foo::{hash}`. This avoids //! simple conflicts between functions from different crates. //! //! 
- In order to be able to also use symbols from two versions of the same //! crate (which naturally also have the same name), a stronger measure is //! required: The compiler accepts an arbitrary "disambiguator" value via the //! `-C metadata` command-line argument. This disambiguator is then fed into //! the symbol hash of every exported item. Consequently, the symbols in two //! identical crates but with different disambiguators are not in conflict //! with each other. This facility is mainly intended to be used by build //! tools like Cargo. //! //! A note on symbol name stability //! ------------------------------- //! Previous versions of the compiler resorted to feeding NodeIds into the //! symbol hash in order to disambiguate between items with the same path. The //! current version of the name generation algorithm takes great care not to do //! that, since NodeIds are notoriously unstable: A small change to the //! code base will offset all NodeIds after the change and thus, much as using //! the SVH in the hash, invalidate an unbounded number of symbol names. This //! makes re-using previously compiled code for incremental compilation //! virtually impossible. Thus, symbol hash generation exclusively relies on //! DefPaths which are much more robust in the face of changes to the code base. #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![feature(never_type)] #![feature(nll)] #![feature(in_band_lifetimes)] #![recursion_limit = "256"] #[macro_use] extern crate rustc_middle; use rustc_hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc_hir::Node; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{InstantiationMode, MonoItem}; use rustc_middle::ty::query::Providers; use rustc_middle::ty::subst::SubstsRef; use rustc_middle::ty::{self, Instance, TyCtxt}; use rustc_session::config::SymbolManglingVersion; use tracing::debug; mod legacy; mod v0; pub mod test; /// This function computes the symbol name for the given `instance` and the /// given instantiating crate. That is, if you know that instance X is /// instantiated in crate Y, this is the symbol name this instance would have. pub fn symbol_name_for_instance_in_crate( tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, instantiating_crate: CrateNum, ) -> String { compute_symbol_name(tcx, instance, || instantiating_crate) } pub fn provide(providers: &mut Providers) { *providers = Providers { symbol_name: symbol_name_provider, ..*providers }; } // The `symbol_name` query provides the symbol name for calling a given // instance from the local crate. In particular, it will also look up the // correct symbol name of instances from upstream crates. fn symbol_name_provider(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::SymbolName<'tcx> { let symbol_name = compute_symbol_name(tcx, instance, || { // This closure determines the instantiating crate for instances that // need an instantiating-crate-suffix for their symbol name, in order // to differentiate between local copies. if is_generic(instance.substs) { // For generics we might find re-usable upstream instances. If there // is one, we rely on the symbol being instantiated locally. instance.upstream_monomorphization(tcx).unwrap_or(LOCAL_CRATE) } else { // For non-generic things that need to avoid naming conflicts, we // always instantiate a copy in the local crate. LOCAL_CRATE } }); ty::SymbolName::new(tcx, &symbol_name) } /// Computes the symbol name for the given instance. 
This function will call /// `compute_instantiating_crate` if it needs to factor the instantiating crate /// into the symbol name. fn
( tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, compute_instantiating_crate: impl FnOnce() -> CrateNum, ) -> String { let def_id = instance.def_id(); let substs = instance.substs; debug!("symbol_name(def_id={:?}, substs={:?})", def_id, substs); // FIXME(eddyb) Precompute a custom symbol name based on attributes. let is_foreign = if let Some(def_id) = def_id.as_local() { if tcx.plugin_registrar_fn(()) == Some(def_id) { let disambiguator = tcx.sess.local_crate_disambiguator(); return tcx.sess.generate_plugin_registrar_symbol(disambiguator); } if tcx.proc_macro_decls_static(()) == Some(def_id) { let disambiguator = tcx.sess.local_crate_disambiguator(); return tcx.sess.generate_proc_macro_decls_symbol(disambiguator); } let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); matches!(tcx.hir().get(hir_id), Node::ForeignItem(_)) } else { tcx.is_foreign_item(def_id) }; let attrs = tcx.codegen_fn_attrs(def_id); // Foreign items by default use no mangling for their symbol name. There's a // few exceptions to this rule though: // // * This can be overridden with the `#[link_name]` attribute // // * On the wasm32 targets there is a bug (or feature) in LLD [1] where the // same-named symbol when imported from different wasm modules will get // hooked up incorrectly. As a result foreign symbols, on the wasm target, // with a wasm import module, get mangled. Additionally our codegen will // deduplicate symbols based purely on the symbol name, but for wasm this // isn't quite right because the same-named symbol on wasm can come from // different modules. For these reasons if `#[link(wasm_import_module)]` // is present we mangle everything on wasm because the demangled form will // show up in the `wasm-import-name` custom attribute in LLVM IR. // // [1]: https://bugs.llvm.org/show_bug.cgi?id=44316 if is_foreign && (!tcx.sess.target.is_like_wasm || !tcx.wasm_import_module_map(def_id.krate).contains_key(&def_id)) { if let Some(name) = attrs.link_name { return name.to_string(); } return tcx.item_name(def_id).to_string(); } if let Some(name) = attrs.export_name { // Use provided name return name.to_string(); } if attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE) { // Don't mangle return tcx.item_name(def_id).to_string(); } let avoid_cross_crate_conflicts = // If this is an instance of a generic function, we also hash in // the ID of the instantiating crate. This avoids symbol conflicts // in case the same instances is emitted in two crates of the same // project. is_generic(substs) || // If we're dealing with an instance of a function that's inlined from // another crate but we're marking it as globally shared to our // compliation (aka we're not making an internal copy in each of our // codegen units) then this symbol may become an exported (but hidden // visibility) symbol. This means that multiple crates may do the same // and we want to be sure to avoid any symbol conflicts here. matches!(MonoItem::Fn(instance).instantiation_mode(tcx), InstantiationMode::GloballyShared { may_conflict: true }); let instantiating_crate = if avoid_cross_crate_conflicts { Some(compute_instantiating_crate()) } else { None }; // Pick the crate responsible for the symbol mangling version, which has to: // 1. be stable for each instance, whether it's being defined or imported // 2. obey each crate's own `-Z symbol-mangling-version`, as much as possible // We solve these as follows: // 1. 
because symbol names depend on both `def_id` and `instantiating_crate`, // both their `CrateNum`s are stable for any given instance, so we can pick // either and have a stable choice of symbol mangling version // 2. we favor `instantiating_crate` where possible (i.e. when `Some`) let mangling_version_crate = instantiating_crate.unwrap_or(def_id.krate); let mangling_version = if mangling_version_crate == LOCAL_CRATE { tcx.sess.opts.debugging_opts.get_symbol_mangling_version() } else { tcx.symbol_mangling_version(mangling_version_crate) }; match mangling_version { SymbolManglingVersion::Legacy => legacy::mangle(tcx, instance, instantiating_crate), SymbolManglingVersion::V0 => v0::mangle(tcx, instance, instantiating_crate), } } fn is_generic(substs: SubstsRef<'_>) -> bool { substs.non_erasable_generics().next().is_some() }
compute_symbol_name
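// Illustrative sketch of the disambiguator mechanism described in the module
// docs above; the crate name `dep` and file names are hypothetical. Building
// the same crate twice with different `-C metadata` values feeds a different
// disambiguator into every exported symbol's hash, so both rlibs can be
// linked into one binary without symbol conflicts:
//
//   rustc --crate-type=lib -C metadata=v1 -o libdep_v1.rlib dep.rs
//   rustc --crate-type=lib -C metadata=v2 -o libdep_v2.rlib dep.rs
//
// Cargo does this automatically, deriving the metadata value from the package
// name, version, and other build inputs.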
auth.rs
//use calls::user::{UserFixture}; use testing_fixtures::fixtures::user::UserFixture; use testing_fixtures::fixtures::user::PASSWORD; use common::setup::*; use diesel::PgConnection; //use db::user::{User, NewUser}; use db::auth; use wire::login::LoginRequest; use auth_lib::ServerJwt; #[test] fn fail_login_invalid_password() { setup(|fixture: &UserFixture, conn: &PgConnection| {
user_name: fixture.admin_user.user_name.clone(), password: "Invalid Password".to_string(), }; auth::login(bad_login, &fixture.secret, conn) .expect_err("Should have failed to log the user in"); }) } #[test] fn fail_login_invalid_username() { setup(|fixture: &UserFixture, conn: &PgConnection| { let bad_login = LoginRequest { user_name: "Non-existent username".to_string(), password: "Inconsequential password".to_string(), }; auth::login(bad_login, &fixture.secret, conn) .expect_err("Should have failed to log the user in"); }) } #[test] fn successful_login() { use wire::user::UserRole; setup(|fixture: &UserFixture, conn: &PgConnection| { let login_request = LoginRequest { user_name: fixture.admin_user.user_name.clone(), password: PASSWORD.to_string() }; let jwt_string: String = auth::login(login_request, &fixture.secret, conn) .expect("Should have logged the user in"); let jwt = ServerJwt::decode_jwt_string(jwt_string.as_str(), &fixture.secret ) .expect("Decoded jwt token"); assert_eq!(jwt.0.sub.0, fixture.admin_user.uuid); let expected_roles: Vec<UserRole> = fixture.admin_user.roles.clone().into_iter().map(UserRole::from).collect(); assert_eq!(jwt.0.user_roles, expected_roles); }) } #[test] fn successful_reauth() { setup(|fixture: &UserFixture, conn: &PgConnection| { let login_request = LoginRequest { user_name: fixture.admin_user.user_name.clone(), password: PASSWORD.to_string() }; let jwt_string: String = auth::login(login_request, &fixture.secret, conn) .expect("Should have logged the user in"); let jwt = ServerJwt::decode_jwt_string(jwt_string.as_str(), &fixture.secret ) .expect("Decoded jwt token"); auth::reauth(jwt, &fixture.secret).expect("New JWT should be provided"); }) }
let bad_login = LoginRequest {
array.go
package utils // InArray checks that item is included in items. func
(item string, items []string) bool { for index := range items { if items[index] == item { return true } } return false } // Unpack destructuring. func Unpack(s []string, vars ...*string) { for i, str := range s { *vars[i] = str } }
InArray
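// Illustrative usage of the two helpers above (a minimal sketch):
//
//   fruits := []string{"apple", "banana", "cherry"}
//   InArray("banana", fruits) // true
//   InArray("durian", fruits) // false
//
//   var a, b string
//   Unpack([]string{"x", "y"}, &a, &b) // a == "x", b == "y"
//
// Note that Unpack panics if s has more elements than the supplied pointers;
// callers are expected to match the lengths.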
decl_vars.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use ast_body::AstBody; use ffi::{Just, Maybe}; use hash::HashSet; use hhas_param::HhasParam; use label::Label; use naming_special_names_rust::{emitter_special_functions, special_idents}; use oxidized::{ aast, aast_visitor::{visit, AstParams, Node, Visitor}, ast::*, }; use unique_id_builder::SSet; use unique_list::UniqueList; struct DeclvarVisitorContext<'a> { explicit_use_set_opt: Option<&'a SSet>, } struct DeclvarVisitor<'a> { // set of locals used inside the functions locals: UniqueList<String>, context: DeclvarVisitorContext<'a>, } impl<'a> DeclvarVisitor<'a> { fn new(explicit_use_set_opt: Option<&'a SSet>) -> Self { Self { locals: UniqueList::new(), context: DeclvarVisitorContext { explicit_use_set_opt, }, } } fn add_local<S: Into<String> + AsRef<str>>(&mut self, name: S) { let name_ref = name.as_ref(); if name_ref == special_idents::DOLLAR_DOLLAR || special_idents::is_tmp_var(name_ref) || name_ref == special_idents::THIS { } else { self.locals.add(name.into()) } } fn on_class_get( &mut self, cid: &ClassId, cge: &ClassGetExpr, is_call_target: bool, ) -> Result<(), String>
} impl<'ast, 'a> Visitor<'ast> for DeclvarVisitor<'a> { type Params = AstParams<(), String>; fn object(&mut self) -> &mut dyn Visitor<'ast, Params = Self::Params> { self } fn visit_stmt_(&mut self, env: &mut (), s: &Stmt_) -> Result<(), String> { match s { Stmt_::Try(x) => { let (body, catch_list, finally) = (&x.0, &x.1, &x.2); visit(self, env, body)?; for Catch(_, id, catch_body) in catch_list { self.add_local(id.name()); visit(self, env, catch_body)?; } visit(self, env, finally) } _ => s.recurse(env, self), } } fn visit_expr_(&mut self, env: &mut (), e: &Expr_) -> Result<(), String> { use aast::Expr_::*; match e { ObjGet(x) if x.as_ref().3 == PropOrMethod::IsProp => { let (receiver_e, prop_e) = (&x.0, &x.1); match &receiver_e.2 { Lvar(id) if id.name() == "$this" => {} _ => receiver_e.recurse(env, self.object())?, } match &prop_e.2 { Lvar(id) => self.add_local(id.name()), _ => prop_e.recurse(env, self.object())?, } Ok(()) } Binop(x) => { let (binop, e1, e2) = (&x.0, &x.1, &x.2); match (binop, &e2.2) { (Bop::Eq(_), Await(_)) | (Bop::Eq(_), Yield(_)) => { // Visit e2 before e1. The ordering of declvars in async // expressions matters to HHVM. See D5674623. self.visit_expr(env, e2)?; self.visit_expr(env, e1) } _ => e.recurse(env, self.object()), } } Lvar(x) => Ok(self.add_local(x.name())), ClassGet(x) if x.as_ref().2 == PropOrMethod::IsProp => { self.on_class_get(&x.0, &x.1, false) } // For an Lfun, we don't want to recurse, because it's a separate scope. Lfun(_) => Ok(()), Efun(x) => { // At this point the AST is already rewritten, so use lists on Efun nodes // contain the list of captured variables. However, if the use list was initially absent, // it is not correct to traverse such nodes to collect locals because it would impact // the order of locals in the generated .declvars section: // // .declvars $a, $c, $b // $a = () => { $b = 1 }; // $c = 1; // $b = 2; // // .declvars $a, $b, $c // $a = function () use ($b) => { $b = 1 }; // $c = 1; // $b = 2; // // 'explicit_use_set' is used in order to avoid synthesized use lists let (fun_, use_list) = (&x.0, &x.1); let fn_name = &fun_.name.1; let has_use_list = self .context .explicit_use_set_opt .map_or(false, |s| s.contains(fn_name)); if has_use_list { for id in use_list { self.add_local(id.name()) } } Ok(()) } Call(x) => { let (func_e, pos_args, unpacked_arg) = (&x.0, &x.2, &x.3); if let Id(x) = &func_e.2 { let call_name = &x.1; if call_name == emitter_special_functions::SET_FRAME_METADATA { self.add_local("$86metadata"); } } let on_arg = |self_: &mut Self, env: &mut (), x: &Expr| match &x.2 { // Only add $this to locals if it's bare Lvar(id) if &(id.1).1 == "$this" => Ok(()), _ => self_.visit_expr(env, x), }; match &func_e.2 { ClassGet(x) if x.as_ref().2 == PropOrMethod::IsMethod => { let (id, prop) = (&x.0, &x.1); self.on_class_get(id, prop, true)? } _ => self.visit_expr(env, func_e)?, } // Calling convention doesn't matter here: we're just trying to figure out what // variables are declared in this scope. for (_, arg) in pos_args { on_arg(self, env, arg)? } if let Some(arg) = unpacked_arg { on_arg(self, env, arg)? } Ok(()) } New(x) => { let (exprs1, expr2) = (&x.2, &x.3); let add_bare_expr = |self_: &mut Self, env: &mut (), expr: &Expr| match &expr.2 { Lvar(x) if &(x.1).1 == "$this" => Ok(()), _ => self_.visit_expr(env, expr), }; for expr in exprs1 { add_bare_expr(self, env, expr)?; } if let Some(expr) = expr2 { add_bare_expr(self, env, expr)?
} Ok(()) } e => e.recurse(env, self.object()), } } fn visit_class_(&mut self, _: &mut (), _: &Class_) -> Result<(), String> { Ok(()) } fn visit_fun_(&mut self, _: &mut (), _: &Fun_) -> Result<(), String> { Ok(()) } } fn uls_from_ast<P, F1, F2>( params: &[P], get_param_name: F1, get_param_default_value: F2, explicit_use_set_opt: Option<&SSet>, b: &AstBody<'_>, ) -> Result<impl Iterator<Item = String>, String> where F1: Fn(&P) -> &str, F2: Fn(&P) -> Maybe<&Expr>, { let mut visitor = DeclvarVisitor::new(explicit_use_set_opt); for p in params { if let Just(e) = get_param_default_value(p) { visitor.visit_expr(&mut (), e)?; } } visit(&mut visitor, &mut (), b)?; for param in params { visitor.locals.remove(get_param_name(param)) } Ok(visitor.locals.into_iter()) } pub fn from_ast<'arena>( params: &[(HhasParam<'arena>, Option<(Label, Expr)>)], body: &AstBody<'_>, explicit_use_set: &SSet, ) -> Result<Vec<String>, String> { let decl_vars = uls_from_ast( params, |(param, _)| param.name.unsafe_as_str(), |(_, default_value)| Maybe::from(default_value.as_ref().map(|x| &x.1)), Some(explicit_use_set), body, )?; Ok(decl_vars.collect()) } pub fn vars_from_ast(params: &[FunParam], b: &AstBody<'_>) -> Result<HashSet<String>, String> { let decl_vars = uls_from_ast( params, |p| &p.name, // get_param_name |p| Maybe::from(p.expr.as_ref()), // get_param_default_value None, // explicit_use_set_opt b, )?; Ok(decl_vars.collect()) }
{ use aast::ClassId_::*; match &cid.2 { CIparent | CIself | CIstatic | CI(_) => { Err("Expects CIexpr as class_id on aast where expr was on ast".into()) } CIexpr(e) => { self.visit_expr(&mut (), e)?; use aast::ClassGetExpr::*; match cge { CGstring(pstr) => { // TODO(thomasjiang): For this to match correctly, we need to adjust // ast_to_nast because it does not make a distinction between ID and Lvar, // which is needed here if is_call_target { self.add_local(&pstr.1) } Ok(()) } CGexpr(e2) => self.visit_expr(&mut (), e2), } } } }
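// Illustrative sketch (hypothetical Hack snippet) of what the visitor
// collects: for a body like
//
//   $x = 1;
//   try { foo(); } catch (Exception $e) { $x = 2; }
//
// with no parameters, the collected declared locals are [$x, $e]: $x via
// the Lvar arm of visit_expr_, $e via the Catch arm of visit_stmt_.
// Parameter names are removed from the list afterwards in uls_from_ast.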
test.py
"""Functional test.""" import os import os.path from jicbioimage.core.image import MicroscopyCollection from jicbioimage.core.io import ( AutoName, DataManager, FileBackend, _md5_hexdigest_from_file, ) from plasmodesmata_analysis import plasmodesmata_analysis def test_plasmodesmata_analysis():
if __name__ == "__main__": test_plasmodesmata_analysis()
output_dir = "/output/tmp" if not os.path.isdir(output_dir): os.mkdir(output_dir) try: AutoName.directory = output_dir backend_dir = "/backend" test_image = "/scripts/test_data/test.tif" file_backend = FileBackend(backend_dir) data_manager = DataManager(file_backend) data_manager.load(test_image) md5_hex = _md5_hexdigest_from_file(test_image) manifest_path = os.path.join(backend_dir, md5_hex, "manifest.json") microscopy_collection = MicroscopyCollection() microscopy_collection.parse_manifest(manifest_path) plasmodesmata_analysis(microscopy_collection, 0, 15000, 2, 50) expected_data_dir = "/scripts/test_data/expected" for fname in os.listdir(expected_data_dir): expected_fpath = os.path.join(expected_data_dir, fname) result_fpath = os.path.join(output_dir, fname) expected_md5 = _md5_hexdigest_from_file(expected_fpath) result_md5 = _md5_hexdigest_from_file(result_fpath) assert expected_md5 == result_md5 finally: for fname in os.listdir(output_dir): fpath = os.path.join(output_dir, fname) os.unlink(fpath) os.rmdir(output_dir)
main.go
package main import ( "crypto/tls" "encoding/json" "fmt" "gopkg.in/yaml.v2" "io/ioutil" "net/http" "os" "regexp" "strings" openapi "github.com/rancher/gen-api-docs/openapi/v3.0.1" norman "github.com/rancher/norman/types" log "github.com/sirupsen/logrus" ) var ( url string skips = map[string]bool{ "root": true, "self": true, "subscribe": true, // action "shell": true, // open shell "yaml": true, // export link "icon": true, // export link "readme": true, // export link "app-readme": true, // export link "exportYaml": true, // export link "authConfigs": true, // Objects don't match schema/are not under the collection "dynamicSchemas": true, // Not sure what this is for yet and its a schema its self "ldapConfigs": true, // 404 - No collection. } ) // Collections - type Collections struct { Links map[string]string `json:"links"` } // Collection - type Collection struct { *norman.Collection Data []norman.Resource `json:"data"` } func init() { val, ok := os.LookupEnv("LOG_LEVEL") if ok { level, _ := log.ParseLevel(val) log.SetLevel(level) } } // Main func main() { url, ok := os.LookupEnv("RANCHER_URL") if !ok { log.Fatal("Set RANCHER_URL") } log.Debug("Import descriptions") descriptions := make(map[string]string) yamlDescriptions, err := ioutil.ReadFile("./data/descriptions.yml") if err != nil { log.Fatal(err) } err = yaml.Unmarshal(yamlDescriptions, descriptions) if err != nil { log.Fatal(err) } log.Debug("Import base") swagger := &openapi.OpenAPI{} yamlFile, err := ioutil.ReadFile("./data/base.yml") if err != nil { log.Fatal(err) } err = yaml.Unmarshal(yamlFile, swagger) if err != nil { log.Fatal(err) } log.Debug("Initialize swagger maps") swagger.Paths = make(map[string]openapi.PathItem) swagger.Components.Parameters = make(map[string]openapi.Parameter) log.Debug("Get Root Collections") collections, err := getCollections(url) if err != nil { log.Fatal(err) } for col, link := range collections { // Only follow a specific root collection only, ok := os.LookupEnv("COLLECTION") if ok { if col != only { log.Debug("Single Collection Only: ", col) continue } } err = parseCollection(col, link, "/", url, swagger) if err != nil { log.Warnf("Failed to parse %s, %s, %s - %v", col, link, "/", err) } } // Render swagger doc out, err := json.Marshal(swagger) log.Debug(string(out)) err = ioutil.WriteFile("./build/swagger.json", out, 0644) } func parseCollection(col string, link string, base string, url string, swagger *openapi.OpenAPI) error { // Skip "weird/broken" collections if skips[col] { log.Debug("Skipped: ", col) return nil } log.Infof("Parse Collection: %s -> %s - %s", col, link, base) collection, err := getCollection(link) if err != nil { log.Errorf("Failed to get collection: %v", err) return err } if collection.Type != "collection" { log.Debugf("%s is not a collection, skipping - %s %s", col, link, base) return nil } parameters := make([]openapi.Parameter, 0) // (╯°□°)╯︵ ┻━┻ some schemas are under the /{collection}/{id}/schemas // Base schema path on createTypes. 
schemaRootRegex := regexp.MustCompile(fmt.Sprintf("(?i)^%s(.*)/%s$", url, col)) _, ok := collection.CreateTypes[collection.ResourceType] if !ok { return fmt.Errorf("%s, Collection doesn't have CreateTypes", collection.ResourceType) } schemaRootSlice := schemaRootRegex.FindStringSubmatch(collection.CreateTypes[collection.ResourceType]) log.Debugf("schema return: %v", schemaRootSlice) schemaRoot := schemaRootSlice[1] log.Debug("resourceType for collection: ", collection.ResourceType) rSchema, err := getSchema(fmt.Sprintf("%v%s/schemas/%s", url, schemaRoot, collection.ResourceType)) if err != nil { return fmt.Errorf("Failed to get Schema for %s/%s - %v", schemaRoot, collection.ResourceType, err) } // populate swagger schema objects translateSchema(rSchema, url, swagger) // set schema for collection createCollectionSchema(col, collection, swagger) // set previous parameters searchPrams := regexp.MustCompile("{(\\w+)}") previousPrams := searchPrams.FindAllStringSubmatch(base, -1) if len(previousPrams) > 0 { // previousPrams = previousPrams[1:] for _, p := range previousPrams { param := openapi.Parameter{ Ref: fmt.Sprintf("#/components/parameters/%s", p[1]), } parameters = append(parameters, param) } } // /{collection} colParameters := make([]openapi.Parameter, 0) if len(parameters) > 0 { colParameters = parameters } else { colParameters = nil } colPathItem := openapi.PathItem{ Parameters: colParameters, } for _, method := range rSchema.CollectionMethods { if method == "GET" { colPathItem.Get = createCollection("GET", col, collection) } else if method == "POST" { colPathItem.Post = createCollection("POST", col, collection) } else { log.Error("Unknown Collection Method: ", method) } } swagger.Paths[fmt.Sprintf("%s%s", base, col)] = colPathItem // Resource /{collection}/{id} newPramID := fmt.Sprintf("%sId", collection.ResourceType) createPathParameter(newPramID, swagger) param := openapi.Parameter{ Ref: fmt.Sprintf("#/components/parameters/%s", newPramID), } parameters = append(parameters, param) resourcePathItem := openapi.PathItem{ Parameters: parameters, } for _, method := range rSchema.ResourceMethods { if method == "GET" { resourcePathItem.Get = createResource("GET", collection.ResourceType) } else if method == "PUT" { resourcePathItem.Put = createResource("PUT", collection.ResourceType) } else if method == "DELETE" { resourcePathItem.Delete = createResource("DELETE", collection.ResourceType) } else { log.Error("Unknown Resource Method: ", method) } } swagger.Paths[fmt.Sprintf("%s%s/{%s}", base, col, newPramID)] = resourcePathItem if len(collection.Data) > 0 { // take the first one subBase := fmt.Sprintf("%s%s/{%s}/", base, col, newPramID) for subCol, subColLink := range collection.Data[0].Links { if subColLink != collection.Data[0].Links["self"] { err = parseCollection(subCol, subColLink, subBase, url, swagger) if err != nil { log.Warnf("Failed to parse %s, %s, %s - %v", subCol, subColLink, subBase, err) } } } } return nil } func translateSchema(rancherSchema norman.Schema, url string, swagger *openapi.OpenAPI) { properties := make(map[string]openapi.Schema) required := make([]string, 0) name := rancherSchema.ID resourceFields := rancherSchema.ResourceFields // Skip Schema if it already exists _, ok := swagger.Components.Schemas[name] if ok { log.Debug(name, " Schema Already Exists") return } // Required for resourceName, resourceValue := range resourceFields { if resourceValue.Required { required = append(required, resourceName) } } // Properties for resourceName, resourceValue := range 
resourceFields { desc := make([]string, 0) p := &openapi.Schema{ Default: resourceValue.Default, Enum: resourceValue.Options, Maximum: resourceValue.Max, MaxLength: resourceValue.MaxLength, Minimum: resourceValue.Min, MinLength: resourceValue.MinLength, Pattern: resourceValue.ValidChars, Nullable: resourceValue.Nullable, // other values that I'm not sure what to do with yet. // resourceValue.CodeName // resourceValue.DynamicField // resourceValue.InvalidChars } usage := "" if resourceValue.Create == false && resourceValue.Update == false { p.ReadOnly = true } if resourceValue.Update || resourceValue.Create { usage = fmt.Sprint("Allowed in Methods:") } if resourceValue.Create { usage = fmt.Sprint(usage, " `POST`") } if resourceValue.Update { usage = fmt.Sprint(usage, " `PUT`") } if usage != "" { desc = append(desc, usage) } // Populate existing Description if resourceValue.Description != "" { desc = append(desc, resourceValue.Description) } // regex to find Schema base path findSchemaBase := regexp.MustCompile("^/v3([/\\w]*)") // remap types to valid types for openapi isValid := regexp.MustCompile("^(string|boolean|object|array)$") isIntOrString := regexp.MustCompile("^intOrString$") isArrayString := regexp.MustCompile("^array\\[string\\]$") isArrayInt := regexp.MustCompile("^array\\[int\\]$") isArrayEnum := regexp.MustCompile("^array\\[enum\\]$") isRefArray := regexp.MustCompile("^array\\[(\\w+)\\]$") isMapString := regexp.MustCompile("^map\\[string\\]$") isMapBase64 := regexp.MustCompile("^map\\[base64\\]$") isRefMap := regexp.MustCompile("^map\\[(\\w+)\\]$") isRefID := regexp.MustCompile("^reference\\[([a-zA-Z0-9/]+)\\]$") isArrayRefID := regexp.MustCompile("^array\\[reference\\[([a-zA-Z0-9/]+)\\]\\]$") isEnum := regexp.MustCompile("^enum$") isDNSLabel := regexp.MustCompile("^(dnsLabel|hostname|dnsLabelRestricted)$") isDate := regexp.MustCompile("^date$") isPassword := regexp.MustCompile("^password$") isInt := regexp.MustCompile("^int$") isBase64 := regexp.MustCompile("^base64$") switch { case isValid.MatchString(resourceValue.Type): p.Type = resourceValue.Type case isDate.MatchString(resourceValue.Type): p.Type = "string" p.Format = "date-time" case isPassword.MatchString(resourceValue.Type): p.Type = "string" p.Format = "password" case isIntOrString.MatchString(resourceValue.Type): p.OneOf = []openapi.Schema{ openapi.Schema{ Type: "string", }, openapi.Schema{ Type: "integer", }, } case isInt.MatchString(resourceValue.Type): p.Type = "integer" case isBase64.MatchString(resourceValue.Type): p.Type = "string" desc = append(desc, "Base64 encoded string") case isEnum.MatchString(resourceValue.Type): p.Type = "string" case isDNSLabel.MatchString(resourceValue.Type): p.Type = "string" p.Pattern = "^(\\w|[A-Za-z0-9-\\.]*\\w)$" desc = append(desc, "Must be valid Hostname") case isArrayString.MatchString(resourceValue.Type): p.Type = "array" desc = append(desc, "Array of Strings") p.Items = &openapi.Schema{ Type: "string", } case isArrayInt.MatchString(resourceValue.Type): p.Type = "array" desc = append(desc, "Array of Integers") p.Items = &openapi.Schema{ Type: "integer", } case isArrayEnum.MatchString(resourceValue.Type): p.Type = "array" p.Items = &openapi.Schema{ Type: "string", } desc = append(desc, "Array of Valid Options") case isRefArray.MatchString(resourceValue.Type): // Will be a ref to other resource, resolve the other resource refSchemaName := isRefArray.FindStringSubmatch(resourceValue.Type)[1] schemaBase := findSchemaBase.FindStringSubmatch(rancherSchema.Version.Path)[1] 
subSchema, err := getSchema(fmt.Sprintf("%s%s/schemas/%s", url, schemaBase, refSchemaName)) if err != nil { log.Errorf("Failed to get Schema for base:%s name:%s ref:%s url:%s - %v", schemaBase, name, refSchemaName, url, err) } else { // turtles all the way down translateSchema(subSchema, url, swagger) p.Type = "array" p.Items = &openapi.Schema{ Ref: fmt.Sprintf("#/components/schemas/%s", subSchema.ID), } } case isMapString.MatchString(resourceValue.Type): p.Type = "object" example := make(map[string]string) example["key"] = "value" p.Example = example case isMapBase64.MatchString(resourceValue.Type): p.Type = "object" example := make(map[string]string) example["key"] = "base64 encoded string" p.Example = example case isRefMap.MatchString(resourceValue.Type): refSchemaName := isRefMap.FindStringSubmatch(resourceValue.Type)[1] schemaBase := findSchemaBase.FindStringSubmatch(rancherSchema.Version.Path)[1] subSchema, err := getSchema(fmt.Sprintf("%s%s/schemas/%s", url, schemaBase, refSchemaName)) if err != nil { log.Errorf("Failed to get Schema for base:%s name:%s ref:%s url:%s - %v", schemaBase, name, refSchemaName, url, err) } else { // turtles all the way down translateSchema(subSchema, url, swagger) p.Type = "object" p.AdditionalProperties = &openapi.Schema{ Ref: fmt.Sprintf("#/components/schemas/%s", subSchema.ID), } } case isRefID.MatchString(resourceValue.Type): ref := isRefID.FindStringSubmatch(resourceValue.Type)[1] p.Type = "string" desc = append(desc, fmt.Sprintf("Id of %s", ref)) case isArrayRefID.MatchString(resourceValue.Type): ref := isArrayRefID.FindStringSubmatch(resourceValue.Type)[1] p.Type = "array" p.Items = &openapi.Schema{ Type: "string", } desc = append(desc, fmt.Sprintf("Array of Ids of %s", ref)) default: // Should be schema object schemaBase := findSchemaBase.FindStringSubmatch(rancherSchema.Version.Path)[1] subSchema, err := getSchema(fmt.Sprintf("%s%s/schemas/%s", url, schemaBase, resourceValue.Type)) if err != nil { log.Errorf("Failed to get Schema for base:%s name:%s ref:%s url:%s - %v", schemaBase, name, resourceValue.Type, url, err) } else { // turtles all the way down translateSchema(subSchema, url, swagger) // reset other fields p.Default = nil p.Description = "" p.Enum = nil p.Maximum = nil p.MaxLength = nil p.Minimum = nil p.MinLength = nil p.Pattern = "" p.Nullable = false if subSchema.ID == "" { log.Error("id is empty") } p.Ref = fmt.Sprintf("#/components/schemas/%s", subSchema.ID) desc = []string{} } } p.Description = strings.Join(desc, "; ") properties[resourceName] = *p } schemaObject := openapi.Schema{ Type: "object", Properties: properties, Required: required, } swagger.Components.Schemas[name] = schemaObject } func createResource(method string, resourceType string) *openapi.Operation { schema := &openapi.Schema{ Ref: fmt.Sprintf("#/components/schemas/%s", resourceType), } content := make(map[string]openapi.MediaType) content["application/json"] = openapi.MediaType{ Schema: schema, } resp := make(map[string]openapi.Response) request := &openapi.RequestBody{} if method == "GET" { request = nil resp["200"] = openapi.Response{ Description: fmt.Sprintf("Returns '%s' object.", resourceType), Content: content, } } if method == "PUT" { request.Description = fmt.Sprintf("Update `%s` object.", resourceType) request.Content = content resp["200"] = openapi.Response{ Description: fmt.Sprintf("Returns '%s' object.", resourceType), Content: content, } } if method == "DELETE" { request = nil resp["204"] = openapi.Response{ Description: fmt.Sprint("Delete 
Successful"), } } return &openapi.Operation{ Description: fmt.Sprintf("`%s` Resource", resourceType), Responses: resp, RequestBody: request, } } func createCollection(method string, col string, collection *Collection) *openapi.Operation { schema := &openapi.Schema{ Ref: fmt.Sprintf("#/components/schemas/%s", collection.ResourceType), } content := make(map[string]openapi.MediaType) content["application/json"] = openapi.MediaType{ Schema: schema, } request := &openapi.RequestBody{} resp := make(map[string]openapi.Response) if method == "GET" { request = nil resp["200"] = openapi.Response{ Description: fmt.Sprintf("Returns list of '%s'", col), Content: content, } } if method == "POST" { request.Description = fmt.Sprintf("Create a new `%s` object.", collection.ResourceType) request.Content = content resp["200"] = openapi.Response{ Description: fmt.Sprintf("Returns new `%s` object.", collection.ResourceType), Content: content, } } return &openapi.Operation{ Description: fmt.Sprintf("`%s` Collection", col), Responses: resp, RequestBody: request, } } func createPathParameter(name string, swagger *openapi.OpenAPI) { _, ok := swagger.Components.Parameters[name] if !ok { swagger.Components.Parameters[name] = openapi.Parameter{ Name: name, In: "path", Required: true, Schema: &openapi.Schema{ Type: "string", }, } } } func createCollectionSchema(name string, collection *Collection, swagger *openapi.OpenAPI) { colAllOf := make([]openapi.Schema, 0) resTypeRef := openapi.Schema{ Ref: "#/components/schemas/collection", } colAllOf = append(colAllOf, resTypeRef) colData := openapi.Schema{ Type: "array", Items: &openapi.Schema{ Ref: fmt.Sprintf("#/components/schemas/%s", collection.ResourceType), }, } colProp := make(map[string]openapi.Schema) colProp["data"] = colData colSchema := openapi.Schema{ Type: "object", AllOf: colAllOf, Properties: colProp, } swagger.Components.Schemas[name] = colSchema } func getSchema(link string) (norman.Schema, error) { schema := norman.Schema{} schemaResponse, err := httpGet(link) if err != nil { return schema, err } err = json.Unmarshal(schemaResponse, &schema) if err != nil { log.Error(string(schemaResponse)) log.Error(err) return schema, err } return schema, nil } func getCollections(link string) (map[string]string, error) { collectionsResponse, err := httpGet(link) if err != nil { return nil, err } collections := Collections{} err = json.Unmarshal(collectionsResponse, &collections) if err != nil { return nil, err } return collections.Links, nil } func getCollection(link string) (*Collection, error) { collectionResponse, err := httpGet(link) if err != nil { return nil, err } collection := &Collection{} err = json.Unmarshal(collectionResponse, collection) if err != nil { return nil, err } return collection, nil } func httpGet(url string
te, error) { token := os.Getenv("RANCHER_TOKEN") tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } client := &http.Client{Transport: tr} req, err := http.NewRequest("GET", url, nil) req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprint("Bearer ", token)) resp, err := client.Do(req) if err != nil { return nil, err } goodStatus := regexp.MustCompile("^2\\d\\d") if !goodStatus.MatchString(resp.Status) { return nil, fmt.Errorf("%s %s returned %s", req.Method, req.URL, resp.Status) } defer resp.Body.Close() jsonBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } return jsonBody, nil } func printPretty(data interface{}) string { jsonData, err := json.MarshalIndent(data, "", " ") if err != nil { return "" } return string(jsonData) } func contains(a []string, x string) bool { for _, n := range a { if x == n { return true } } return false }
) ([]by
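// Illustrative sketch of the type remapping performed in translateSchema
// (field type strings on the left are examples; mappings taken from the
// switch above, the last one with a hypothetical schema name):
//
//   Rancher resourceField type      resulting OpenAPI schema
//   "string"               -> { type: "string" }
//   "array[string]"        -> { type: "array", items: { type: "string" } }
//   "map[base64]"          -> { type: "object", example: {"key": "base64 encoded string"} }
//   "reference[cluster]"   -> { type: "string", description: "Id of cluster" }
//   "someOtherSchema"      -> { $ref: "#/components/schemas/someOtherSchema" },
//                             fetched and translated recursively.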
menuItems.js
import React from "react"; import { Icon } from "components"; import Label from "utils/translationNode"; const items = { citizen: { sections: { one: { items: [ { primaryText: <Label label="CS_HOME_HOMEHEADER" />, route: "/citizen", leftIcon: <Icon action="action" name="home" />, style: { paddingBottom: "1px", paddingTop: "1px", borderLeft: "3px solid #00bbd3", }, id: "header-home", }, { primaryText: <Label label="CS_HOME_HEADER_PROFILE" />, route: "/citizen/user/profile", leftIcon: <Icon action="social" name="person" />, style: { paddingBottom: "3px", paddingTop: "3px", }, id: "header-profile", }, { primaryText: <Label label="CS_HOME_HEADER_LANGUAGE" />, route: "/language-selection", leftIcon: <Icon action="action" name="language" />, style: { borderBottom: "none", }, id: "header-language", }, ], }, two: { items: [ { primaryText: <Label label="CS_HOME_HEADER_CONTACT_US" />, route: "/citizen/contact-us", leftIcon: <Icon action="communication" name="call" />, style: { paddingBottom: "8px", paddingTop: "8px", }, id: "header-contact-us", }, { primaryText: <Label label="CS_HOME_HEADER_HOW_IT_WORKS" />, route: "/citizen/how-it-works", leftIcon: <Icon action="custom" name="help-circle" />, style: { paddingBottom: "2px", paddingTop: "2px", }, id: "header-how-it-works", }, { primaryText: <Label label="CORE_COMMON_LOGOUT" />, route: "/logout", leftIcon: <Icon action="action" name="power-settings-new" />, style: { borderBottom: "none", borderLeft: "red", }, id: "header-logout", }, ], }, }, },
one: { items: [ { primaryText: <Label label="CS_HOME_HOMEHEADER" />, route: "/employee", leftIcon: <Icon action="action" name="home" />, style: { paddingBottom: "1px", paddingTop: "1px", borderLeft: "3px solid #00bbd3", }, id: "header-home", }, { primaryText: <Label label="ES_CLOSED_COMPLAINTS_HEADER" />, route: "/employee/closed-complaints", leftIcon: <Icon action="custom" name="file-check" />, id: "header-closed-complaint", }, { primaryText: <Label label="ES_EMPLOYEE_DIRECTORY_HEADER" />, route: "/employee/employee-directory", leftIcon: <Icon action="communication" name="call" />, style: { paddingBottom: "2px", paddingTop: "2px", }, id: "header-contact-us", }, { primaryText: <Label label="CS_HOME_HEADER_PROFILE" />, route: "/employee/user/profile", leftIcon: <Icon action="social" name="person" />, style: { paddingBottom: "3px", paddingTop: "3px", }, id: "header-profile", }, { primaryText: <Label label="CS_HOME_HEADER_LANGUAGE" />, route: "/language-selection", leftIcon: <Icon action="action" name="language" />, style: { borderBottom: "none", }, id: "header-language", }, ], }, two: { items: [ { primaryText: <Label label="CORE_COMMON_LOGOUT" />, route: "/logout", leftIcon: <Icon action="action" name="power-settings-new" />, style: { borderBottom: "none", borderLeft: "red", }, id: "header-logout", }, ], }, }, }, }; const menuItems = (role = "citizen", section = "one") => { return items[role].sections[section].items; }; export default menuItems;
employee: { sections: {
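// Illustrative usage of the menuItems accessor defined above (values taken
// from the items object):
//
//   menuItems();                  // citizen section "one" items (home, profile, language)
//   menuItems("employee", "two"); // employee section "two" items (logout only)
//
// Unknown role or section keys would throw, since the lookup is not guarded.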
cross_tabulation.rs
/* This tool is part of the WhiteboxTools geospatial analysis library. Authors: Dr. John Lindsay Created: 18/12/2017 Last Modified: 12/10/2018 License: MIT */ use crate::raster::*; use crate::tools::*; use std::env; use std::f64; use std::fs::File; use std::io::prelude::*; use std::io::BufWriter; use std::io::{Error, ErrorKind}; use std::path; use std::process::Command; /// This tool can be used to perform a cross-tabulation on two input raster images (`--i1` and `--i2`) containing /// categorical data, i.e. classes. It will output a [contingency table](https://en.wikipedia.org/wiki/Contingency_table) /// in HTML format (`--output`). A contingency table, also known as a cross tabulation or crosstab, is a type of table /// that displays the multivariate frequency distribution of the variables. These tables provide a basic picture of the /// interrelation between two categorical variables and can help find interactions between them. `CrossTabulation` /// can provide useful information about the nature of land-use/land-cover (LULC) changes between two dates of classified /// multi-spectral satellite imagery. For example, the extent of urban expansion could be described using the information /// about the extent of pixels in an 'urban' class in Date 2 that were previously assigned to other classes (e.g. /// agricultural LULC categories) in the Date 1 imagery. /// /// Both input images must share the same grid, as the analysis requires a comparison of a pair of images on a cell-by-cell /// basis. If a grid cell contains a **NoData** value in either of the input images, the cell will be excluded from the /// analysis. pub struct CrossTabulation { name: String, description: String, toolbox: String, parameters: Vec<ToolParameter>, example_usage: String, } impl CrossTabulation { pub fn new() -> CrossTabulation { // public constructor let name = "CrossTabulation".to_string(); let toolbox = "Math and Stats Tools".to_string(); let description = "Performs a cross-tabulation on two categorical images.".to_string(); let mut parameters = vec![]; parameters.push(ToolParameter { name: "Input File 1".to_owned(), flags: vec!["--i1".to_owned(), "--input1".to_owned()], description: "Input raster file 1.".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "Input File 2".to_owned(), flags: vec!["--i2".to_owned(), "--input2".to_owned()], description: "Input raster file 2.".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "Output HTML File".to_owned(), flags: vec!["-o".to_owned(), "--output".to_owned()], description: "Output HTML file (default name will be based on input file if unspecified)."
.to_owned(), parameter_type: ParameterType::NewFile(ParameterFileType::Html), default_value: None, optional: false, }); let sep: String = path::MAIN_SEPARATOR.to_string(); let p = format!("{}", env::current_dir().unwrap().display()); let e = format!("{}", env::current_exe().unwrap().display()); let mut short_exe = e .replace(&p, "") .replace(".exe", "") .replace(".", "") .replace(&sep, ""); if e.contains(".exe") { short_exe += ".exe"; } let usage = format!(">>.*{0} -r={1} -v --wd=\"*path*to*data*\" --i1=\"file1.tif\" --i2=\"file2.tif\" -o=outfile.html", short_exe, name).replace("*", &sep); CrossTabulation { name: name, description: description, toolbox: toolbox, parameters: parameters, example_usage: usage, } } } impl WhiteboxTool for CrossTabulation { fn get_source_file(&self) -> String { String::from(file!()) } fn get_tool_name(&self) -> String { self.name.clone() } fn get_tool_description(&self) -> String { self.description.clone() } fn get_tool_parameters(&self) -> String { let mut s = String::from("{\"parameters\": ["); for i in 0..self.parameters.len() { if i < self.parameters.len() - 1 { s.push_str(&(self.parameters[i].to_string())); s.push_str(","); } else { s.push_str(&(self.parameters[i].to_string())); } } s.push_str("]}"); s } fn
(&self) -> String { self.example_usage.clone() } fn get_toolbox(&self) -> String { self.toolbox.clone() } fn run<'a>( &self, args: Vec<String>, working_directory: &'a str, verbose: bool, ) -> Result<(), Error> { let mut input_file1: String = String::new(); let mut input_file2: String = String::new(); let mut output_file = String::new(); if args.len() == 0 { return Err(Error::new( ErrorKind::InvalidInput, "Tool run with no parameters.", )); } for i in 0..args.len() { let mut arg = args[i].replace("\"", ""); arg = arg.replace("\'", ""); let cmd = arg.split("="); // in case an equals sign was used let vec = cmd.collect::<Vec<&str>>(); let mut keyval = false; if vec.len() > 1 { keyval = true; } let flag_val = vec[0].to_lowercase().replace("--", "-"); if flag_val == "-i1" || flag_val == "-input1" { if keyval { input_file1 = vec[1].to_string(); } else { input_file1 = args[i + 1].to_string(); } } else if flag_val == "-i2" || flag_val == "-input2" { if keyval { input_file2 = vec[1].to_string(); } else { input_file2 = args[i + 1].to_string(); } } else if flag_val == "-o" || flag_val == "-output" { if keyval { output_file = vec[1].to_string(); } else { output_file = args[i + 1].to_string(); } } } if verbose { println!("***************{}", "*".repeat(self.get_tool_name().len())); println!("* Welcome to {} *", self.get_tool_name()); println!("***************{}", "*".repeat(self.get_tool_name().len())); } let sep: String = path::MAIN_SEPARATOR.to_string(); let mut progress: usize; let mut old_progress: usize = 1; let start = Instant::now(); if !input_file1.contains(&sep) && !input_file1.contains("/") { input_file1 = format!("{}{}", working_directory, input_file1); } if !input_file2.contains(&sep) && !input_file2.contains("/") { input_file2 = format!("{}{}", working_directory, input_file2); } if !output_file.contains(&sep) && !output_file.contains("/") { output_file = format!("{}{}", working_directory, output_file); } let input1 = Raster::new(&input_file1, "r")?; let rows = input1.configs.rows as isize; let columns = input1.configs.columns as isize; let nodata1 = input1.configs.nodata; let input2 = Raster::new(&input_file2, "r")?; let nodata2 = input2.configs.nodata; let min1 = input1.configs.minimum.round() as isize; let min2 = input2.configs.minimum.round() as isize; let max1 = input1.configs.maximum.round() as isize; let max2 = input2.configs.maximum.round() as isize; let image1_range = (max1 - min1) as usize + 1; let image2_range = (max2 - min2) as usize + 1; let mut contingency_table = vec![vec![0; image2_range]; image1_range]; let mut class_exists1 = vec![false; image1_range]; let mut class_exists2 = vec![false; image2_range]; let mut z1: f64; let mut z2: f64; let mut index1: usize; let mut index2: usize; for row in 0..rows { for col in 0..columns { z1 = input1.get_value(row, col); z2 = input2.get_value(row, col); if z1 != nodata1 && z2 != nodata2 { index1 = (z1.round() as isize - min1) as usize; index2 = (z2.round() as isize - min2) as usize; class_exists1[index1] = true; class_exists2[index2] = true; contingency_table[index1][index2] += 1; } } if verbose { progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize; if progress != old_progress { println!("Creating contingency table: {}%", progress); old_progress = progress; } } } let elapsed_time = get_formatted_elapsed_time(start); if verbose { println!( "\n{}", &format!("Elapsed Time (excluding I/O): {}", elapsed_time) ); } let f = File::create(output_file.clone())?; let mut writer = BufWriter::new(f); writer.write_all("<!DOCTYPE html 
PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"> <head> <meta content=\"text/html; charset=UTF-8\" http-equiv=\"content-type\"> <title>Cross Tabulation</title> <style type=\"text/css\"> h1 { font-size: 14pt; margin-left: 15px; margin-right: 15px; text-align: center; font-family: Helvetica, Verdana, Geneva, Arial, sans-serif; } p { font-size: 12pt; font-family: Helvetica, Verdana, Geneva, Arial, sans-serif; margin-left: 15px; margin-right: 15px; } caption { font-family: Helvetica, Verdana, Geneva, Arial, sans-serif; font-size: 12pt; margin-left: 15px; margin-right: 15px; } table { font-size: 12pt; font-family: Helvetica, Verdana, Geneva, Arial, sans-serif; font-family: arial, sans-serif; border-collapse: collapse; align: center; } td, th { border: 1px solid #222222; text-align: centre; padding: 8px; } tr:nth-child(even) { background-color: #dddddd; } .numberCell { text-align: right; } .header { font-weight: bold; text-align: center; } </style> </head> <body> <h1>Cross Tabulation Report</h1> ".as_bytes())?; writer.write_all( &format!( "<p><strong>Image 1</strong> (columns): {}</p>", input_file1.clone() ) .as_bytes(), )?; writer.write_all( &format!( "<p><strong>Image 2</strong> (rows): {}</p>", input_file2.clone() ) .as_bytes(), )?; // output the table. writer.write_all("<div><table align=\"center\">".as_bytes())?; writer.write_all("<caption>Cross Tabulation Results</caption>".as_bytes())?; let mut s = String::from("<tr><td></td>"); for a in 0..image1_range { if class_exists1[a] { s.push_str(&format!("<td class=\"header\">{}</td>", a as isize + min1)); } } s.push_str("</tr>"); writer.write_all(s.as_bytes())?; for b in 0..image2_range { if class_exists2[b] { let mut s = format!("<tr><td class=\"header\">{}</td>", b as isize + min2); for a in 0..image1_range { if class_exists1[a] { s.push_str(&format!( "<td class=\"numberCell\">{}</td>", contingency_table[a][b] )); } } s.push_str("</tr>"); writer.write_all(s.as_bytes())?; } } writer.write_all("</table></div>".as_bytes())?; writer.write_all("</body>".as_bytes())?; let _ = writer.flush(); if verbose { if cfg!(target_os = "macos") || cfg!(target_os = "ios") { let output = Command::new("open") .arg(output_file.clone()) .output() .expect("failed to execute process"); let _ = output.stdout; } else if cfg!(target_os = "windows") { // let output = Command::new("cmd /c start") let output = Command::new("explorer.exe") .arg(output_file.clone()) .output() .expect("failed to execute process"); let _ = output.stdout; } else if cfg!(target_os = "linux") { let output = Command::new("xdg-open") .arg(output_file.clone()) .output() .expect("failed to execute process"); let _ = output.stdout; } if verbose { println!("Complete! Please see {} for output.", output_file); } } Ok(()) } }
get_example_usage
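// Illustrative sketch of the contingency table this tool emits, for two
// hypothetical 2x2 rasters sharing the same grid:
//
//   image1: 1 1    image2: 1 2
//           2 2            2 2
//
// Class pairs per cell: (1,1), (1,2), (2,2), (2,2). With image 1 classes as
// columns and image 2 classes as rows (as in the HTML writer above), the
// table is:
//
//            1   2
//        1   1   0
//        2   1   2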
architecture.go
// Package architecture reads a microservice architecture definition from a file // It creates and controls a collection of aws and netflix application microservices package architecture import ( "encoding/json" "fmt" "github.com/adrianco/spigo/actors/packagenames" // name definitions "github.com/adrianco/spigo/tooling/archaius" // global configuration "github.com/adrianco/spigo/tooling/asgard" // tools to create an architecture "io/ioutil" "log" "os" "time" ) type archV0r1 struct { Arch string `json:"arch"` Version string `json:"version"` Description string `json:"description,omitempty"` Args string `json:"args,omitempty"` Date string `json:"date,omitempty"` Victim string `json:"victim,omitempty"` Services []containerV0r0 `json:"services"` } type serviceV0r0 struct { Name string `json:"name"` Package string `json:"package"` Regions int `json:"regions,omitempty"` Count int `json:"count"` Dependencies []string `json:"dependencies"` } type containerV0r0 struct { Name string `json:"name"` Machine string `json:"machine,omitempty"` Instance string `json:"instance,omitempty"` Container string `json:"container,omitempty"` Process string `json:"process,omitempty"` Gopackage string `json:"package"` Regions int `json:"regions,omitempty"` Count int `json:"count"` Dependencies []string `json:"dependencies"` } // Start architecture func Start(a *archV0r1) { var r string if archaius.Conf.Population < 1 { log.Fatal("architecture: can't create less than 1 microservice") } else { log.Printf("architecture: scaling to %v%%", archaius.Conf.Population) } asgard.CreateChannels() asgard.CreateEureka() // service registries for each zone for _, s := range a.Services { log.Printf("Starting: %v\n", s) r = asgard.Create(s.Name, s.Gopackage, s.Regions*archaius.Conf.Regions, s.Count*archaius.Conf.Population/100, s.Dependencies...) 
} asgard.Run(r, a.Victim) // run the last service in the list, and point chaos monkey at the victim } // Connection type Connection struct { Source, Dest string } // ListDependencies extracts dependencies from an architecture func ListDependencies(a *archV0r1, nodes *[]string, dependencies *[]Connection) { for _, s := range a.Services { *nodes = append(*nodes, s.Name) for _, d := range s.Dependencies { *dependencies = append(*dependencies, Connection{s.Name, d}) } } } // ReadArch parses an architecture json file func ReadArch(arch string) *archV0r1 { fn := "json_arch/" + arch + "_arch.json" log.Println("Loading architecture from " + fn) data, err := ioutil.ReadFile(fn) if err != nil { log.Fatal(err) } a := new(archV0r1) e := json.Unmarshal(data, a) if e == nil { names := make(map[string]bool) names[packagenames.EurekaPkg] = true // special case to allow cross region references packs := make(map[string]bool) for _, p := range packagenames.Packages { packs[p] = true } // map all the service names and check packages exist for _, s := range a.Services { if names[s.Name] == true { log.Println(names) log.Println(s) log.Fatal("Duplicate service name in architecture: " + s.Name) } else { names[s.Name] = true } if packs[s.Gopackage] != true { log.Println(packs) log.Println(s) log.Fatal("Unknown package name in architecture: " + s.Gopackage) } } // check all the dependencies for _, s := range a.Services { for _, d := range s.Dependencies { if names[d] == false { log.Println(names) log.Println(s) log.Fatal("Unknown dependency name in architecture: " + d) } } } log.Printf("Architecture: %v %v\n", a.Arch, a.Description) return a } log.Fatal(e) return nil } // MakeArch returns a new architecture object func MakeArch(arch, des string) *archV0r1 { a := new(archV0r1) a.Arch = arch a.Version = "arch-0.1" a.Description = des a.Args = fmt.Sprintf("%v", os.Args) a.Date = time.Now().Format(time.RFC3339Nano) a.Victim = "" return a } // AddContainer creates a new container level service func AddContainer(a *archV0r1, name, machine, instance, container, process, gopackage string, regions, count int, dependencies []string) { var c containerV0r0 c.Name = name c.Machine = machine c.Instance = instance c.Container = container c.Process = process c.Gopackage = gopackage c.Regions = regions c.Count = count c.Dependencies = dependencies a.Services = append(a.Services, c) } // Write converts the architecture to json and writes to stdout func Write(a *archV0r1)
// WriteFile writes the architecture to a file in json format func WriteFile(a *archV0r1, fn string) { dfile, err := os.Create(fn + ".json") if err != nil { log.Fatal(err) } sj, _ := json.Marshal(a) dfile.WriteString(fmt.Sprintf("%v", string(sj))) dfile.Close() }
{ b, err := json.Marshal(a) if err != nil { fmt.Println("error:", err) } else { os.Stdout.Write(b) } }
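// Illustrative shape of a json_arch/<arch>_arch.json file read by ReadArch
// above (service and package names are hypothetical; packages must exist in
// packagenames.Packages and dependency names must refer to other services):
//
//   {
//     "arch": "demo",
//     "version": "arch-0.1",
//     "description": "two tier demo",
//     "services": [
//       {"name": "store", "package": "store",  "regions": 1, "count": 1, "dependencies": []},
//       {"name": "web",   "package": "karyon", "regions": 1, "count": 3, "dependencies": ["store"]}
//     ]
//   }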
derive-a.rs
// force-host // no-prefer-dynamic #![crate_type = "proc-macro"] extern crate proc_macro; use proc_macro::TokenStream; #[proc_macro_derive(A)] pub fn derive(input: TokenStream) -> TokenStream
{ let input = input.to_string(); assert!(input.contains("struct A ;")); "".parse().unwrap() }
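// Illustrative companion test (hypothetical file) that would exercise this
// derive; the assertion above expects the derived item to be exactly
// `struct A;`:
//
//   // aux-build:derive-a.rs
//   #[macro_use]
//   extern crate derive_a;
//
//   #[derive(A)]
//   struct A;
//
//   fn main() {}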
hashing.py
import os import hashlib def _update_sha256(filename, sha256):
def hash_tree(root): """ Returns a cryptographically secure hash for a whole directory tree taking into account the names and the content of the files. """ file_list = [] for root_directory, directories, files in os.walk(root): for file in files: file_list.append(os.path.join(root_directory, file)) sorted_file_list = sorted(file_list) sha256 = hashlib.sha256() for file in sorted_file_list: _update_sha256(file, sha256) return sha256.hexdigest()
""" Updates a SHA-256 algorithm with the filename and the contents of a file. """ block_size = 64 * 1024 # 64 KB with open(filename, 'rb') as input_file: while True: data = input_file.read(block_size) if not data: break sha256.update(data) sha256.update(filename.encode("utf-8")) return sha256
Range.py
from Ranger.src.Range.Cut import Cut class Range(object): """ Class used to represent a range along some 1-D domain. The range is represented by 2 cutpoints and can be unbounded by specifying an aboveAll or belowAll Cut. """ def __init__(self, lowerCut, upperCut): """ Instantiates a Range Parameters ---------- lowerCut : Cut object Specifies the lower cut for the range upperCut : Cut object Specifies the upper cut for the range Raises ------ ValueError If bound(s) are not Cut objects or lower > upper """ if not all(map(lambda x: isinstance(x, Cut), (lowerCut,upperCut))): raise ValueError("Bounds must be Cut objects") elif lowerCut > upperCut: raise ValueError("Lower bound cannot be greater than upper bound") self.lowerCut = lowerCut self.upperCut = upperCut def __repr__(self): try: return_str = '[' if self.isLowerBoundClosed() else '(' except TypeError: return_str = '(' return_str += (str(self.lowerCut.point) if not self.lowerCut.belowAll \ else '') return_str += ' , ' return_str += (str(self.upperCut.point) if not self.upperCut.aboveAll \ else '') try: return_str += ']' if self.isUpperBoundClosed() else ')' except TypeError: return_str += ')' return return_str def __hash__(self): return (hash(self.lowerCut)*31 + hash(self.upperCut)) def __eq__(self, other): if not isinstance(other, Range): return False else: return ((self.lowerCut == other.lowerCut) and \ (self.upperCut == other.upperCut)) def __ne__(self, other): return not self.__eq__(other) def contains(self, val): """ Returns true if the range contains the value Parameters ---------- val : Comparable object of the appropriate type for the range Value to query whether in the range Raises ------ ValueError If the value type not compatible with cutpoint type Returns ------- True if the range contains the value """ return (self.lowerCut < val and \ self.upperCut > val) def containsAll(self, vals): """ Returns True if the range contains all values in some iterable Parameters ---------- vals : Iterable of comparable object of appropriate type for range Values to query against the range Raises ------ ValueError If there is a value type not compatible with the cutpoint type Returns ------- True if the range contains all values """ for val in vals: if not self.contains(val): return False return True def getDistanceFromPoint(self, val, distFunc = lambda x1, x2: abs(x1-x2)): """ Returns the minimum distance of a Range from a Point, returning 0 if there is an overlap. Note that both upper and lower bounds must be closed for this function to work Parameters ---------- val : comparable, compatible with cutpoint type The value of the point where the distance is desired distFunc : callable Function that calculates the distance between two points in the domain of the Range Raises ------ TypeError If the upper and/or lower bounds of this Range are not closed
Returns ------- The minimum distance between the Range and the Point. Returns 0 if there is an overlap """ if not all((self.isLowerBoundClosed(), self.isUpperBoundClosed())): raise TypeError("Range is not closed") if self.contains(val): return 0. else: return min(distFunc(self.lowerCut.point, val), distFunc(self.upperCut.point, val)) def getDistanceFromRange(self, other, distFunc = lambda x1,x2: abs(x1-x2)): """ Returns the minimum distance of a Range from another Range, returning 0 if there is any overlap Note that both Ranges must be closed for this function to work Parameters ---------- other : Range, compatible with this Range's domain The Range to compare to distFunc : callable Function that calculates the distance between two points in the domain of the Range Raises ------ TypeError If the upper and/or lower bounds of this Range are not closed or if the distFunc is not compatible with the type Returns ------- Minimum distance between the ranges """ if not isinstance(other, Range): raise TypeError("other is not a Range") if not all((self.isLowerBoundClosed(), self.isUpperBoundClosed(), other.isLowerBoundClosed(), other.isUpperBoundClosed())): raise TypeError("Not all Ranges closed") if self.isConnected(other): return 0. else: return min(distFunc(self.lowerCut.point, other.upperCut.point), distFunc(other.lowerCut.point, self.upperCut.point)) def hasLowerBound(self): """ Returns True if the range has a lower endpoint (not unbounded at the lower end) Returns ------- True if the range has a lower endpoint """ return (not self.lowerCut.belowAll) def hasUpperBound(self): """ Returns True if the range has an upper endpoint (not unbounded at the upper end) Returns ------- True if the range has an upper endpoint """ return (not self.upperCut.aboveAll) def lowerEndpoint(self): """ Returns the lower endpoint of the range if it exists. Otherwise raises a TypeError Raises ------ TypeError If the range is unbounded below Returns ------- The lower endpoint of the range """ if self.lowerCut.point is None: raise TypeError("Range unbounded below") else: return self.lowerCut.point def upperEndpoint(self): """ Returns the upper endpoint of the range if it exists.
Otherwise raises a TypeError Raises ------ TypeError If the range is unbounded above Returns ------- The upper endpoint of the range """ if self.upperCut.point is None: raise TypeError("Range unbounded above") else: return self.upperCut.point def isLowerBoundClosed(self): """ Returns whether the lower bound is closed (if there is a lower bound) Raises ------ TypeError If the range is unbounded below Returns ------- True if the lower bound is closed """ if self.lowerCut.point is None: raise TypeError("Range unbounded below") else: return self.lowerCut.below def isUpperBoundClosed(self): """ Returns whether the upper bound is closed (if there is an upper bound) Raises ------ TypeError If the range is unbounded above Returns ------- True if the upper bound is closed """ if self.upperCut.point is None: raise TypeError("Range unbounded above") else: return (not self.upperCut.below) def isEmpty(self): """ Returns True if the range is of form [v, v) or (v, v] Returns ------- True if the range is of the form [v,v) or (v,v] """ return self.lowerCut == self.upperCut def encloses(self, other): """ Returns True if the bounds of the other range do not extend outside the bounds of this range Examples: [3,6] encloses [4,5] (3,6) encloses (3,6) [3,6] encloses [4,4] (3,6] does not enclose [3,6] [4,5] does not enclose (3,6) Parameters ---------- other : A Range The range to compare to Raises ------ ValueError If object passed in is not a Range Returns ------- True if the bounds of the other range do not extend outside the bounds of this range """ if not isinstance(other, Range): raise ValueError("Range required") return ((self.lowerCut <= other.lowerCut) and \ (self.upperCut >= other.upperCut)) def isConnected(self, other): """ Returns True if there is a (possibly empty) range that is enclosed by both this range and other Examples: [2,4] and [5,7] are not connected [2,4] and [3,5] are connected [2,4] and [4,6] are connected [3,5] and (5,10) are connected Parameters ---------- other : A range The range to compare to Raises ------ ValueError If object passed in is not a Range Returns ------- True if there is a (possibly empty) range that is enclosed by both this range and other """ if not isinstance(other, Range): raise ValueError("Range required") return ((self.lowerCut <= other.upperCut) and \ (other.lowerCut <= self.upperCut)) def intersection(self, other): """ Returns the maximal range enclosed by both this range and the other range, if such a range exists Examples: Intersection of [1,5] and [3,7] is [3,5] Intersection of [1,5] and [5,7] is [5,5] Parameters ---------- other : A range The range to compare to Raises ------ ValueError If object passed in is not a Range or if there is no intersection Returns ------- The intersection range """ if not isinstance(other, Range): raise ValueError("Range required") if ((self.lowerCut >= other.lowerCut) and \ (self.upperCut <= other.upperCut)): return Range(self.lowerCut, self.upperCut) elif ((self.lowerCut <= other.lowerCut) and \ (self.upperCut >= other.upperCut)): return Range(other.lowerCut, other.upperCut) else: newLower = self.lowerCut if (self.lowerCut >= other.lowerCut) else \ other.lowerCut newUpper = self.upperCut if (self.upperCut <= other.upperCut) else \ other.upperCut return Range(newLower, newUpper) def span(self, other): """ Returns the minimal range that encloses both this range and the other. 
Note that if the input ranges are not connected, the span can contain values that are not contained within either input range Examples: Span of [1,3] and [5,7] is [1,7] Parameters ---------- other : A range A range to span with Raises ------ ValueError If object passed in is not a Range Returns ------- The minimal range enclosing both this and the other range """ if ((self.lowerCut <= other.lowerCut) and \ (self.upperCut >= other.upperCut)): return Range(self.lowerCut, self.upperCut) elif ((self.lowerCut >= other.lowerCut) and \ (self.upperCut <= other.upperCut)): return Range(other.lowerCut, other.upperCut) else: newLower = self.lowerCut if (self.lowerCut <= other.lowerCut) else \ other.lowerCut newUpper = self.upperCut if (self.upperCut >= other.upperCut) else \ other.upperCut return Range(newLower, newUpper) ################## # Static methods # ################## @staticmethod def _validate_cutpoints(*pts): if not all(map(lambda x: (hasattr(x, "__lt__") and \ hasattr(x, "__gt__")) or hasattr(x,'__cmp__'), pts)): raise ValueError("Cutpoint type(s) not comparable") if len(pts) == 2: if not (issubclass(type(pts[0]),type(pts[1])) or \ issubclass(type(pts[1]),type(pts[0]))): raise ValueError("Cutpoints are not compatible") return True @staticmethod def _get_type(*pts): if len(pts) == 1: return type(pts[0]) elif len(pts) == 2: if issubclass(type(pts[0]),type(pts[1])): return type(pts[1]) elif issubclass(type(pts[1]),type(pts[0])): return type(pts[0]) else: raise ValueError("Cutpoints are not compatible") @staticmethod def closed(lower, upper): """ Creates a range including the endpoints (i.e. [lower, upper]) Parameters ---------- lower : comparable, of same type as or subclass of upper type The lower bound upper : comparable, of same type as or subclass of lower type The upper bound Raises ------ ValueError If type(s) are not comparable or compatible Returns ------- A Range object [lower, upper] """ # Ensure cutpoints are of compatible, appropriate types Range._validate_cutpoints(lower, upper) theType = Range._get_type(lower,upper) return Range(Cut.belowValue(lower, theType=theType), Cut.aboveValue(upper, theType=theType)) @staticmethod def closedOpen(lower, upper): """ Creates a range including the lower endpoint (i.e. [lower, upper)) Parameters ---------- lower : comparable, of same type as or subclass of upper type The lower bound upper : comparable, of same type as or subclass of lower type The upper bound Raises ------ ValueError If type(s) are not comparable or compatible Returns ------- A Range object [lower, upper) """ # Ensure cutpoints are of compatible, appropriate types Range._validate_cutpoints(lower, upper) theType = Range._get_type(lower,upper) return Range(Cut.belowValue(lower, theType=theType), Cut.belowValue(upper, theType=theType)) @staticmethod def openClosed(lower, upper): """ Creates a range including the upper endpoint (i.e. (lower, upper]) Parameters ---------- lower : comparable, of same type as or subclass of upper type The lower bound upper : comparable, of same type as or subclass of lower type The upper bound Raises ------ ValueError If type(s) are not comparable or compatible Returns ------- A Range object (lower, upper] """ # Ensure cutpoints are of compatible, appropriate types Range._validate_cutpoints(lower, upper) theType = Range._get_type(lower,upper) return Range(Cut.aboveValue(lower, theType=theType), Cut.aboveValue(upper, theType=theType)) @staticmethod def open(lower, upper): """ Creates a range excluding the endpoints (i.e.
(lower, upper)) Parameters ---------- lower : comparable, of same type as or subclass of upper type The lower bound upper : comparable, of same type as or subclass of lower type The upper bound Raises ------ ValueError If type(s) are not comparable or compatible TypeError If constructing a range of type (v,v), which is invalid Returns ------- A Range object (lower, upper) """ # Ensure cutpoints are of compatible, appropriate types Range._validate_cutpoints(lower, upper) theType = Range._get_type(lower,upper) if lower == upper: raise TypeError("Range of type (v,v) is not valid") return Range(Cut.aboveValue(lower, theType=theType), Cut.belowValue(upper, theType=theType)) @staticmethod def lessThan(val): """ Makes range including all values less than some value (i.e. (-inf, val)) Parameters ---------- val : comparable The upper bound Raises ------ ValueError If type not comparable Returns ------- A Range object (-inf, val) """ Range._validate_cutpoints(val) theType = Range._get_type(val) return Range(Cut.belowAll(theType=theType), Cut.belowValue(val, theType=theType)) @staticmethod def atMost(val): """ Makes range including all values less than or equal to some value (i.e. (-inf, val]) Parameters ---------- val : comparable The upper bound Raises ------ ValueError If type not comparable Returns ------- A Range object (-inf, val] """ Range._validate_cutpoints(val) theType = Range._get_type(val) return Range(Cut.belowAll(theType=theType), Cut.aboveValue(val, theType=theType)) @staticmethod def greaterThan(val): """ Makes range including all values greater than some value (i.e. (val, inf)) Parameters ---------- val : comparable The lower bound Raises ------ ValueError If type not comparable Returns ------- A Range object (val, inf) """ Range._validate_cutpoints(val) theType = Range._get_type(val) return Range(Cut.aboveValue(val,theType=theType), Cut.aboveAll(theType=theType)) @staticmethod def atLeast(val): """ Makes range including all values greater than or equal to some value (i.e. [val, inf)) Parameters ---------- val : comparable The lower bound Raises ------ ValueError If type not comparable Returns ------- A Range object [val, inf) """ Range._validate_cutpoints(val) theType = Range._get_type(val) return Range(Cut.belowValue(val, theType=theType), Cut.aboveAll(theType=theType))
or if the distFunc is not compatible with the type
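A quick usage sketch of the Range API above; the import path is hypothetical, since only the class bodies are shown here.

# Hypothetical module path; Range and Cut are the classes defined above.
from ranges import Range

a = Range.closed(1, 5)                  # [1, 5]
b = Range.openClosed(3, 7)              # (3, 7]
print(a.encloses(Range.closed(2, 4)))   # True
print(a.isConnected(b))                 # True
inter = a.intersection(b)               # conceptually (3, 5]
print(inter.isLowerBoundClosed(), inter.isUpperBoundClosed())  # False True
print(a.span(Range.closed(8, 9)).isEmpty())  # False; the span [1, 9] also covers the gap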
itemCmb.py
#!/usr/bin/env python3 # # This file is part of the multi_agenda program # # This work is licensed under a # Creative Commons Attribution 4.0 International License. # (CC BY 4.0 International) # # To see a copy of the license, visit # https://creativecommons.org/licenses/by/4.0/legalcode # # WELLINGTON SAMPAIO - [email protected] # https://www.linkedin.com/in/wellsampaio/ # import sys from os.path import dirname, realpath, sep, pardir sys.path.append((dirname(realpath(__file__)) + sep + pardir)) import cgitb cgitb.enable() from objetos.patrimonio.Item import Item from objetos.patrimonio.ItemDAO import ItemDAO from objetos.patrimonio.ModeloDAO import ModeloDAO from objetos.patrimonio.MarcaDAO import MarcaDAO obj = Item() dao = ItemDAO() modeloDAO = ModeloDAO() marcaDAO = MarcaDAO() print("Content-type: application/json\n") #print("Content-type: text/html\n") saida = """ { "itemCmb": [""" i = 0 lista = dao.getItemCmb() contaLista = len(lista) - 1 for obj in lista: stringLbl = obj.getItem() stringLbl += " " + modeloDAO.select(obj.getCodModelo()).getModelo()[:10] stringLbl += " - " + marcaDAO.select(obj.getCodMarca()).getMarca() saida += """ {} "codItem": {}, "lbl": "{}" {}""".format( "{", obj.getCodItem(), stringLbl, "}" ) if i < contaLista: saida += "," i += 1 else: break saida += """ ] }
""" print( saida .replace("\n", "") .replace("\t", "") )
types_test.go
package rest import ( "testing" ) func TestMessage_SetAlias(t *testing.T) { type fields struct { MessageText string Emoji string Avatar string Attachments []Attachment alias string } tests := []struct { name string fields fields alias string want string }{ { name: "Example 1", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: "", }, alias: "new", want: "new", }, { name: "Example 2", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: "old", }, alias: "new", want: "new", }, { name: "Example 3", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: "old", }, alias: "", want: "old", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &Message{ MessageText: tt.fields.MessageText, Emoji: tt.fields.Emoji, Avatar: tt.fields.Avatar, Attachments: tt.fields.Attachments, alias: tt.fields.alias, } m.SetAlias(tt.alias) if m.alias != tt.want { t.Errorf("SetAlias() = %v, want %v", m.alias, tt.want) } }) } } func TestMessage_IsAliasPresent(t *testing.T) { type fields struct { MessageText string Emoji string Avatar string Attachments []Attachment alias string } tests := []struct { name string fields fields want bool }{ { name: "Example 1", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: "", }, want: false, }, { name: "Example 2", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: "test", }, want: true, }, { name: "Example 3", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: " ", }, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &Message{ MessageText: tt.fields.MessageText, Emoji: tt.fields.Emoji, Avatar: tt.fields.Avatar, Attachments: tt.fields.Attachments, alias: tt.fields.alias, } if got := m.IsAliasPresent(); got != tt.want { t.Errorf("IsAliasPresent() = %v, want %v", got, tt.want) } }) } } func TestMessage_GetAlias(t *testing.T)
{ type fields struct { MessageText string Emoji string Avatar string Attachments []Attachment alias string } tests := []struct { name string fields fields want string }{ { name: "Example 1", fields: fields{ MessageText: "", Emoji: "", Avatar: "", Attachments: nil, alias: "test 22", }, want: "test 22", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &Message{ MessageText: tt.fields.MessageText, Emoji: tt.fields.Emoji, Avatar: tt.fields.Avatar, Attachments: tt.fields.Attachments, alias: tt.fields.alias, } if got := m.GetAlias(); got != tt.want { t.Errorf("GetAlias() = %v, want %v", got, tt.want) } }) } }
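The Go implementation of SetAlias is not shown; read from the test table, it keeps the existing alias whenever the new one is blank. A minimal Python stand-in that encodes that inferred behavior and passes the same cases:

class Message:                         # stand-in for the Go rest.Message type
    def __init__(self, alias=""):
        self.alias = alias

    def set_alias(self, alias):        # inferred: ignore blank input, keep old value
        if alias.strip():
            self.alias = alias

    def is_alias_present(self):        # inferred from the IsAliasPresent cases
        return bool(self.alias.strip())

for start, new, want in [("", "new", "new"), ("old", "new", "new"), ("old", "", "old")]:
    m = Message(start)
    m.set_alias(new)
    assert m.alias == want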
hero-detail.component.ts
import {Component, Input, OnInit} from 'angular2/core'; import { RouteParams } from 'angular2/router'; import {Hero} from './hero'; import { HeroService } from './hero.service'; @Component({ selector: 'my-hero-detail', templateUrl: 'app/hero-detail.component.html', styleUrls: ['app/hero-detail.component.css'] }) export class
{ @Input() hero: Hero; constructor( private _heroService: HeroService, private _routeParams: RouteParams) { } ngOnInit() { let id = +this._routeParams.get('id'); // use + to convert route param string into number this._heroService.getHero(id).then(hero => this.hero = hero); } goBack() { window.history.back(); } }
HeroDetailComponent
room.go
package main import ( "log" "net/http" "github.com/gorilla/websocket" "github.com/hyrmn/GoBlueprints/trace" ) type room struct { //holds incoming messages forward chan []byte //channel for clients wishing to join room join chan *client //channel for clients wishing to leave room leave chan *client //clients currently in room clients map[*client]bool //our custom logger tracer trace.Tracer } func
() *room { return &room{ forward: make(chan []byte), join: make(chan *client), leave: make(chan *client), clients: make(map[*client]bool), } } func (r *room) run() { for { select { case client := <-r.join: r.clients[client] = true r.tracer.Trace("New client joined") case client := <-r.leave: delete(r.clients, client) close(client.send) r.tracer.Trace("Client left") case msg := <-r.forward: r.tracer.Trace("Message received: ", string(msg)) for client := range r.clients { client.send <- msg r.tracer.Trace(" -- sent to client") } } } } const ( socketBufferSize = 1024 messageBufferSize = 256 ) var upgrader = &websocket.Upgrader{ReadBufferSize: socketBufferSize, WriteBufferSize: messageBufferSize} func (r *room) ServeHTTP(rw http.ResponseWriter, req *http.Request) { socket, err := upgrader.Upgrade(rw, req, nil) if err != nil { log.Fatalf("ServeHTTP: %s", err) return } client := &client{ socket: socket, send: make(chan []byte, messageBufferSize), room: r, } r.join <- client defer func() { r.leave <- client }() go client.write() client.read() }
newRoom
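A tiny Python analogue of the select loop above: join and leave maintain the client set, and a message on forward fans out to every connected client (plain callables stand in for the websocket clients):

clients = set()                            # mirrors room.clients

def join(send): clients.add(send)          # room.join
def leave(send): clients.discard(send)     # room.leave

def forward(msg):                          # room.forward: broadcast to everyone
    for send in clients:
        send(msg)

join(lambda m: print("client A got:", m))
join(lambda m: print("client B got:", m))
forward("hello room")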
gen_parser.py
#!/usr/bin/python import datetime import sys import textwrap import common from xml.dom import pulldom PARSER = """\ /** * Copyright 2009 Joe LaPenna */ package com.joelapenna.foursquare.parsers; import com.joelapenna.foursquare.Foursquare; import com.joelapenna.foursquare.error.FoursquareError; import com.joelapenna.foursquare.error.FoursquareParseException; import com.joelapenna.foursquare.types.%(type_name)s; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import java.io.IOException; import java.util.logging.Level; import java.util.logging.Logger; /** * Auto-generated: %(timestamp)s * * @author Joe LaPenna ([email protected]) * @param <T> */ public class %(type_name)sParser extends AbstractParser<%(type_name)s> { private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName()); private static final boolean DEBUG = Foursquare.PARSER_DEBUG; @Override public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException, FoursquareError, FoursquareParseException { parser.require(XmlPullParser.START_TAG, null, null); %(type_name)s %(top_node_name)s = new %(type_name)s(); while (parser.nextTag() == XmlPullParser.START_TAG) { String name = parser.getName(); %(stanzas)s } else { // Consume something we don't understand. if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name); skipSubTree(parser); } } return %(top_node_name)s; } }""" BOOLEAN_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText())); """ GROUP_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser)); """ COMPLEX_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser)); """ STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(parser.nextText()); """ def main(): type_name, top_node_name, attributes = common.WalkNodesForAttributes( sys.argv[1]) GenerateClass(type_name, top_node_name, attributes) def GenerateClass(type_name, top_node_name, attributes): """generate it. type_name: the type of object the parser returns top_node_name: the name of the object the parser returns. per common.WalkNodsForAttributes """ stanzas = [] for name in sorted(attributes): typ, children = attributes[name] replacements = Replacements(top_node_name, name, typ, children) if typ == common.BOOLEAN: stanzas.append(BOOLEAN_STANZA % replacements) elif typ == common.GROUP: stanzas.append(GROUP_STANZA % replacements) elif typ in common.COMPLEX: stanzas.append(COMPLEX_STANZA % replacements) else: stanzas.append(STANZA % replacements) if stanzas: # pop off the extranious } else for the first conditional stanza. stanzas[0] = stanzas[0].replace('} else ', '', 1) replacements = Replacements(top_node_name, name, typ, [None]) replacements['stanzas'] = '\n'.join(stanzas).strip() print PARSER % replacements def
(top_node_name, name, typ, children): # CamelCaseClassName type_name = ''.join([word.capitalize() for word in top_node_name.split('_')]) # CamelCaseClassName camel_name = ''.join([word.capitalize() for word in name.split('_')]) # camelCaseLocalName attribute_name = camel_name[:1].lower() + camel_name[1:] # mFieldName field_name = 'm' + camel_name if children[0]: sub_parser_camel_case = children[0] + 'Parser' else: sub_parser_camel_case = (camel_name[:-1] + 'Parser') return { 'type_name': type_name, 'name': name, 'top_node_name': top_node_name, 'camel_name': camel_name, 'parser_name': typ + 'Parser', 'attribute_name': attribute_name, 'field_name': field_name, 'typ': typ, 'timestamp': datetime.datetime.now(), 'sub_parser_camel_case': sub_parser_camel_case, 'sub_type': children[0] } if __name__ == '__main__': main()
Replacements
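Worked through for a hypothetical attribute, the name transforms in Replacements produce:

top_node_name, name = "check_in", "first_name"   # hypothetical sample inputs
type_name = ''.join(w.capitalize() for w in top_node_name.split('_'))   # CheckIn
camel_name = ''.join(w.capitalize() for w in name.split('_'))           # FirstName
attribute_name = camel_name[:1].lower() + camel_name[1:]                # firstName
field_name = 'm' + camel_name                                           # mFirstName
print(type_name, camel_name, attribute_name, field_name)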
buffered_io.rs
use std::{io, pin::Pin}; use xitca_io::{ bytes::BytesMut, io::{AsyncIo, AsyncWrite, Interest}, }; use xitca_unsafe_collection::{ futures::{poll_fn, Select as _, SelectOutput}, uninit, }; #[cfg(not(feature = "single-thread"))] use tokio::sync::mpsc::{channel, Receiver}; #[cfg(feature = "single-thread")] use xitca_unsafe_collection::channel::mpsc::{async_vec as channel, Receiver}; use crate::{client::Client, error::Error, request::Request, response::Response}; use super::context::Context; pub struct BufferedIo<Io, const BATCH_LIMIT: usize> { io: Io, rx: Receiver<Request>, ctx: Context<BATCH_LIMIT>, } impl<Io, const BATCH_LIMIT: usize> BufferedIo<Io, BATCH_LIMIT> where Io: AsyncIo, { pub fn new_pair(io: Io, backlog: usize) -> (Client, Self) { let ctx = Context::<BATCH_LIMIT>::new(); let (tx, rx) = channel(backlog); (Client::new(tx), Self { io, rx, ctx }) } // send request in self blocking manner. this call would not utilize concurrent read/write nor // pipeline/batch. A single response is returned. pub async fn linear_request<F>(&mut self, encoder: F) -> Result<Response, Error> where F: FnOnce(&mut BytesMut) -> Result<(), Error>, { let mut buf = BytesMut::new(); encoder(&mut buf)?; let msg = buf.freeze(); let (req, res) = Request::new_pair(msg); self.ctx.push_req(req); while !self.ctx.req_is_empty() { let _ = self.io.ready(Interest::WRITABLE).await?; self.try_write()?; } loop { let _ = self.io.ready(Interest::READABLE).await?; self.try_read()?; if self.ctx.try_response_once()? { return Ok(res); } } } pub fn clear_ctx(&mut self) { self.ctx.clear(); } // try read async io until connection error/closed/blocked. fn
(&mut self) -> Result<(), Error> { loop { match self.io.try_read_buf(&mut self.ctx.buf) { Ok(0) => return Err(Error::ConnectionClosed), Ok(_) => continue, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(()), Err(e) => return Err(e.into()), } } } // try write to async io with vectored write enabled. fn try_write(&mut self) -> Result<(), Error> { while !self.ctx.req_is_empty() { let mut iovs = uninit::uninit_array::<_, BATCH_LIMIT>(); let slice = self.ctx.chunks_vectored(&mut iovs); match self.io.try_write_vectored(slice) { Ok(0) => return Err(Error::ConnectionClosed), Ok(n) => self.ctx.advance(n), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(()), Err(e) => return Err(e.into()), } } Ok(()) } } #[cfg(feature = "single-thread")] mod io_impl { use super::*; use std::io::IoSlice; use xitca_io::bytes::Buf; use xitca_unsafe_collection::uninit::PartialInit; impl<Io, const BATCH_LIMIT: usize> BufferedIo<Io, BATCH_LIMIT> where Io: AsyncIo, { pub async fn run(mut self) -> Result<(), Error> { loop { let ready = match self.rx.wait().select(self.io.ready(Interest::READABLE)).await { SelectOutput::A(res) => { res.map_err(|_| Error::ConnectionClosed)?; self.io.ready(Interest::READABLE | Interest::WRITABLE).await? } SelectOutput::B(ready) => ready?, }; if ready.is_readable() { self.try_read()?; self.ctx.try_response()?; } if ready.is_writable() { self.try_write2()?; poll_fn(|cx| AsyncWrite::poll_flush(Pin::new(&mut self.io), cx)).await?; } } } fn try_write2(&mut self) -> Result<(), Error> { let res = self.rx.with_iter(|iter| { let mut iovs = uninit::uninit_array::<_, BATCH_LIMIT>(); let slice = iovs.init_from(iter).into_init_with(|req| IoSlice::new(req.msg.chunk())); self.io.try_write_vectored(slice) }); match res { Ok(0) => Err(Error::ConnectionClosed), Ok(mut n) => { self.rx.advance_until(|req| { let rem = req.msg.remaining(); if rem > n { req.msg.advance(n); false } else { n -= rem; self.ctx.push_res(req.tx.take().unwrap()); true } }); Ok(()) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Ok(()), Err(e) => Err(e.into()), } } } } #[cfg(not(feature = "single-thread"))] mod io_impl { use super::*; use xitca_unsafe_collection::futures::never; impl<Io, const BATCH_LIMIT: usize> BufferedIo<Io, BATCH_LIMIT> where Io: AsyncIo, { pub async fn run(mut self) -> Result<(), Error> { loop { match try_rx(&mut self.rx, &self.ctx) .select(try_io(&mut self.io, &self.ctx)) .await { // batch message and keep polling. SelectOutput::A(Some(msg)) => self.ctx.push_req(msg), // client is gone. SelectOutput::A(None) => break, SelectOutput::B(ready) => { let ready = ready?; if ready.is_readable() { self.try_read()?; self.ctx.try_response()?; } if ready.is_writable() { self.try_write()?; poll_fn(|cx| AsyncWrite::poll_flush(Pin::new(&mut self.io), cx)).await?; } } } } Ok(()) } } async fn try_rx<const BATCH_LIMIT: usize>( rx: &mut Receiver<Request>, ctx: &Context<BATCH_LIMIT>, ) -> Option<Request> { if ctx.req_is_full() { never().await } else { rx.recv().await } } fn try_io<'i, Io, const BATCH_LIMIT: usize>(io: &'i mut Io, ctx: &Context<BATCH_LIMIT>) -> Io::ReadyFuture<'i> where Io: AsyncIo, { let interest = if ctx.req_is_empty() { Interest::READABLE } else { Interest::READABLE | Interest::WRITABLE }; io.ready(interest) } }
try_read
tower_compat.rs
use std::{ convert::Infallible, sync::Arc, task::{Context, Poll}, }; use futures_util::{future::BoxFuture, FutureExt}; use tower::{buffer::Buffer, Layer, Service, ServiceExt}; use crate::{Endpoint, IntoResponse, Middleware, Request, Result}; /// Extension trait for tower layer compat. #[cfg_attr(docsrs, doc(cfg(feature = "tower-compat")))] pub trait TowerLayerCompatExt { /// Converts a tower layer to a poem middleware. fn compat(self) -> TowerCompatMiddleware<Self> where Self: Sized, { TowerCompatMiddleware(self) } } impl<L> TowerLayerCompatExt for L {} /// A tower layer adapter. #[cfg_attr(docsrs, doc(cfg(feature = "tower-compat")))] pub struct TowerCompatMiddleware<L>(L); impl<E, L> Middleware<E> for TowerCompatMiddleware<L> where E: Endpoint, L: Layer<EndpointToTowerService<E>>, L::Service: Service<Request> + Send + 'static, <L::Service as Service<Request>>::Future: Send, <L::Service as Service<Request>>::Response: IntoResponse + Send + 'static, <L::Service as Service<Request>>::Error: Into<tower::BoxError> + Send + Sync, { type Output = TowerServiceToEndpoint<L::Service>; fn transform(&self, ep: E) -> Self::Output { TowerServiceToEndpoint(Buffer::new( self.0.layer(EndpointToTowerService(Arc::new(ep))), 32, )) } } /// An endpoint to tower service adapter. pub struct EndpointToTowerService<E>(Arc<E>); impl<E> Service<Request> for EndpointToTowerService<E> where E: Endpoint + 'static, { type Response = E::Output; type Error = Infallible; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request) -> Self::Future { let ep = self.0.clone(); async move { Ok(ep.call(req).await) }.boxed() } } /// An tower service to endpoint adapter. pub struct TowerServiceToEndpoint<Svc: Service<Request>>(Buffer<Svc, Request>); #[async_trait::async_trait] impl<Svc> Endpoint for TowerServiceToEndpoint<Svc> where Svc: Service<Request> + Send + 'static, Svc::Future: Send, Svc::Response: IntoResponse + Send + 'static, Svc::Error: Into<tower::BoxError> + Send + Sync, { type Output = Result<Svc::Response>; async fn
(&self, req: Request) -> Self::Output { let mut svc = self.0.clone(); svc.ready().await?; let res = svc.call(req).await?; Ok(res) } }
call
0064-Minimum Path Sum.py
class
: def minPathSum(self, grid: List[List[int]]) -> int: m = len(grid) if m == 0: return 0 n = len(grid[0]) for j in range(1, n): grid[0][j] += grid[0][j - 1] for i in range(1, m): grid[i][0] += grid[i - 1][0] for j in range(1, n): grid[i][j] += min(grid[i - 1][j], grid[i][j - 1]) return grid[-1][-1]
Solution
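A worked example for the dynamic programming above, assuming `from typing import List` was in scope when the class was defined (the LeetCode harness provides it); note that minPathSum rewrites the grid in place, so copy it first if the input must survive:

grid = [[1, 3, 1],
        [1, 5, 1],
        [4, 2, 1]]
print(Solution().minPathSum(grid))   # 7, via the path 1 -> 3 -> 1 -> 1 -> 1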
account.ts
import apiRequest from '@/utils/request'; import ApplicationConfig from '@/config'; export function changeUserNickname({ nickname }: { nickname: string }) { return apiRequest(ApplicationConfig.api.userNickname, { method: 'put', data: { nickname, }, }); } export function
({ oldPassword, newPassword, }: { oldPassword: string; newPassword: string; }) { return apiRequest(ApplicationConfig.api.userPassword, { method: 'put', data: { oldPassword, newPassword, }, }); }
changeUserPassword
utils.py
import os import random import subprocess import numpy as np import torch import time try: import torch_xla import torch_xla.core.xla_model as xm XLA = True except ModuleNotFoundError: XLA = False def freeze_module(module): for i, param in enumerate(module.parameters()): param.requires_grad = False def fit_state_dict(state_dict, model): ''' Ignore size mismatch when loading state_dict ''' for name, param in model.named_parameters(): new_param = state_dict[name] if new_param.size() != param.size(): print(f'Size mismatch in {name}: {new_param.shape} -> {param.shape}') state_dict.pop(name) def get_device(arg): if isinstance(arg, torch.device) or \ (XLA and isinstance(arg, xm.xla_device)): device = arg
device = torch.device( 'cuda' if torch.cuda.is_available() else 'cpu') elif isinstance(arg, str): if arg == 'xla' and XLA: device = xm.xla_device() else: device = torch.device(arg) if isinstance(arg, (list, tuple)): if isinstance(arg[0], int): device_ids = list(arg) elif isinstance(arg[0], str) and arg[0].isnumeric(): device_ids = [ int(a) for a in arg ] else: raise ValueError(f'Invalid device: {arg}') else: if device.type == 'cuda': assert torch.cuda.is_available() if device.index is None: device_count = torch.cuda.device_count() if device_count > 1: device_ids = list(range(device_count)) else: device_ids = [0] else: device_ids = [device.index] else: device_ids = [device.index] return device, device_ids def seed_everything(random_state=0, deterministic=False): random.seed(random_state) os.environ['PYTHONHASHSEED'] = str(random_state) np.random.seed(random_state) torch.manual_seed(random_state) torch.cuda.manual_seed(random_state) if deterministic: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: torch.backends.cudnn.deterministic = False def get_gpu_memory(): """ Code borrowed from: https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4 Get the current gpu usage. Returns ------- usage: dict Keys are device ids as integers. Values are memory usage as integers in MB. """ result = subprocess.check_output( [ 'nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader' ], encoding='utf-8') # Convert lines into a dictionary gpu_memory = [int(x) for x in result.strip().split('\n')] gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory)) return gpu_memory_map def get_time(time_format='%H:%M:%S'): return time.strftime(time_format, time.localtime())
elif arg is None or isinstance(arg, (list, tuple)): if XLA: device = xm.xla_device() else:
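A usage sketch for the helpers above on a CPU-only machine (the CUDA and XLA branches depend on the runtime environment):

seed_everything(42, deterministic=True)    # reproducible runs, cudnn in deterministic mode
device, device_ids = get_device('cpu')     # string arg -> torch.device('cpu')
print(device, device_ids)                  # cpu [None]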
main.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "log" "net/http" "os" "strings" "sync" "time" "github.com/boltdb/bolt" "github.com/golang/protobuf/proto" "github.com/gorilla/handlers" pb "github.com/micro/micro/v2/cmd/usage/proto" ) var ( db *bolt.DB fd = "usage.db" mtx sync.RWMutex seen = map[string]uint64{} ) func setup() { // setup db d, err := bolt.Open(fd, 0600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { log.Fatal(err) } db = d if err := db.Update(func(tx *bolt.Tx) error { for _, b := range []string{"usage", "metrics"} { if _, err := tx.CreateBucketIfNotExists([]byte(b)); err != nil { return err } } return nil }); err != nil { log.Fatal(err) } go flush() } func flush() { for { time.Sleep(time.Hour) now := time.Now().UnixNano() mtx.Lock() for k, v := range seen { d := uint64(now) - v // 48 hours if d > 1.728e14 { delete(seen, k) } } seen = make(map[string]uint64) mtx.Unlock() } } func process(w http.ResponseWriter, r *http.Request, u *pb.Usage)
func metrics(w http.ResponseWriter, r *http.Request) { r.ParseForm() prefix := time.Now().Add(time.Hour * -24).Format("20060102") metrics := map[string]interface{}{} if date := r.Form.Get("date"); len(date) >= 4 && len(date) <= 8 { prefix = date } db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte(`metrics`)).Cursor() for k, v := c.Seek([]byte(prefix)); k != nil && bytes.HasPrefix(k, []byte(prefix)); k, v = c.Next() { m := new(pb.Metrics) proto.Unmarshal(v, m) key := strings.TrimPrefix(string(k), prefix+"-") metrics[key] = m } return nil }) var buf []byte ct := r.Header.Get("Content-Type") if v := r.Form.Get("pretty"); len(v) > 0 || ct != "application/json" { buf, _ = json.MarshalIndent(metrics, "", "\t") } else { buf, _ = json.Marshal(metrics) } if len(buf) == 0 { buf = []byte(`{}`) } w.Header().Set("Content-Type", "application/json") w.Write(buf) } func handler(w http.ResponseWriter, r *http.Request) { r.ParseForm() // return metrics if r.Method == "GET" { metrics(w, r) return } // require post for updates if r.Method != "POST" { return } if r.Header.Get("Content-Type") != "application/protobuf" { return } if r.UserAgent() != "micro/usage" { return } b, err := ioutil.ReadAll(r.Body) if err != nil { http.Error(w, err.Error(), 500) return } u := new(pb.Usage) if err := proto.Unmarshal(b, u); err != nil { http.Error(w, err.Error(), 500) return } go process(w, r, u) } func main() { setup() http.HandleFunc("/", handler) lh := handlers.LoggingHandler(os.Stdout, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.URL.Path, "/usage") { r.URL.Path = strings.TrimPrefix(r.URL.Path, "/usage") } http.DefaultServeMux.ServeHTTP(w, r) })) if err := http.ListenAndServe(":8091", lh); err != nil { log.Fatal(err) } }
{ today := time.Now().Format("20060102") key := fmt.Sprintf("%s-%s", u.Service, u.Id) now := uint64(time.Now().UnixNano()) mtx.Lock() last := seen[key] lastSeen := now - last seen[key] = now mtx.Unlock() db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(`usage`)) buf, err := proto.Marshal(u) if err != nil { return err } k := fmt.Sprintf("%d-%s", u.Timestamp, key) // save this usage if err := b.Put([]byte(k), buf); err != nil { return err } // save daily usage b = tx.Bucket([]byte(`metrics`)) dailyKey := fmt.Sprintf("%s-%s", today, u.Service) // get usage v := b.Get([]byte(dailyKey)) if v == nil { // todo: don't overwrite this u.Metrics.Count["services"] = uint64(1) m, _ := proto.Marshal(u.Metrics) return b.Put([]byte(dailyKey), m) } m := new(pb.Metrics) if err := proto.Unmarshal(v, m); err != nil { return err } // update request count m.Count["requests"] += u.Metrics.Count["requests"] m.Count["services"] += u.Metrics.Count["services"] // not seen today add it if lastSeen == 0 || lastSeen > 7.2e13 { c := m.Count["instances"] c++ m.Count["instances"] = c } buf, err = proto.Marshal(m) if err != nil { return err } // store today-micro.api/new/cli/proxy return b.Put([]byte(dailyKey), buf) }) }
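The magic constants in flush() and process() are nanosecond windows; converting them to hours confirms the intended durations:

NS_PER_HOUR = 3600 * 1_000_000_000
print(7.2e13 / NS_PER_HOUR)     # 20.0 hours: an instance unseen this long counts as new again
print(1.728e14 / NS_PER_HOUR)   # 48.0 hours: entries this stale are pruned from `seen`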
chilled-vibes.py
Clock.bpm=100; Scale.default="minor" p1 >> pulse([0,-1,-2,-3], dur=8, lpf=600, lpr=0.2, crush=8) + (0,2,4,const(6)) p3 >> blip(p1.pitch, dur=8, sus=4, room=1, oct=6) + [0,0,0,P*(2,4,3,-1)] p2 >> saw(P[:5][:9][:16], dur=1/4, oct=var([3,4],[12,4])).penta() d1 >> play("(x )( x)o{ vx[xx]}", crush=16, rate=.8).every([24,5,3], "stutter", 4, dur=3) d2 >> play("<-s>< ~*~>").every(30.5, "jump", cycle=32)
params.go
// Copyright The OpenTelemetry Authors // Copyright Splunk Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 //
// Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reconcile import ( "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/signalfx/splunk-otel-collector-operator/apis/o11y/v1alpha1" ) // Params holds the reconciliation-specific parameters. type Params struct { Client client.Client Instance v1alpha1.SplunkOtelAgent Log logr.Logger Scheme *runtime.Scheme Recorder record.EventRecorder } func (p Params) WithDefaults() Params { return p }
testenv.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package testenv contains helper functions for skipping tests // based on which tools are present in the environment. package testenv import ( "bytes" "fmt" "go/build" "io/ioutil" "os" "os/exec" "runtime" "strings" "sync" ) // Testing is an abstraction of a *testing.T. type Testing interface { Skipf(format string, args ...interface{}) Fatalf(format string, args ...interface{}) } type helperer interface { Helper() } // packageMainIsDevel reports whether the module containing package main // is a development version (if module information is available). // // Builds in GOPATH mode and builds that lack module information are assumed to // be development versions. var packageMainIsDevel = func() bool { return true } var checkGoGoroot struct { once sync.Once err error } func hasTool(tool string) error { if tool == "cgo" { enabled, err := cgoEnabled(false) if err != nil { return fmt.Errorf("checking cgo: %v", err) } if !enabled { return fmt.Errorf("cgo not enabled") } return nil } _, err := exec.LookPath(tool) if err != nil { return err } switch tool { case "patch": // check that the patch tools supports the -o argument temp, err := ioutil.TempFile("", "patch-test") if err != nil { return err } temp.Close() defer os.Remove(temp.Name()) cmd := exec.Command(tool, "-o", temp.Name()) if err := cmd.Run(); err != nil { return err } case "go": checkGoGoroot.once.Do(func() { // Ensure that the 'go' command found by exec.LookPath is from the correct // GOROOT. Otherwise, 'some/path/go test ./...' will test against some // version of the 'go' binary other than 'some/path/go', which is almost // certainly not what the user intended. out, err := exec.Command(tool, "env", "GOROOT").CombinedOutput() if err != nil { checkGoGoroot.err = err return } GOROOT := strings.TrimSpace(string(out)) if GOROOT != runtime.GOROOT() { checkGoGoroot.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT()) } }) if checkGoGoroot.err != nil { return checkGoGoroot.err } case "diff": // Check that diff is the GNU version, needed for the -u argument and // to report missing newlines at the end of files. out, err := exec.Command(tool, "-version").Output() if err != nil { return err } if !bytes.Contains(out, []byte("GNU diffutils")) { return fmt.Errorf("diff is not the GNU version") } } return nil } func cgoEnabled(bypassEnvironment bool) (bool, error) { cmd := exec.Command("go", "env", "CGO_ENABLED") if bypassEnvironment { cmd.Env = append(append([]string(nil), os.Environ()...), "CGO_ENABLED=") } out, err := cmd.CombinedOutput() if err != nil { return false, err } enabled := strings.TrimSpace(string(out)) return enabled == "1", nil } func allowMissingTool(tool string) bool { if runtime.GOOS == "android" { // Android builds generally run tests on a separate machine from the build, // so don't expect any external tools to be available. return true } switch tool { case "cgo": if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-nocgo") { // Explicitly disabled on -nocgo builders. return true } if enabled, err := cgoEnabled(true); err == nil && !enabled { // No platform support. return true } case "go": if os.Getenv("GO_BUILDER_NAME") == "illumos-amd64-joyent" { // Work around a misconfigured builder (see https://golang.org/issue/33950). 
return true } case "diff": if os.Getenv("GO_BUILDER_NAME") != "" { return true } case "patch": if os.Getenv("GO_BUILDER_NAME") != "" { return true } } // If a developer is actively working on this test, we expect them to have all // of its dependencies installed. However, if it's just a dependency of some // other module (for example, being run via 'go test all'), we should be more // tolerant of unusual environments. return !packageMainIsDevel() } // NeedsTool skips t if the named tool is not present in the path. // As a special case, "cgo" means "go" is present and can compile cgo programs. func NeedsTool(t Testing, tool string) { if t, ok := t.(helperer); ok { t.Helper() } err := hasTool(tool) if err == nil { return } if allowMissingTool(tool) { t.Skipf("skipping because %s tool not available: %v", tool, err) } else { t.Fatalf("%s tool not available: %v", tool, err) } } // NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by // the current process environment is not present in the path. func NeedsGoPackages(t Testing) { if t, ok := t.(helperer); ok { t.Helper() } tool := os.Getenv("GOPACKAGESDRIVER") switch tool { case "off": // "off" forces go/packages to use the go command. tool = "go" case "": if _, err := exec.LookPath("gopackagesdriver"); err == nil { tool = "gopackagesdriver" } else { tool = "go" } } NeedsTool(t, tool) } // NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied // by env is not present in the path. func NeedsGoPackagesEnv(t Testing, env []string) { if t, ok := t.(helperer); ok { t.Helper() } for _, v := range env { if strings.HasPrefix(v, "GOPACKAGESDRIVER=") { tool := strings.TrimPrefix(v, "GOPACKAGESDRIVER=") if tool == "off" { NeedsTool(t, "go") } else { NeedsTool(t, tool) } return } } NeedsGoPackages(t) } // NeedsGoBuild skips t if the current system can't build programs with ``go build'' // and then run them with os.StartProcess or exec.Command. // android, and darwin/arm systems don't have the userspace go build needs to run, // and js/wasm doesn't support running subprocesses. func NeedsGoBuild(t Testing) { if t, ok := t.(helperer); ok { t.Helper() } NeedsTool(t, "go") switch runtime.GOOS { case "android", "js": t.Skipf("skipping test: %v can't build and run Go binaries", runtime.GOOS) case "darwin": if strings.HasPrefix(runtime.GOARCH, "arm") { t.Skipf("skipping test: darwin/arm can't build and run Go binaries") } } } // ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the // current machine is a builder known to have scarce resources. // // It should be called from within a TestMain function. func ExitIfSmallMachine() { switch os.Getenv("GO_BUILDER_NAME") { case "linux-arm": fmt.Fprintln(os.Stderr, "skipping test: linux-arm builder lacks sufficient memory (https://golang.org/issue/32834)") os.Exit(0) case "plan9-arm": fmt.Fprintln(os.Stderr, "skipping test: plan9-arm builder lacks sufficient memory (https://golang.org/issue/38772)") os.Exit(0) } } // Go1Point returns the x in Go 1.x. func Go1Point() int
// NeedsGo1Point skips t if the Go version used to run the test is older than // 1.x. func NeedsGo1Point(t Testing, x int) { if t, ok := t.(helperer); ok { t.Helper() } if Go1Point() < x { t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x) } } // SkipAfterGo1Point skips t if the Go version used to run the test is newer than // 1.x. func SkipAfterGo1Point(t Testing, x int) { if t, ok := t.(helperer); ok { t.Helper() } if Go1Point() > x { t.Skipf("running Go version %q is version 1.%d, newer than maximum 1.%d", runtime.Version(), Go1Point(), x) } }
{ for i := len(build.Default.ReleaseTags) - 1; i >= 0; i-- { var version int if _, err := fmt.Sscanf(build.Default.ReleaseTags[i], "go1.%d", &version); err != nil { continue } return version } panic("bad release tags") }
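Go1Point scans build.Default.ReleaseTags from newest to oldest and returns the first minor version that parses as go1.%d; the same scan takes a few lines of Python:

import re

def go1_point(release_tags):               # analogue of Go1Point above
    for tag in reversed(release_tags):
        m = re.fullmatch(r"go1\.(\d+)", tag)
        if m:
            return int(m.group(1))
    raise RuntimeError("bad release tags")

print(go1_point(["go1.1", "go1.15", "go1.16"]))   # 16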
client.py
# -*- coding: utf-8 -*- import tuna_service_sdk.api.test_plan.test_plan_client class Client(object): def __init__(self, server_ip="", server_port=0, service_name=""):
self.test_plan = tuna_service_sdk.api.test_plan.test_plan_client.TestPlanClient(server_ip, server_port, service_name)
transaction.go
package wallet import ( "encoding/hex" "fmt" "github.com/tdex-network/tdex-daemon/pkg/bufferutil" "github.com/tdex-network/tdex-daemon/pkg/explorer" "github.com/vulpemventures/go-elements/network" "github.com/vulpemventures/go-elements/pset" "github.com/vulpemventures/go-elements/transaction" ) // CreateTx crafts a new empty partial transaction func (w *Wallet) CreateTx() (string, error) { ptx, err := pset.New([]*transaction.TxInput{}, []*transaction.TxOutput{}, 2, 0) if err != nil { return "", err } return ptx.ToBase64() } // UpdateSwapTxOpts is the struct given to UpdateTx method type UpdateSwapTxOpts struct { PsetBase64 string Unspents []explorer.Utxo InputAmount uint64 InputAsset string OutputAmount uint64 OutputAsset string OutputDerivationPath string ChangeDerivationPath string Network *network.Network } func (o UpdateSwapTxOpts) validate() error { if len(o.PsetBase64) <= 0 { return ErrNullPset } _, err := pset.NewPsetFromBase64(o.PsetBase64) if err != nil { return err } if o.Network == nil { return ErrNullNetwork } // check input args if o.InputAmount == 0 { return ErrZeroInputAmount } if _, err := bufferutil.ValueToBytes(o.InputAmount); err != nil { return err } if len(o.InputAsset)/2 != 32 { return ErrInvalidInputAsset } if _, err := bufferutil.AssetHashToBytes(o.InputAsset); err != nil { return ErrInvalidInputAsset } // check input list if len(o.Unspents) <= 0 { return ErrEmptyUnspents } for _, in := range o.Unspents { _, _, err := in.Parse() if err != nil { return err } } // check output args if o.OutputAmount == 0 { return ErrZeroOutputAmount } if _, err := bufferutil.ValueToBytes(o.OutputAmount); err != nil { return err } if len(o.OutputAsset)/2 != 32 { return ErrInvalidOutputAsset } if _, err := bufferutil.AssetHashToBytes(o.OutputAsset); err != nil { return ErrInvalidOutputAsset } if len(o.OutputDerivationPath) <= 0 { return ErrNullOutputDerivationPath } outputDerivationPath, err := ParseDerivationPath(o.OutputDerivationPath) if err != nil { return err } if err := checkDerivationPath(outputDerivationPath); err != nil { return err } if len(o.ChangeDerivationPath) <= 0 { return ErrNullChangeDerivationPath } changeDerivationPath, err := ParseDerivationPath(o.ChangeDerivationPath) if err != nil { return err } if err := checkDerivationPath(changeDerivationPath); err != nil { return err } return nil } func (o UpdateSwapTxOpts) getUnspentsUnblindingKeys(w *Wallet) ([][]byte, error) { keys := make([][]byte, 0, len(o.Unspents)) for _, u := range o.Unspents { blindingPrvkey, _, err := w.DeriveBlindingKeyPair( DeriveBlindingKeyPairOpts{ Script: u.Script(), }, ) if err != nil { return nil, err } keys = append(keys, blindingPrvkey.Serialize()) } return keys, nil } // UpdateSwapTx takes care of adding inputs and output(s) to the provided partial // transaction. Inputs are selected so that the minimum number of them is used // to reach the target InputAmount. 
The subset of selected inputs is returned // along with the updated partial transaction func (w *Wallet) UpdateSwapTx(opts UpdateSwapTxOpts) (string, []explorer.Utxo, error) { if err := opts.validate(); err != nil { return "", nil, err } if err := w.validate(); err != nil { return "", nil, err } ptx, _ := pset.NewPsetFromBase64(opts.PsetBase64) selectedUnspents, change, err := explorer.SelectUnspents( opts.Unspents, opts.InputAmount, opts.InputAsset, ) if err != nil { return "", nil, err } _, script, _ := w.DeriveConfidentialAddress(DeriveConfidentialAddressOpts{ DerivationPath: opts.OutputDerivationPath, Network: opts.Network, }) output, _ := newTxOutput(opts.OutputAsset, opts.OutputAmount, script) outputsToAdd := []*transaction.TxOutput{output} if change > 0 { _, script, _ := w.DeriveConfidentialAddress(DeriveConfidentialAddressOpts{ DerivationPath: opts.ChangeDerivationPath, Network: opts.Network, }) changeOutput, _ := newTxOutput(opts.InputAsset, change, script) outputsToAdd = append(outputsToAdd, changeOutput) } psetBase64, err := addInsAndOutsToPset(ptx, selectedUnspents, outputsToAdd) if err != nil { return "", nil, err } return psetBase64, selectedUnspents, nil } // UpdateTxOpts is the struct given to UpdateTx method type UpdateTxOpts struct { PsetBase64 string Unspents []explorer.Utxo Outputs []*transaction.TxOutput ChangePathsByAsset map[string]string MilliSatsPerBytes int Network *network.Network WantPrivateBlindKeys bool WantChangeForFees bool } func (o UpdateTxOpts) validate() error { if len(o.PsetBase64) <= 0 { return ErrNullPset } _, err := pset.NewPsetFromBase64(o.PsetBase64) if err != nil { return err } if o.Network == nil { return ErrNullNetwork } if len(o.Unspents) > 0 { for _, in := range o.Unspents { _, _, err := in.Parse() if err != nil { return err } } if len(o.ChangePathsByAsset) <= 0 { return ErrNullChangePathsByAsset } for _, out := range o.Outputs { asset := bufferutil.AssetHashFromBytes(out.Asset) if _, ok := o.ChangePathsByAsset[asset]; !ok { return fmt.Errorf("missing derivation path for eventual change of asset '%s'", asset) } } // in case change for network fees is requested, make sure that a change // path for LBTC asset exists. if o.WantChangeForFees { lbtcAsset := o.Network.AssetID if _, ok := o.ChangePathsByAsset[lbtcAsset]; !ok { return fmt.Errorf("missing derivation path for eventual change of asset '%s'", lbtcAsset) } } if o.MilliSatsPerBytes < 100 { return ErrInvalidMilliSatsPerBytes } } return nil } func (o UpdateTxOpts) getOutputsTotalAmountsByAsset() map[string]uint64 { totalAmountsByAsset := map[string]uint64{} for _, out := range o.Outputs { asset := bufferutil.AssetHashFromBytes(out.Asset) totalAmountsByAsset[asset] += bufferutil.ValueFromBytes(out.Value) } return totalAmountsByAsset } func (o UpdateTxOpts) getUnspentsUnblindingKeys(w *Wallet) [][]byte { keys := make([][]byte, 0, len(o.Unspents)) for _, u := range o.Unspents { blindingPrvkey, _, _ := w.DeriveBlindingKeyPair( DeriveBlindingKeyPairOpts{ Script: u.Script(), }, ) keys = append(keys, blindingPrvkey.Serialize()) } return keys } func (o UpdateTxOpts) getInputAssets() []string { assets := make([]string, 0, len(o.ChangePathsByAsset)) for asset := range o.ChangePathsByAsset { assets = append(assets, asset) } return assets } // UpdateTxResult is the struct returned by UpdateTx method. 
// PsetBase64: the updated partial transaction with new inputs and outputs // SelectedUnspents: the list of unspents added as inputs to the pset // ChangeOutptusBlindingKeys: the list of blinding keys for the evnutal // change(s) added to the pset // FeeAmount: the amount in satoshi of the fee amount that can added in a // second moment giving the user the possibility to eventually blind // the pset first type UpdateTxResult struct { PsetBase64 string SelectedUnspents []explorer.Utxo ChangeOutputsBlindingKeys map[string][]byte FeeAmount uint64 } // UpdateTx adds the provided outputs and eventual inputs to the provided // partial transaction. The assets of the inputs to add is determined by the // assets of the provided outputs. For each assset type a derivation path for // an eventual change must be provided. // Its also mandatory to provide a derivation path for the LBTC asset type // since this method takes care of adding inputs (if necessary) for covering // the fee amount. // While the list of outputs is required, the list of unspents is optional. // In case it's not empty, a coin selection is performed for each type of // asset, adding the eventual change output to the list of outputs to add to // the tx. In the other case, only the outputs are added to the provided // partial transaction. func (w *Wallet) UpdateTx(opts UpdateTxOpts) (*UpdateTxResult, error) { if err := opts.validate(); err != nil { return nil, err } if err := w.validate(); err != nil { return nil, err } ptx, _ := pset.NewPsetFromBase64(opts.PsetBase64) inputsToAdd := make([]explorer.Utxo, 0) outputsToAdd := make([]*transaction.TxOutput, len(opts.Outputs)) changeOutputsBlindingKeys := map[string][]byte{} feeAmount := uint64(0) copy(outputsToAdd, opts.Outputs) if len(opts.Unspents) > 0 { // retrieve all the asset hashes of input to add to the pset inAssets := opts.getInputAssets() // calculate target amount of each asset for coin selection totalAmountsByAsset := opts.getOutputsTotalAmountsByAsset() // retrieve input prv blinding keys // select unspents and update the list of inputs to add and eventually the // list of outputs to add by adding the change output if necessary for _, asset := range inAssets { if totalAmountsByAsset[asset] > 0 { selectedUnspents, change, err := explorer.SelectUnspents( opts.Unspents, totalAmountsByAsset[asset], asset, ) if err != nil { return nil, err } inputsToAdd = append(inputsToAdd, selectedUnspents...) 
if change > 0 { _, script, _ := w.DeriveConfidentialAddress( DeriveConfidentialAddressOpts{ DerivationPath: opts.ChangePathsByAsset[asset], Network: opts.Network, }, ) changeOutput, _ := newTxOutput(asset, change, script) outputsToAdd = append(outputsToAdd, changeOutput) prvBlindingKey, pubBlindingKey, err := w.DeriveBlindingKeyPair( DeriveBlindingKeyPairOpts{ Script: script, }) if err != nil { return nil, err } if opts.WantPrivateBlindKeys { changeOutputsBlindingKeys[hex.EncodeToString(script)] = prvBlindingKey.Serialize() } else { changeOutputsBlindingKeys[hex.EncodeToString(script)] = pubBlindingKey.SerializeCompressed() } } } } if opts.WantChangeForFees { _, lbtcChangeScript, _ := w.DeriveConfidentialAddress( DeriveConfidentialAddressOpts{ DerivationPath: opts.ChangePathsByAsset[opts.Network.AssetID], Network: opts.Network, }, ) feeAmount = estimateTxSize( len(inputsToAdd)+len(ptx.Inputs), len(outputsToAdd)+len(ptx.Outputs), !anyOutputWithScript(outputsToAdd, lbtcChangeScript), opts.MilliSatsPerBytes, ) // if a LBTC change output already exists and its value covers the // estimated fee amount, it's enough to add the fee output and updating // the change output's value by subtracting the fee amount. // Otherwise, another coin selection over those LBTC utxos not already // included is necessary and the already existing change output's value // will be eventually updated by adding the change amount returned by the // coin selection if anyOutputWithScript(outputsToAdd, lbtcChangeScript) { changeOutputIndex := outputIndexByScript(outputsToAdd, lbtcChangeScript) changeAmount := bufferutil.ValueFromBytes(outputsToAdd[changeOutputIndex].Value) if feeAmount < changeAmount { outputsToAdd[changeOutputIndex].Value, _ = bufferutil.ValueToBytes(changeAmount - feeAmount) } else { unspents := getRemainingUnspents(opts.Unspents, inputsToAdd) selectedUnspents, change, err := explorer.SelectUnspents( unspents, feeAmount, opts.Network.AssetID, ) if err != nil { return nil, err } inputsToAdd = append(inputsToAdd, selectedUnspents...) if change > 0 { outputsToAdd[changeOutputIndex].Value, _ = bufferutil.ValueToBytes(changeAmount + change) } } } else { // In case there's no LBTC change, it's necessary to choose some other // unspents from those not yet selected, add it/them to the list of // inputs to add to the tx and add another output for the eventual change // returned by the coin selection unspents := getRemainingUnspents(opts.Unspents, inputsToAdd) selectedUnspents, change, err := explorer.SelectUnspents( unspents, feeAmount, opts.Network.AssetID, ) if err != nil { return nil, err } inputsToAdd = append(inputsToAdd, selectedUnspents...) 
if change > 0 { lbtcChangeOutput, _ := newTxOutput( opts.Network.AssetID, change, lbtcChangeScript, ) outputsToAdd = append(outputsToAdd, lbtcChangeOutput) lbtcChangePrvBlindingKey, lbtcChangePubBlindingKey, _ := w.DeriveBlindingKeyPair( DeriveBlindingKeyPairOpts{ Script: lbtcChangeScript, }, ) if opts.WantPrivateBlindKeys { changeOutputsBlindingKeys[hex.EncodeToString(lbtcChangeScript)] = lbtcChangePrvBlindingKey.Serialize() } else { changeOutputsBlindingKeys[hex.EncodeToString(lbtcChangeScript)] = lbtcChangePubBlindingKey.SerializeCompressed() } } } } } psetBase64, err := addInsAndOutsToPset(ptx, inputsToAdd, outputsToAdd) if err != nil { return nil, err } return &UpdateTxResult{ PsetBase64: psetBase64, SelectedUnspents: inputsToAdd, ChangeOutputsBlindingKeys: changeOutputsBlindingKeys, FeeAmount: feeAmount, }, nil } // FinalizeAndExtractTransactionOpts is the struct given to FinalizeAndExtractTransaction method type FinalizeAndExtractTransactionOpts struct { PsetBase64 string } func (o FinalizeAndExtractTransactionOpts) validate() error { if _, err := pset.NewPsetFromBase64(o.PsetBase64); err != nil { return err } return nil } // FinalizeAndExtractTransaction attempts to finalize the provided partial // transaction and eventually extracts the final transaction and returns // it in hex string format, along with its transaction id func
(opts FinalizeAndExtractTransactionOpts) (string, string, error) { ptx, _ := pset.NewPsetFromBase64(opts.PsetBase64) ok, err := ptx.ValidateAllSignatures() if err != nil { return "", "", err } if !ok { return "", "", ErrInvalidSignatures } if err := pset.FinalizeAll(ptx); err != nil { return "", "", err } tx, err := pset.Extract(ptx) if err != nil { return "", "", err } txHex, err := tx.ToHex() if err != nil { return "", "", err } return txHex, tx.TxHash().String(), nil }
FinalizeAndExtractTransaction
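UpdateTx repeats one pattern per asset: select enough unspents to cover a target amount, then route any surplus to a derived change address. A simplified Python sketch of that selection step (greedy order is an assumption; the real explorer.SelectUnspents may pick differently):

def select_unspents(unspents, target):        # simplified stand-in, amounts in satoshis
    picked, total = [], 0
    for u in unspents:
        picked.append(u)
        total += u["value"]
        if total >= target:
            return picked, total - target     # second value becomes the change output
    raise ValueError("insufficient funds")

picked, change = select_unspents([{"value": 60}, {"value": 50}], 100)
print(len(picked), change)                    # 2 10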
client.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // HTTP client. See RFC 7230 through 7235. // // This is the high-level Client interface. // The low-level implementation is in transport.go. package http import ( "context" "github.com/sleeyax/ja3rp/crypto/tls" "encoding/base64" "errors" "fmt" "github.com/sleeyax/ja3rp/net/http/internal/ascii" "io" "log" "net/url" "reflect" "sort" "strings" "sync" "time" ) // A Client is an HTTP client. Its zero value (DefaultClient) is a // usable client that uses DefaultTransport. // // The Client's Transport typically has internal state (cached TCP // connections), so Clients should be reused instead of created as // needed. Clients are safe for concurrent use by multiple goroutines. // // A Client is higher-level than a RoundTripper (such as Transport) // and additionally handles HTTP details such as cookies and // redirects. // // When following redirects, the Client will forward all headers set on the // initial Request except: // // • when forwarding sensitive headers like "Authorization", // "WWW-Authenticate", and "Cookie" to untrusted targets. // These headers will be ignored when following a redirect to a domain // that is not a subdomain match or exact match of the initial domain. // For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com" // will forward the sensitive headers, but a redirect to "bar.com" will not. // // • when forwarding the "Cookie" header with a non-nil cookie Jar. // Since each redirect may mutate the state of the cookie jar, // a redirect may possibly alter a cookie set in the initial request. // When forwarding the "Cookie" header, any mutated cookies will be omitted, // with the expectation that the Jar will insert those mutated cookies // with the updated values (assuming the origin matches). // If Jar is nil, the initial cookies are forwarded without change. // type Client struct { // Transport specifies the mechanism by which individual // HTTP requests are made. // If nil, DefaultTransport is used. Transport RoundTripper // CheckRedirect specifies the policy for handling redirects. // If CheckRedirect is not nil, the client calls it before // following an HTTP redirect. The arguments req and via are // the upcoming request and the requests made already, oldest // first. If CheckRedirect returns an error, the Client's Get // method returns both the previous Response (with its Body // closed) and CheckRedirect's error (wrapped in a url.Error) // instead of issuing the Request req. // As a special case, if CheckRedirect returns ErrUseLastResponse, // then the most recent response is returned with its body // unclosed, along with a nil error. // // If CheckRedirect is nil, the Client uses its default policy, // which is to stop after 10 consecutive requests. CheckRedirect func(req *Request, via []*Request) error // Jar specifies the cookie jar. // // The Jar is used to insert relevant cookies into every // outbound Request and is updated with the cookie values // of every inbound Response. The Jar is consulted for every // redirect that the Client follows. // // If Jar is nil, cookies are only sent if they are explicitly // set on the Request. Jar CookieJar // Timeout specifies a time limit for requests made by this // Client. The timeout includes connection time, any // redirects, and reading the response body. 
The timer remains // running after Get, Head, Post, or Do return and will // interrupt reading of the Response.Body. // // A Timeout of zero means no timeout. // // The Client cancels requests to the underlying Transport // as if the Request's Context ended. // // For compatibility, the Client will also use the deprecated // CancelRequest method on Transport if found. New // RoundTripper implementations should use the Request's Context // for cancellation instead of implementing CancelRequest. Timeout time.Duration } // DefaultClient is the default Client and is used by Get, Head, and Post. var DefaultClient = &Client{} // RoundTripper is an interface representing the ability to execute a // single HTTP transaction, obtaining the Response for a given Request. // // A RoundTripper must be safe for concurrent use by multiple // goroutines. type RoundTripper interface { // RoundTrip executes a single HTTP transaction, returning // a Response for the provided Request. // // RoundTrip should not attempt to interpret the response. In // particular, RoundTrip must return err == nil if it obtained // a response, regardless of the response's HTTP status code. // A non-nil err should be reserved for failure to obtain a // response. Similarly, RoundTrip should not attempt to // handle higher-level protocol details such as redirects, // authentication, or cookies. // // RoundTrip should not modify the request, except for // consuming and closing the Request's Body. RoundTrip may // read fields of the request in a separate goroutine. Callers // should not mutate or reuse the request until the Response's // Body has been closed. // // RoundTrip must always close the body, including on errors, // but depending on the implementation may do so in a separate // goroutine even after RoundTrip returns. This means that // callers wanting to reuse the body for subsequent requests // must arrange to wait for the Close call before doing so. // // The Request's URL and Header fields must be initialized. RoundTrip(*Request) (*Response, error) } // refererForURL returns a referer without any authentication info or // an empty string if lastReq scheme is https and newReq scheme is http. func refererForURL(lastReq, newReq *url.URL) string { // https://tools.ietf.org/html/rfc7231#section-5.5.2 // "Clients SHOULD NOT include a Referer header field in a // (non-secure) HTTP request if the referring page was // transferred with a secure protocol." if lastReq.Scheme == "https" && newReq.Scheme == "http" { return "" } referer := lastReq.String() if lastReq.User != nil { // This is not very efficient, but is the best we can // do without: // - introducing a new method on URL // - creating a race condition // - copying the URL struct manually, which would cause // maintenance problems down the line auth := lastReq.User.String() + "@" referer = strings.Replace(referer, auth, "", 1) } return referer } // didTimeout is non-nil only if err != nil. 
func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { if c.Jar != nil { for _, cookie := range c.Jar.Cookies(req.URL) { req.AddCookie(cookie) } } resp, didTimeout, err = send(req, c.transport(), deadline) if err != nil { return nil, didTimeout, err } if c.Jar != nil { if rc := resp.Cookies(); len(rc) > 0 { c.Jar.SetCookies(req.URL, rc) } } return resp, nil, nil } func (c *Client) deadline() time.Time { if c.Timeout > 0 { return time.Now().Add(c.Timeout) } return time.Time{} } func (c *Client) transport() RoundTripper { if c.Transport != nil { return c.Transport } return DefaultTransport } // send issues an HTTP request. // Caller should close resp.Body when done reading from it. func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { req := ireq // req is either the original request, or a modified fork if rt == nil { req.closeBody() return nil, alwaysFalse, errors.New("http: no Client.Transport or DefaultTransport") } if req.URL == nil { req.closeBody() return nil, alwaysFalse, errors.New("http: nil Request.URL") } if req.RequestURI != "" { req.closeBody() return nil, alwaysFalse, errors.New("http: Request.RequestURI can't be set in client requests") } // forkReq forks req into a shallow clone of ireq the first // time it's called. forkReq := func() { if ireq == req { req = new(Request) *req = *ireq // shallow clone } } // Most the callers of send (Get, Post, et al) don't need // Headers, leaving it uninitialized. We guarantee to the // Transport that this has been initialized, though. if req.Header == nil { forkReq() req.Header = make(Header) } if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" { username := u.Username() password, _ := u.Password() forkReq() req.Header = cloneOrMakeHeader(ireq.Header) req.Header.Set("Authorization", "Basic "+basicAuth(username, password)) } if !deadline.IsZero() { forkReq() } stopTimer, didTimeout := setRequestCancel(req, rt, deadline) resp, err = rt.RoundTrip(req) if err != nil { stopTimer() if resp != nil { log.Printf("RoundTripper returned a response & error; ignoring response") } if tlsErr, ok := err.(tls.RecordHeaderError); ok { // If we get a bad TLS record header, check to see if the // response looks like HTTP and give a more helpful error. // See golang.org/issue/11111. if string(tlsErr.RecordHeader[:]) == "HTTP/" { err = errors.New("http: server gave HTTP response to HTTPS client") } } return nil, didTimeout, err } if resp == nil { return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt) } if resp.Body == nil { // The documentation on the Body field says “The http Client and Transport // guarantee that Body is always non-nil, even on responses without a body // or responses with a zero-length body.” Unfortunately, we didn't document // that same constraint for arbitrary RoundTripper implementations, and // RoundTripper implementations in the wild (mostly in tests) assume that // they can use a nil Body to mean an empty one (similar to Request.Body). // (See https://golang.org/issue/38095.) // // If the ContentLength allows the Body to be empty, fill in an empty one // here to ensure that it is non-nil. 
if resp.ContentLength > 0 && req.Method != "HEAD" { return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with content length %d but a nil Body", rt, resp.ContentLength) } resp.Body = io.NopCloser(strings.NewReader("")) } if !deadline.IsZero() { resp.Body = &cancelTimerBody{ stop: stopTimer, rc: resp.Body, reqDidTimeout: didTimeout, } } return resp, nil, nil } // timeBeforeContextDeadline reports whether the non-zero Time t is // before ctx's deadline, if any. If ctx does not have a deadline, it // always reports true (the deadline is considered infinite). func timeBeforeContextDeadline(t time.Time, ctx context.Context) bool { d, ok := ctx.Deadline() if !ok { return true } return t.Before(d) } // knownRoundTripperImpl reports whether rt is a RoundTripper that's // maintained by the Go team and known to implement the latest // optional semantics (notably contexts). The Request is used // to check whether this particular request is using an alternate protocol, // in which case we need to check the RoundTripper for that protocol. func knownRoundTripperImpl(rt RoundTripper, req *Request) bool { switch t := rt.(type) { case *Transport: if altRT := t.alternateRoundTripper(req); altRT != nil { return knownRoundTripperImpl(altRT, req) } return true case *http2Transport, http2noDialH2RoundTripper: return true } // There's a very minor chance of a false positive with this. // Instead of detecting our golang.org/x/net/http2.Transport, // it might detect a Transport type in a different http2 // package. But I know of none, and the only problem would be // some temporarily leaked goroutines if the transport didn't // support contexts. So this is a good enough heuristic: if reflect.TypeOf(rt).String() == "*http2.Transport" { return true } return false } // setRequestCancel sets req.Cancel and adds a deadline context to req // if deadline is non-zero. The RoundTripper's type is used to // determine whether the legacy CancelRequest behavior should be used. // // As background, there are three ways to cancel a request: // First was Transport.CancelRequest. (deprecated) // Second was Request.Cancel. // Third was Request.Context. // This function populates the second and third, and uses the first if it really needs to. func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTimer func(), didTimeout func() bool) { if deadline.IsZero() { return nop, alwaysFalse } knownTransport := knownRoundTripperImpl(rt, req) oldCtx := req.Context() if req.Cancel == nil && knownTransport { // If they already had a Request.Context that's // expiring sooner, do nothing: if !timeBeforeContextDeadline(deadline, oldCtx) { return nop, alwaysFalse } var cancelCtx func() req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline) return cancelCtx, func() bool { return time.Now().After(deadline) } } initialReqCancel := req.Cancel // the user's original Request.Cancel, if any var cancelCtx func() if oldCtx := req.Context(); timeBeforeContextDeadline(deadline, oldCtx) { req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline) } cancel := make(chan struct{}) req.Cancel = cancel doCancel := func() { // The second way in the func comment above: close(cancel) // The first way, used only for RoundTripper // implementations written before Go 1.5 or Go 1.6. 
type canceler interface{ CancelRequest(*Request) } if v, ok := rt.(canceler); ok { v.CancelRequest(req) } } stopTimerCh := make(chan struct{}) var once sync.Once stopTimer = func() { once.Do(func() { close(stopTimerCh) if cancelCtx != nil { cancelCtx() } }) } timer := time.NewTimer(time.Until(deadline)) var timedOut atomicBool go func() { select { case <-initialReqCancel: doCancel() timer.Stop() case <-timer.C: timedOut.setTrue() doCancel() case <-stopTimerCh: timer.Stop() } }() return stopTimer, timedOut.isSet } // See 2 (end of page 4) https://www.ietf.org/rfc/rfc2617.txt // "To receive authorization, the client sends the userid and password, // separated by a single colon (":") character, within a base64 // encoded string in the credentials." // It is not meant to be urlencoded. func basicAuth(username, password string) string { auth := username + ":" + password return base64.StdEncoding.EncodeToString([]byte(auth)) } // Get issues a GET to the specified URL. If the response is one of // the following redirect codes, Get follows the redirect, up to a // maximum of 10 redirects: // // 301 (Moved Permanently) // 302 (Found) // 303 (See Other) // 307 (Temporary Redirect) // 308 (Permanent Redirect) // // An error is returned if there were too many redirects or if there // was an HTTP protocol error. A non-2xx response doesn't cause an // error. Any returned error will be of type *url.Error. The url.Error // value's Timeout method will report true if the request timed out. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. // // Get is a wrapper around DefaultClient.Get. // // To make a request with custom headers, use NewRequest and // DefaultClient.Do. // // To make a request with a specified context.Context, use NewRequestWithContext // and DefaultClient.Do. func Get(url string) (resp *Response, err error) { return DefaultClient.Get(url) } // Get issues a GET to the specified URL. If the response is one of the // following redirect codes, Get follows the redirect after calling the // Client's CheckRedirect function: // // 301 (Moved Permanently) // 302 (Found) // 303 (See Other) // 307 (Temporary Redirect) // 308 (Permanent Redirect) // // An error is returned if the Client's CheckRedirect function fails // or if there was an HTTP protocol error. A non-2xx response doesn't // cause an error. Any returned error will be of type *url.Error. The // url.Error value's Timeout method will report true if the request // timed out. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. // // To make a request with custom headers, use NewRequest and Client.Do. // // To make a request with a specified context.Context, use NewRequestWithContext // and Client.Do. func (c *Client) Get(url string) (resp *Response, err error) { req, err := NewRequest("GET", url, nil) if err != nil { return nil, err } return c.Do(req) } func alwaysFalse() bool { return
UseLastResponse can be returned by Client.CheckRedirect hooks to // control how redirects are processed. If returned, the next request // is not sent and the most recent response is returned with its body // unclosed. var ErrUseLastResponse = errors.New("net/http: use last response") // checkRedirect calls either the user's configured CheckRedirect // function, or the default. func (c *Client) checkRedirect(req *Request, via []*Request) error { fn := c.CheckRedirect if fn == nil { fn = defaultCheckRedirect } return fn(req, via) } // redirectBehavior describes what should happen when the // client encounters a 3xx status code from the server func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) { switch resp.StatusCode { case 301, 302, 303: redirectMethod = reqMethod shouldRedirect = true includeBody = false // RFC 2616 allowed automatic redirection only with GET and // HEAD requests. RFC 7231 lifts this restriction, but we still // restrict other methods to GET to maintain compatibility. // See Issue 18570. if reqMethod != "GET" && reqMethod != "HEAD" { redirectMethod = "GET" } case 307, 308: redirectMethod = reqMethod shouldRedirect = true includeBody = true // Treat 307 and 308 specially, since they're new in // Go 1.8, and they also require re-sending the request body. if resp.Header.Get("Location") == "" { // 308s have been observed in the wild being served // without Location headers. Since Go 1.7 and earlier // didn't follow these codes, just stop here instead // of returning an error. // See Issue 17773. shouldRedirect = false break } if ireq.GetBody == nil && ireq.outgoingLength() != 0 { // We had a request body, and 307/308 require // re-sending it, but GetBody is not defined. So just // return this response to the user instead of an // error, like we did in Go 1.7 and earlier. shouldRedirect = false } } return redirectMethod, shouldRedirect, includeBody } // urlErrorOp returns the (*url.Error).Op value to use for the // provided (*Request).Method value. func urlErrorOp(method string) string { if method == "" { return "Get" } if lowerMethod, ok := ascii.ToLower(method); ok { return method[:1] + lowerMethod[1:] } return method } // Do sends an HTTP request and returns an HTTP response, following // policy (such as redirects, cookies, auth) as configured on the // client. // // An error is returned if caused by client policy (such as // CheckRedirect), or failure to speak HTTP (such as a network // connectivity problem). A non-2xx status code doesn't cause an // error. // // If the returned error is nil, the Response will contain a non-nil // Body which the user is expected to close. If the Body is not both // read to EOF and closed, the Client's underlying RoundTripper // (typically Transport) may not be able to re-use a persistent TCP // connection to the server for a subsequent "keep-alive" request. // // The request Body, if non-nil, will be closed by the underlying // Transport, even on errors. // // On error, any Response can be ignored. A non-nil Response with a // non-nil error only occurs when CheckRedirect fails, and even then // the returned Response.Body is already closed. // // Generally Get, Post, or PostForm will be used instead of Do. // // If the server replies with a redirect, the Client first uses the // CheckRedirect function to determine whether the redirect should be // followed. 
If permitted, a 301, 302, or 303 redirect causes // subsequent requests to use HTTP method GET // (or HEAD if the original request was HEAD), with no body. // A 307 or 308 redirect preserves the original HTTP method and body, // provided that the Request.GetBody function is defined. // The NewRequest function automatically sets GetBody for common // standard library body types. // // Any returned error will be of type *url.Error. The url.Error // value's Timeout method will report true if the request timed out. func (c *Client) Do(req *Request) (*Response, error) { return c.do(req) } var testHookClientDoResult func(retres *Response, reterr error) func (c *Client) do(req *Request) (retres *Response, reterr error) { if testHookClientDoResult != nil { defer func() { testHookClientDoResult(retres, reterr) }() } if req.URL == nil { req.closeBody() return nil, &url.Error{ Op: urlErrorOp(req.Method), Err: errors.New("http: nil Request.URL"), } } var ( deadline = c.deadline() reqs []*Request resp *Response copyHeaders = c.makeHeadersCopier(req) reqBodyClosed = false // have we closed the current req.Body? // Redirect behavior: redirectMethod string includeBody bool ) uerr := func(err error) error { // the body may have been closed already by c.send() if !reqBodyClosed { req.closeBody() } var urlStr string if resp != nil && resp.Request != nil { urlStr = stripPassword(resp.Request.URL) } else { urlStr = stripPassword(req.URL) } return &url.Error{ Op: urlErrorOp(reqs[0].Method), URL: urlStr, Err: err, } } for { // For all but the first request, create the next // request hop and replace req. if len(reqs) > 0 { loc := resp.Header.Get("Location") if loc == "" { resp.closeBody() return nil, uerr(fmt.Errorf("%d response missing Location header", resp.StatusCode)) } u, err := req.URL.Parse(loc) if err != nil { resp.closeBody() return nil, uerr(fmt.Errorf("failed to parse Location header %q: %v", loc, err)) } host := "" if req.Host != "" && req.Host != req.URL.Host { // If the caller specified a custom Host header and the // redirect location is relative, preserve the Host header // through the redirect. See issue #22233. if u, _ := url.Parse(loc); u != nil && !u.IsAbs() { host = req.Host } } ireq := reqs[0] req = &Request{ Method: redirectMethod, Response: resp, URL: u, Header: make(Header), Host: host, Cancel: ireq.Cancel, ctx: ireq.ctx, } if includeBody && ireq.GetBody != nil { req.Body, err = ireq.GetBody() if err != nil { resp.closeBody() return nil, uerr(err) } req.ContentLength = ireq.ContentLength } // Copy original headers before setting the Referer, // in case the user set Referer on their first request. // If they really want to override, they can do it in // their CheckRedirect func. copyHeaders(req) // Add the Referer header from the most recent // request URL to the new one, if it's not https->http: if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL); ref != "" { req.Header.Set("Referer", ref) } err = c.checkRedirect(req, reqs) // Sentinel error to let users select the // previous response, without closing its // body. See Issue 10069. if err == ErrUseLastResponse { return resp, nil } // Close the previous response's body. But // read at least some of the body so if it's // small the underlying TCP connection will be // re-used. No need to check for errors: if it // fails, the Transport won't reuse it anyway. 
const maxBodySlurpSize = 2 << 10 if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { io.CopyN(io.Discard, resp.Body, maxBodySlurpSize) } resp.Body.Close() if err != nil { // Special case for Go 1 compatibility: return both the response // and an error if the CheckRedirect function failed. // See https://golang.org/issue/3795 // The resp.Body has already been closed. ue := uerr(err) ue.(*url.Error).URL = loc return resp, ue } } reqs = append(reqs, req) var err error var didTimeout func() bool if resp, didTimeout, err = c.send(req, deadline); err != nil { // c.send() always closes req.Body reqBodyClosed = true if !deadline.IsZero() && didTimeout() { err = &httpError{ err: err.Error() + " (Client.Timeout exceeded while awaiting headers)", timeout: true, } } return nil, uerr(err) } var shouldRedirect bool redirectMethod, shouldRedirect, includeBody = redirectBehavior(req.Method, resp, reqs[0]) if !shouldRedirect { return resp, nil } req.closeBody() } } // makeHeadersCopier makes a function that copies headers from the // initial Request, ireq. For every redirect, this function must be called // so that it can copy headers into the upcoming Request. func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) { // The headers to copy are from the very initial request. // We use a closured callback to keep a reference to these original headers. var ( ireqhdr = cloneOrMakeHeader(ireq.Header) icookies map[string][]*Cookie ) if c.Jar != nil && ireq.Header.Get("Cookie") != "" { icookies = make(map[string][]*Cookie) for _, c := range ireq.Cookies() { icookies[c.Name] = append(icookies[c.Name], c) } } preq := ireq // The previous request return func(req *Request) { // If Jar is present and there was some initial cookies provided // via the request header, then we may need to alter the initial // cookies as we follow redirects since each redirect may end up // modifying a pre-existing cookie. // // Since cookies already set in the request header do not contain // information about the original domain and path, the logic below // assumes any new set cookies override the original cookie // regardless of domain or path. // // See https://golang.org/issue/17494 if c.Jar != nil && icookies != nil { var changed bool resp := req.Response // The response that caused the upcoming redirect for _, c := range resp.Cookies() { if _, ok := icookies[c.Name]; ok { delete(icookies, c.Name) changed = true } } if changed { ireqhdr.Del("Cookie") var ss []string for _, cs := range icookies { for _, c := range cs { ss = append(ss, c.Name+"="+c.Value) } } sort.Strings(ss) // Ensure deterministic headers ireqhdr.Set("Cookie", strings.Join(ss, "; ")) } } // Copy the initial request's Header values // (at least the safe ones). for k, vv := range ireqhdr { if shouldCopyHeaderOnRedirect(k, preq.URL, req.URL) { req.Header[k] = vv } } preq = req // Update previous Request with the current request } } func defaultCheckRedirect(req *Request, via []*Request) error { if len(via) >= 10 { return errors.New("stopped after 10 redirects") } return nil } // Post issues a POST to the specified URL. // // Caller should close resp.Body when done reading from it. // // If the provided body is an io.Closer, it is closed after the // request. // // Post is a wrapper around DefaultClient.Post. // // To set custom headers, use NewRequest and DefaultClient.Do. // // See the Client.Do method documentation for details on how redirects // are handled. 
// // To make a request with a specified context.Context, use NewRequestWithContext // and DefaultClient.Do. func Post(url, contentType string, body io.Reader) (resp *Response, err error) { return DefaultClient.Post(url, contentType, body) } // Post issues a POST to the specified URL. // // Caller should close resp.Body when done reading from it. // // If the provided body is an io.Closer, it is closed after the // request. // // To set custom headers, use NewRequest and Client.Do. // // To make a request with a specified context.Context, use NewRequestWithContext // and Client.Do. // // See the Client.Do method documentation for details on how redirects // are handled. func (c *Client) Post(url, contentType string, body io.Reader) (resp *Response, err error) { req, err := NewRequest("POST", url, body) if err != nil { return nil, err } req.Header.Set("Content-Type", contentType) return c.Do(req) } // PostForm issues a POST to the specified URL, with data's keys and // values URL-encoded as the request body. // // The Content-Type header is set to application/x-www-form-urlencoded. // To set other headers, use NewRequest and DefaultClient.Do. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. // // PostForm is a wrapper around DefaultClient.PostForm. // // See the Client.Do method documentation for details on how redirects // are handled. // // To make a request with a specified context.Context, use NewRequestWithContext // and DefaultClient.Do. func PostForm(url string, data url.Values) (resp *Response, err error) { return DefaultClient.PostForm(url, data) } // PostForm issues a POST to the specified URL, // with data's keys and values URL-encoded as the request body. // // The Content-Type header is set to application/x-www-form-urlencoded. // To set other headers, use NewRequest and Client.Do. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. // // See the Client.Do method documentation for details on how redirects // are handled. // // To make a request with a specified context.Context, use NewRequestWithContext // and Client.Do. func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) { return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } // Head issues a HEAD to the specified URL. If the response is one of // the following redirect codes, Head follows the redirect, up to a // maximum of 10 redirects: // // 301 (Moved Permanently) // 302 (Found) // 303 (See Other) // 307 (Temporary Redirect) // 308 (Permanent Redirect) // // Head is a wrapper around DefaultClient.Head // // To make a request with a specified context.Context, use NewRequestWithContext // and DefaultClient.Do. func Head(url string) (resp *Response, err error) { return DefaultClient.Head(url) } // Head issues a HEAD to the specified URL. If the response is one of the // following redirect codes, Head follows the redirect after calling the // Client's CheckRedirect function: // // 301 (Moved Permanently) // 302 (Found) // 303 (See Other) // 307 (Temporary Redirect) // 308 (Permanent Redirect) // // To make a request with a specified context.Context, use NewRequestWithContext // and Client.Do. 
func (c *Client) Head(url string) (resp *Response, err error) { req, err := NewRequest("HEAD", url, nil) if err != nil { return nil, err } return c.Do(req) } // CloseIdleConnections closes any connections on its Transport which // were previously connected from previous requests but are now // sitting idle in a "keep-alive" state. It does not interrupt any // connections currently in use. // // If the Client's Transport does not have a CloseIdleConnections method // then this method does nothing. func (c *Client) CloseIdleConnections() { type closeIdler interface { CloseIdleConnections() } if tr, ok := c.transport().(closeIdler); ok { tr.CloseIdleConnections() } } // cancelTimerBody is an io.ReadCloser that wraps rc with two features: // 1) On Read error or close, the stop func is called. // 2) On Read failure, if reqDidTimeout is true, the error is wrapped and // marked as net.Error that hit its timeout. type cancelTimerBody struct { stop func() // stops the time.Timer waiting to cancel the request rc io.ReadCloser reqDidTimeout func() bool } func (b *cancelTimerBody) Read(p []byte) (n int, err error) { n, err = b.rc.Read(p) if err == nil { return n, nil } b.stop() if err == io.EOF { return n, err } if b.reqDidTimeout() { err = &httpError{ err: err.Error() + " (Client.Timeout or context cancellation while reading body)", timeout: true, } } return n, err } func (b *cancelTimerBody) Close() error { err := b.rc.Close() b.stop() return err } func shouldCopyHeaderOnRedirect(headerKey string, initial, dest *url.URL) bool { switch CanonicalHeaderKey(headerKey) { case "Authorization", "Www-Authenticate", "Cookie", "Cookie2": // Permit sending auth/cookie headers from "foo.com" // to "sub.foo.com". // Note that we don't send all cookies to subdomains // automatically. This function is only used for // Cookies set explicitly on the initial outgoing // client request. Cookies automatically added via the // CookieJar mechanism continue to follow each // cookie's scope as set by Set-Cookie. But for // outgoing requests with the Cookie header set // directly, we don't know their scope, so we assume // it's for *.domain.com. ihost := canonicalAddr(initial) dhost := canonicalAddr(dest) return isDomainOrSubdomain(dhost, ihost) } // All other headers are copied: return true } // isDomainOrSubdomain reports whether sub is a subdomain (or exact // match) of the parent domain. // // Both domains must already be in canonical form. func isDomainOrSubdomain(sub, parent string) bool { if sub == parent { return true } // If sub is "foo.example.com" and parent is "example.com", // that means sub must end in "."+parent. // Do it without allocating. if !strings.HasSuffix(sub, parent) { return false } return sub[len(sub)-len(parent)-1] == '.' } func stripPassword(u *url.URL) string { _, passSet := u.User.Password() if passSet { return strings.Replace(u.String(), u.User.String()+"@", u.User.Username()+":***@", 1) } return u.String() }
false } // Err
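Editor's sketch (not part of client.go above): a minimal custom RoundTripper satisfying the contract documented in the interface comment. The type headerRoundTripper and the header name are hypothetical; only the exported net/http API is assumed.

package main

import (
	"fmt"
	"net/http"
)

// headerRoundTripper injects one fixed header and delegates the actual
// transaction to an inner RoundTripper. Per the contract above, it does not
// interpret the response and never mutates the caller's request: it works on
// a clone whose Header map is deep-copied by Request.Clone.
type headerRoundTripper struct {
	inner      http.RoundTripper
	key, value string
}

func (t *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	clone := req.Clone(req.Context())
	clone.Header.Set(t.key, t.value)
	return t.inner.RoundTrip(clone)
}

func main() {
	client := &http.Client{Transport: &headerRoundTripper{
		inner: http.DefaultTransport,
		key:   "X-Example",
		value: "1",
	}}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close() // callers must close the Body, as documented above
	fmt.Println("status:", resp.Status)
}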
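Editor's sketch (not part of client.go): how the deadline plumbing in send and setRequestCancel surfaces to callers — a Client.Timeout expiry arrives as a *url.Error whose Timeout method reports true, per the Get documentation. The endpoint is hypothetical.

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

func main() {
	client := &http.Client{Timeout: 50 * time.Millisecond}
	_, err := client.Get("https://example.com/slow") // hypothetical slow endpoint
	if err != nil {
		var uerr *url.Error
		if errors.As(err, &uerr) && uerr.Timeout() {
			// This is the "(Client.Timeout exceeded while awaiting headers)"
			// error assembled in Client.do above.
			fmt.Println("timed out:", uerr)
			return
		}
		fmt.Println("other error:", err)
	}
}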
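Editor's sketch (not part of client.go): using the ErrUseLastResponse sentinel honored by the redirect loop in Client.do to inspect a 3xx response instead of following it. The URL is hypothetical.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{
		// ErrUseLastResponse is checked in Client.do's redirect loop: the
		// next hop is not sent and the 3xx response is returned with its
		// body still open.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	resp, err := client.Get("https://example.com/redirect") // hypothetical URL
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode, resp.Header.Get("Location"))
}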
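Editor's sketch (not part of client.go): the unexported basicAuth helper above is what the URL-userinfo branch of send uses, and Request.SetBasicAuth produces the same value; the two encodings below match.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("user", "pass")

	// The same value built by hand: base64("user" + ":" + "pass"),
	// deliberately not URL-encoded, per the RFC 2617 quote above.
	manual := "Basic " + base64.StdEncoding.EncodeToString([]byte("user:pass"))

	fmt.Println(req.Header.Get("Authorization") == manual) // true
}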
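Editor's sketch (not part of client.go): why redirectBehavior refuses to follow a 307/308 when GetBody is nil. NewRequest populates GetBody for *bytes.Buffer, *bytes.Reader, and *strings.Reader bodies, so those requests can be replayed across such redirects. The URL is hypothetical.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	req, err := http.NewRequest("POST", "https://example.com/submit", strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	// Non-nil because strings.Reader is one of the body types NewRequest
	// recognizes, so a 307/308 redirect can re-send the body.
	fmt.Println("GetBody set:", req.GetBody != nil)

	body, _ := req.GetBody() // a fresh copy of the body, as used on redirect
	buf := make([]byte, 7)
	n, _ := body.Read(buf)
	fmt.Println("replayed body:", string(buf[:n]))
}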
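Editor's sketch (not part of client.go): the suffix check in isDomainOrSubdomain, reproduced standalone. Auth and cookie headers are forwarded on redirect only when this reports true for the destination host.

package main

import (
	"fmt"
	"strings"
)

// Copy of the suffix check above, using the same canonical host:port form
// that canonicalAddr produces.
func isDomainOrSubdomain(sub, parent string) bool {
	if sub == parent {
		return true
	}
	if !strings.HasSuffix(sub, parent) {
		return false
	}
	return sub[len(sub)-len(parent)-1] == '.'
}

func main() {
	fmt.Println(isDomainOrSubdomain("sub.foo.com:80", "foo.com:80")) // true: dot boundary
	fmt.Println(isDomainOrSubdomain("notfoo.com:80", "foo.com:80")) // false: no dot boundary
	fmt.Println(isDomainOrSubdomain("foo.com:80", "sub.foo.com:80")) // false: not a suffix
}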
util.rs
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. use std::sync::{mpsc, Arc}; use std::time::Duration; use std::thread; use std::path::Path; use tempdir::TempDir; use rocksdb::{CompactionJobInfo, DB}; use protobuf; use kvproto::metapb::{self, RegionEpoch}; use kvproto::raft_cmdpb::{AdminCmdType, AdminRequest, CmdType, RaftCmdRequest, RaftCmdResponse, Request, StatusCmdType, StatusRequest}; use kvproto::pdpb::{ChangePeer, RegionHeartbeatResponse, TransferLeader}; use raft::eraftpb::ConfChangeType; use tikv::raftstore::store::*; use tikv::raftstore::{Error, Result}; use tikv::server::Config as ServerConfig; use tikv::server::readpool::Config as ReadPoolInstanceConfig; use tikv::storage::{Config as StorageConfig, CF_DEFAULT}; use tikv::util::escape; use tikv::util::rocksdb::{self, CompactionListener}; use tikv::util::config::*; use tikv::config::{ReadPoolConfig, TiKvConfig}; use tikv::util::transport::SendCh; use tikv::raftstore::store::Msg as StoreMsg; use super::cluster::{Cluster, Simulator}; pub use tikv::raftstore::store::util::find_peer; pub const MAX_LEADER_LEASE: u64 = 250; // 250ms pub fn must_get(engine: &Arc<DB>, cf: &str, key: &[u8], value: Option<&[u8]>) { for _ in 1..300 { let res = engine.get_value_cf(cf, &keys::data_key(key)).unwrap(); if value.is_some() && res.is_some() { assert_eq!(value.unwrap(), &*res.unwrap()); return; } if value.is_none() && res.is_none() { return; } thread::sleep(Duration::from_millis(20)); } debug!("last try to get {}", escape(key)); let res = engine.get_value_cf(cf, &keys::data_key(key)).unwrap(); if value.is_none() && res.is_none() || value.is_some() && res.is_some() && value.unwrap() == &*res.unwrap() { return; } panic!( "can't get value {:?} for key {:?}", value.map(escape), escape(key) ) } pub fn must_get_equal(engine: &Arc<DB>, key: &[u8], value: &[u8]) { must_get(engine, "default", key, Some(value)); } pub fn must_get_none(engine: &Arc<DB>, key: &[u8]) { must_get(engine, "default", key, None); } pub fn must_get_cf_equal(engine: &Arc<DB>, cf: &str, key: &[u8], value: &[u8]) { must_get(engine, cf, key, Some(value)); } pub fn must_get_cf_none(engine: &Arc<DB>, cf: &str, key: &[u8]) { must_get(engine, cf, key, None); } pub fn new_store_cfg() -> Config { Config { sync_log: false, raft_base_tick_interval: ReadableDuration::millis(10), raft_heartbeat_ticks: 2, raft_election_timeout_ticks: 25, raft_log_gc_tick_interval: ReadableDuration::millis(100), raft_log_gc_threshold: 1, // Use a value of 3 seconds as max_leader_missing_duration just for test. // In production environment, the value of max_leader_missing_duration // should be configured far beyond the election timeout. max_leader_missing_duration: ReadableDuration::secs(3), // Use a value of 2 seconds as abnormal_leader_missing_duration just for a valid config. 
abnormal_leader_missing_duration: ReadableDuration::secs(2), pd_heartbeat_tick_interval: ReadableDuration::millis(20), region_split_check_diff: ReadableSize(10000), report_region_flow_interval: ReadableDuration::millis(100), raft_store_max_leader_lease: ReadableDuration::millis(MAX_LEADER_LEASE), allow_remove_leader: true, ..Config::default() } } pub fn new_server_config(cluster_id: u64) -> ServerConfig { ServerConfig { cluster_id, addr: "127.0.0.1:0".to_owned(), grpc_concurrency: 1, // Since a connection selection algorithm is involved, it may be // worth using 2 or a larger value here. grpc_raft_conn_num: 1, end_point_concurrency: 1, ..ServerConfig::default() } } pub fn new_readpool_cfg() -> ReadPoolConfig { ReadPoolConfig { storage: ReadPoolInstanceConfig::default_for_test(), } } pub fn new_tikv_config(cluster_id: u64) -> TiKvConfig { TiKvConfig { storage: StorageConfig { scheduler_worker_pool_size: 1,
..StorageConfig::default() }, server: new_server_config(cluster_id), raft_store: new_store_cfg(), readpool: new_readpool_cfg(), ..TiKvConfig::default() } } // Create a base request. pub fn new_base_request(region_id: u64, epoch: RegionEpoch, read_quorum: bool) -> RaftCmdRequest { let mut req = RaftCmdRequest::new(); req.mut_header().set_region_id(region_id); req.mut_header().set_region_epoch(epoch); req.mut_header().set_read_quorum(read_quorum); req } pub fn new_request( region_id: u64, epoch: RegionEpoch, requests: Vec<Request>, read_quorum: bool, ) -> RaftCmdRequest { let mut req = new_base_request(region_id, epoch, read_quorum); req.set_requests(protobuf::RepeatedField::from_vec(requests)); req } pub fn new_put_cmd(key: &[u8], value: &[u8]) -> Request { let mut cmd = Request::new(); cmd.set_cmd_type(CmdType::Put); cmd.mut_put().set_key(key.to_vec()); cmd.mut_put().set_value(value.to_vec()); cmd } pub fn new_put_cf_cmd(cf: &str, key: &[u8], value: &[u8]) -> Request { let mut cmd = Request::new(); cmd.set_cmd_type(CmdType::Put); cmd.mut_put().set_key(key.to_vec()); cmd.mut_put().set_value(value.to_vec()); cmd.mut_put().set_cf(cf.to_string()); cmd } pub fn new_get_cmd(key: &[u8]) -> Request { let mut cmd = Request::new(); cmd.set_cmd_type(CmdType::Get); cmd.mut_get().set_key(key.to_vec()); cmd } pub fn new_delete_cmd(cf: &str, key: &[u8]) -> Request { let mut cmd = Request::new(); cmd.set_cmd_type(CmdType::Delete); cmd.mut_delete().set_key(key.to_vec()); cmd.mut_delete().set_cf(cf.to_string()); cmd } pub fn new_delete_range_cmd(cf: &str, start: &[u8], end: &[u8]) -> Request { let mut cmd = Request::new(); cmd.set_cmd_type(CmdType::DeleteRange); cmd.mut_delete_range().set_start_key(start.to_vec()); cmd.mut_delete_range().set_end_key(end.to_vec()); cmd.mut_delete_range().set_cf(cf.to_string()); cmd } pub fn new_status_request( region_id: u64, peer: metapb::Peer, request: StatusRequest, ) -> RaftCmdRequest { let mut req = new_base_request(region_id, RegionEpoch::new(), false); req.mut_header().set_peer(peer); req.set_status_request(request); req } pub fn new_region_detail_cmd() -> StatusRequest { let mut cmd = StatusRequest::new(); cmd.set_cmd_type(StatusCmdType::RegionDetail); cmd } pub fn new_region_leader_cmd() -> StatusRequest { let mut cmd = StatusRequest::new(); cmd.set_cmd_type(StatusCmdType::RegionLeader); cmd } pub fn new_admin_request( region_id: u64, epoch: &RegionEpoch, request: AdminRequest, ) -> RaftCmdRequest { let mut req = new_base_request(region_id, epoch.clone(), false); req.set_admin_request(request); req } pub fn new_change_peer_request(change_type: ConfChangeType, peer: metapb::Peer) -> AdminRequest { let mut req = AdminRequest::new(); req.set_cmd_type(AdminCmdType::ChangePeer); req.mut_change_peer().set_change_type(change_type); req.mut_change_peer().set_peer(peer); req } pub fn new_transfer_leader_cmd(peer: metapb::Peer) -> AdminRequest { let mut cmd = AdminRequest::new(); cmd.set_cmd_type(AdminCmdType::TransferLeader); cmd.mut_transfer_leader().set_peer(peer); cmd } pub fn new_peer(store_id: u64, peer_id: u64) -> metapb::Peer { let mut peer = metapb::Peer::new(); peer.set_store_id(store_id); peer.set_id(peer_id); peer } pub fn new_store(store_id: u64, addr: String) -> metapb::Store { let mut store = metapb::Store::new(); store.set_id(store_id); store.set_address(addr); store } pub fn sleep_ms(ms: u64) { thread::sleep(Duration::from_millis(ms)); } pub fn is_error_response(resp: &RaftCmdResponse) -> bool { resp.get_header().has_error() } pub fn new_pd_change_peer( 
change_type: ConfChangeType, peer: metapb::Peer, ) -> RegionHeartbeatResponse { let mut change_peer = ChangePeer::new(); change_peer.set_change_type(change_type); change_peer.set_peer(peer); let mut resp = RegionHeartbeatResponse::new(); resp.set_change_peer(change_peer); resp } pub fn new_pd_transfer_leader(peer: metapb::Peer) -> RegionHeartbeatResponse { let mut transfer_leader = TransferLeader::new(); transfer_leader.set_peer(peer); let mut resp = RegionHeartbeatResponse::new(); resp.set_transfer_leader(transfer_leader); resp } pub fn make_cb(cmd: &RaftCmdRequest) -> (Callback, mpsc::Receiver<RaftCmdResponse>) { let mut is_read = cmd.has_status_request(); let mut is_write = cmd.has_admin_request(); for req in cmd.get_requests() { match req.get_cmd_type() { CmdType::Get | CmdType::Snap => is_read = true, CmdType::Put | CmdType::Delete | CmdType::DeleteRange | CmdType::IngestSST => { is_write = true } CmdType::Invalid | CmdType::Prewrite => panic!("Invalid RaftCmdRequest: {:?}", cmd), } } assert!(is_read ^ is_write, "Invalid RaftCmdRequest: {:?}", cmd); let (tx, rx) = mpsc::channel(); let cb = if is_read { Callback::Read(Box::new(move |resp: ReadResponse| { // We don't actually care about the error here. let _ = tx.send(resp.response); })) } else { Callback::Write(Box::new(move |resp: WriteResponse| { // We don't actually care about the error here. let _ = tx.send(resp.response); })) }; (cb, rx) } // Issue a read request on the specified peer. pub fn read_on_peer<T: Simulator>( cluster: &mut Cluster<T>, peer: metapb::Peer, region: metapb::Region, key: &[u8], timeout: Duration, ) -> Result<Vec<u8>> { let mut request = new_request( region.get_id(), region.get_region_epoch().clone(), vec![new_get_cmd(key)], false, ); request.mut_header().set_peer(peer); let mut resp = cluster.call_command(request, timeout)?; if resp.get_header().has_error() { return Err(Error::Other(box_err!( resp.mut_header().take_error().take_message() ))); } assert_eq!(resp.get_responses().len(), 1); assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get); assert!(resp.get_responses()[0].has_get()); Ok(resp.mut_responses()[0].mut_get().take_value()) } pub fn must_read_on_peer<T: Simulator>( cluster: &mut Cluster<T>, peer: metapb::Peer, region: metapb::Region, key: &[u8], value: &[u8], ) { let timeout = Duration::from_secs(1); match read_on_peer(cluster, peer, region, key, timeout) { Ok(v) => if v != value { panic!( "read key {}, expect value {}, got {}", escape(key), escape(value), escape(&v) ) }, Err(e) => panic!("failed to read for key {}, err {:?}", escape(key), e), } } pub fn must_error_read_on_peer<T: Simulator>( cluster: &mut Cluster<T>, peer: metapb::Peer, region: metapb::Region, key: &[u8], timeout: Duration, ) { if let Ok(value) = read_on_peer(cluster, peer, region, key, timeout) { panic!( "key {}, expect error but got {}", escape(key), escape(&value) ); } } fn dummy_filter(_: &CompactionJobInfo) -> bool { true } pub fn create_test_engine( engines: Option<Engines>, tx: SendCh<StoreMsg>, cfg: &TiKvConfig, ) -> (Engines, Option<TempDir>) { // Create engine let mut path = None; let engines = match engines { Some(e) => e, None => { path = Some(TempDir::new("test_cluster").unwrap()); let mut kv_db_opt = cfg.rocksdb.build_opt(); let compacted_handler = box move |event| { tx.send(StoreMsg::CompactedEvent(event)).unwrap(); }; kv_db_opt.add_event_listener(CompactionListener::new( compacted_handler, Some(dummy_filter), )); let kv_cfs_opt = cfg.rocksdb.build_cf_opts(); let engine = Arc::new( rocksdb::new_engine_opt(
path.as_ref().unwrap().path().to_str().unwrap(), kv_db_opt, kv_cfs_opt, ).unwrap(), ); let raft_path = path.as_ref().unwrap().path().join(Path::new("raft")); let raft_engine = Arc::new( rocksdb::new_engine(raft_path.to_str().unwrap(), &[CF_DEFAULT], None).unwrap(), ); Engines::new(engine, raft_engine) } }; (engines, path) } pub fn configure_for_snapshot<T: Simulator>(cluster: &mut Cluster<T>) { // truncate the log quickly so that we can force sending snapshot. cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20); cluster.cfg.raft_store.raft_log_gc_count_limit = 2; cluster.cfg.raft_store.merge_max_log_gap = 1; cluster.cfg.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50); }
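Editor's sketch (not part of util.rs): the callback-plus-channel shape used by make_cb above, reduced to std-only stand-ins so it runs standalone. Response, Callback, and make_cb here are simplified, hypothetical versions of the real kvproto/raftstore types.

use std::sync::mpsc;
use std::thread;
use std::time::Duration;

// Stand-ins for RaftCmdResponse and the raftstore Callback.
struct Response(u64);
type Callback = Box<dyn FnOnce(Response) + Send>;

fn make_cb() -> (Callback, mpsc::Receiver<Response>) {
    let (tx, rx) = mpsc::channel();
    let cb: Callback = Box::new(move |resp| {
        // As in make_cb above, a send error only means the test stopped
        // waiting; ignore it.
        let _ = tx.send(resp);
    });
    (cb, rx)
}

fn main() {
    let (cb, rx) = make_cb();
    // Simulate the store applying the command on another thread.
    thread::spawn(move || cb(Response(42)));
    let resp = rx.recv_timeout(Duration::from_secs(1)).unwrap();
    assert_eq!(resp.0, 42);
    println!("got response {}", resp.0);
}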
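Editor's sketch (not part of util.rs): the bounded poll-and-sleep pattern that must_get above relies on, extracted into a generic std-only helper; retry_until is a hypothetical name.

use std::thread;
use std::time::Duration;

/// Retry `check` up to `attempts` times, sleeping between tries, and panic
/// with `msg` if it never succeeds — mirroring must_get's final panic.
fn retry_until(attempts: usize, interval: Duration, msg: &str, mut check: impl FnMut() -> bool) {
    for _ in 0..attempts {
        if check() {
            return;
        }
        thread::sleep(interval);
    }
    panic!("condition not met after {} attempts: {}", attempts, msg);
}

fn main() {
    let mut counter = 0;
    retry_until(300, Duration::from_millis(1), "counter reached 5", || {
        counter += 1;
        counter >= 5
    });
    println!("done after {} polls", counter);
}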
move_errors.rs
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed}; use rustc_middle::mir::*; use rustc_middle::ty; use rustc_mir_dataflow::move_paths::{ IllegalMoveOrigin, IllegalMoveOriginKind, LookupResult, MoveError, MovePathIndex, }; use rustc_span::{sym, Span}; use crate::diagnostics::UseSpans; use crate::prefixes::PrefixSet; use crate::MirBorrowckCtxt; // Often when desugaring a pattern match we may have many individual moves in // MIR that are all part of one operation from the user's point-of-view. For // example: // // let (x, y) = foo() // // would move x from the 0 field of some temporary, and y from the 1 field. We // group such errors together for cleaner error reporting. // // Errors are kept separate if they are from places with different parent move // paths. For example, this generates two errors: // // let (&x, &y) = (&String::new(), &String::new()); #[derive(Debug)] enum GroupedMoveError<'tcx> { // Place expression can't be moved from, // e.g., match x[0] { s => (), } where x: &[String] MovesFromPlace { original_path: Place<'tcx>, span: Span, move_from: Place<'tcx>, kind: IllegalMoveOriginKind<'tcx>, binds_to: Vec<Local>, }, // Part of a value expression can't be moved from, // e.g., match &String::new() { &x => (), } MovesFromValue { original_path: Place<'tcx>, span: Span, move_from: MovePathIndex, kind: IllegalMoveOriginKind<'tcx>, binds_to: Vec<Local>, }, // Everything that isn't from pattern matching. OtherIllegalMove { original_path: Place<'tcx>, use_spans: UseSpans<'tcx>, kind: IllegalMoveOriginKind<'tcx>, }, } impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { pub(crate) fn report_move_errors(&mut self, move_errors: Vec<(Place<'tcx>, MoveError<'tcx>)>) { let grouped_errors = self.group_move_errors(move_errors); for error in grouped_errors { self.report(error); } } fn group_move_errors( &self, errors: Vec<(Place<'tcx>, MoveError<'tcx>)>, ) -> Vec<GroupedMoveError<'tcx>> { let mut grouped_errors = Vec::new(); for (original_path, error) in errors { self.append_to_grouped_errors(&mut grouped_errors, original_path, error); } grouped_errors } fn append_to_grouped_errors( &self, grouped_errors: &mut Vec<GroupedMoveError<'tcx>>, original_path: Place<'tcx>, error: MoveError<'tcx>, ) { match error { MoveError::UnionMove { .. } => { unimplemented!("don't know how to report union move errors yet.") } MoveError::IllegalMove { cannot_move_out_of: IllegalMoveOrigin { location, kind } } => { // Note: that the only time we assign a place isn't a temporary // to a user variable is when initializing it. // If that ever stops being the case, then the ever initialized // flow could be used. if let Some(StatementKind::Assign(box ( place, Rvalue::Use(Operand::Move(move_from)), ))) = self.body.basic_blocks()[location.block] .statements .get(location.statement_index) .map(|stmt| &stmt.kind) { if let Some(local) = place.as_local() { let local_decl = &self.body.local_decls[local]; // opt_match_place is the // match_span is the span of the expression being matched on // match *x.y { ... } match_place is Some(*x.y) // ^^^^ match_span is the span of *x.y // // opt_match_place is None for let [mut] x = ... 
statements, // whether or not the right-hand side is a place expression if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var( VarBindingForm { opt_match_place: Some((opt_match_place, match_span)), binding_mode: _, opt_ty_info: _, pat_span: _, }, )))) = local_decl.local_info { let stmt_source_info = self.body.source_info(location); self.append_binding_error( grouped_errors, kind, original_path, *move_from, local, opt_match_place, match_span, stmt_source_info.span, ); return; } } } let move_spans = self.move_spans(original_path.as_ref(), location); grouped_errors.push(GroupedMoveError::OtherIllegalMove { use_spans: move_spans, original_path, kind, }); } } } fn
( &self, grouped_errors: &mut Vec<GroupedMoveError<'tcx>>, kind: IllegalMoveOriginKind<'tcx>, original_path: Place<'tcx>, move_from: Place<'tcx>, bind_to: Local, match_place: Option<Place<'tcx>>, match_span: Span, statement_span: Span, ) { debug!("append_binding_error(match_place={:?}, match_span={:?})", match_place, match_span); let from_simple_let = match_place.is_none(); let match_place = match_place.unwrap_or(move_from); match self.move_data.rev_lookup.find(match_place.as_ref()) { // Error with the match place LookupResult::Parent(_) => { for ge in &mut *grouped_errors { if let GroupedMoveError::MovesFromPlace { span, binds_to, .. } = ge { if match_span == *span { debug!("appending local({:?}) to list", bind_to); if !binds_to.is_empty() { binds_to.push(bind_to); } return; } } } debug!("found a new move error location"); // Don't need to point to x in let x = ... . let (binds_to, span) = if from_simple_let { (vec![], statement_span) } else { (vec![bind_to], match_span) }; grouped_errors.push(GroupedMoveError::MovesFromPlace { span, move_from, original_path, kind, binds_to, }); } // Error with the pattern LookupResult::Exact(_) => { let LookupResult::Parent(Some(mpi)) = self.move_data.rev_lookup.find(move_from.as_ref()) else { // move_from should be a projection from match_place. unreachable!("Probably not unreachable..."); }; for ge in &mut *grouped_errors { if let GroupedMoveError::MovesFromValue { span, move_from: other_mpi, binds_to, .. } = ge { if match_span == *span && mpi == *other_mpi { debug!("appending local({:?}) to list", bind_to); binds_to.push(bind_to); return; } } } debug!("found a new move error location"); grouped_errors.push(GroupedMoveError::MovesFromValue { span: match_span, move_from: mpi, original_path, kind, binds_to: vec![bind_to], }); } }; } fn report(&mut self, error: GroupedMoveError<'tcx>) { let (mut err, err_span) = { let (span, use_spans, original_path, kind): ( Span, Option<UseSpans<'tcx>>, Place<'tcx>, &IllegalMoveOriginKind<'_>, ) = match error { GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. } | GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. 
} => { (span, None, original_path, kind) } GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => { (use_spans.args_or_use(), Some(use_spans), original_path, kind) } }; debug!( "report: original_path={:?} span={:?}, kind={:?} \ original_path.is_upvar_field_projection={:?}", original_path, span, kind, self.is_upvar_field_projection(original_path.as_ref()) ); ( match kind { &IllegalMoveOriginKind::BorrowedContent { target_place } => self .report_cannot_move_from_borrowed_content( original_path, target_place, span, use_spans, ), &IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => { self.cannot_move_out_of_interior_of_drop(span, ty) } &IllegalMoveOriginKind::InteriorOfSliceOrArray { ty, is_index } => { self.cannot_move_out_of_interior_noncopy(span, ty, Some(is_index)) } }, span, ) }; self.add_move_hints(error, &mut err, err_span); self.buffer_error(err); } fn report_cannot_move_from_static( &mut self, place: Place<'tcx>, span: Span, ) -> DiagnosticBuilder<'a, ErrorGuaranteed> { let description = if place.projection.len() == 1 { format!("static item {}", self.describe_any_place(place.as_ref())) } else { let base_static = PlaceRef { local: place.local, projection: &[ProjectionElem::Deref] }; format!( "{} as {} is a static item", self.describe_any_place(place.as_ref()), self.describe_any_place(base_static), ) }; self.cannot_move_out_of(span, &description) } fn report_cannot_move_from_borrowed_content( &mut self, move_place: Place<'tcx>, deref_target_place: Place<'tcx>, span: Span, use_spans: Option<UseSpans<'tcx>>, ) -> DiagnosticBuilder<'a, ErrorGuaranteed> { // Inspect the type of the content behind the // borrow to provide feedback about why this // was a move rather than a copy. let ty = deref_target_place.ty(self.body, self.infcx.tcx).ty; let upvar_field = self .prefixes(move_place.as_ref(), PrefixSet::All) .find_map(|p| self.is_upvar_field_projection(p)); let deref_base = match deref_target_place.projection.as_ref() { [proj_base @ .., ProjectionElem::Deref] => { PlaceRef { local: deref_target_place.local, projection: &proj_base } } _ => bug!("deref_target_place is not a deref projection"), }; if let PlaceRef { local, projection: [] } = deref_base { let decl = &self.body.local_decls[local]; if decl.is_ref_for_guard() { let mut err = self.cannot_move_out_of( span, &format!("`{}` in pattern guard", self.local_names[local].unwrap()), ); err.note( "variables bound in patterns cannot be moved from \ until after the end of the pattern guard", ); return err; } else if decl.is_ref_to_static() { return self.report_cannot_move_from_static(move_place, span); } } debug!("report: ty={:?}", ty); let mut err = match ty.kind() { ty::Array(..) | ty::Slice(..) 
=> { self.cannot_move_out_of_interior_noncopy(span, ty, None) } ty::Closure(def_id, closure_substs) if def_id.as_local() == Some(self.mir_def_id()) && upvar_field.is_some() => { let closure_kind_ty = closure_substs.as_closure().kind_ty(); let closure_kind = match closure_kind_ty.to_opt_closure_kind() { Some(kind @ (ty::ClosureKind::Fn | ty::ClosureKind::FnMut)) => kind, Some(ty::ClosureKind::FnOnce) => { bug!("closure kind does not match first argument type") } None => bug!("closure kind not inferred by borrowck"), }; let capture_description = format!("captured variable in an `{}` closure", closure_kind); let upvar = &self.upvars[upvar_field.unwrap().index()]; let upvar_hir_id = upvar.place.get_root_variable(); let upvar_name = upvar.place.to_string(self.infcx.tcx); let upvar_span = self.infcx.tcx.hir().span(upvar_hir_id); let place_name = self.describe_any_place(move_place.as_ref()); let place_description = if self.is_upvar_field_projection(move_place.as_ref()).is_some() { format!("{}, a {}", place_name, capture_description) } else { format!("{}, as `{}` is a {}", place_name, upvar_name, capture_description) }; debug!( "report: closure_kind_ty={:?} closure_kind={:?} place_description={:?}", closure_kind_ty, closure_kind, place_description, ); let mut diag = self.cannot_move_out_of(span, &place_description); diag.span_label(upvar_span, "captured outer variable"); diag.span_label( self.body.span, format!("captured by this `{}` closure", closure_kind), ); diag } _ => { let source = self.borrowed_content_source(deref_base); match (self.describe_place(move_place.as_ref()), source.describe_for_named_place()) { (Some(place_desc), Some(source_desc)) => self.cannot_move_out_of( span, &format!("`{}` which is behind a {}", place_desc, source_desc), ), (_, _) => self.cannot_move_out_of( span, &source.describe_for_unnamed_place(self.infcx.tcx), ), } } }; let ty = move_place.ty(self.body, self.infcx.tcx).ty; let def_id = match *ty.kind() { ty::Adt(self_def, _) => self_def.did, ty::Foreign(def_id) | ty::FnDef(def_id, _) | ty::Closure(def_id, _) | ty::Generator(def_id, ..) | ty::Opaque(def_id, _) => def_id, _ => return err, }; let diag_name = self.infcx.tcx.get_diagnostic_name(def_id); if matches!(diag_name, Some(sym::Option | sym::Result)) && use_spans.map_or(true, |v| !v.for_closure()) { err.span_suggestion_verbose( span.shrink_to_hi(), &format!("consider borrowing the `{}`'s content", diag_name.unwrap()), ".as_ref()".to_string(), Applicability::MaybeIncorrect, ); } else if let Some(use_spans) = use_spans { self.explain_captures( &mut err, span, span, use_spans, move_place, None, "", "", "", false, true, ); } err } fn add_move_hints(&self, error: GroupedMoveError<'tcx>, err: &mut Diagnostic, span: Span) { match error { GroupedMoveError::MovesFromPlace { mut binds_to, move_from, .. } => { if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) { err.span_suggestion( span, "consider borrowing here", format!("&{}", snippet), Applicability::Unspecified, ); } if binds_to.is_empty() { let place_ty = move_from.ty(self.body, self.infcx.tcx).ty; let place_desc = match self.describe_place(move_from.as_ref()) { Some(desc) => format!("`{}`", desc), None => "value".to_string(), }; self.note_type_does_not_implement_copy( err, &place_desc, place_ty, Some(span), "", ); } else { binds_to.sort(); binds_to.dedup(); self.add_move_error_details(err, &binds_to); } } GroupedMoveError::MovesFromValue { mut binds_to, .. 
} => { binds_to.sort(); binds_to.dedup(); self.add_move_error_suggestions(err, &binds_to); self.add_move_error_details(err, &binds_to); } // No binding. Nothing to suggest. GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => { let span = use_spans.var_or_use(); let place_ty = original_path.ty(self.body, self.infcx.tcx).ty; let place_desc = match self.describe_place(original_path.as_ref()) { Some(desc) => format!("`{}`", desc), None => "value".to_string(), }; self.note_type_does_not_implement_copy(err, &place_desc, place_ty, Some(span), ""); use_spans.args_span_label(err, format!("move out of {} occurs here", place_desc)); } } } fn add_move_error_suggestions(&self, err: &mut Diagnostic, binds_to: &[Local]) { let mut suggestions: Vec<(Span, &str, String)> = Vec::new(); for local in binds_to { let bind_to = &self.body.local_decls[*local]; if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var( VarBindingForm { pat_span, .. }, )))) = bind_to.local_info { if let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span) { if let Some(stripped) = pat_snippet.strip_prefix('&') { let pat_snippet = stripped.trim_start(); let (suggestion, to_remove) = if pat_snippet.starts_with("mut") && pat_snippet["mut".len()..].starts_with(rustc_lexer::is_whitespace) { (pat_snippet["mut".len()..].trim_start(), "&mut") } else { (pat_snippet, "&") }; suggestions.push((pat_span, to_remove, suggestion.to_owned())); } } } } suggestions.sort_unstable_by_key(|&(span, _, _)| span); suggestions.dedup_by_key(|&mut (span, _, _)| span); for (span, to_remove, suggestion) in suggestions { err.span_suggestion( span, &format!("consider removing the `{}`", to_remove), suggestion, Applicability::MachineApplicable, ); } } fn add_move_error_details(&self, err: &mut Diagnostic, binds_to: &[Local]) { for (j, local) in binds_to.iter().enumerate() { let bind_to = &self.body.local_decls[*local]; let binding_span = bind_to.source_info.span; if j == 0 { err.span_label(binding_span, "data moved here"); } else { err.span_label(binding_span, "...and here"); } if binds_to.len() == 1 { self.note_type_does_not_implement_copy( err, &format!("`{}`", self.local_names[*local].unwrap()), bind_to.ty, Some(binding_span), "", ); } } if binds_to.len() > 1 { err.note( "move occurs because these variables have types that \ don't implement the `Copy` trait", ); } } }
append_binding_error
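Editor's sketch (not part of move_errors.rs): the group-then-report shape used by report_move_errors above. Individual errors sharing a key (here a span stand-in) are folded into one grouped entry, so a single user-level operation yields a single diagnostic. All types are simplified stand-ins.

#[derive(Debug, PartialEq)]
struct GroupedError {
    span: u32,          // stand-in for the match Span
    binds_to: Vec<u32>, // stand-in for the bound Locals
}

fn group_errors(errors: Vec<(u32, u32)>) -> Vec<GroupedError> {
    let mut grouped: Vec<GroupedError> = Vec::new();
    for (span, local) in errors {
        // Same probing loop as append_binding_error: extend an existing
        // group with a matching span, otherwise start a new one.
        if let Some(g) = grouped.iter_mut().find(|g| g.span == span) {
            g.binds_to.push(local);
            continue;
        }
        grouped.push(GroupedError { span, binds_to: vec![local] });
    }
    grouped
}

fn main() {
    // Two moves out of the same pattern (span 7) collapse into one group.
    let grouped = group_errors(vec![(7, 1), (7, 2), (9, 3)]);
    assert_eq!(grouped.len(), 2);
    assert_eq!(grouped[0].binds_to, vec![1, 2]);
    println!("{:?}", grouped);
}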
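Editor's sketch (not part of move_errors.rs): the snippet rewriting performed by add_move_error_suggestions above — strip a leading `&` or `&mut` from a pattern snippet to build a machine-applicable suggestion. Standalone, std-only; strip_ref is a hypothetical name.

fn strip_ref(pat_snippet: &str) -> Option<(&'static str, &str)> {
    let rest = pat_snippet.strip_prefix('&')?.trim_start();
    if let Some(after_mut) = rest.strip_prefix("mut") {
        // Only treat "mut" as the keyword if it is followed by whitespace,
        // matching the is_whitespace check above.
        if after_mut.starts_with(char::is_whitespace) {
            return Some(("&mut", after_mut.trim_start()));
        }
    }
    Some(("&", rest))
}

fn main() {
    assert_eq!(strip_ref("&x"), Some(("&", "x")));
    assert_eq!(strip_ref("&mut x"), Some(("&mut", "x")));
    assert_eq!(strip_ref("&mutant"), Some(("&", "mutant"))); // "mut" prefix alone is not the keyword
    assert_eq!(strip_ref("x"), None);
    println!("all pattern rewrites behave as expected");
}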