// combinelatest.js

/**
 * Merges the specified observable sequences into one observable sequence by using the
 * selector function whenever any of the observable sequences or Promises produces an element.
 *
 * @example
 * 1 - obs = Rx.Observable.combineLatest(obs1, obs2, obs3, function (o1, o2, o3) { return o1 + o2 + o3; });
 * 2 - obs = Rx.Observable.combineLatest([obs1, obs2, obs3], function (o1, o2, o3) { return o1 + o2 + o3; });
 * @returns {Observable} An observable sequence containing the result of combining elements
 * of the sources using the specified result selector function.
 */
var combineLatest = Observable.combineLatest = function () {
  var args = slice.call(arguments), resultSelector = args.pop();

  if (Array.isArray(args[0])) {
    args = args[0];
  }

  return new AnonymousObservable(function (observer) {
    var falseFactory = function () { return false; },
      n = args.length,
      hasValue = arrayInitialize(n, falseFactory),
      hasValueAll = false,
      isDone = arrayInitialize(n, falseFactory),
      values = new Array(n);

    function next(i) {
      var res;
      hasValue[i] = true;
      if (hasValueAll || (hasValueAll = hasValue.every(identity))) {
        try {
          res = resultSelector.apply(null, values);
        } catch (ex) {
          observer.onError(ex);
          return;
        }
        observer.onNext(res);
      } else if (isDone.filter(function (x, j) { return j !== i; }).every(identity)) {
        observer.onCompleted();
      }
    }

    function done(i) {
      isDone[i] = true;
      if (isDone.every(identity)) {
        observer.onCompleted();
      }
    }

    var subscriptions = new Array(n);
    for (var idx = 0; idx < n; idx++) {
      (function (i) {
        var source = args[i],
          sad = new SingleAssignmentDisposable();
        isPromise(source) && (source = observableFromPromise(source));
        sad.setDisposable(source.subscribe(function (x) {
          values[i] = x;
          next(i);
        }, observer.onError.bind(observer), function () {
          done(i);
        }));
        subscriptions[i] = sad;
      }(idx));
    }

    return new CompositeDisposable(subscriptions);
  });
};
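// Usage sketch: this assumes an RxJS 4-era build where the operator above is
// exposed as `Rx.Observable.combineLatest` and where `interval` and `take`
// are available; the intervals and selector are illustrative only.
var combined = Rx.Observable.combineLatest(
  Rx.Observable.interval(100),
  Rx.Observable.interval(150),
  function (a, b) { return 'first: ' + a + ', second: ' + b; }
);
// Emits whenever either source emits, once both have produced a value.
combined.take(4).subscribe(function (x) { console.log(x); });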
# ops.py

import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable, host_from_gpu,
                                           gpu_contiguous, HostFromGpu, gpu_alloc_empty)
from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConv, GpuDnnConvGradI, dnn_conv, dnn_pool
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.nnet.abstract_conv import conv3d_grad_wrt_inputs
from theano.tensor.nnet import conv3d

from rng import t_rng

t_rng = RandomStreams()  # note: shadows the t_rng imported from rng above


def l2normalize(x, axis=1, e=1e-8, keepdims=True):
    return x / l2norm(x, axis=axis, e=e, keepdims=keepdims)


def l2norm(x, axis=1, e=1e-8, keepdims=True):
    return T.sqrt(T.sum(T.sqr(x), axis=axis, keepdims=keepdims) + e)


def cosine(x, y):
    d = T.dot(x, y.T)
    d /= l2norm(x).dimshuffle(0, 'x')
    d /= l2norm(y).dimshuffle('x', 0)
    return d


def euclidean(x, y, e=1e-8):
    xx = T.sqr(T.sqrt((x*x).sum(axis=1) + e))
    yy = T.sqr(T.sqrt((y*y).sum(axis=1) + e))
    dist = T.dot(x, y.T)
    dist *= -2
    dist += xx.dimshuffle(0, 'x')
    dist += yy.dimshuffle('x', 0)
    dist = T.sqrt(dist)
    return dist


def dropout(X, p=0.):
    """
    dropout using activation scaling to avoid test time weight rescaling
    """
    if p > 0:
        retain_prob = 1 - p
        X *= t_rng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
        X /= retain_prob
    return X


def conv_cond_concat(x, y):
    """
    concatenate conditioning vector on feature map axis
    """
    return T.concatenate([x, y*T.ones((x.shape[0], y.shape[1], x.shape[2], x.shape[3], x.shape[4]))], axis=1)


def batchnorm(X, g=None, b=None, u=None, s=None, a=1., e=1e-7):
    """
    batchnorm with support for not using scale and shift parameters
    as well as inference values (u and s) and partial batchnorm (via a)
    will detect and use convolutional or fully connected version
    """
    if X.ndim == 5:
        if u is not None and s is not None:
            b_u = u.dimshuffle('x', 0, 'x', 'x', 'x')
            b_s = s.dimshuffle('x', 0, 'x', 'x', 'x')
        else:
            b_u = T.mean(X, axis=[0, 2, 3, 4]).dimshuffle('x', 0, 'x', 'x', 'x')
            b_s = T.mean(T.sqr(X - b_u), axis=[0, 2, 3, 4]).dimshuffle('x', 0, 'x', 'x', 'x')
        if a != 1:
            b_u = (1. - a)*0. + a*b_u
            b_s = (1. - a)*1. + a*b_s
        X = (X - b_u) / T.sqrt(b_s + e)
        if g is not None and b is not None:
            X = X*g.dimshuffle('x', 0, 'x', 'x', 'x') + b.dimshuffle('x', 0, 'x', 'x', 'x')
    elif X.ndim == 2:
        if u is None and s is None:
            u = T.mean(X, axis=0)
            s = T.mean(T.sqr(X - u), axis=0)
        if a != 1:
            u = (1. - a)*0. + a*u
            s = (1. - a)*1. + a*s
        X = (X - u) / T.sqrt(s + e)
        if g is not None and b is not None:
            X = X*g + b
    else:
        raise NotImplementedError
    return X


def conv(X, w, input_shape=None, filter_shape=None, subsample=(2, 2, 2),
         border_mode=(1, 1, 1), conv_mode='conv', output_shape=None):
    """
    sets up dummy convolutional forward pass and uses its grad as deconv
    currently only tested/working with same padding

    input_shape: (batch size, num input feature maps, voxel height, voxel width, voxel depth)
    filter_shape: (output channels, input channels, filter height, filter width, filter depth)
    """
    if conv_mode == 'conv':
        return conv3d(
            input=X,
            filters=w,
            input_shape=input_shape,
            filter_shape=filter_shape,
            border_mode=border_mode,
            subsample=subsample,
            filter_flip=True
        )
    elif conv_mode == 'deconv':
        if output_shape is None:
            # infer the deconv output shape per spatial dim: (in - 1) * stride + filter - 2 * pad
            input_shape = (None, None,
                           (input_shape[2]-1)*subsample[0] + filter_shape[2] - 2*border_mode[0],
                           (input_shape[3]-1)*subsample[1] + filter_shape[3] - 2*border_mode[1],
                           (input_shape[4]-1)*subsample[2] + filter_shape[4] - 2*border_mode[2])
        else:
            input_shape = output_shape
        return conv3d_grad_wrt_inputs(
            output_grad=X,
            filters=w,
            input_shape=input_shape,
            filter_shape=filter_shape,
            border_mode=border_mode,
            subsample=subsample,
        )
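# Usage sketch: dropout followed by batchnorm on a 2-D activation, using the
# definitions above (assumes a working Theano install; the width of 64 and
# batch size of 16 are illustrative only).
import numpy as np

X_sym = T.matrix('X')
g = theano.shared(np.ones(64, dtype=theano.config.floatX))   # scale
b = theano.shared(np.zeros(64, dtype=theano.config.floatX))  # shift
Y_sym = batchnorm(dropout(X_sym, p=0.5), g=g, b=b)
f = theano.function([X_sym], Y_sym)
out = f(np.random.randn(16, 64).astype(theano.config.floatX))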
# figure2_2a.py

'''
Created on Dec 5, 2016

@author: paveenju
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib2tikz import save as tikz_save

import utils.functions as fn

if __name__ == '__main__':
    pass

def axes():
    plt.axhline(0, alpha=.1)
    plt.axvline(0, alpha=.1)

# input variables
dL = np.array([0.4, 0.6, 0.9])
P1, P2 = [1.0, 0.0, 0], [-1.0, 0.0, 0]

d = np.linalg.norm(np.mat(P1) - np.mat(P2))
c = d/2
A = dL/2

# data generation
x_p = np.linspace(-3, 3, 100)
y_p = np.linspace(-3, 3, 100)
x_p, y_p = np.meshgrid(x_p, y_p)
x, y, h, k = fn.linear_transformation(P1, P2, x_p, y_p)

# matplotlib
mpl.rcParams['lines.color'] = 'k'
mpl.rcParams['axes.prop_cycle'] = mpl.cycler('color', ['k'])

for a in A:
    plt.contour(x_p, y_p, ((x**2/a**2) - (y**2/(c**2-a**2)) - 1), [0], colors='b')

axes()
plt.annotate(r'$\tau_1$', xy=(0, 0), xytext=(0.67, 2.8), fontsize=20)
plt.annotate(r'$\tau_2$', xy=(0, 0), xytext=(0.9, 2.5), fontsize=20)
plt.annotate(r'$\tau_3$', xy=(0, 0), xytext=(1.2, 2.1), fontsize=20)
plt.text(1.75, 0.5,
         r'$\tau_1=0.4$' + '\n' + r'$\tau_2=0.6$' + '\n' + r'$\tau_3=0.9$',
         bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10},
         fontsize=20)

plt.plot(P1[0], P1[1], 'xr', mew=5, ms=15)
plt.plot(P2[0], P2[1], 'xr', mew=5, ms=15)

#plt.show()
tikz_save('../output/figure2_2a.tex')
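# The contours above are range-difference hyperbolas: every point on a branch
# of x^2/a^2 - y^2/(c^2 - a^2) = 1 keeps a constant distance difference
# dL = 2a to the two foci P1 and P2. A quick numerical check (assumes the
# foci at (+1, 0) and (-1, 0) used in this script; a = 0.3 is illustrative):
a_chk, c_chk = 0.3, 1.0                                   # a = dL/2, c = d/2
y_chk = 1.5
x_chk = a_chk * np.sqrt(1 + y_chk**2 / (c_chk**2 - a_chk**2))  # point on the branch
d1 = np.hypot(x_chk - 1.0, y_chk)                         # distance to P1
d2 = np.hypot(x_chk + 1.0, y_chk)                         # distance to P2
assert abs(abs(d1 - d2) - 2*a_chk) < 1e-9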
// render_backend.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! The high-level module responsible for managing the pipeline and preparing
//! commands to be issued by the `Renderer`.
//!
//! See the comment at the top of the `renderer` module for a description of
//! how these two pieces interact.

use api::{DebugFlags, BlobImageHandler, Parameter, BoolParameter};
use api::{DocumentId, ExternalScrollId, HitTestResult};
use api::{IdNamespace, PipelineId, RenderNotifier, SampledScrollOffset};
use api::{NotificationRequest, Checkpoint, QualitySettings};
use api::{PrimitiveKeyKind, RenderReasons};
use api::units::*;
use api::channel::{single_msg_channel, Sender, Receiver};
#[cfg(any(feature = "capture", feature = "replay"))]
use crate::render_api::CaptureBits;
#[cfg(feature = "replay")]
use crate::render_api::CapturedDocument;
use crate::render_api::{MemoryReport, TransactionMsg, ResourceUpdate, ApiMsg, FrameMsg, ClearCache, DebugCommand};
use crate::clip::{ClipIntern, PolygonIntern, ClipStoreScratchBuffer};
use crate::filterdata::FilterDataIntern;
#[cfg(any(feature = "capture", feature = "replay"))]
use crate::capture::CaptureConfig;
use crate::composite::{CompositorKind, CompositeDescriptor};
use crate::frame_builder::{FrameBuilder, FrameBuilderConfig, FrameScratchBuffer};
use crate::glyph_rasterizer::{FontInstance};
use crate::gpu_cache::GpuCache;
use crate::hit_test::{HitTest, HitTester, SharedHitTester};
use crate::intern::DataStore;
#[cfg(any(feature = "capture", feature = "replay"))]
use crate::internal_types::{DebugOutput};
use crate::internal_types::{FastHashMap, RenderedDocument, ResultMsg, FrameId, FrameStamp};
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use crate::picture::{PictureScratchBuffer, SliceId, TileCacheInstance, TileCacheParams, SurfaceInfo, RasterConfig};
use crate::picture::{PicturePrimitive};
use crate::prim_store::{PrimitiveScratchBuffer, PrimitiveInstance};
use crate::prim_store::{PrimitiveInstanceKind, PrimTemplateCommonData};
use crate::prim_store::interned::*;
use crate::profiler::{self, TransactionProfile};
use crate::render_task_graph::RenderTaskGraphBuilder;
use crate::renderer::{AsyncPropertySampler, FullFrameStats, PipelineInfo};
use crate::resource_cache::ResourceCache;
#[cfg(feature = "replay")]
use crate::resource_cache::PlainCacheOwn;
#[cfg(feature = "replay")]
use crate::resource_cache::PlainResources;
#[cfg(feature = "replay")]
use crate::scene::Scene;
use crate::scene::{BuiltScene, SceneProperties};
use crate::scene_builder_thread::*;
use crate::spatial_tree::SpatialTree;
#[cfg(feature = "replay")]
use crate::spatial_tree::SceneSpatialTree;
#[cfg(feature = "serialize")]
use serde::{Serialize, Deserialize};
#[cfg(feature = "replay")]
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{mem, u32};
#[cfg(feature = "capture")]
use std::path::PathBuf;
#[cfg(feature = "replay")]
use crate::frame_builder::Frame;
use time::precise_time_ns;
use crate::util::{Recycler, VecHelper, drain_filter};

#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Copy, Clone)]
pub struct DocumentView {
    scene: SceneView,
}

/// Some rendering parameters applying at the scene level.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Copy, Clone)]
pub struct SceneView {
    pub device_rect: DeviceIntRect,
    pub quality_settings: QualitySettings,
}

enum RenderBackendStatus {
    Continue,
    StopRenderBackend,
    ShutDown(Option<Sender<()>>),
}

macro_rules! declare_data_stores {
    ( $( $name:ident : $ty:ty, )+ ) => {
        /// A collection of resources that are shared by clips and primitives
        /// between display lists.
        #[cfg_attr(feature = "capture", derive(Serialize))]
        #[cfg_attr(feature = "replay", derive(Deserialize))]
        #[derive(Default)]
        pub struct DataStores {
            $(
                pub $name: DataStore<$ty>,
            )+
        }

        impl DataStores {
            /// Reports CPU heap usage.
            fn report_memory(&self, ops: &mut MallocSizeOfOps, r: &mut MemoryReport) {
                $(
                    r.interning.data_stores.$name += self.$name.size_of(ops);
                )+
            }

            fn apply_updates(
                &mut self,
                updates: InternerUpdates,
                profile: &mut TransactionProfile,
            ) {
                $(
                    self.$name.apply_updates(
                        updates.$name,
                        profile,
                    );
                )+
            }
        }
    }
}

crate::enumerate_interners!(declare_data_stores);

impl DataStores {
    /// Returns the local rect for a primitive. For most primitives, this is
    /// stored in the template. For pictures, this is stored inside the picture
    /// primitive instance itself, since this is determined during frame building.
    pub fn get_local_prim_rect(
        &self,
        prim_instance: &PrimitiveInstance,
        pictures: &[PicturePrimitive],
        surfaces: &[SurfaceInfo],
    ) -> LayoutRect {
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { pic_index, .. } => {
                let pic = &pictures[pic_index.0];

                match pic.raster_config {
                    Some(RasterConfig { surface_index, ref composite_mode, .. }) => {
                        let surface = &surfaces[surface_index.0];
                        composite_mode.get_rect(surface, None)
                    }
                    None => {
                        panic!("bug: get_local_prim_rect should not be called for pass-through pictures");
                    }
                }
            }
            _ => {
                self.as_common_data(prim_instance).prim_rect
            }
        }
    }

    /// Returns the local coverage (space occupied) for a primitive. For most primitives,
    /// this is stored in the template. For pictures, this is stored inside the picture
    /// primitive instance itself, since this is determined during frame building.
    pub fn get_local_prim_coverage_rect(
        &self,
        prim_instance: &PrimitiveInstance,
        pictures: &[PicturePrimitive],
        surfaces: &[SurfaceInfo],
    ) -> LayoutRect {
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { pic_index, .. } => {
                let pic = &pictures[pic_index.0];

                match pic.raster_config {
                    Some(RasterConfig { surface_index, ref composite_mode, .. }) => {
                        let surface = &surfaces[surface_index.0];
                        composite_mode.get_coverage(surface, None)
                    }
                    None => {
                        panic!("bug: get_local_prim_coverage_rect should not be called for pass-through pictures");
                    }
                }
            }
            _ => {
                self.as_common_data(prim_instance).prim_rect
            }
        }
    }

    /// Returns true if this primitive might need repetition.
    // TODO(gw): This seems like the wrong place for this - maybe this flag should
    //           not be in the common prim template data?
    pub fn prim_may_need_repetition(
        &self,
        prim_instance: &PrimitiveInstance,
    ) -> bool {
        match prim_instance.kind {
            PrimitiveInstanceKind::Picture { .. } => {
                false
            }
            _ => {
                self.as_common_data(prim_instance).may_need_repetition
            }
        }
    }

    pub fn as_common_data(
        &self,
        prim_inst: &PrimitiveInstance
    ) -> &PrimTemplateCommonData {
        match prim_inst.kind {
            PrimitiveInstanceKind::Rectangle { data_handle, .. } |
            PrimitiveInstanceKind::Clear { data_handle, .. } => {
                let prim_data = &self.prim[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::Image { data_handle, .. } => {
                let prim_data = &self.image[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
                let prim_data = &self.image_border[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::LineDecoration { data_handle, .. } => {
                let prim_data = &self.line_decoration[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::LinearGradient { data_handle, .. } |
            PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => {
                let prim_data = &self.linear_grad[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::NormalBorder { data_handle, .. } => {
                let prim_data = &self.normal_border[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::Picture { .. } => {
                panic!("BUG: picture prims don't have common data!");
            }
            PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
                let prim_data = &self.radial_grad[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
                let prim_data = &self.conic_grad[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::TextRun { data_handle, .. } => {
                let prim_data = &self.text_run[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::YuvImage { data_handle, .. } => {
                let prim_data = &self.yuv_image[data_handle];
                &prim_data.common
            }
            PrimitiveInstanceKind::Backdrop { data_handle, .. } => {
                let prim_data = &self.backdrop[data_handle];
                &prim_data.common
            }
        }
    }
}

#[derive(Default)]
pub struct ScratchBuffer {
    pub primitive: PrimitiveScratchBuffer,
    pub picture: PictureScratchBuffer,
    pub frame: FrameScratchBuffer,
    pub clip_store: ClipStoreScratchBuffer,
}

impl ScratchBuffer {
    pub fn begin_frame(&mut self) {
        self.primitive.begin_frame();
        self.picture.begin_frame();
        self.frame.begin_frame();
    }

    pub fn end_frame(&mut self) {
        self.primitive.end_frame();
    }

    pub fn recycle(&mut self, recycler: &mut Recycler) {
        self.primitive.recycle(recycler);
        self.picture.recycle(recycler);
    }

    pub fn memory_pressure(&mut self) {
        // TODO: causes browser chrome test crashes on windows.
        //self.primitive = Default::default();
        self.picture = Default::default();
        self.frame = Default::default();
        self.clip_store = Default::default();
    }
}

struct Document {
    /// The id of this document
    id: DocumentId,

    /// Temporary list of removed pipelines received from the scene builder
    /// thread and forwarded to the renderer.
    removed_pipelines: Vec<(PipelineId, DocumentId)>,

    view: DocumentView,

    /// The id and time of the current frame.
    stamp: FrameStamp,

    /// The latest built scene, usable to build frames.
    /// received from the scene builder thread.
    scene: BuiltScene,

    /// The builder object that produces frames, kept around to preserve some retained state.
    frame_builder: FrameBuilder,

    /// Allows graphs of render tasks to be created, and then built into an immutable graph output.
    rg_builder: RenderTaskGraphBuilder,

    /// A data structure to allow hit testing against rendered frames. This is updated
    /// every time we produce a fully rendered frame.
    hit_tester: Option<Arc<HitTester>>,

    /// To avoid synchronous messaging we update a shared hit-tester that other threads
    /// can query.
    shared_hit_tester: Arc<SharedHitTester>,

    /// Properties that are resolved during frame building and can be changed at any time
    /// without requiring the scene to be re-built.
    dynamic_properties: SceneProperties,

    /// Track whether the last built frame is up to date or if it will need to be re-built
    /// before rendering again.
    frame_is_valid: bool,
    hit_tester_is_valid: bool,
    rendered_frame_is_valid: bool,

    /// We track this information to be able to display debugging information from the
    /// renderer.
    has_built_scene: bool,

    data_stores: DataStores,

    /// Retained frame-building version of the spatial tree
    spatial_tree: SpatialTree,

    /// Contains various vecs of data that is used only during frame building,
    /// where we want to recycle the memory each new display list, to avoid constantly
    /// re-allocating and moving memory around.
    scratch: ScratchBuffer,

    #[cfg(feature = "replay")]
    loaded_scene: Scene,

    /// Tracks the state of the picture cache tiles that were composited on the previous frame.
    prev_composite_descriptor: CompositeDescriptor,

    /// Tracks if we need to invalidate dirty rects for this document, due to the picture
    /// cache slice configuration having changed when a new scene is swapped in.
    dirty_rects_are_valid: bool,

    profile: TransactionProfile,
    frame_stats: Option<FullFrameStats>,
}

impl Document {
    pub fn new(
        id: DocumentId,
        size: DeviceIntSize,
    ) -> Self {
        Document {
            id,
            removed_pipelines: Vec::new(),
            view: DocumentView {
                scene: SceneView {
                    device_rect: size.into(),
                    quality_settings: QualitySettings::default(),
                },
            },
            stamp: FrameStamp::first(id),
            scene: BuiltScene::empty(),
            frame_builder: FrameBuilder::new(),
            hit_tester: None,
            shared_hit_tester: Arc::new(SharedHitTester::new()),
            dynamic_properties: SceneProperties::new(),
            frame_is_valid: false,
            hit_tester_is_valid: false,
            rendered_frame_is_valid: false,
            has_built_scene: false,
            data_stores: DataStores::default(),
            spatial_tree: SpatialTree::new(),
            scratch: ScratchBuffer::default(),
            #[cfg(feature = "replay")]
            loaded_scene: Scene::new(),
            prev_composite_descriptor: CompositeDescriptor::empty(),
            dirty_rects_are_valid: true,
            profile: TransactionProfile::new(),
            rg_builder: RenderTaskGraphBuilder::new(),
            frame_stats: None,
        }
    }

    fn can_render(&self) -> bool {
        self.scene.has_root_pipeline
    }

    fn has_pixels(&self) -> bool {
        !self.view.scene.device_rect.is_empty()
    }

    fn process_frame_msg(
        &mut self,
        message: FrameMsg,
    ) -> DocumentOps {
        match message {
            FrameMsg::UpdateEpoch(pipeline_id, epoch) => {
                self.scene.pipeline_epochs.insert(pipeline_id, epoch);
            }
            FrameMsg::HitTest(point, tx) => {
                if !self.hit_tester_is_valid {
                    self.rebuild_hit_tester();
                }

                let result = match self.hit_tester {
                    Some(ref hit_tester) => {
                        hit_tester.hit_test(HitTest::new(point))
                    }
                    None => HitTestResult { items: Vec::new() },
                };

                tx.send(result).unwrap();
            }
            FrameMsg::RequestHitTester(tx) => {
                tx.send(self.shared_hit_tester.clone()).unwrap();
            }
            FrameMsg::SetScrollOffsets(id, offset) => {
                profile_scope!("SetScrollOffset");

                if self.set_scroll_offsets(id, offset) {
                    self.hit_tester_is_valid = false;
                    self.frame_is_valid = false;
                }

                return DocumentOps {
                    scroll: true,
                    ..DocumentOps::nop()
                };
            }
            FrameMsg::ResetDynamicProperties => {
                self.dynamic_properties.reset_properties();
            }
            FrameMsg::AppendDynamicProperties(property_bindings) => {
                self.dynamic_properties.add_properties(property_bindings);
            }
            FrameMsg::AppendDynamicTransformProperties(property_bindings) => {
                self.dynamic_properties.add_transforms(property_bindings);
            }
            FrameMsg::SetIsTransformAsyncZooming(is_zooming, animation_id) => {
                if let Some(node_index) = self.spatial_tree.find_spatial_node_by_anim_id(animation_id) {
                    let node = self.spatial_tree.get_spatial_node_mut(node_index);

                    if node.is_async_zooming != is_zooming {
                        node.is_async_zooming = is_zooming;
                        self.frame_is_valid = false;
                    }
                }
            }
        }

        DocumentOps::nop()
    }

    fn build_frame(
        &mut self,
        resource_cache: &mut ResourceCache,
        gpu_cache: &mut GpuCache,
        debug_flags: DebugFlags,
        tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
        frame_stats: Option<FullFrameStats>,
        render_reasons: RenderReasons,
    ) -> RenderedDocument {
        let frame_build_start_time = precise_time_ns();

        // Advance to the next frame.
        self.stamp.advance();
        assert!(self.stamp.frame_id() != FrameId::INVALID,
                "First frame increment must happen before build_frame()");

        let frame = {
            let frame = self.frame_builder.build(
                &mut self.scene,
                resource_cache,
                gpu_cache,
                &mut self.rg_builder,
                self.stamp,
                self.view.scene.device_rect.min,
                &self.dynamic_properties,
                &mut self.data_stores,
                &mut self.scratch,
                debug_flags,
                tile_caches,
                &mut self.spatial_tree,
                self.dirty_rects_are_valid,
                &mut self.profile,
            );

            frame
        };

        self.frame_is_valid = true;
        self.dirty_rects_are_valid = true;

        let is_new_scene = self.has_built_scene;
        self.has_built_scene = false;

        let frame_build_time_ms =
            profiler::ns_to_ms(precise_time_ns() - frame_build_start_time);
        self.profile.set(profiler::FRAME_BUILDING_TIME, frame_build_time_ms);

        let frame_stats = frame_stats.map(|mut stats| {
            stats.frame_build_time += frame_build_time_ms;
            stats
        });

        RenderedDocument {
            frame,
            is_new_scene,
            profile: self.profile.take_and_reset(),
            frame_stats: frame_stats,
            render_reasons,
        }
    }

    fn rebuild_hit_tester(&mut self) {
        self.spatial_tree.update_tree(&self.dynamic_properties);

        let hit_tester = Arc::new(self.scene.create_hit_tester(&self.spatial_tree));
        self.hit_tester = Some(Arc::clone(&hit_tester));
        self.shared_hit_tester.update(hit_tester);
        self.hit_tester_is_valid = true;
    }

    pub fn updated_pipeline_info(&mut self) -> PipelineInfo {
        let removed_pipelines = self.removed_pipelines.take_and_preallocate();
        PipelineInfo {
            epochs: self.scene.pipeline_epochs.iter()
                .map(|(&pipeline_id, &epoch)| ((pipeline_id, self.id), epoch)).collect(),
            removed_pipelines,
        }
    }

    /// Returns true if the node actually changed position or false otherwise.
    pub fn set_scroll_offsets(
        &mut self,
        id: ExternalScrollId,
        offsets: Vec<SampledScrollOffset>,
    ) -> bool {
        self.spatial_tree.set_scroll_offsets(id, offsets)
    }

    /// Update the state of tile caches when a new scene is being swapped in to
    /// the render backend. Retain / reuse existing caches if possible, and
    /// destroy any now unused caches.
    fn update_tile_caches_for_new_scene(
        &mut self,
        mut requested_tile_caches: FastHashMap<SliceId, TileCacheParams>,
        tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
        resource_cache: &mut ResourceCache,
    ) {
        let mut new_tile_caches = FastHashMap::default();
        new_tile_caches.reserve(requested_tile_caches.len());

        // Step through the tile caches that are needed for the new scene, and see
        // if we have an existing cache that can be reused.
        for (slice_id, params) in requested_tile_caches.drain() {
            let tile_cache = match tile_caches.remove(&slice_id) {
                Some(mut existing_tile_cache) => {
                    // Found an existing cache - update the cache params and reuse it
                    existing_tile_cache.prepare_for_new_scene(
                        params,
                        resource_cache,
                    );
                    existing_tile_cache
                }
                None => {
                    // No cache exists so create a new one
                    Box::new(TileCacheInstance::new(params))
                }
            };

            new_tile_caches.insert(slice_id, tile_cache);
        }

        // Replace current tile cache map, and return what was left over,
        // which are now unused.
        let unused_tile_caches = mem::replace(
            tile_caches,
            new_tile_caches,
        );

        if !unused_tile_caches.is_empty() {
            // If the slice configuration changed, assume we can't rely on the
            // current dirty rects for next composite
            self.dirty_rects_are_valid = false;

            // Destroy any native surfaces allocated by these unused caches
            for (_, tile_cache) in unused_tile_caches {
                tile_cache.destroy(resource_cache);
            }
        }
    }

    pub fn new_async_scene_ready(
        &mut self,
        mut built_scene: BuiltScene,
        recycler: &mut Recycler,
        tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
        resource_cache: &mut ResourceCache,
    ) {
        self.frame_is_valid = false;
        self.hit_tester_is_valid = false;

        self.update_tile_caches_for_new_scene(
            mem::replace(&mut built_scene.tile_cache_config.tile_caches, FastHashMap::default()),
            tile_caches,
            resource_cache,
        );

        self.scene = built_scene;
        self.scratch.recycle(recycler);
    }
}

struct DocumentOps {
    scroll: bool,
}

impl DocumentOps {
    fn nop() -> Self {
        DocumentOps {
            scroll: false,
        }
    }
}

/// The unique id for WR resource identification.
/// The namespace_id should start from 1.
static NEXT_NAMESPACE_ID: AtomicUsize = AtomicUsize::new(1);

#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainRenderBackend {
    frame_config: FrameBuilderConfig,
    documents: FastHashMap<DocumentId, DocumentView>,
    resource_sequence_id: u32,
}

/// The render backend is responsible for transforming high level display lists into
/// GPU-friendly work which is then submitted to the renderer in the form of a frame::Frame.
///
/// The render backend operates on its own thread.
pub struct RenderBackend {
    api_rx: Receiver<ApiMsg>,
    result_tx: Sender<ResultMsg>,
    scene_tx: Sender<SceneBuilderRequest>,

    gpu_cache: GpuCache,
    resource_cache: ResourceCache,

    frame_config: FrameBuilderConfig,
    default_compositor_kind: CompositorKind,
    documents: FastHashMap<DocumentId, Document>,

    notifier: Box<dyn RenderNotifier>,
    sampler: Option<Box<dyn AsyncPropertySampler + Send>>,
    size_of_ops: Option<MallocSizeOfOps>,
    debug_flags: DebugFlags,
    namespace_alloc_by_client: bool,

    // We keep one around to be able to call clear_namespace
    // after the api object is deleted. For most purposes the
    // api object's blob handler should be used instead.
    blob_image_handler: Option<Box<dyn BlobImageHandler>>,

    recycler: Recycler,

    #[cfg(feature = "capture")]
    /// If `Some`, do 'sequence capture' logging, recording updated documents,
    /// frames, etc. This is set only through messages from the scene builder,
    /// so all control of sequence capture goes through there.
    capture_config: Option<CaptureConfig>,

    #[cfg(feature = "replay")]
    loaded_resource_sequence_id: u32,

    /// A map of tile caches. These are stored in the backend as they are
    /// persisted between both frame and scenes.
    tile_caches: FastHashMap<SliceId, Box<TileCacheInstance>>,
}

impl RenderBackend {
    pub fn new(
        api_rx: Receiver<ApiMsg>,
        result_tx: Sender<ResultMsg>,
        scene_tx: Sender<SceneBuilderRequest>,
        resource_cache: ResourceCache,
        notifier: Box<dyn RenderNotifier>,
        blob_image_handler: Option<Box<dyn BlobImageHandler>>,
        frame_config: FrameBuilderConfig,
        sampler: Option<Box<dyn AsyncPropertySampler + Send>>,
        size_of_ops: Option<MallocSizeOfOps>,
        debug_flags: DebugFlags,
        namespace_alloc_by_client: bool,
    ) -> RenderBackend {
        RenderBackend {
            api_rx,
            result_tx,
            scene_tx,
            resource_cache,
            gpu_cache: GpuCache::new(),
            frame_config,
            default_compositor_kind: frame_config.compositor_kind,
            documents: FastHashMap::default(),
            notifier,
            sampler,
            size_of_ops,
            debug_flags,
            namespace_alloc_by_client,
            recycler: Recycler::new(),
            blob_image_handler,
            #[cfg(feature = "capture")]
            capture_config: None,
            #[cfg(feature = "replay")]
            loaded_resource_sequence_id: 0,
            tile_caches: FastHashMap::default(),
        }
    }

    fn next_namespace_id(&self) -> IdNamespace {
        IdNamespace(NEXT_NAMESPACE_ID.fetch_add(1, Ordering::Relaxed) as u32)
    }

    pub fn run(&mut self) {
        let mut frame_counter: u32 = 0;
        let mut status = RenderBackendStatus::Continue;

        if let Some(ref sampler) = self.sampler {
            sampler.register();
        }

        while let RenderBackendStatus::Continue = status {
            status = match self.api_rx.recv() {
                Ok(msg) => {
                    self.process_api_msg(msg, &mut frame_counter)
                }
                Err(..) => { RenderBackendStatus::ShutDown(None) }
            };
        }

        if let RenderBackendStatus::StopRenderBackend = status {
            while let Ok(msg) = self.api_rx.recv() {
                match msg {
                    ApiMsg::SceneBuilderResult(SceneBuilderResult::ExternalEvent(evt)) => {
                        self.notifier.external_event(evt);
                    }
                    ApiMsg::SceneBuilderResult(SceneBuilderResult::FlushComplete(tx)) => {
                        // If somebody's blocked waiting for a flush, how did they
                        // trigger the RB thread to shut down? This shouldn't happen
                        // but handle it gracefully anyway.
                        debug_assert!(false);
                        tx.send(()).ok();
                    }
                    ApiMsg::SceneBuilderResult(SceneBuilderResult::ShutDown(sender)) => {
                        info!("Recycling stats: {:?}", self.recycler);
                        status = RenderBackendStatus::ShutDown(sender);
                        break;
                    }
                    _ => {},
                }
            }
        }

        // Ensure we read everything the scene builder is sending us from
        // inflight messages, otherwise the scene builder might panic.
        while let Ok(msg) = self.api_rx.try_recv() {
            match msg {
                ApiMsg::SceneBuilderResult(SceneBuilderResult::FlushComplete(tx)) => {
                    // If somebody's blocked waiting for a flush, how did they
                    // trigger the RB thread to shut down? This shouldn't happen
                    // but handle it gracefully anyway.
                    debug_assert!(false);
                    tx.send(()).ok();
                }
                _ => {},
            }
        }

        self.documents.clear();

        self.notifier.shut_down();

        if let Some(ref sampler) = self.sampler {
            sampler.deregister();
        }

        if let RenderBackendStatus::ShutDown(Some(sender)) = status {
            let _ = sender.send(());
        }
    }

    fn process_transaction(
        &mut self,
        mut txns: Vec<Box<BuiltTransaction>>,
        result_tx: Option<Sender<SceneSwapResult>>,
        frame_counter: &mut u32,
    ) -> bool {
        self.prepare_for_frames();
        self.maybe_force_nop_documents(
            frame_counter,
            |document_id| txns.iter().any(|txn| txn.document_id == document_id));

        let mut built_frame = false;
        for mut txn in txns.drain(..) {
            let has_built_scene = txn.built_scene.is_some();

            if let Some(doc) = self.documents.get_mut(&txn.document_id) {
                doc.removed_pipelines.append(&mut txn.removed_pipelines);
                doc.view.scene = txn.view;
                doc.profile.merge(&mut txn.profile);

                doc.frame_stats = if let Some(stats) = &doc.frame_stats {
                    Some(stats.merge(&txn.frame_stats))
                } else {
                    Some(txn.frame_stats)
                };

                if let Some(updates) = txn.spatial_tree_updates.take() {
                    doc.spatial_tree.apply_updates(updates);
                }

                if let Some(built_scene) = txn.built_scene.take() {
                    doc.new_async_scene_ready(
                        built_scene,
                        &mut self.recycler,
                        &mut self.tile_caches,
                        &mut self.resource_cache,
                    );
                }

                // If there are any additions or removals of clip modes
                // during the scene build, apply them to the data store now.
                // This needs to happen before we build the hit tester.
                if let Some(updates) = txn.interner_updates.take() {
                    doc.data_stores.apply_updates(updates, &mut doc.profile);
                }

                // Build the hit tester while the APZ lock is held so that its content
                // is in sync with the gecko APZ tree.
                if !doc.hit_tester_is_valid {
                    doc.rebuild_hit_tester();
                }

                if let Some(ref tx) = result_tx {
                    let (resume_tx, resume_rx) = single_msg_channel();
                    tx.send(SceneSwapResult::Complete(resume_tx)).unwrap();

                    // Block until the post-swap hook has completed on
                    // the scene builder thread. We need to do this before
                    // we can sample from the sampler hook which might happen
                    // in the update_document call below.
                    resume_rx.recv().ok();
                }

                self.resource_cache.add_rasterized_blob_images(
                    txn.rasterized_blobs.take(),
                    &mut doc.profile,
                );
            } else {
                // The document was removed while we were building it, skip it.
                // TODO: we might want to just ensure that removed documents are
                // always forwarded to the scene builder thread to avoid this case.
                if let Some(ref tx) = result_tx {
                    tx.send(SceneSwapResult::Aborted).unwrap();
                }
                continue;
            }

            built_frame |= self.update_document(
                txn.document_id,
                txn.resource_updates.take(),
                txn.frame_ops.take(),
                txn.notifications.take(),
                txn.render_frame,
                RenderReasons::SCENE,
                None,
                txn.invalidate_rendered_frame,
                frame_counter,
                has_built_scene,
            );
        }

        built_frame
    }

    fn process_api_msg(
        &mut self,
        msg: ApiMsg,
        frame_counter: &mut u32,
    ) -> RenderBackendStatus {
        match msg {
            ApiMsg::CloneApi(sender) => {
                assert!(!self.namespace_alloc_by_client);
                sender.send(self.next_namespace_id()).unwrap();
            }
            ApiMsg::CloneApiByClient(namespace_id) => {
                assert!(self.namespace_alloc_by_client);
                debug_assert!(!self.documents.iter().any(|(did, _doc)| did.namespace_id == namespace_id));
            }
            ApiMsg::AddDocument(document_id, initial_size) => {
                let document = Document::new(
                    document_id,
                    initial_size,
                );
                let old = self.documents.insert(document_id, document);
                debug_assert!(old.is_none());
            }
            ApiMsg::MemoryPressure => {
                // This is drastic. It will basically flush everything out of the cache,
                // and the next frame will have to rebuild all of its resources.
                // We may want to look into something less extreme, but on the other hand this
                // should only be used in situations where we are running low enough on memory
                // that we risk crashing if we don't do something about it.
                // The advantage of clearing the cache completely is that it gets rid of any
                // remaining fragmentation that could have persisted if we kept around the most
                // recently used resources.
                self.resource_cache.clear(ClearCache::all());
                self.gpu_cache.clear();

                for (_, doc) in &mut self.documents {
                    doc.scratch.memory_pressure();
                }

                let resource_updates = self.resource_cache.pending_updates();
                let msg = ResultMsg::UpdateResources {
                    resource_updates,
                    memory_pressure: true,
                };
                self.result_tx.send(msg).unwrap();

                self.notifier.wake_up(false);
            }
            ApiMsg::ReportMemory(tx) => {
                self.report_memory(tx);
            }
            ApiMsg::DebugCommand(option) => {
                let msg = match option {
                    DebugCommand::EnableDualSourceBlending(enable) => {
                        // Set in the config used for any future documents
                        // that are created.
                        self.frame_config
                            .dual_source_blending_is_enabled = enable;
                        self.update_frame_builder_config();

                        // We don't want to forward this message to the renderer.
                        return RenderBackendStatus::Continue;
                    }
                    DebugCommand::SetPictureTileSize(tile_size) => {
                        self.frame_config.tile_size_override = tile_size;
                        self.update_frame_builder_config();

                        return RenderBackendStatus::Continue;
                    }
                    #[cfg(feature = "capture")]
                    DebugCommand::SaveCapture(root, bits) => {
                        let output = self.save_capture(root, bits);
                        ResultMsg::DebugOutput(output)
                    },
                    #[cfg(feature = "capture")]
                    DebugCommand::StartCaptureSequence(root, bits) => {
                        self.start_capture_sequence(root, bits);
                        return RenderBackendStatus::Continue;
                    },
                    #[cfg(feature = "capture")]
                    DebugCommand::StopCaptureSequence => {
                        self.stop_capture_sequence();
                        return RenderBackendStatus::Continue;
                    },
                    #[cfg(feature = "replay")]
                    DebugCommand::LoadCapture(path, ids, tx) => {
                        NEXT_NAMESPACE_ID.fetch_add(1, Ordering::Relaxed);
                        *frame_counter += 1;

                        let mut config = CaptureConfig::new(path, CaptureBits::all());
                        if let Some((scene_id, frame_id)) = ids {
                            config.scene_id = scene_id;
                            config.frame_id = frame_id;
                        }

                        self.load_capture(config);

                        for (id, doc) in &self.documents {
                            let captured = CapturedDocument {
                                document_id: *id,
                                root_pipeline_id: doc.loaded_scene.root_pipeline_id,
                            };
                            tx.send(captured).unwrap();
                        }

                        // Note: we can't pass `LoadCapture` here since it needs to arrive
                        // before the `PublishDocument` messages sent by `load_capture`.
                        return RenderBackendStatus::Continue;
                    }
                    DebugCommand::ClearCaches(mask) => {
                        self.resource_cache.clear(mask);
                        return RenderBackendStatus::Continue;
                    }
                    DebugCommand::EnableNativeCompositor(enable) => {
                        // Default CompositorKind should be Native
                        if let CompositorKind::Draw { .. } = self.default_compositor_kind {
                            unreachable!();
                        }

                        let compositor_kind = if enable {
                            self.default_compositor_kind
                        } else {
                            CompositorKind::default()
                        };

                        for (_, doc) in &mut self.documents {
                            doc.scene.config.compositor_kind = compositor_kind;
                            doc.frame_is_valid = false;
                        }

                        self.frame_config.compositor_kind = compositor_kind;
                        self.update_frame_builder_config();

                        // We don't want to forward this message to the renderer.
                        return RenderBackendStatus::Continue;
                    }
                    DebugCommand::SetBatchingLookback(count) => {
                        self.frame_config.batch_lookback_count = count as usize;
                        self.update_frame_builder_config();

                        return RenderBackendStatus::Continue;
                    }
                    DebugCommand::SimulateLongSceneBuild(time_ms) => {
                        let _ = self.scene_tx.send(SceneBuilderRequest::SimulateLongSceneBuild(time_ms));
                        return RenderBackendStatus::Continue;
                    }
                    DebugCommand::SetFlags(flags) => {
                        self.resource_cache.set_debug_flags(flags);
                        self.gpu_cache.set_debug_flags(flags);

                        let force_invalidation = flags.contains(DebugFlags::FORCE_PICTURE_INVALIDATION);
                        if self.frame_config.force_invalidation != force_invalidation {
                            self.frame_config.force_invalidation = force_invalidation;
                            self.update_frame_builder_config();
                        }

                        // If we're toggling on the GPU cache debug display, we
                        // need to blow away the cache. This is because we only
                        // send allocation/free notifications to the renderer
                        // thread when the debug display is enabled, and thus
                        // enabling it when the cache is partially populated will
                        // give the renderer an incomplete view of the world.
                        // And since we might as well drop all the debugging state
                        // from the renderer when we disable the debug display,
                        // we just clear the cache on toggle.
                        let changed = self.debug_flags ^ flags;
                        if changed.contains(DebugFlags::GPU_CACHE_DBG) {
                            self.gpu_cache.clear();
                        }
                        self.debug_flags = flags;

                        ResultMsg::DebugCommand(option)
                    }
                    _ => ResultMsg::DebugCommand(option),
                };
                self.result_tx.send(msg).unwrap();
                self.notifier.wake_up(true);
            }
            ApiMsg::UpdateDocuments(transaction_msgs) => {
                self.prepare_transactions(
                    transaction_msgs,
                    frame_counter,
                );
            }
            ApiMsg::SceneBuilderResult(msg) => {
                return self.process_scene_builder_result(msg, frame_counter);
            }
        }

        RenderBackendStatus::Continue
    }

    fn process_scene_builder_result(
        &mut self,
        msg: SceneBuilderResult,
        frame_counter: &mut u32,
    ) -> RenderBackendStatus {
        profile_scope!("sb_msg");

        match msg {
            SceneBuilderResult::Transactions(txns, result_tx) => {
                self.process_transaction(
                    txns,
                    result_tx,
                    frame_counter,
                );
                self.bookkeep_after_frames();
            },
            #[cfg(feature = "capture")]
            SceneBuilderResult::CapturedTransactions(txns, capture_config, result_tx) => {
                if let Some(ref mut old_config) = self.capture_config {
                    assert!(old_config.scene_id <= capture_config.scene_id);
                    if old_config.scene_id < capture_config.scene_id {
                        old_config.scene_id = capture_config.scene_id;
                        old_config.frame_id = 0;
                    }
                } else {
                    self.capture_config = Some(capture_config);
                }

                let built_frame = self.process_transaction(
                    txns,
                    result_tx,
                    frame_counter,
                );

                if built_frame {
                    self.save_capture_sequence();
                }

                self.bookkeep_after_frames();
            },
            #[cfg(feature = "capture")]
            SceneBuilderResult::StopCaptureSequence => {
                self.capture_config = None;
            }
            SceneBuilderResult::GetGlyphDimensions(request) => {
                let mut glyph_dimensions = Vec::with_capacity(request.glyph_indices.len());
                if let Some(base) = self.resource_cache.get_font_instance(request.key) {
                    let font = FontInstance::from_base(Arc::clone(&base));
                    for glyph_index in &request.glyph_indices {
                        let glyph_dim = self.resource_cache.get_glyph_dimensions(&font, *glyph_index);
                        glyph_dimensions.push(glyph_dim);
                    }
                }
                request.sender.send(glyph_dimensions).unwrap();
            }
            SceneBuilderResult::GetGlyphIndices(request) => {
                let mut glyph_indices = Vec::with_capacity(request.text.len());
                for ch in request.text.chars() {
                    let index = self.resource_cache.get_glyph_index(request.key, ch);
                    glyph_indices.push(index);
                }
                request.sender.send(glyph_indices).unwrap();
            }
            SceneBuilderResult::FlushComplete(tx) => {
                tx.send(()).ok();
            }
            SceneBuilderResult::ExternalEvent(evt) => {
                self.notifier.external_event(evt);
            }
            SceneBuilderResult::ClearNamespace(id) => {
                self.resource_cache.clear_namespace(id);
                self.documents.retain(|doc_id, _doc| doc_id.namespace_id != id);
                if let Some(handler) = &mut self.blob_image_handler {
                    handler.clear_namespace(id);
                }
            }
            SceneBuilderResult::DeleteDocument(document_id) => {
                self.documents.remove(&document_id);
            }
            SceneBuilderResult::SetParameter(param) => {
                if let Parameter::Bool(BoolParameter::Multithreading, enabled) = param {
                    self.resource_cache.enable_multithreading(enabled);
                }
                let _ = self.result_tx.send(ResultMsg::SetParameter(param));
            }
            SceneBuilderResult::StopRenderBackend => {
                return RenderBackendStatus::StopRenderBackend;
            }
            SceneBuilderResult::ShutDown(sender) => {
                info!("Recycling stats: {:?}", self.recycler);
                return RenderBackendStatus::ShutDown(sender);
            }
        }

        RenderBackendStatus::Continue
    }

    fn update_frame_builder_config(&self) {
        self.send_backend_message(
            SceneBuilderRequest::SetFrameBuilderConfig(
                self.frame_config.clone()
            )
        );
    }

    fn prepare_for_frames(&mut self) {
        self.gpu_cache.prepare_for_frames();
    }

    fn bookkeep_after_frames(&mut self) {
        self.gpu_cache.bookkeep_after_frames();
    }

    fn requires_frame_build(&mut self) -> bool {
        self.gpu_cache.requires_frame_build()
    }

    fn prepare_transactions(
        &mut self,
        txns: Vec<Box<TransactionMsg>>,
        frame_counter: &mut u32,
    ) {
        self.prepare_for_frames();
        self.maybe_force_nop_documents(
            frame_counter,
            |document_id| txns.iter().any(|txn| txn.document_id == document_id));

        let mut built_frame = false;
        for mut txn in txns {
            if txn.generate_frame.as_bool() {
                txn.profile.end_time(profiler::API_SEND_TIME);
            }

            self.documents.get_mut(&txn.document_id).unwrap().profile.merge(&mut txn.profile);

            built_frame |= self.update_document(
                txn.document_id,
                txn.resource_updates.take(),
                txn.frame_ops.take(),
                txn.notifications.take(),
                txn.generate_frame.as_bool(),
                txn.render_reasons,
                txn.generate_frame.id(),
                txn.invalidate_rendered_frame,
                frame_counter,
                false
            );
        }
        if built_frame {
            #[cfg(feature = "capture")]
            self.save_capture_sequence();
        }
        self.bookkeep_after_frames();
    }

    /// In certain cases, resources shared by multiple documents have to run
    /// maintenance operations, like cleaning up unused cache items. In those
    /// cases, we are forced to build frames for all documents, however we
    /// may not have a transaction ready for every document - this method
    /// calls update_document with the details of a fake, nop transaction just
    /// to force a frame build.
    fn maybe_force_nop_documents<F>(&mut self,
                                    frame_counter: &mut u32,
                                    document_already_present: F)
        where F: Fn(DocumentId) -> bool {
        if self.requires_frame_build() {
            let nop_documents: Vec<DocumentId> = self.documents.keys()
                .cloned()
                .filter(|key| !document_already_present(*key))
                .collect();
            #[allow(unused_variables)]
            let mut built_frame = false;
            for &document_id in &nop_documents {
                built_frame |= self.update_document(
                    document_id,
                    Vec::default(),
                    Vec::default(),
                    Vec::default(),
                    false,
                    RenderReasons::empty(),
                    None,
                    false,
                    frame_counter,
                    false);
            }
            #[cfg(feature = "capture")]
            match built_frame {
                true => self.save_capture_sequence(),
                _ => {},
            }
        }
    }

    fn update_document(
        &mut self,
        document_id: DocumentId,
        resource_updates: Vec<ResourceUpdate>,
        mut frame_ops: Vec<FrameMsg>,
        mut notifications: Vec<NotificationRequest>,
        mut render_frame: bool,
        render_reasons: RenderReasons,
        generated_frame_id: Option<u64>,
        invalidate_rendered_frame: bool,
        frame_counter: &mut u32,
        has_built_scene: bool,
    ) -> bool {
        let requested_frame = render_frame;

        let requires_frame_build = self.requires_frame_build();
        let doc = self.documents.get_mut(&document_id).unwrap();

        // If we have a sampler, get more frame ops from it and add them
        // to the transaction. This is a hook to allow the WR user code to
        // fiddle with things after a potentially long scene build, but just
        // before rendering. This is useful for rendering with the latest
        // async transforms.
        if requested_frame {
            if let Some(ref sampler) = self.sampler {
                frame_ops.append(&mut sampler.sample(document_id, generated_frame_id));
            }
        }

        doc.has_built_scene |= has_built_scene;

        // TODO: this scroll variable doesn't necessarily mean we scrolled. It is only used
        // for something wrench specific and we should remove it.
        let mut scroll = false;
        for frame_msg in frame_ops {
            let op = doc.process_frame_msg(frame_msg);
            scroll |= op.scroll;
        }

        for update in &resource_updates {
            if let ResourceUpdate::UpdateImage(..) = update {
                doc.frame_is_valid = false;
            }
        }

        self.resource_cache.post_scene_building_update(
            resource_updates,
            &mut doc.profile,
        );

        if doc.dynamic_properties.flush_pending_updates() {
            doc.frame_is_valid = false;
            doc.hit_tester_is_valid = false;
        }

        if !doc.can_render() {
            // TODO: this happens if we are building the first scene asynchronously and
            // scroll at the same time. we should keep track of the fact that we skipped
            // composition here and do it as soon as we receive the scene.
            render_frame = false;
        }

        // Avoid re-building the frame if the current built frame is still valid.
        // However, if the resource_cache requires a frame build, _always_ do that, unless
        // doc.can_render() is false, as in that case a frame build can't happen anyway.
        // We want to ensure we do this because even if the doc doesn't have pixels it
        // can still try to access stale texture cache items.
        let build_frame = (render_frame && !doc.frame_is_valid && doc.has_pixels()) ||
            (requires_frame_build && doc.can_render());

        // Request composite is true when we want to composite frame even when
        // there is no frame update. This happens when video frame is updated under
        // external image with NativeTexture or when platform requested to composite frame.
        if invalidate_rendered_frame {
            doc.rendered_frame_is_valid = false;
            if doc.scene.config.compositor_kind.should_redraw_on_invalidation() {
                let msg = ResultMsg::ForceRedraw;
                self.result_tx.send(msg).unwrap();
            }
        }

        let mut frame_build_time = None;
        if build_frame {
            profile_scope!("generate frame");

            *frame_counter += 1;

            // borrow ck hack for profile_counters
            let (pending_update, mut rendered_document) = {
                let frame_build_start_time = precise_time_ns();

                let frame_stats = doc.frame_stats.take();

                let rendered_document = doc.build_frame(
                    &mut self.resource_cache,
                    &mut self.gpu_cache,
                    self.debug_flags,
                    &mut self.tile_caches,
                    frame_stats,
                    render_reasons,
                );

                debug!("generated frame for document {:?} with {} passes",
                    document_id, rendered_document.frame.passes.len());

                let msg = ResultMsg::UpdateGpuCache(self.gpu_cache.extract_updates());
                self.result_tx.send(msg).unwrap();

                frame_build_time = Some(precise_time_ns() - frame_build_start_time);

                let pending_update = self.resource_cache.pending_updates();
                (pending_update, rendered_document)
            };

            // Invalidate dirty rects if the compositing config has changed significantly
            rendered_document
                .frame
                .composite_state
                .update_dirty_rect_validity(&doc.prev_composite_descriptor);

            // Build a small struct that represents the state of the tiles to be composited.
            let composite_descriptor = rendered_document
                .frame
                .composite_state
                .descriptor
                .clone();

            // If there are texture cache updates to apply, or if the produced
            // frame is not a no-op, or the compositor state has changed,
            // then we cannot skip compositing this frame.
            if !pending_update.is_nop() ||
               !rendered_document.frame.is_nop() ||
               composite_descriptor != doc.prev_composite_descriptor {
                doc.rendered_frame_is_valid = false;
            }
            doc.prev_composite_descriptor = composite_descriptor;

            #[cfg(feature = "capture")]
            match self.capture_config {
                Some(ref mut config) => {
                    // FIXME(aosmond): document splitting causes multiple prepare frames
                    config.prepare_frame();

                    if config.bits.contains(CaptureBits::FRAME) {
                        let file_name = format!("frame-{}-{}", document_id.namespace_id.0, document_id.id);
                        config.serialize_for_frame(&rendered_document.frame, file_name);
                    }

                    let data_stores_name = format!("data-stores-{}-{}", document_id.namespace_id.0, document_id.id);
                    config.serialize_for_frame(&doc.data_stores, data_stores_name);

                    let frame_spatial_tree_name = format!("frame-spatial-tree-{}-{}", document_id.namespace_id.0, document_id.id);
                    config.serialize_for_frame::<SpatialTree, _>(&doc.spatial_tree, frame_spatial_tree_name);

                    let properties_name = format!("properties-{}-{}", document_id.namespace_id.0, document_id.id);
                    config.serialize_for_frame(&doc.dynamic_properties, properties_name);
                },
                None => {},
            }

            let msg = ResultMsg::PublishPipelineInfo(doc.updated_pipeline_info());
            self.result_tx.send(msg).unwrap();

            // Publish the frame
            let msg = ResultMsg::PublishDocument(
                document_id,
                rendered_document,
                pending_update,
            );
            self.result_tx.send(msg).unwrap();
        } else if requested_frame {
            // WR-internal optimization to avoid doing a bunch of render work if
            // there's no pixels. We still want to pretend to render and request
            // a render to make sure that the callbacks (particularly the
            // new_frame_ready callback below) has the right flags.
            let msg = ResultMsg::PublishPipelineInfo(doc.updated_pipeline_info());
            self.result_tx.send(msg).unwrap();
        }

        drain_filter(
            &mut notifications,
            |n| { n.when() == Checkpoint::FrameBuilt },
            |n| { n.notify(); },
        );

        if !notifications.is_empty() {
            self.result_tx.send(ResultMsg::AppendNotificationRequests(notifications)).unwrap();
        }

        // Always forward the transaction to the renderer if a frame was requested,
        // otherwise gecko can get into a state where it waits (forever) for the
        // transaction to complete before sending new work.
        if requested_frame {
            // If rendered frame is already valid, there is no need to render frame.
            if doc.rendered_frame_is_valid {
                render_frame = false;
            } else if render_frame {
                doc.rendered_frame_is_valid = true;
            }
            self.notifier.new_frame_ready(document_id, scroll, render_frame, frame_build_time);
        }

        if !doc.hit_tester_is_valid {
            doc.rebuild_hit_tester();
        }

        build_frame
    }

    fn send_backend_message(&self, msg: SceneBuilderRequest) {
        self.scene_tx.send(msg).unwrap();
    }

    fn report_memory(&mut self, tx: Sender<Box<MemoryReport>>) {
        let mut report = Box::new(MemoryReport::default());
        let ops = self.size_of_ops.as_mut().unwrap();
        let op = ops.size_of_op;
        report.gpu_cache_metadata = self.gpu_cache.size_of(ops);
        for doc in self.documents.values() {
            report.clip_stores += doc.scene.clip_store.size_of(ops);
            report.hit_testers += match &doc.hit_tester {
                Some(hit_tester) => hit_tester.size_of(ops),
                None => 0,
            };

            doc.data_stores.report_memory(ops, &mut report)
        }

        (*report) += self.resource_cache.report_memory(op);
        report.texture_cache_structures = self.resource_cache
            .texture_cache
            .report_memory(ops);

        // Send a message to report memory on the scene-builder thread, which
        // will add its report to this one and send the result back to the original
        // thread waiting on the request.
        self.send_backend_message(
            SceneBuilderRequest::ReportMemory(report, tx)
        );
    }

    #[cfg(feature = "capture")]
    fn save_capture_sequence(&mut self) {
        if let Some(ref mut config) = self.capture_config {
            let deferred = self.resource_cache.save_capture_sequence(config);

            let backend = PlainRenderBackend {
                frame_config: self.frame_config.clone(),
                resource_sequence_id: config.resource_id,
                documents: self.documents
                    .iter()
                    .map(|(id, doc)| (*id, doc.view))
                    .collect(),
            };

            config.serialize_for_frame(&backend, "backend");

            if !deferred.is_empty() {
                let msg = ResultMsg::DebugOutput(DebugOutput::SaveCapture(config.clone(), deferred));
                self.result_tx.send(msg).unwrap();
            }
        }
    }
}

impl RenderBackend {
    #[cfg(feature = "capture")]
    // Note: the mutable `self` is only needed here for resolving blob images
    fn save_capture(
        &mut self,
        root: PathBuf,
        bits: CaptureBits,
    ) -> DebugOutput {
        use std::fs;
        use crate::render_task_graph::dump_render_tasks_as_svg;

        debug!("capture: saving {:?}", root);
        if !root.is_dir() {
            if let Err(e) = fs::create_dir_all(&root) {
                panic!("Unable to create capture dir: {:?}", e);
            }
        }
        let config = CaptureConfig::new(root, bits);

        if config.bits.contains(CaptureBits::FRAME) {
            self.prepare_for_frames();
        }

        for (&id, doc) in &mut self.documents {
            debug!("\tdocument {:?}", id);
            if config.bits.contains(CaptureBits::FRAME) {
                let rendered_document = doc.build_frame(
                    &mut self.resource_cache,
                    &mut self.gpu_cache,
                    self.debug_flags,
                    &mut self.tile_caches,
                    None,
                    RenderReasons::empty(),
                );
                // After we rendered the frames, there are pending updates to both
                // GPU cache and resources. Instead of serializing them, we are going to make sure
                // they are applied on the `Renderer` side.
                let msg_update_gpu_cache = ResultMsg::UpdateGpuCache(self.gpu_cache.extract_updates());
                self.result_tx.send(msg_update_gpu_cache).unwrap();
                //TODO: write down doc's pipeline info?
                // it has `pipeline_epoch_map`,
                // which may capture necessary details for some cases.
                let file_name = format!("frame-{}-{}", id.namespace_id.0, id.id);
                config.serialize_for_frame(&rendered_document.frame, file_name);
                let file_name = format!("spatial-{}-{}", id.namespace_id.0, id.id);
                config.serialize_tree_for_frame(&doc.spatial_tree, file_name);
                let file_name = format!("built-primitives-{}-{}", id.namespace_id.0, id.id);
                config.serialize_for_frame(&doc.scene.prim_store, file_name);
                let file_name = format!("built-clips-{}-{}", id.namespace_id.0, id.id);
                config.serialize_for_frame(&doc.scene.clip_store, file_name);
                let file_name = format!("scratch-{}-{}", id.namespace_id.0, id.id);
                config.serialize_for_frame(&doc.scratch.primitive, file_name);
                let file_name = format!("render-tasks-{}-{}.svg", id.namespace_id.0, id.id);
                let mut render_tasks_file = fs::File::create(&config.file_path_for_frame(file_name, "svg"))
                    .expect("Failed to open the SVG file.");
                dump_render_tasks_as_svg(
                    &rendered_document.frame.render_tasks,
                    &mut render_tasks_file
                ).unwrap();

                let file_name = format!("texture-cache-color-linear-{}-{}.svg", id.namespace_id.0, id.id);
                let mut texture_file = fs::File::create(&config.file_path_for_frame(file_name, "svg"))
                    .expect("Failed to open the SVG file.");
                self.resource_cache.texture_cache.dump_color8_linear_as_svg(&mut texture_file).unwrap();

                let file_name = format!("texture-cache-color8-glyphs-{}-{}.svg", id.namespace_id.0, id.id);
                let mut texture_file = fs::File::create(&config.file_path_for_frame(file_name, "svg"))
                    .expect("Failed to open the SVG file.");
                self.resource_cache.texture_cache.dump_color8_glyphs_as_svg(&mut texture_file).unwrap();

                let file_name = format!("texture-cache-alpha8-glyphs-{}-{}.svg", id.namespace_id.0, id.id);
                let mut texture_file = fs::File::create(&config.file_path_for_frame(file_name, "svg"))
                    .expect("Failed to open the SVG file.");
                self.resource_cache.texture_cache.dump_alpha8_glyphs_as_svg(&mut texture_file).unwrap();

                let file_name = format!("texture-cache-alpha8-linear-{}-{}.svg", id.namespace_id.0, id.id);
                let mut texture_file = fs::File::create(&config.file_path_for_frame(file_name, "svg"))
                    .expect("Failed to open the SVG file.");
                self.resource_cache.texture_cache.dump_alpha8_linear_as_svg(&mut texture_file).unwrap();
            }

            let data_stores_name = format!("data-stores-{}-{}", id.namespace_id.0, id.id);
            config.serialize_for_frame(&doc.data_stores, data_stores_name);

            let frame_spatial_tree_name = format!("frame-spatial-tree-{}-{}", id.namespace_id.0, id.id);
            config.serialize_for_frame::<SpatialTree, _>(&doc.spatial_tree, frame_spatial_tree_name);

            let properties_name = format!("properties-{}-{}", id.namespace_id.0, id.id);
            config.serialize_for_frame(&doc.dynamic_properties, properties_name);
        }

        if config.bits.contains(CaptureBits::FRAME) {
            // TODO: there is no guarantee that we won't hit this case, but we want to
            // report it here if we do. If we don't, it will simply crash in
            // Renderer::render_impl and give us less information about the source.
            assert!(!self.requires_frame_build(), "Caches were cleared during a capture.");
            self.bookkeep_after_frames();
        }

        debug!("\tscene builder");
        self.send_backend_message(
            SceneBuilderRequest::SaveScene(config.clone())
        );

        debug!("\tresource cache");
        let (resources, deferred) = self.resource_cache.save_capture(&config.root);

        info!("\tbackend");
        let backend = PlainRenderBackend {
            frame_config: self.frame_config.clone(),
            resource_sequence_id: 0,
            documents: self.documents
                .iter()
                .map(|(id, doc)| (*id, doc.view))
                .collect(),
        };

        config.serialize_for_frame(&backend, "backend");
        config.serialize_for_frame(&resources, "plain-resources");

        if config.bits.contains(CaptureBits::FRAME) {
            let msg_update_resources = ResultMsg::UpdateResources {
                resource_updates: self.resource_cache.pending_updates(),
                memory_pressure: false,
            };
            self.result_tx.send(msg_update_resources).unwrap();
            // Save the texture/glyph/image caches.
            info!("\tresource cache");
            let caches = self.resource_cache.save_caches(&config.root);
            config.serialize_for_resource(&caches, "resource_cache");
            info!("\tgpu cache");
            config.serialize_for_resource(&self.gpu_cache, "gpu_cache");
        }

        DebugOutput::SaveCapture(config, deferred)
    }

    #[cfg(feature = "capture")]
    fn start_capture_sequence(
        &mut self,
        root: PathBuf,
        bits: CaptureBits,
    ) {
        self.send_backend_message(
            SceneBuilderRequest::StartCaptureSequence(CaptureConfig::new(root, bits))
        );
    }

    #[cfg(feature = "capture")]
    fn stop_capture_sequence(
        &mut self,
    ) {
        self.send_backend_message(
            SceneBuilderRequest::StopCaptureSequence
        );
    }

    #[cfg(feature = "replay")]
    fn load_capture(
        &mut self,
        mut config: CaptureConfig,
    ) {
        debug!("capture: loading {:?}", config.frame_root());
        let backend = config.deserialize_for_frame::<PlainRenderBackend, _>("backend")
            .expect("Unable to open backend.ron");

        // If this is a capture sequence, then the ID will be non-zero, and won't
        // match what is loaded, but for still captures, the ID will be zero.
        let first_load = backend.resource_sequence_id == 0;

        if self.loaded_resource_sequence_id != backend.resource_sequence_id || first_load {
            // FIXME(aosmond): We clear the documents because when we update the
            // resource cache, we actually wipe and reload, because we don't
            // know what is the same and what has changed. If we were to keep as
            // much of the resource cache state as possible, we could avoid
            // flushing the document state (which has its own dependencies on the
            // cache).
            //
            // FIXME(aosmond): If we try to load the next capture in the
            // sequence too quickly, we may lose resources we depend on in the
            // current frame. This can cause panics. Ideally we would not
            // advance to the next frame until the FrameRendered event for all
            // of the pipelines.
self.documents.clear(); config.resource_id = backend.resource_sequence_id; self.loaded_resource_sequence_id = backend.resource_sequence_id; let plain_resources = config.deserialize_for_resource::<PlainResources, _>("plain-resources") .expect("Unable to open plain-resources.ron"); let caches_maybe = config.deserialize_for_resource::<PlainCacheOwn, _>("resource_cache"); // Note: it would be great to have `RenderBackend` to be split // rather explicitly on what's used before and after scene building // so that, for example, we never miss anything in the code below: let plain_externals = self.resource_cache.load_capture( plain_resources, caches_maybe, &config, ); let msg_load = ResultMsg::DebugOutput( DebugOutput::LoadCapture(config.clone(), plain_externals) ); self.result_tx.send(msg_load).unwrap(); self.gpu_cache = match config.deserialize_for_resource::<GpuCache, _>("gpu_cache") { Some(gpu_cache) => gpu_cache, None => GpuCache::new(), }; } self.frame_config = backend.frame_config; let mut scenes_to_build = Vec::new(); for (id, view) in backend.documents {<|fim▁hole|> let scene_spatial_tree_name = format!("scene-spatial-tree-{}-{}", id.namespace_id.0, id.id); let scene_spatial_tree = config.deserialize_for_scene::<SceneSpatialTree, _>(&scene_spatial_tree_name) .expect(&format!("Unable to open {}.ron", scene_spatial_tree_name)); let interners_name = format!("interners-{}-{}", id.namespace_id.0, id.id); let interners = config.deserialize_for_scene::<Interners, _>(&interners_name) .expect(&format!("Unable to open {}.ron", interners_name)); let data_stores_name = format!("data-stores-{}-{}", id.namespace_id.0, id.id); let data_stores = config.deserialize_for_frame::<DataStores, _>(&data_stores_name) .expect(&format!("Unable to open {}.ron", data_stores_name)); let properties_name = format!("properties-{}-{}", id.namespace_id.0, id.id); let properties = config.deserialize_for_frame::<SceneProperties, _>(&properties_name) .expect(&format!("Unable to open {}.ron", properties_name)); let frame_spatial_tree_name = format!("frame-spatial-tree-{}-{}", id.namespace_id.0, id.id); let frame_spatial_tree = config.deserialize_for_frame::<SpatialTree, _>(&frame_spatial_tree_name) .expect(&format!("Unable to open {}.ron", frame_spatial_tree_name)); // Update the document if it still exists, rather than replace it entirely. // This allows us to preserve state information such as the frame stamp, // which is necessary for cache sanity. 
match self.documents.entry(id) { Occupied(entry) => { let doc = entry.into_mut(); doc.view = view; doc.loaded_scene = scene.clone(); doc.data_stores = data_stores; doc.spatial_tree = frame_spatial_tree; doc.dynamic_properties = properties; doc.frame_is_valid = false; doc.rendered_frame_is_valid = false; doc.has_built_scene = false; doc.hit_tester_is_valid = false; } Vacant(entry) => { let doc = Document { id, scene: BuiltScene::empty(), removed_pipelines: Vec::new(), view, stamp: FrameStamp::first(id), frame_builder: FrameBuilder::new(), dynamic_properties: properties, hit_tester: None, shared_hit_tester: Arc::new(SharedHitTester::new()), frame_is_valid: false, hit_tester_is_valid: false, rendered_frame_is_valid: false, has_built_scene: false, data_stores, scratch: ScratchBuffer::default(), spatial_tree: frame_spatial_tree, loaded_scene: scene.clone(), prev_composite_descriptor: CompositeDescriptor::empty(), dirty_rects_are_valid: false, profile: TransactionProfile::new(), rg_builder: RenderTaskGraphBuilder::new(), frame_stats: None, }; entry.insert(doc); } }; let frame_name = format!("frame-{}-{}", id.namespace_id.0, id.id); let frame = config.deserialize_for_frame::<Frame, _>(frame_name); let build_frame = match frame { Some(frame) => { info!("\tloaded a built frame with {} passes", frame.passes.len()); let msg_update = ResultMsg::UpdateGpuCache(self.gpu_cache.extract_updates()); self.result_tx.send(msg_update).unwrap(); let msg_publish = ResultMsg::PublishDocument( id, RenderedDocument { frame, is_new_scene: true, profile: TransactionProfile::new(), render_reasons: RenderReasons::empty(), frame_stats: None, }, self.resource_cache.pending_updates(), ); self.result_tx.send(msg_publish).unwrap(); self.notifier.new_frame_ready(id, false, true, None); // We deserialized the state of the frame so we don't want to build // it (but we do want to update the scene builder's state) false } None => true, }; scenes_to_build.push(LoadScene { document_id: id, scene, view: view.scene.clone(), config: self.frame_config.clone(), font_instances: self.resource_cache.get_font_instances(), build_frame, interners, spatial_tree: scene_spatial_tree, }); } if !scenes_to_build.is_empty() { self.send_backend_message( SceneBuilderRequest::LoadScenes(scenes_to_build) ); } } }<|fim▁end|>
debug!("\tdocument {:?}", id); let scene_name = format!("scene-{}-{}", id.namespace_id.0, id.id); let scene = config.deserialize_for_scene::<Scene, _>(&scene_name) .expect(&format!("Unable to open {}.ron", scene_name));
<|file_name|>sha2.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This module implements only the Sha256 function since that is all that is needed for internal
//! use. This implementation is not intended for external use or for any use where security is
//! important.

use std::iter::repeat;
use std::slice::bytes::{MutableByteVector, copy_memory};
use serialize::hex::ToHex;

/// Write a u32 into a vector, which must be 4 bytes long. The value is written in big-endian
/// format.
fn write_u32_be(dst: &mut[u8], input: u32) {
 dst[0] = (input >> 24) as u8;
 dst[1] = (input >> 16) as u8;
 dst[2] = (input >> 8) as u8;
 dst[3] = input as u8;
}

/// Read the value of a vector of bytes as a u32 value in big-endian format.
fn read_u32_be(input: &[u8]) -> u32 {
 return
 (input[0] as u32) << 24 |
 (input[1] as u32) << 16 |
 (input[2] as u32) << 8 |
 (input[3] as u32);
}

/// Read a vector of bytes into a vector of u32s. The values are read in big-endian format.
fn read_u32v_be(dst: &mut[u32], input: &[u8]) {
 assert!(dst.len() * 4 == input.len());
 let mut pos = 0;
 for chunk in input.chunks(4) {
 dst[pos] = read_u32_be(chunk);
 pos += 1;
 }
}

trait ToBits {
 /// Convert the value in bytes to the number of bits, a tuple where the 1st item is the
 /// high-order value and the 2nd item is the low order value.
 fn to_bits(self) -> (Self, Self);
}

impl ToBits for u64 {
 fn to_bits(self) -> (u64, u64) {
 return (self >> 61, self << 3);
 }
}

/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits(bits: u64, bytes: u64) -> u64 {
 let (new_high_bits, new_low_bits) = bytes.to_bits();

 if new_high_bits > 0 {
 panic!("numeric overflow occurred.")
 }

 match bits.checked_add(new_low_bits) {
 Some(x) => return x,
 None => panic!("numeric overflow occurred.")
 }
}

/// A FixedBuffer, like its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directly or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
 /// Input a vector of bytes. If the buffer becomes full, process it with the provided
 /// function and then clear the buffer.
 fn input<F>(&mut self, input: &[u8], func: F) where
 F: FnMut(&[u8]);

 /// Reset the buffer.
 fn reset(&mut self);

 /// Zero the buffer up until the specified index. The buffer position currently must not be
 /// greater than that index.
 fn zero_until(&mut self, idx: usize);

 /// Get a slice of the buffer of the specified size. There must be at least that many bytes
 /// remaining in the buffer.
 fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];

 /// Get the current buffer. The buffer must already be full. This clears the buffer as well.
 fn full_buffer<'s>(&'s mut self) -> &'s [u8];

 /// Get the current position of the buffer. 
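 /// (This is the number of input bytes currently held in the buffer.)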
fn position(&self) -> usize; /// Get the number of bytes remaining in the buffer until it is full. fn remaining(&self) -> usize; /// Get the size of the buffer fn size(&self) -> usize; } /// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize. struct FixedBuffer64 { buffer: [u8; 64], buffer_idx: usize, } impl FixedBuffer64 { /// Create a new FixedBuffer64 fn new() -> FixedBuffer64 { return FixedBuffer64 { buffer: [0; 64], buffer_idx: 0 }; } } impl FixedBuffer for FixedBuffer64 { fn input<F>(&mut self, input: &[u8], mut func: F) where F: FnMut(&[u8]), { let mut i = 0; let size = self.size(); // If there is already data in the buffer, copy as much as we can into it and process // the data if the buffer becomes full. if self.buffer_idx != 0 { let buffer_remaining = size - self.buffer_idx; if input.len() >= buffer_remaining { copy_memory(<|fim▁hole|> &input[..buffer_remaining], &mut self.buffer[self.buffer_idx..size]); self.buffer_idx = 0; func(&self.buffer); i += buffer_remaining; } else { copy_memory( input, &mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()]); self.buffer_idx += input.len(); return; } } // While we have at least a full buffer size chunk's worth of data, process that data // without copying it into the buffer while input.len() - i >= size { func(&input[i..i + size]); i += size; } // Copy any input data into the buffer. At this point in the method, the amount of // data left in the input vector will be less than the buffer size and the buffer will // be empty. let input_remaining = input.len() - i; copy_memory( &input[i..], &mut self.buffer[..input_remaining]); self.buffer_idx += input_remaining; } fn reset(&mut self) { self.buffer_idx = 0; } fn zero_until(&mut self, idx: usize) { assert!(idx >= self.buffer_idx); self.buffer[self.buffer_idx..idx].set_memory(0); self.buffer_idx = idx; } fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] { self.buffer_idx += len; return &mut self.buffer[self.buffer_idx - len..self.buffer_idx]; } fn full_buffer<'s>(&'s mut self) -> &'s [u8] { assert!(self.buffer_idx == 64); self.buffer_idx = 0; return &self.buffer[..64]; } fn position(&self) -> usize { self.buffer_idx } fn remaining(&self) -> usize { 64 - self.buffer_idx } fn size(&self) -> usize { 64 } } /// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct. trait StandardPadding { /// Add padding to the buffer. The buffer must not be full when this method is called and is /// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least /// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled /// with zeros again until only rem bytes are remaining. fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]); } impl <T: FixedBuffer> StandardPadding for T { fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) { let size = self.size(); self.next(1)[0] = 128; if self.remaining() < rem { self.zero_until(size); func(self.full_buffer()); } self.zero_until(size - rem); } } /// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2 /// family of digest functions. pub trait Digest { /// Provide message data. /// /// # Arguments /// /// * input - A vector of message data fn input(&mut self, input: &[u8]); /// Retrieve the digest result. This method may be called multiple times. /// /// # Arguments /// /// * out - the vector to hold the result. 
Must be large enough to contain output_bits().
 fn result(&mut self, out: &mut [u8]);

 /// Reset the digest. This method must be called after result() and before supplying more
 /// data.
 fn reset(&mut self);

 /// Get the output size in bits.
 fn output_bits(&self) -> usize;

 /// Convenience function that feeds a string into a digest.
 ///
 /// # Arguments
 ///
 /// * `input` The string to feed into the digest
 fn input_str(&mut self, input: &str) {
 self.input(input.as_bytes());
 }

 /// Convenience function that retrieves the result of a digest as a
 /// newly allocated vec of bytes.
 fn result_bytes(&mut self) -> Vec<u8> {
 let mut buf: Vec<u8> = repeat(0).take((self.output_bits()+7)/8).collect();
 self.result(&mut buf);
 buf
 }

 /// Convenience function that retrieves the result of a digest as a
 /// String in hexadecimal format.
 fn result_str(&mut self) -> String {
 self.result_bytes().to_hex().to_string()
 }
}

// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
 h0: u32,
 h1: u32,
 h2: u32,
 h3: u32,
 h4: u32,
 h5: u32,
 h6: u32,
 h7: u32,
}

impl Engine256State {
 fn new(h: &[u32; 8]) -> Engine256State {
 return Engine256State {
 h0: h[0],
 h1: h[1],
 h2: h[2],
 h3: h[3],
 h4: h[4],
 h5: h[5],
 h6: h[6],
 h7: h[7]
 };
 }

 fn reset(&mut self, h: &[u32; 8]) {
 self.h0 = h[0];
 self.h1 = h[1];
 self.h2 = h[2];
 self.h3 = h[3];
 self.h4 = h[4];
 self.h5 = h[5];
 self.h6 = h[6];
 self.h7 = h[7];
 }

 fn process_block(&mut self, data: &[u8]) {
 fn ch(x: u32, y: u32, z: u32) -> u32 {
 ((x & y) ^ ((!x) & z))
 }

 fn maj(x: u32, y: u32, z: u32) -> u32 {
 ((x & y) ^ (x & z) ^ (y & z))
 }

 fn sum0(x: u32) -> u32 {
 ((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
 }

 fn sum1(x: u32) -> u32 {
 ((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
 }

 fn sigma0(x: u32) -> u32 {
 ((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
 }

 fn sigma1(x: u32) -> u32 {
 ((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
 }

 let mut a = self.h0;
 let mut b = self.h1;
 let mut c = self.h2;
 let mut d = self.h3;
 let mut e = self.h4;
 let mut f = self.h5;
 let mut g = self.h6;
 let mut h = self.h7;

 let mut w = [0; 64];

 // Sha-512 and Sha-256 use basically the same calculations which are implemented
 // by these macros. Inlining the calculations seems to result in better generated code.
 macro_rules! schedule_round { ($t:expr) => (
 w[$t] = sigma1(w[$t - 2]).wrapping_add(w[$t - 7])
 .wrapping_add(sigma0(w[$t - 15])).wrapping_add(w[$t - 16]);
 )
 }

 macro_rules! sha2_round {
 ($A:ident, $B:ident, $C:ident, $D:ident,
 $E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
 {
 $H = $H.wrapping_add(sum1($E)).wrapping_add(ch($E, $F, $G))
 .wrapping_add($K[$t]).wrapping_add(w[$t]);
 $D = $D.wrapping_add($H);
 $H = $H.wrapping_add(sum0($A)).wrapping_add(maj($A, $B, $C));
 }
 )
 }

 read_u32v_be(&mut w[0..16], data);

 // Putting the message schedule inside the same loop as the round calculations allows for
 // the compiler to generate better code. 
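 // The first loop below runs rounds 0..48 while extending the schedule to w[16..64];
 // the second loop (rounds 48..64) only consumes the schedule computed above it.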
for t in (0..48).step_by(8) { schedule_round!(t + 16); schedule_round!(t + 17); schedule_round!(t + 18); schedule_round!(t + 19); schedule_round!(t + 20); schedule_round!(t + 21); schedule_round!(t + 22); schedule_round!(t + 23); sha2_round!(a, b, c, d, e, f, g, h, K32, t); sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1); sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2); sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3); sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4); sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5); sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6); sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7); } for t in (48..64).step_by(8) { sha2_round!(a, b, c, d, e, f, g, h, K32, t); sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1); sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2); sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3); sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4); sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5); sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6); sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7); } self.h0 = self.h0.wrapping_add(a); self.h1 = self.h1.wrapping_add(b); self.h2 = self.h2.wrapping_add(c); self.h3 = self.h3.wrapping_add(d); self.h4 = self.h4.wrapping_add(e); self.h5 = self.h5.wrapping_add(f); self.h6 = self.h6.wrapping_add(g); self.h7 = self.h7.wrapping_add(h); } } static K32: [u32; 64] = [ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 ]; // A structure that keeps track of the state of the Sha-256 operation and contains the logic // necessary to perform the final calculations. struct Engine256 { length_bits: u64, buffer: FixedBuffer64, state: Engine256State, finished: bool, } impl Engine256 { fn new(h: &[u32; 8]) -> Engine256 { return Engine256 { length_bits: 0, buffer: FixedBuffer64::new(), state: Engine256State::new(h), finished: false } } fn reset(&mut self, h: &[u32; 8]) { self.length_bits = 0; self.buffer.reset(); self.state.reset(h); self.finished = false; } fn input(&mut self, input: &[u8]) { assert!(!self.finished); // Assumes that input.len() can be converted to u64 without overflow self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64); let self_state = &mut self.state; self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) }); } fn finish(&mut self) { if self.finished { return; } let self_state = &mut self.state; self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) }); write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 ); write_u32_be(self.buffer.next(4), self.length_bits as u32); self_state.process_block(self.buffer.full_buffer()); self.finished = true; } } /// The SHA-256 hash algorithm pub struct Sha256 { engine: Engine256 } impl Sha256 { /// Construct a new instance of a SHA-256 digest. 
pub fn new() -> Sha256 { Sha256 { engine: Engine256::new(&H256) } } } impl Digest for Sha256 { fn input(&mut self, d: &[u8]) { self.engine.input(d); } fn result(&mut self, out: &mut [u8]) { self.engine.finish(); write_u32_be(&mut out[0..4], self.engine.state.h0); write_u32_be(&mut out[4..8], self.engine.state.h1); write_u32_be(&mut out[8..12], self.engine.state.h2); write_u32_be(&mut out[12..16], self.engine.state.h3); write_u32_be(&mut out[16..20], self.engine.state.h4); write_u32_be(&mut out[20..24], self.engine.state.h5); write_u32_be(&mut out[24..28], self.engine.state.h6); write_u32_be(&mut out[28..32], self.engine.state.h7); } fn reset(&mut self) { self.engine.reset(&H256); } fn output_bits(&self) -> usize { 256 } } static H256: [u32; 8] = [ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 ]; #[cfg(test)] mod tests { #![allow(deprecated)] extern crate rand; use self::rand::Rng; use self::rand::isaac::IsaacRng; use serialize::hex::FromHex; use std::iter::repeat; use std::u64; use super::{Digest, Sha256, FixedBuffer}; // A normal addition - no overflow occurs #[test] fn test_add_bytes_to_bits_ok() { assert!(super::add_bytes_to_bits(100, 10) == 180); } // A simple failure case - adding 1 to the max value #[test] #[should_panic] fn test_add_bytes_to_bits_overflow() { super::add_bytes_to_bits(u64::MAX, 1); } struct Test { input: String, output_str: String, } fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) { // Test that it works when accepting the message all at once for t in tests { sh.reset(); sh.input_str(&t.input); let out_str = sh.result_str(); assert!(out_str == t.output_str); } // Test that it works when accepting the message in pieces for t in tests { sh.reset(); let len = t.input.len(); let mut left = len; while left > 0 { let take = (left + 1) / 2; sh.input_str(&t.input[len - left..take + len - left]); left = left - take; } let out_str = sh.result_str(); assert!(out_str == t.output_str); } } #[test] fn test_sha256() { // Examples from wikipedia let wikipedia_tests = vec!( Test { input: "".to_string(), output_str: "e3b0c44298fc1c149afb\ f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string() }, Test { input: "The quick brown fox jumps over the lazy \ dog".to_string(), output_str: "d7a8fbb307d7809469ca\ 9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string() }, Test { input: "The quick brown fox jumps over the lazy \ dog.".to_string(), output_str: "ef537f25c895bfa78252\ 6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string() }); let tests = wikipedia_tests; let mut sh: Box<_> = box Sha256::new(); test_hash(&mut *sh, &tests); } /// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is /// correct. 
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) { let total_size = 1000000; let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect(); let mut rng = IsaacRng::new_unseeded(); let mut count = 0; digest.reset(); while count < total_size { let next: usize = rng.gen_range(0, 2 * blocksize + 1); let remaining = total_size - count; let size = if next > remaining { remaining } else { next }; digest.input(&buffer[..size]); count += size; } let result_str = digest.result_str(); let result_bytes = digest.result_bytes(); assert_eq!(expected, result_str); let expected_vec: Vec<u8> = expected.from_hex() .unwrap() .into_iter() .collect(); assert_eq!(expected_vec, result_bytes); } #[test] fn test_1million_random_sha256() { let mut sh = Sha256::new(); test_digest_1million_random( &mut sh, 64, "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0"); } } #[cfg(test)] mod bench { extern crate test; use self::test::Bencher; use super::{Sha256, FixedBuffer, Digest}; #[bench] pub fn sha256_10(b: &mut Bencher) { let mut sh = Sha256::new(); let bytes = [1; 10]; b.iter(|| { sh.input(&bytes); }); b.bytes = bytes.len() as u64; } #[bench] pub fn sha256_1k(b: &mut Bencher) { let mut sh = Sha256::new(); let bytes = [1; 1024]; b.iter(|| { sh.input(&bytes); }); b.bytes = bytes.len() as u64; } #[bench] pub fn sha256_64k(b: &mut Bencher) { let mut sh = Sha256::new(); let bytes = [1; 65536]; b.iter(|| { sh.input(&bytes); }); b.bytes = bytes.len() as u64; } }<|fim▁end|>
<|file_name|>parquet_dataset_kernel.cc<|end_file_name|><|fim▁begin|>/* Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "absl/container/flat_hash_map.h" #include "struct2tensor/kernels/parquet/parquet_reader.h" #include "struct2tensor/kernels/parquet/parquet_reader_util.h" #include "struct2tensor/kernels/vector_to_tensor.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" namespace struct2tensor { namespace parquet_dataset { class Dataset : public tensorflow::data::DatasetBase { public: explicit Dataset(tensorflow::OpKernelContext* ctx, const std::vector<std::string>& filenames, const std::vector<std::string>& value_paths, const tensorflow::DataTypeVector& value_dtypes, const std::vector<std::vector<int>>& segregated_path_indices, const tensorflow::int64 batch_size, const tensorflow::DataTypeVector& output_dtypes) : DatasetBase(tensorflow::data::DatasetContext(ctx)), filenames_(filenames), value_paths_(value_paths), value_dtypes_(value_dtypes), segregated_path_indices_(segregated_path_indices), batch_size_(batch_size), output_dtypes_(output_dtypes), output_shapes_([this]() { // The first output tensor is always the root size (number of messages // read) which is a scalar. Other output tensors are parent indices // so they are 1-D. std::vector<tensorflow::PartialTensorShape> shapes( output_dtypes_.size(), tensorflow::PartialTensorShape({-1})); shapes[0] = tensorflow::PartialTensorShape({}); return shapes; }()) {} std::unique_ptr<tensorflow::data::IteratorBase> MakeIteratorInternal( const std::string& prefix) const override { return absl::WrapUnique(new Iterator( {this, tensorflow::strings::StrCat(prefix, "::Parquet")}, filenames_, value_paths_, value_dtypes_, segregated_path_indices_, batch_size_)); } const tensorflow::DataTypeVector& output_dtypes() const override { return output_dtypes_; } const std::vector<tensorflow::PartialTensorShape>& output_shapes() const override { return output_shapes_; } std::string DebugString() const override { return "ParquetDatasetOp::Dataset"; } tensorflow::Status CheckExternalState() const { return tensorflow::Status::OK(); } protected: // TODO(andylou): Implement saving dataset state. 
tensorflow::Status AsGraphDefInternal( tensorflow::data::SerializationContext* ctx, DatasetGraphDefBuilder* b, tensorflow::Node** output) const override { return tensorflow::errors::Unimplemented( DebugString(), " does not support serialization."); } private: class Iterator : public tensorflow::data::DatasetIterator<Dataset> { public: explicit Iterator( const Params& params, const std::vector<std::string>& filenames, const std::vector<std::string>& value_paths, const tensorflow::DataTypeVector& value_dtypes, const std::vector<std::vector<int>>& segregated_path_indices, const tensorflow::int64 batch_size) : DatasetIterator<Dataset>(params), filenames_(filenames), value_paths_(value_paths), value_dtypes_(value_dtypes), segregated_path_indices_(segregated_path_indices), batch_size_(batch_size), current_file_index_(0) {} // For a deeper understanding of what tensors are returned in out_tensors, // see parquet_dataset_op.cc. tensorflow::Status GetNextInternal( tensorflow::data::IteratorContext* ctx, std::vector<tensorflow::Tensor>* out_tensors, bool* end_of_sequence) override { tensorflow::mutex_lock l(mu_); if (current_file_index_ >= filenames_.size()) { *end_of_sequence = true; return tensorflow::Status::OK(); } if (!parquet_reader_) { // Once a file is finished reading, this will create a ParquetReader // for the next file in file_names_. TF_RETURN_IF_ERROR( ValidateFileAndSchema(filenames_[current_file_index_])); TF_RETURN_IF_ERROR(ParquetReader::Create( filenames_[current_file_index_], value_paths_, value_dtypes_, batch_size_, &parquet_reader_)); } bool end_of_file = false; std::vector<ParquetReader::ParentIndicesAndValues> parent_indices_and_values; TF_RETURN_IF_ERROR(parquet_reader_->ReadMessages( ctx, &parent_indices_and_values, &end_of_file)); if (end_of_file) { ++current_file_index_; parquet_reader_.reset(); } // pushes the number of messages read as the first output tensor. tensorflow::Tensor root_tensor(ctx->allocator({}), tensorflow::DT_INT64, {}); if (parent_indices_and_values.size() != value_paths_.size()) { return tensorflow::errors::Internal(absl::StrCat( parent_indices_and_values.size(), " messages read, expected to read ", value_paths_.size())); } if (parent_indices_and_values[0].parent_indices.empty()) { return tensorflow::errors::Internal( absl::StrCat("0 messages read, expected to read ", batch_size_)); } root_tensor.flat<tensorflow::int64>()(0) = parent_indices_and_values[0].parent_indices[0].size(); out_tensors->push_back(std::move(root_tensor)); for (int column_index = 0; column_index < value_paths_.size(); ++column_index) { for (int path_index : segregated_path_indices_[column_index]) { tensorflow::Tensor res( ctx->allocator({}), tensorflow::DT_INT64, {static_cast<long long>(parent_indices_and_values[column_index] .parent_indices[path_index] .size())}); struct2tensor::VectorToTensor(parent_indices_and_values[column_index] .parent_indices[path_index], &res, /*produce_string_view=*/false); out_tensors->push_back(std::move(res)); } out_tensors->push_back( std::move(parent_indices_and_values[column_index].values)); } return tensorflow::Status::OK(); } protected: // TODO(b/139440495): Implement saving and restoring iterator state. 
tensorflow::Status SaveInternal(
 tensorflow::data::SerializationContext* ctx,
 tensorflow::data::IteratorStateWriter* writer) {
 return tensorflow::errors::Unimplemented(
 "Parquet Dataset Iterator does not support checkpointing.");
 }

 tensorflow::Status RestoreInternal(
 tensorflow::data::IteratorContext* ctx,
 tensorflow::data::IteratorStateReader* reader) {
 return tensorflow::errors::Unimplemented(
 "Parquet Dataset Iterator does not support checkpointing.");
 }

 private:
 // validates that the file exists and can be opened as a parquet file.
 // validates that the schema is the expected schema.
 tensorflow::Status ValidateFileAndSchema(const std::string& filename) {
 std::unique_ptr<parquet::ParquetFileReader> file_reader;
 tensorflow::Status s = OpenFileWithStatus(filename, &file_reader);

 absl::flat_hash_map<std::string, tensorflow::DataType> paths;
 std::shared_ptr<parquet::FileMetaData> file_metadata =
 file_reader->metadata();
 for (int i = 0; i < file_metadata->num_columns(); ++i) {
 std::string path =
 file_metadata->schema()->Column(i)->path()->ToDotString();
 switch (file_metadata->schema()->Column(i)->physical_type()) {
 case parquet::Type::INT32:
 paths[path] = tensorflow::DT_INT32;
 break;
 case parquet::Type::INT64:
 paths[path] = tensorflow::DT_INT64;
 break;
 case parquet::Type::FLOAT:
 paths[path] = tensorflow::DT_FLOAT;
 break;
 case parquet::Type::DOUBLE:
 paths[path] = tensorflow::DT_DOUBLE;
 break;
 case parquet::Type::BOOLEAN:
 paths[path] = tensorflow::DT_BOOL;
 break;
 case parquet::Type::BYTE_ARRAY:
 paths[path] = tensorflow::DT_STRING;
 break;
 default:<|fim▁hole|>
 }
 for (int i = 0; i < value_dtypes_.size(); ++i) {
 auto paths_iter = paths.find(value_paths_[i]);
 if (paths_iter == paths.end()) {
 return tensorflow::errors::InvalidArgument(
 absl::StrCat("path not found ", value_paths_[i]));
 } else if (paths_iter->second != value_dtypes_[i]) {
 return tensorflow::errors::InvalidArgument(
 absl::StrCat("This dtype is incorrect: ", value_dtypes_[i],
 ". dtype should be: ", paths_iter->second));
 }
 }
 return s;
 }

 const std::vector<std::string>& filenames_;
 const std::vector<std::string>& value_paths_;
 const tensorflow::DataTypeVector& value_dtypes_;
 const std::vector<std::vector<int>>& segregated_path_indices_;
 const tensorflow::int64 batch_size_;
 int current_file_index_ ABSL_GUARDED_BY(mu_);
 std::unique_ptr<ParquetReader> parquet_reader_ ABSL_GUARDED_BY(mu_);
 tensorflow::mutex mu_;
 };

 const std::vector<std::string> filenames_;
 const std::vector<std::string> value_paths_;
 const tensorflow::DataTypeVector value_dtypes_;
 // 2D vector to tell us which parent_indices from the path we want. i.e.
 // [[0,1],[0]] means we want the 0th field and 1st field of the 0th path, and
 // the 0th field of the 1st path. 
const std::vector<std::vector<int>> segregated_path_indices_; const tensorflow::int64 batch_size_; const tensorflow::DataTypeVector output_dtypes_; const std::vector<tensorflow::PartialTensorShape> output_shapes_; }; class ParquetDatasetOp : public tensorflow::data::DatasetOpKernel { public: ParquetDatasetOp(tensorflow::OpKernelConstruction* ctx) : DatasetOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("value_paths", &value_paths_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("value_dtypes", &value_dtypes_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("parent_index_paths", &parent_index_paths_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("path_index", &path_index_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("batch_size", &batch_size_)); } void MakeDataset(tensorflow::OpKernelContext* ctx, tensorflow::data::DatasetBase** output) override { const tensorflow::Tensor* filenames_tensor; OP_REQUIRES_OK(ctx, ctx->input("filenames", &filenames_tensor)); std::vector<std::string> filenames; filenames.reserve(filenames_tensor->NumElements()); for (int i = 0; i < filenames_tensor->NumElements(); ++i) { filenames.push_back(filenames_tensor->flat<tensorflow::tstring>()(i)); } tensorflow::DataTypeVector output_dtypes = tensorflow::DataTypeVector(); int column_counter = 0; std::string prev = parent_index_paths_[0]; output_dtypes.push_back(tensorflow::DT_INT64); for (int i = 1; i < parent_index_paths_.size(); ++i) { std::string curr = parent_index_paths_[i]; output_dtypes.push_back(tensorflow::DT_INT64); if (curr != prev) { output_dtypes.push_back(value_dtypes_[column_counter]); ++column_counter; prev = curr; } } output_dtypes.push_back(tensorflow::DT_INT64); output_dtypes.push_back(value_dtypes_[column_counter]); // This validates that parent_index_paths is aligned with value_paths, // so segregated_path_indices can correctly be constructed. for (int i = 0, j = 0; i < parent_index_paths_.size(); ++i) { while (parent_index_paths_[i] != value_paths_[j]) { ++j; if (j >= value_paths_.size()) { ctx->CtxFailure(tensorflow::errors::InvalidArgument( "parent_index_paths is not aligned with value_paths")); return; } } } std::vector<std::vector<int>> segregated_path_indices(value_paths_.size()); // This is used to transform path_index to a 2d vector, splitting it up // by clustering the same paths. for example: [0, 1, 2, 0, 1, 0, 1, 2, 3] // becomes: [[0, 1, 2], [0, 1], [0, 1, 2, 3]] for (int i = 0, j = 0; i < parent_index_paths_.size(); ++i) { if (parent_index_paths_[i] == value_paths_[j]) { segregated_path_indices[j].push_back(path_index_[i] + 1); } if (i < parent_index_paths_.size() - 1 && parent_index_paths_[i + 1] != parent_index_paths_[i]) { ++j; } } *output = new Dataset(ctx, filenames, value_paths_, value_dtypes_, segregated_path_indices, batch_size_, output_dtypes); } private: std::vector<std::string> value_paths_; tensorflow::DataTypeVector value_dtypes_; // Paths of parent indices that we want. For example: // ["DocId", "Name.Language.Code", "Name.Language.Code", "Name.Language.Code"] std::vector<std::string> parent_index_paths_; std::vector<int> path_index_; int batch_size_; }; // Register the kernel implementation for ParquetDataset. REGISTER_KERNEL_BUILDER(Name("ParquetDataset").Device(tensorflow::DEVICE_CPU), ParquetDatasetOp); } // namespace parquet_dataset } // namespace struct2tensor<|fim▁end|>
return tensorflow::errors::Unimplemented(absl::StrCat( "This Parquet Data Type is unimplemented ", file_metadata->schema()->Column(i)->physical_type())); }
<|file_name|>statevector.js<|end_file_name|><|fim▁begin|>//________________________________________________________________________________________________
// statevector.js
//todo: update description
//
// cStateVector object - This is a generic container useful for ...
// Every item added to the queue is time stamped for time series analysis, playback, etc.
// There are optional functions onAdd[1,2,..]() that the user can define that will be //cp-not right!!!!!
// called each time imageUpdate() is called.
//
// Note that the debug statements are commented out in some functions to reduce vpRefresh time.
//cp-mention that this saves mouse & moves only > a fixed amount...
//
// history
// 1106 cperz created
// _______________________________________end_Rel 12.1 __________________________________________
//
// 131119 cperz renamed imageUpdate to viewUpdate to avoid confusion with public interface
//
// todo
// - could make queue trim on time instead of size (with size max still...)
// - add methods for getlast direction, mouse position, etc....
// much more straightforward, no need to understand internal storage of this object!
//________________________________________________________________________________________________

// state types:
// view: set by viewUpdate()
// mouse: set by mouseUpdate()
//
us.eStateTypes = { view: 0, mouse: 1 };

//________________________________________________________________________________________________
function cStateVector(vpObj) {
 var thisImg = vpObj; // the viewport object - the parent
 var self = this; // local var for closure
 var imageId_ = thisImg.getImageId();
 var uScopeIdx_ = thisImg.getImageIdx();
 var imageSizeMax;
 var mouseSizeMax;
 var idxPrevNext = -1; // index in image queue
 var idxPrevNextStart = -1; // index of starting position of prev/next sequence
 var d = new Date();
 var dTime = 1. / 10.; // get the start time and scale it from millisecs to 1/100th sec
 var startTime = d.getTime() * dTime;
 var imageQueue = []; // the image state vector queue
 var mouseQueue = []; // the mouse state vector queue
 var cb_imageState = []; // array of image state function callbacks
 var cb_mouseState = []; // array of mouse state function callbacks
 var stateTypeView = us.eStateTypes.view; // for quick access
 var stateTypeMouse = us.eStateTypes.mouse;
 var minMoveMagnitude_ = 12; // the minimum magnitude in pixels of movement to record
 var slopeLimit = 4;
 var slopeLimtInv = 0.25; // 1. / slopeLimit
 var kSVQUEUE_VIEW_SIZE = 500; // number of view states to retain in state vector queue
 // this throttles the responsivity of the state vector!

 //............................................ _sinceStartTime local
 // This function is used for all time stamps in this object.
 // It converts absolute time from "new Date().getTime()" to a differential in seconds
 // (to the 3rd decimal place - milliseconds) since the start time of this object.
 var _sinceStartTime = function (time) {
 return (time * dTime - startTime).toFixed(3);
 };

 //............................................ _setSize local
 // This function sets the image and mouse queue maximum sizes.
 var _setSize = function (newSize) {
 var newMax = (newSize) ? 
newSize : 200; // maximum size for queue if (newMax < 1 || newMax > 2000) // sanity check on the queue size return; imageSizeMax = newMax; mouseSizeMax = 100; // parseInt(newMax / 2); // limit the mouse queue to half the size while (imageQueue.length > imageSizeMax) imageQueue.shift(); while (mouseQueue.length > mouseSizeMax) mouseQueue.shift(); }; //............................................ registerForEvent // This function adds a subscription to state change event. // The imageUpdate() and mouseUpdate() functions call these subscribers. // this.registerForEvent = function (functionString, stateType) { if (typeof functionString === "function") { if (isUnDefined(stateType)) stateType = us.eStateTypes.view; if (us.DBGSTATEVECTOR) thisImg.log("statevector: add callback: (" + functionString + ") Type: " + stateType); if (stateType === us.eStateTypes.view) cb_imageState.push(functionString); else cb_mouseState.push(functionString); } }; //............................................ unRegisterForEvent // This function removes a subscription to state change event. this.unRegisterForEvent = function (functionString) { // find a string match in the call back arrays and remove it. if (typeof functionString === "function") { functionString = functionString.toString(); // search image state queue for the event for (var c = 0, len = cb_imageState.length; c < len; c++) { var str = cb_imageState[c].toString(); if (str.indexOf(functionString) !== -1) { if (us.DBGSTATEVECTOR) thisImg.log("statevector: remove image callback: (" + functionString + ")"); cb_imageState.remove(c); return; } } // search mouse state queue for the event for (var c = 0, len = cb_mouseState.length; c < len; c++) { var str = cb_mouseState[c].toString(); if (str.indexOf(functionString) !== -1) { if (us.DBGSTATEVECTOR) thisImg.log("statevector: remove mouse callback: (" + functionString + ")"); cb_mouseState.remove(c); return; } } } }; //............................................ viewUpdate // This function adds an image state vector to its queue. // If the state description is a move, the city block distance between this position and the previous state on the queue // must be greater than a threshold. Otherwise the state is not added to the queue. 
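 // For example, with the default minMoveMagnitude_ of 12 pixels, a pan of (dx = 5, dy = 4)
 // measures |5| + |4| = 9 < 12 and is dropped rather than queued.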
 //
 // An Image State Vector is a definition of the view state plus:
 // time stamp
 // + desc {"all", "move", "zoom", "focus", "dims", }
 // + viewport's vpView contents {left, top, width, height, zoom, focus, angleDeg}
 // + dir {"none", "wholeview", "north", "east", "west", "south", northwest, ...} //cp-fill in and define
 // + magpx { magnitude of move in pixels }
 //
 // parameters
 // state: is viewport's vpView contents
 // changeDesc: {"all", "move", "zoom", "focus", "dims", "rotate"}
 //
 // todo:
 // - I'd like to know if there are listeners, perhaps I don't do all this if not
 var lastViewState_ = new cPoint(0,0);
 this.viewUpdate = function (state, changeDesc) {
 var zoom = state.zoom;
 var x = state.left;
 var y = state.top;
 //debug-only thisImg.log("image state: X,Y,ZOOM: " + x + "," + y + "," + zoom);

 if (imageQueue.length == imageSizeMax) // maintain queue size
 imageQueue.shift(); // remove the top element of the array to make room at the bottom

 var magpx = 0;
 if (changeDesc == "move") { // throttle the responsivity to motion
 var magpx = Math.abs(state.left - lastViewState_.x) + Math.abs(state.top - lastViewState_.y); // city-block magnitude of move in pixels
 if (magpx < minMoveMagnitude_) {
 //dbg-only if (us.DBGSTATEVECTOREX) thisImg.log("cStateVector: viewUpdate: magnitude of move (" + magpx + ") < minimum (" + minMoveMagnitude_ + ")");
 return;
 }
 //consoleWrite("cur state: " + state.left + "," + state.top + " last state: " + lastViewState_.toString() + " magpx: " + magpx);
 }

 var sv = []; // add info to new state vector
 sv.time = _sinceStartTime(new Date().getTime()); // add time stamp
 sv.desc = changeDesc; // add input change description
 sv.state = jQuery.extend(true, {}, state); // add input view state, must clone state first
 if (changeDesc == "move") //
 sv.magpx = magpx; // add magnitude of move (units are pixels)
 _computeDir(sv); // add computed direction 'dir'

 imageQueue.push(sv); // add state vector to the queue
 lastViewState_.set(state.left, state.top); // save last queue view position

 //if (us.DBGSTATEVECTOR) sv.magpx ? thisImg.log("cStateVector: viewUpdate: dir: " + sv.dir + " magpx: " + sv.magpx)
 // : thisImg.log("cStateVector: viewUpdate: desc: " + sv.desc);

 for (var c = 0, len = cb_imageState.length; c < len; c++) // execute the callbacks to signal a SV update event
 cb_imageState[c]();

 triggerUScopeEvent(us.evVIEWCHANGE, { // fire event that the view has changed
 imageId: imageId_,
 uScopeIdx: uScopeIdx_,
 desc: sv.desc,
 x: thisImg.getViewCenterX(),
 y: thisImg.getViewCenterY(),
 zoom: zoom,
 angleDeg: state.angleDeg
 });
 };

 //............................................ mouseUpdate
 // This function adds a mouse state vector to its queue.
 // If the state description is a mousemove, the city block distance between this position and the previous state on the queue
 // must be greater than a threshold. Otherwise the state is not added to the queue. 
// // A Mouse State Vector is a definition of the mouse state: // time stamp // + desc {"mousemove", "mousedown", ...} // + state cPoint {x,y} of mouse position in base image coordinates // // parameters // state: cPoint {x,y} of mouse position in base image coordinates // eventDesc: {"mousemove", "mousedown",} // bForce: boolean, if true the function does not exit when there are no listeners // so us.evMOUSECHANGE event is fired if mouse move is sufficient var lastMouseState_ = new cPoint(0, 0); this.mouseUpdate = function (state, eventDesc, bForce) { if (bForce == false && cb_mouseState.length == 0) // do not save state if no one is subscribed return; //dbg-only if (us.DBGSTATEVECTOREX) thisImg.log("cStateVector: mouseUpdate: " + state.toString() + " desc: " + eventDesc); if (mouseQueue.length == mouseSizeMax) // maintain queue size mouseQueue.shift(); // remove the top element of the array to make room at the bottom var magpx = 0; if (eventDesc == "mousemove") { if (lastMouseState_) { // throttle the responsivity to motion magpx = lastMouseState_.cityBlockDistance(state); //consoleWrite("cur state: " + state + " last state: " + lastMouseState_.toString() + " magpx: " + magpx); if (magpx < minMoveMagnitude_) { //dbg-only if (us.DBGSTATEVECTOREX) thisImg.log("cStateVector: mouseUpdate: magnitude of move (" + magpx + ") < minimum (" + minMoveMagnitude_ + ")"); return; } } } var sv = []; // add info to new state vector sv.time = _sinceStartTime(new Date().getTime()); // add time stamp sv.desc = eventDesc; // add input change description sv.state = jQuery.extend(true, {}, state); // add input view state, must clone state first mouseQueue.push(sv); // add state vector to the queue lastMouseState_.setPoint(sv.state); // save last queue mouse position //dbg-only if (us.DBGSTATEVECTOREX) thisImg.log("cStateVector: mouseUpdate: desc: " + sv.desc + sv.state.toString() + " magnitude: " + magpx); for (var c = 0, len = cb_mouseState.length; c < len; c++) // execute the callbacks to signal a SV update event cb_mouseState[c](); triggerUScopeEvent(us.evMOUSECHANGE, { imageId: imageId_, uScopeIdx: uScopeIdx_, uScopeIdx: thisImg.getImageIdx(), x: state.x, y: state.y }); }; //............................................ getLastMousePosition // This function returns the last recorded mouse position or undefined if none have been recorded. this.getLastMousePosition = function () { return (mouseQueue.length > 0) ? mouseQueue[mouseQueue.length - 1].state : undefined; }; //............................................ getMouseEventsSince // This function returns an array in ascending order of the mouse events since a specified time. // The time stamp in the list of events is in this object's format - delta time since the start of the object scaled by dTime; // Optionally a specific type of mouse event can be selected. // A maximum number of returned events is enforced but can be overridden. // // parameters: // sinceTime required, time to start collecting the event, sinceTime = new Date().getTime() // eventDesc optional, return this mouse event type {"mousemove", "mousedown", ...} default is all // maxRtnSize optional, maximum number of mousestates to return, default is 100 this.getMouseEventsSince = function (sinceTime, eventDesc, maxRtnSize) { var rtnQueue = []; if (isUnDefined(sinceTime)) return rtnQueue; sinceTime = _sinceStartTime(sinceTime); // convert to same time format as used here in if (mouseQueue.length > 0) { eventDesc = (eventDesc) ? 
eventDesc : "all"; // set event filter
 maxRtnSize = (maxRtnSize) ? maxRtnSize + 1 : 101; // set limit of number of items returned
 var cnt = 0;
 var startIdx = mouseQueue.length;
 while (--startIdx > 0 && ++cnt < maxRtnSize) {
 if (mouseQueue[startIdx].time < sinceTime) // am I now before my time?
 break;
 if (eventDesc == "all" || eventDesc == mouseQueue[startIdx].desc)
 rtnQueue.unshift(mouseQueue[startIdx]); // add to the beginning of the array for ascending order
 }
 }
 return rtnQueue;
 };

 //............................................ getPrevNext
 this.getPrevNext = function (incr) {//incr s/b +/-1
 if (imageQueue.length < 1)
 return undefined;
 var rtnSV = undefined;
 if (idxPrevNext == -1) {
 idxPrevNext = imageQueue.length - 1 + incr;
 idxPrevNextStart = imageQueue.length - 1;
 }
 else {
 idxPrevNext = idxPrevNext + incr;
 if (idxPrevNext > idxPrevNextStart)
 idxPrevNext = idxPrevNextStart; // don't move past starting position
 }
 /////thisImg.log("getPrevNext: idxPrevNext: " + idxPrevNext);
 if (idxPrevNext > 0 && idxPrevNext < imageQueue.length)
 rtnSV = imageQueue[idxPrevNext];
 if (idxPrevNext == idxPrevNextStart)
 idxPrevNext = -1; //reset
 return rtnSV;
 };

 //............................................ getLastDirection
 // This function returns the last recorded image direction or undefined if none have been recorded.
 this.getLastDirection = function () {
 return (imageQueue.length > 0) ? imageQueue[imageQueue.length - 1].dir : undefined;
 };

 //............................................ getLastDescription
 // This function returns the last recorded image description or undefined if none have been recorded.
 this.getLastDescription = function () {
 return (imageQueue.length > 0) ? imageQueue[imageQueue.length - 1].desc : undefined;
 };

 //............................................ getLast
 // This function returns the last state vector for the specified state type.
 this.getLast = function (stateType) {//cp-todo rename to get LastState
 if (isUnDefined(stateType))
 stateType = us.eStateTypes.view;
 if (stateType === us.eStateTypes.view)
 return (imageQueue.length > 0) ? imageQueue[imageQueue.length - 1] : undefined;
 else
 return (mouseQueue.length > 0) ? mouseQueue[mouseQueue.length - 1] : undefined;
 };

 //............................................ reportLast
 // This function returns a string containing the last state vector report for the specified state type.
 this.reportLast = function (stateType) {
 var str = "";
 var queue = (stateType === us.eStateTypes.view) ? imageQueue : mouseQueue;
 if (queue.length > 0) {
 str = (stateType === us.eStateTypes.view) ? "LastViewState: " : "LastMouseState: ";
 var state = queue[queue.length - 1];
 str += this.report(state, stateType);
 }
 return str;
 };

 //............................................ report
 // This function converts the passed state into a CSV string.
 this.report = function (stateVector, stateType) {
 var str = "";
<|fim▁hole|>
 if (isUnDefined(stateVector))
 return str;
 if (isUnDefined(stateType))
 stateType = us.eStateTypes.view;
 if (stateType === us.eStateTypes.view) {
 var zoom = stateVector.state.zoom;
 var isFit = thisImg.isFitZoom(zoom);
 var mag = (isFit) ? "Fit" : thisImg.convertToMag(zoom);
 zoom = (isFit) ? "Fit" : zoom;
 // note that this format is copied into the performance test sequence in cPerformanceTest
 str = parseInt(stateVector.time) + ', \"' + stateVector.desc + '\", '
 + stateVector.state.left + ", " + stateVector.state.top + ", " + zoom + ", " + mag;
 //note: stateVector.dir & stateVector.magpx not reported (yet?) 
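 // example output line (illustrative values): 1203, "move", 1024, 768, 2.5, 10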
} else {
 var sv = mouseQueue[mouseQueue.length - 1];
 str = stateVector.desc;
 str += stateVector.state.toString();
 if (stateVector.magpx)
 str += " magnitude_px:" + stateVector.magpx;
 }
 return str;
 };

 this.reportTitle = function (stateType) {
 var str = "";
 if (isUnDefined(stateType))
 stateType = us.eStateTypes.view;
 str = "Filename: " + thisImg.getFileName() + " Width: " + thisImg.getImageWidth() + " Height:" + thisImg.getImageHeight() + "\n";
 if (stateType === us.eStateTypes.view) {
 str += "time, desc, centerX, centerY, zoom, mag\n";
 //note: stateVector.dir & stateVector.magpx not reported (yet?)
 }
 else {
 // not implemented
 }
 return str;
 };

 //............................................ reportAll
 this.reportAll = function (stateType, normalize) {
 var str = this.reportTitle(stateType);
 if (isUnDefined(stateType))
 stateType = us.eStateTypes.view;
 if (isUnDefined(normalize))
 normalize = false;
 var queue = (stateType === us.eStateTypes.view) ? imageQueue : mouseQueue;
 if (queue.length > 0) {
 for (var s = 0, len = queue.length; s < len; s++) {
 var svRef = queue[s]; // get state vector at index s
 var sv = new cloneObject(svRef); // clone the state vector so I can modify it
 var x = sv.state.left; // state reports top left corner, convert to center position
 var y = sv.state.top;
 x = parseInt(x + thisImg.getViewWidth() / 2);
 y = parseInt(y + thisImg.getViewHeight() / 2);
 if (normalize == true) { // if normalize, x and y are proportion of the image dims
 x = (x / thisImg.getImageWidth()).toFixed(5);
 y = (y / thisImg.getImageHeight()).toFixed(5);
 }
 sv.state.left = x;
 sv.state.top = y;
 str += (this.report(sv, stateType) + "\n");
 }
 }
 return str;
 };

 //............................................ _computeDir
 // This function evaluates the most recent state change and concludes the view motion direction.
 // It then adds that direction to the passed array.
 var _computeDir = function (ioSV) {
 ioSV.dir = "none";
 if (imageQueue.length == 0 || ioSV.desc == "all" || ioSV.desc != "move")
 return;
 var lastState = self.getLast(stateTypeView).state;
 var dx = ioSV.state.left - lastState.left;
 var dy = ioSV.state.top - lastState.top;
 if (Math.abs(dx) > lastState.width * 2 || Math.abs(dy) > lastState.height * 2 // if moved N views //cp-revisit factor of 2
 || ioSV.desc == "dim") { // or resized view dimensions
 ioSV.dir = "wholeview"; // then indicate whole view change
 return;
 }
 var dr = slopeLimit + 1; // slope of motion
 if (dy != 0)
 dr = dx / dy;
 if (us.DBGSTATEVECTOREX) ioSV.magpx ? thisImg.log("cStateVector: computeDir: " + dr.toFixed(2) + " dx dy:" + dx + " " + dy + " magpx:" + ioSV.magpx)
 : thisImg.log("cStateVector: computeDir: " + dr.toFixed(2) + " dx dy:" + dx + " " + dy);
 var dir = "none"; // default direction of move
 if (Math.abs(dr) > slopeLimit) { // if horizontal motion
 if (dx < 0) // if moving left
 dir = "east";
 else
 dir = "west";
 }
 else if (Math.abs(dr) < slopeLimtInv) { // if vertical motion
 if (dy < 0) // if moving up
 dir = "south";
 else
 dir = "north";
 }
 else if (dx < 0) { // diagonal motion, moving left
 if (dy < 0) // if moving up, too
 dir = "southeast";
 else
 dir = "northeast";
 }
 else { // diagonal motion, moving right
 if (dy < 0) // if moving up, too
 dir = "southwest";
 else
 dir = "northwest";
 }
 ioSV.dir = dir; // add direction to return ioSV
 };

 //............................................................................................ 
// construction _setSize(kSVQUEUE_VIEW_SIZE); // save the max queue size allowed }; //________________________________________________________________________________________________<|fim▁end|>
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2014-2015 Isotoma Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from touchdown import ssh from touchdown.aws.ec2.keypair import KeyPair from touchdown.aws.iam import InstanceProfile from touchdown.aws.vpc import SecurityGroup, Subnet from touchdown.core import argument, errors, serializers from touchdown.core.plan import Plan, Present from touchdown.core.resource import Resource from ..account import BaseAccount from ..common import SimpleApply, SimpleDescribe, SimpleDestroy class BlockDevice(Resource): resource_name = "block_device" virtual_name = argument.String(field="VirtualName") device_name = argument.String(field="DeviceName") disabled = argument.Boolean(field="NoDevice", serializer=serializers.Const("")) class NetworkInterface(Resource): resource_name = "network_interface" public = argument.Boolean(default=False, field="AssociatePublicIpAddress") security_groups = argument.ResourceList(SecurityGroup, field="Groups") class Instance(Resource): resource_name = "ec2_instance" name = argument.String(min=3, max=128, field="Name", group="tags") ami = argument.String(field="ImageId")<|fim▁hole|> instance_type = argument.String(field="InstanceType") key_pair = argument.Resource(KeyPair, field="KeyName") subnet = argument.Resource(Subnet, field="SubnetId") instance_profile = argument.Resource( InstanceProfile, field="IamInstanceProfile", serializer=serializers.Dict(Name=serializers.Property("InstanceProfileName")), ) user_data = argument.String(field="UserData") network_interfaces = argument.ResourceList( NetworkInterface, field="NetworkInterfaces" ) block_devices = argument.ResourceList( BlockDevice, field="BlockDeviceMappings", serializer=serializers.List(serializers.Resource()), ) security_groups = argument.ResourceList(SecurityGroup, field="SecurityGroupIds") tags = argument.Dict() account = argument.Resource(BaseAccount) class Describe(SimpleDescribe, Plan): resource = Instance service_name = "ec2" api_version = "2015-10-01" describe_action = "describe_instances" describe_envelope = "Reservations[].Instances[]" key = "InstanceId" def get_describe_filters(self): return { "Filters": [ {"Name": "tag:Name", "Values": [self.resource.name]}, { "Name": "instance-state-name", "Values": [ "pending", "running", "shutting-down", " stopping", "stopped", ], }, ] } class Apply(SimpleApply, Describe): create_action = "run_instances" create_envelope = "Instances[0]" # create_response = 'id-only' waiter = "instance_running" signature = (Present("name"),) def get_create_serializer(self): return serializers.Resource(MaxCount=1, MinCount=1) class Destroy(SimpleDestroy, Describe): destroy_action = "terminate_instances" waiter = "instance_terminated" def get_destroy_serializer(self): return serializers.Dict( InstanceIds=serializers.ListOfOne(serializers.Property("InstanceId")) ) class SSHInstance(ssh.Instance): resource_name = "ec2_instance" input = Instance def get_network_id(self, runner): # FIXME: We can save on some steps if we only do this once 
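        # (describe_object() is called here and again in get_serializer() below)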
obj = runner.get_plan(self.adapts).describe_object() return obj.get("VpcId", None) def get_serializer(self, runner, **kwargs): obj = runner.get_plan(self.adapts).describe_object() if getattr(self.parent, "proxy", None) and self.parent.proxy.instance: if hasattr(self.parent.proxy.instance, "get_network_id"): network = self.parent.proxy.instance.get_network_id(runner) if network == self.get_network_id(runner): return serializers.Const(obj["PrivateIpAddress"]) if obj.get("PublicDnsName", ""): return serializers.Const(obj["PublicDnsName"]) if obj.get("PublicIpAddress", ""): return serializers.Const(obj["PublicIpAddress"]) raise errors.Error("Instance {} not available".format(self.adapts))<|fim▁end|>
<|file_name|>filter.ts<|end_file_name|><|fim▁begin|>/*----------------------------------------------------------------------------- | Copyright (c) 2014-2017, PhosphorJS Contributors | | Distributed under the terms of the BSD 3-Clause License. | | The full license is in the file LICENSE, distributed with this software. |----------------------------------------------------------------------------*/ import { IIterator, IterableOrArrayLike, iter } from './iter'; /** * Filter an iterable for values which pass a test. * * @param object - The iterable or array-like object of interest. * * @param fn - The predicate function to invoke for each value. * * @returns An iterator which yields the values which pass the test. * * #### Example * ```typescript * import { filter, toArray } from '@phosphor/algorithm'; * * let data = [1, 2, 3, 4, 5, 6]; * * let stream = filter(data, value => value % 2 === 0); * * toArray(stream); // [2, 4, 6] * ``` */ export function filter<T>(object: IterableOrArrayLike<T>, fn: (value: T, index: number) => boolean): IIterator<T> { return new FilterIterator<T>(iter(object), fn); } /** * An iterator which yields values which pass a test. */ export class FilterIterator<T> implements IIterator<T> { /** * Construct a new filter iterator. * * @param source - The iterator of values of interest. * * @param fn - The predicate function to invoke for each value.<|fim▁hole|> } /** * Get an iterator over the object's values. * * @returns An iterator which yields the object's values. */ iter(): IIterator<T> { return this; } /** * Create an independent clone of the iterator. * * @returns A new independent clone of the iterator. */ clone(): IIterator<T> { let result = new FilterIterator<T>(this._source.clone(), this._fn); result._index = this._index; return result; } /** * Get the next value from the iterator. * * @returns The next value from the iterator, or `undefined`. */ next(): T | undefined { let fn = this._fn; let it = this._source; let value: T | undefined; while ((value = it.next()) !== undefined) { if (fn(value, this._index++)) { return value; } } return undefined; } private _index = 0; private _source: IIterator<T>; private _fn: (value: T, index: number) => boolean; }<|fim▁end|>
*/ constructor(source: IIterator<T>, fn: (value: T, index: number) => boolean) { this._source = source; this._fn = fn;
<|file_name|>json.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import import json class JSONRenderer: """ Renders a mystery as JSON """ <|fim▁hole|><|fim▁end|>
def render(self, mystery): return json.dumps(mystery.encode(), indent=4)
<|file_name|>qmqp.cc<|end_file_name|><|fim▁begin|>// nullmailer -- a simple relay-only MTA // Copyright (C) 2018 Bruce Guenter <[email protected]> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // // You can contact me at <[email protected]>. There is also a mailing list // available to discuss this package. To subscribe, send an email to // <[email protected]>. #include "config.h" #include <stdlib.h> #include <unistd.h> #include "errcodes.h" #include "fdbuf/fdbuf.h" #include "hostname.h" #include "itoa.h" #include "mystring/mystring.h" #include "netstring.h" #include "protocol.h" const int default_port = 628; const int default_tls_port = -1; // No standard for QMQP over SSL exists const char* cli_program = "qmqp"; const char* cli_help_prefix = "Send an email message via QMQP\n"; class qmqp { fdibuf& in; fdobuf& out; public: qmqp(fdibuf& netin, fdobuf& netout); ~qmqp(); void send(fdibuf& msg, unsigned long size, const mystring& env); }; qmqp::qmqp(fdibuf& netin, fdobuf& netout) : in(netin), out(netout) { } qmqp::~qmqp() { } bool skip_envelope(fdibuf& msg) { if(!msg.rewind()) return false; mystring tmp; while(msg.getline(tmp)) if(!tmp) break; return msg; } void qmqp::send(fdibuf& msg, unsigned long size, const mystring& env) { if(!skip_envelope(msg)) protocol_fail(ERR_MSG_READ, "Error re-reading message"); unsigned long fullsize = strlen(itoa(size)) + 1 + size + 1 + env.length(); out << itoa(fullsize) << ":"; // Start the "outer" netstring out << itoa(size) << ":"; // Start the message netstring fdbuf_copy(msg, out, true); // Send out the message out << "," // End the message netstring << env // The envelope is already encoded << ","; // End the "outer" netstring if(!out.flush()) protocol_fail(ERR_MSG_WRITE, "Error sending message to remote"); mystring response; if(!in.getnetstring(response)) protocol_fail(ERR_PROTO, "Response from remote was not a netstring"); switch(response[0]) { case 'K': protocol_succ(response.c_str()+1); break; case 'Z': protocol_fail(ERR_MSG_TEMPFAIL, response.c_str()+1); break; case 'D': protocol_fail(ERR_MSG_PERMFAIL, response.c_str()+1); break; default: protocol_fail(ERR_PROTO, "Invalid status byte in response"); } } bool compute_size(fdibuf& msg, unsigned long& size) { char buf[4096]; size = 0; while(msg.read(buf, 4096)) size += msg.last_count();<|fim▁hole|>} bool make_envelope(fdibuf& msg, mystring& env) { mystring tmp; while(msg.getline(tmp)) { if(!tmp) return true; env += str2net(tmp); } return false; } bool preload_data(fdibuf& msg, unsigned long& size, mystring& env) { return make_envelope(msg, env) && compute_size(msg, size); } static unsigned long msg_size; static mystring msg_envelope; void protocol_prep(fdibuf& in) { if(!preload_data(in, msg_size, msg_envelope)) protocol_fail(ERR_MSG_READ, "Error reading message"); } void protocol_starttls(fdibuf& netin, fdobuf& netout) { protocol_fail(ERR_USAGE, 
"QMQP does not support STARTTLS"); (void)netin; (void)netout; } void protocol_send(fdibuf& in, fdibuf& netin, fdobuf& netout) { alarm(60*60); // Connection must close after an hour qmqp conn(netin, netout); conn.send(in, msg_size, msg_envelope); }<|fim▁end|>
if(msg.eof()) size += msg.last_count(); return size > 0;
<|file_name|>thrift_build.rs<|end_file_name|><|fim▁begin|>// @generated by autocargo use std::env; use std::fs; use std::path::Path; use thrift_compiler::Config; #[rustfmt::skip] fn main() { // Rerun if this gets rewritten. println!("cargo:rerun-if-changed=thrift_build.rs"); let out_dir = env::var_os("OUT_DIR").expect("OUT_DIR env not provided"); let out_dir: &Path = out_dir.as_ref(); fs::write( out_dir.join("cratemap"), "test_thrift crate", ).expect("Failed to write cratemap");<|fim▁hole|> let mut conf = Config::from_env().expect("Failed to instantiate thrift_compiler::Config"); let path_from_manifest_to_base: &Path = "../../../../../..".as_ref(); let cargo_manifest_dir = env::var_os("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not provided"); let cargo_manifest_dir: &Path = cargo_manifest_dir.as_ref(); let base_path = cargo_manifest_dir .join(path_from_manifest_to_base) .canonicalize() .expect("Failed to canonicalize base_path"); // TODO: replace canonicalize() with std::path::absolute() when // https://github.com/rust-lang/rust/pull/91673 is available (~Rust 1.60) // and remove this block. #[cfg(windows)] let base_path = Path::new( base_path .as_path() .to_string_lossy() .trim_start_matches(r"\\?\"), ) .to_path_buf(); conf.base_path(base_path); let options = "serde"; if !options.is_empty() { conf.options(options); } let include_srcs = vec![ ]; conf.include_srcs(include_srcs); conf }; conf .run(&[ "../test_thrift.thrift" ]) .expect("Failed while running thrift compilation"); }<|fim▁end|>
let conf = {
<|file_name|>shortanswer.module.ts<|end_file_name|><|fim▁begin|>// (C) Copyright 2015 Martin Dougiamas // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|>// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import { NgModule } from '@angular/core'; import { IonicModule } from 'ionic-angular'; import { TranslateModule } from '@ngx-translate/core'; import { CoreQuestionDelegate } from '@core/question/providers/delegate'; import { CoreDirectivesModule } from '@directives/directives.module'; import { AddonQtypeShortAnswerHandler } from './providers/handler'; import { AddonQtypeShortAnswerComponent } from './component/shortanswer'; @NgModule({ declarations: [ AddonQtypeShortAnswerComponent ], imports: [ IonicModule, TranslateModule.forChild(), CoreDirectivesModule ], providers: [ AddonQtypeShortAnswerHandler ], exports: [ AddonQtypeShortAnswerComponent ], entryComponents: [ AddonQtypeShortAnswerComponent ] }) export class AddonQtypeShortAnswerModule { constructor(questionDelegate: CoreQuestionDelegate, handler: AddonQtypeShortAnswerHandler) { questionDelegate.registerHandler(handler); } }<|fim▁end|>
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use bytes::Buf; use futures::{Async, Future, Poll}; use h2::{Reason, SendStream}; use http::header::{ HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE, }; use http::HeaderMap; use body::Payload; mod client; pub(crate) mod server; pub(crate) use self::client::Client;<|fim▁hole|>fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { // List of connection headers from: // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection // // TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're // tested separately. let connection_headers = [ HeaderName::from_lowercase(b"keep-alive").unwrap(), HeaderName::from_lowercase(b"proxy-connection").unwrap(), PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TRAILER, TRANSFER_ENCODING, UPGRADE, ]; for header in connection_headers.iter() { if headers.remove(header).is_some() { warn!("Connection header illegal in HTTP/2: {}", header.as_str()); } } if is_request { if headers.get(TE).map(|te_header| te_header != "trailers").unwrap_or(false) { warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests"); headers.remove(TE); } } else { if headers.remove(TE).is_some() { warn!("TE headers illegal in HTTP/2 responses"); } } if let Some(header) = headers.remove(CONNECTION) { warn!( "Connection header illegal in HTTP/2: {}", CONNECTION.as_str() ); let header_contents = header.to_str().unwrap(); // A `Connection` header may have a comma-separated list of names of other headers that // are meant for only this specific connection. // // Iterate these names and remove them as headers. Connection-specific headers are // forbidden in HTTP2, as that information has been moved into frame types of the h2 // protocol. for name in header_contents.split(',') { let name = name.trim(); headers.remove(name); } } } // body adapters used by both Client and Server struct PipeToSendStream<S> where S: Payload, { body_tx: SendStream<SendBuf<S::Data>>, data_done: bool, stream: S, } impl<S> PipeToSendStream<S> where S: Payload, { fn new(stream: S, tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> { PipeToSendStream { body_tx: tx, data_done: false, stream: stream, } } fn on_err(&mut self, err: S::Error) -> ::Error { let err = ::Error::new_user_body(err); trace!("send body user stream error: {}", err); self.body_tx.send_reset(Reason::INTERNAL_ERROR); err } fn send_eos_frame(&mut self) -> ::Result<()> { trace!("send body eos"); self.body_tx .send_data(SendBuf(None), true) .map_err(::Error::new_body_write) } } impl<S> Future for PipeToSendStream<S> where S: Payload, { type Item = (); type Error = ::Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { if !self.data_done { // we don't have the next chunk of data yet, so just reserve 1 byte to make // sure there's some capacity available. h2 will handle the capacity management // for the actual body chunk. self.body_tx.reserve_capacity(1); if self.body_tx.capacity() == 0 { loop { match try_ready!(self.body_tx.poll_capacity().map_err(::Error::new_h2)) { Some(0) => {} Some(_) => break, None => return Err(::Error::new_canceled(None::<::Error>)), } } } else { if let Async::Ready(reason) = self.body_tx.poll_reset().map_err(|e| ::Error::new_h2(e))? 
{ debug!("stream received RST_STREAM: {:?}", reason); return Err(::Error::new_h2(reason.into())); } } match try_ready!(self.stream.poll_data().map_err(|e| self.on_err(e))) { Some(chunk) => { let is_eos = self.stream.is_end_stream(); trace!( "send body chunk: {} bytes, eos={}", chunk.remaining(), is_eos, ); let buf = SendBuf(Some(chunk)); self.body_tx .send_data(buf, is_eos) .map_err(::Error::new_body_write)?; if is_eos { return Ok(Async::Ready(())); } } None => { self.body_tx.reserve_capacity(0); let is_eos = self.stream.is_end_stream(); if is_eos { return self.send_eos_frame().map(Async::Ready); } else { self.data_done = true; // loop again to poll_trailers } } } } else { if let Async::Ready(reason) = self.body_tx.poll_reset().map_err(|e| ::Error::new_h2(e))? { debug!("stream received RST_STREAM: {:?}", reason); return Err(::Error::new_h2(reason.into())); } match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_err(e))) { Some(trailers) => { self.body_tx .send_trailers(trailers) .map_err(::Error::new_body_write)?; return Ok(Async::Ready(())); } None => { // There were no trailers, so send an empty DATA frame... return self.send_eos_frame().map(Async::Ready); } } } } } } struct SendBuf<B>(Option<B>); impl<B: Buf> Buf for SendBuf<B> { #[inline] fn remaining(&self) -> usize { self.0.as_ref().map(|b| b.remaining()).unwrap_or(0) } #[inline] fn bytes(&self) -> &[u8] { self.0.as_ref().map(|b| b.bytes()).unwrap_or(&[]) } #[inline] fn advance(&mut self, cnt: usize) { self.0.as_mut().map(|b| b.advance(cnt)); } }<|fim▁end|>
pub(crate) use self::server::Server;
<|file_name|>group.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Stephen Fromm <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: group version_added: "0.0.2" short_description: Add or remove groups requirements: - groupadd - groupdel - groupmod description: - Manage presence of groups on a host. - For Windows targets, use the M(win_group) module instead. options: name: description: - Name of the group to manage. type: str required: true gid: description: - Optional I(GID) to set for the group. type: int state: description: - Whether the group should be present or not on the remote host. type: str choices: [ absent, present ] default: present system: description: - If I(yes), indicates that the group created is a system group. type: bool default: no local: description: - Forces the use of "local" command alternatives on platforms that implement it. - This is useful in environments that use centralized authentication when you want to manipulate the local groups. (e.g. it uses C(lgroupadd) instead of C(groupadd)). - This requires that these commands exist on the targeted host, otherwise it will be a fatal error. type: bool default: no version_added: "2.6" non_unique: description: - This option allows to change the group ID to a non-unique value. Requires C(gid). - Not supported on macOS or BusyBox distributions. type: bool default: no version_added: "2.8" seealso: - module: user - module: win_group author: - Stephen Fromm (@sfromm) ''' EXAMPLES = ''' - name: Ensure group "somegroup" exists group: name: somegroup state: present - name: Ensure group "docker" exists with correct gid group: name: docker state: present gid: 1750 ''' RETURN = r''' gid: description: Group ID of the group. returned: When C(state) is 'present' type: int sample: 1001 name: description: Group name returned: always type: str sample: users state: description: Whether the group is present or not returned: always type: str sample: 'absent' system: description: Whether the group is a system group or not returned: When C(state) is 'present' type: bool sample: False ''' import grp import os from ansible.module_utils._text import to_bytes from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.sys_info import get_platform_subclass class Group(object): """ This is a generic Group manipulation class that is subclassed based on platform. A subclass may wish to override the following action methods:- - group_del() - group_add() - group_mod() All subclasses MUST define platform and distribution (which may be None). 
""" platform = 'Generic' distribution = None GROUPFILE = '/etc/group' def __new__(cls, *args, **kwargs): new_cls = get_platform_subclass(Group) return super(cls, new_cls).__new__(new_cls) def __init__(self, module): self.module = module self.state = module.params['state'] self.name = module.params['name'] self.gid = module.params['gid'] self.system = module.params['system'] self.local = module.params['local'] self.non_unique = module.params['non_unique'] def execute_command(self, cmd): return self.module.run_command(cmd) def group_del(self): if self.local: command_name = 'lgroupdel' else: command_name = 'groupdel' cmd = [self.module.get_bin_path(command_name, True), self.name] return self.execute_command(cmd) def _local_check_gid_exists(self): if self.gid: for gr in grp.getgrall(): if self.gid == gr.gr_gid and self.name != gr.gr_name: self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name)) def group_add(self, **kwargs): if self.local: command_name = 'lgroupadd' self._local_check_gid_exists() else: command_name = 'groupadd' cmd = [self.module.get_bin_path(command_name, True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('-g') cmd.append(str(kwargs[key])) if self.non_unique: cmd.append('-o') elif key == 'system' and kwargs[key] is True: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): if self.local: command_name = 'lgroupmod' self._local_check_gid_exists() else: command_name = 'groupmod' cmd = [self.module.get_bin_path(command_name, True)] info = self.group_info() for key in kwargs:<|fim▁hole|> if key == 'gid': if kwargs[key] is not None and info[2] != int(kwargs[key]): cmd.append('-g') cmd.append(str(kwargs[key])) if self.non_unique: cmd.append('-o') if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) def group_exists(self): # The grp module does not distinguish between local and directory accounts. # It's output cannot be used to determine whether or not a group exists locally. # It returns True if the group exists locally or in the directory, so instead # look in the local GROUP file for an existing account. if self.local: if not os.path.exists(self.GROUPFILE): self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE)) exists = False name_test = '{0}:'.format(self.name) with open(self.GROUPFILE, 'rb') as f: reversed_lines = f.readlines()[::-1] for line in reversed_lines: if line.startswith(to_bytes(name_test)): exists = True break if not exists: self.module.warn( "'local: true' specified and group was not found in {file}. " "The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE)) return exists else: try: if grp.getgrnam(self.name): return True except KeyError: return False def group_info(self): if not self.group_exists(): return False try: info = list(grp.getgrnam(self.name)) except KeyError: return False return info # =========================================== class SunOS(Group): """ This is a SunOS Group manipulation class. Solaris doesn't have the 'system' group concept. 
This overrides the following methods from the generic class:-
      - group_add()
    """
    platform = 'SunOS'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        for key in kwargs:
            if key == 'gid' and kwargs[key] is not None:
                cmd.append('-g')
                cmd.append(str(kwargs[key]))
                if self.non_unique:
                    cmd.append('-o')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class AIX(Group):
    """
    This is an AIX Group manipulation class.
    This overrides the following methods from the generic class:-
      - group_del()
      - group_add()
      - group_mod()
    """
    platform = 'AIX'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        cmd = [self.module.get_bin_path('rmgroup', True), self.name]
        return self.execute_command(cmd)
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('mkgroup', True)]
        for key in kwargs:
            if key == 'gid' and kwargs[key] is not None:
                cmd.append('id=' + str(kwargs[key]))
            elif key == 'system' and kwargs[key] is True:
                cmd.append('-a')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('chgroup', True)]
        info = self.group_info()
        for key in kwargs:
            if key == 'gid':
                if kwargs[key] is not None and info[2] != int(kwargs[key]):
                    cmd.append('id=' + str(kwargs[key]))
        if len(cmd) == 1:
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
    """
    This is a FreeBSD Group manipulation class.
    This overrides the following methods from the generic class:-
      - group_del()
      - group_add()
      - group_mod()
    """
    platform = 'FreeBSD'
    distribution = None
    GROUPFILE = '/etc/group'
    def group_del(self):
        cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
        return self.execute_command(cmd)
    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
        if self.gid is not None:
            cmd.append('-g')
            cmd.append(str(self.gid))
            if self.non_unique:
                cmd.append('-o')
        return self.execute_command(cmd)
    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
        info = self.group_info()
        cmd_len = len(cmd)
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.append('-g')
            cmd.append(str(self.gid))
            if self.non_unique:
                cmd.append('-o')
        # modify the group if cmd will do anything
        if cmd_len != len(cmd):
            if self.module.check_mode:
                return (0, '', '')
            return self.execute_command(cmd)
        return (None, '', '')
class DragonFlyBsdGroup(FreeBsdGroup):
    """
    This is a DragonFlyBSD Group manipulation class.
    It inherits all behaviors from FreeBsdGroup class.
    """
    platform = 'DragonFly'
# ===========================================
class DarwinGroup(Group):
    """
    This is a macOS (Darwin) Group manipulation class.
    This overrides the following methods from the generic class:-
      - group_del()
      - group_add()
      - group_mod()
    group manipulation is done using dseditgroup(1).
""" platform = 'Darwin' distribution = None def group_add(self, **kwargs): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'create'] if self.gid is not None: cmd += ['-i', str(self.gid)] elif 'system' in kwargs and kwargs['system'] is True: gid = self.get_lowest_available_system_gid() if gid is not False: self.gid = str(gid) cmd += ['-i', str(self.gid)] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) def group_del(self): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'delete'] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) def group_mod(self, gid=None): info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'edit'] if gid is not None: cmd += ['-i', str(gid)] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) return (None, '', '') def get_lowest_available_system_gid(self): # check for lowest available system gid (< 500) try: cmd = [self.module.get_bin_path('dscl', True)] cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID'] (rc, out, err) = self.execute_command(cmd) lines = out.splitlines() highest = 0 for group_info in lines: parts = group_info.split(' ') if len(parts) > 1: gid = int(parts[-1]) if gid > highest and gid < 500: highest = gid if highest == 0 or highest == 499: return False return (highest + 1) except Exception: return False class OpenBsdGroup(Group): """ This is a OpenBSD Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'OpenBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] if self.gid is not None: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class NetBsdGroup(Group): """ This is a NetBSD Group manipulation class. 
This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'NetBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] if self.gid is not None: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class BusyBoxGroup(Group): """ BusyBox group manipulation class for systems that have addgroup and delgroup. It overrides the following methods: - group_add() - group_del() - group_mod() """ def group_add(self, **kwargs): cmd = [self.module.get_bin_path('addgroup', True)] if self.gid is not None: cmd.extend(['-g', str(self.gid)]) if self.system: cmd.append('-S') cmd.append(self.name) return self.execute_command(cmd) def group_del(self): cmd = [self.module.get_bin_path('delgroup', True), self.name] return self.execute_command(cmd) def group_mod(self, **kwargs): # Since there is no groupmod command, modify /etc/group directly info = self.group_info() if self.gid is not None and self.gid != info[2]: with open('/etc/group', 'rb') as f: b_groups = f.read() b_name = to_bytes(self.name) b_current_group_string = b'%s:x:%d:' % (b_name, info[2]) b_new_group_string = b'%s:x:%d:' % (b_name, self.gid) if b':%d:' % self.gid in b_groups: self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid)) if self.module.check_mode: return 0, '', '' b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string) with open('/etc/group', 'wb') as f: f.write(b_new_groups) return 0, '', '' return None, '', '' class AlpineGroup(BusyBoxGroup): platform = 'Linux' distribution = 'Alpine' def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'present']), name=dict(type='str', required=True), gid=dict(type='int'), system=dict(type='bool', default=False), local=dict(type='bool', default=False), non_unique=dict(type='bool', default=False), ), supports_check_mode=True, required_if=[ ['non_unique', True, ['gid']], ], ) group = Group(module) module.debug('Group instantiated - platform %s' % group.platform) if group.distribution: module.debug('Group instantiated - distribution %s' % group.distribution) rc = None out = '' err = '' result = {} result['name'] = group.name result['state'] = group.state if group.state == 'absent': if group.group_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = group.group_del() if rc != 0: module.fail_json(name=group.name, msg=err) elif group.state == 'present': if not group.group_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = group.group_add(gid=group.gid, system=group.system) else: (rc, out, err) = group.group_mod(gid=group.gid) if rc is not None and rc != 0: module.fail_json(name=group.name, msg=err) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: 
result['stderr'] = err if group.group_exists(): info = group.group_info() result['system'] = group.system result['gid'] = info[2] module.exit_json(**result) if __name__ == '__main__': main()<|fim▁end|>
<|file_name|>sys_rekey.go<|end_file_name|><|fim▁begin|>package api import ( "context" "errors" "github.com/mitchellh/mapstructure" ) func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/init") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyStatusResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyStatusResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/verify") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyVerificationStatusResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyVerificationStatusResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") if err := r.SetJSONBody(config); err != nil { return nil, err } ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyStatusResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init") if err := r.SetJSONBody(config); err != nil { return nil, err } ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyStatusResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyCancel() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } func (c *Sys) RekeyRecoveryKeyCancel() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } func (c *Sys) RekeyVerificationCancel() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return 
err } func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { body := map[string]interface{}{ "key": shard, "nonce": nonce, } r := c.c.NewRequest("PUT", "/v1/sys/rekey/update") if err := r.SetJSONBody(body); err != nil { return nil, err } ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close()<|fim▁hole|> var result RekeyUpdateResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { body := map[string]interface{}{ "key": shard, "nonce": nonce, } r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update") if err := r.SetJSONBody(body); err != nil { return nil, err } ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyUpdateResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } if secret == nil || secret.Data == nil { return nil, errors.New("data from server response is empty") } var result RekeyRetrieveResponse err = mapstructure.Decode(secret.Data, &result) if err != nil { return nil, err } return &result, err } func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } if secret == nil || secret.Data == nil { return nil, errors.New("data from server response is empty") } var result RekeyRetrieveResponse err = mapstructure.Decode(secret.Data, &result) if err != nil { return nil, err } return &result, err } func (c *Sys) RekeyDeleteBackup() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } func (c *Sys) RekeyDeleteRecoveryBackup() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup") ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { body := map[string]interface{}{ "key": shard, "nonce": nonce, } r := c.c.NewRequest("PUT", "/v1/sys/rekey/verify") if err := r.SetJSONBody(body); err != nil { return nil, err } ctx, cancelFunc := 
context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyVerificationUpdateResponse err = resp.DecodeJSON(&result) return &result, err } func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { body := map[string]interface{}{ "key": shard, "nonce": nonce, } r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify") if err := r.SetJSONBody(body); err != nil { return nil, err } ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() var result RekeyVerificationUpdateResponse err = resp.DecodeJSON(&result) return &result, err } type RekeyInitRequest struct { SecretShares int `json:"secret_shares"` SecretThreshold int `json:"secret_threshold"` StoredShares int `json:"stored_shares"` PGPKeys []string `json:"pgp_keys"` Backup bool RequireVerification bool `json:"require_verification"` } type RekeyStatusResponse struct { Nonce string `json:"nonce"` Started bool `json:"started"` T int `json:"t"` N int `json:"n"` Progress int `json:"progress"` Required int `json:"required"` PGPFingerprints []string `json:"pgp_fingerprints"` Backup bool `json:"backup"` VerificationRequired bool `json:"verification_required"` VerificationNonce string `json:"verification_nonce"` } type RekeyUpdateResponse struct { Nonce string `json:"nonce"` Complete bool `json:"complete"` Keys []string `json:"keys"` KeysB64 []string `json:"keys_base64"` PGPFingerprints []string `json:"pgp_fingerprints"` Backup bool `json:"backup"` VerificationRequired bool `json:"verification_required"` VerificationNonce string `json:"verification_nonce,omitempty"` } type RekeyRetrieveResponse struct { Nonce string `json:"nonce" mapstructure:"nonce"` Keys map[string][]string `json:"keys" mapstructure:"keys"` KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` } type RekeyVerificationStatusResponse struct { Nonce string `json:"nonce"` Started bool `json:"started"` T int `json:"t"` N int `json:"n"` Progress int `json:"progress"` } type RekeyVerificationUpdateResponse struct { Nonce string `json:"nonce"` Complete bool `json:"complete"` }<|fim▁end|>
<|file_name|>lockscreen_rc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.8.0) # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x04\x31\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x03\x00\x00\x00\x44\xa4\x8a\xc6\ \x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x00\xdd\x00\x00\x00\xdd\x01\ \x70\x53\xa2\x07\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\ \x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\ \x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\xdd\x50\x4c\x54\ \x45\xff\xff\xff\xff\xff\x00\xff\xff\x80\xff\xff\xff\xff\xcc\x66\ \xff\xdb\x49\xff\xbf\x60\xff\xb3\x4d\xff\xd1\x5d\xff\xc4\x4e\xed\ \xed\xed\xff\xb6\x49\xff\xc8\x5b\xdf\xef\xef\xff\xcf\x50\xff\xd2\ \x5a\xf2\xbf\x40\xf4\xbf\x40\xe2\xeb\xeb\xff\xd0\x55\xe4\xed\xed\ \xe5\xe5\xed\xff\xca\x58\xff\xcc\x55\xf8\xb8\x40\xff\xcd\x55\xff\ \xcc\x53\xe7\xe7\xed\xff\xcc\x55\xe3\xe9\xee\xf4\xb8\x41\xff\xce\ \x51\xff\xcc\x53\xf6\xbc\x43\xf6\xba\x41\xff\xce\x55\xf7\xbb\x44\ \xf7\xbc\x43\xf8\xbc\x43\xff\xcd\x55\xe7\xea\xee\xf5\xbd\x42\xe7\ \xea\xee\xf5\xb9\x42\xf6\xbb\x41\xf6\xbb\x41\xf6\xbb\x41\xe5\xea\ \xed\xe6\xe8\xed\xf5\xbc\x41\xf5\xba\x42\xf6\xbb\x42\xff\xce\x54\ \xe7\xe9\xed\xf5\xbb\x42\xff\xce\x54\xf6\xbb\x42\xf6\xbc\x42\xe8\ \xe9\xed\xf6\xbc\x42\xff\xcd\x53\xe5\xe9\xec\xf5\xba\x41\xe6\xe9\ \xec\xff\xce\x54\xe7\xea\xed\xff\xce\x53\xe7\xea\xef\xf6\xbc\x42\ \xff\xce\x54\xf7\xbc\x43\xf7\xbb\x43\xe7\xe9\xed\xe6\xe8\xec\xff\ \xcd\x55\xf7\xbd\x42\xff\xcf\x54\xe7\xe9\xee\xf6\xbb\x43\xff\xce\ \x55\xff\xcd\x55\xe6\xe9\xed\xf6\xbc\x42\xe7\xe9\xee\xe6\xe9\xed\ \xe7\xea\xed\xff\xce\x54\xe7\xe9\xed\xf6\xbc\x42\xe6\xe9\xed\xf6\ \xbb\x42\xf6\xbb\x42\xff\xce\x54\xf7\xbb\x43\xe7\xe9\xed\xe6\xe9\ \xed\xf6\xbb\x42\xf6\xbb\x42\xe8\xeb\xf0\xe8\xea\xee\xe8\xeb\xef\ \xe7\xea\xee\xeb\xed\xf1\xf8\xbe\x45\xf7\xbd\x44\xe7\xea\xee\xeb\ \xee\xf1\xf6\xbb\x43\xe6\xe9\xed\xea\xed\xf0\xf6\xbb\x42\xf7\xbe\ \x44\xf8\xc0\x46\xc6\xca\xce\xd3\xd6\xdb\xda\x44\x53\xdb\x46\x55\ \xdb\x4b\x5a\xdb\x4e\x5c\xdc\x48\x57\xdf\x65\x72\xe6\x93\x9d\xe6\ \x95\x9e\xe6\xe9\xed\xe7\x4f\x5e\xeb\xed\xf1\xeb\xee\xf1\xec\xb9\ \xc0\xec\xbd\xc4\xec\xef\xf2\xed\xc4\xcb\xed\xf0\xf3\xee\xf0\xf3\ \xee\xf1\xf4\xef\xce\xd4\xef\xf1\xf5\xf0\xd6\xdb\xf0\xf2\xf5\xf0\ \xf2\xf6\xf1\xf3\xf7\xf1\xf4\xf7\xf2\xe3\xe7\xf4\xef\xf2\xf4\xf6\ \xf9\xf5\xf5\xf8\xf5\xf6\xf9\xf5\xf7\xfa\xf6\xbb\x42\xf9\xc0\x47\ \xf9\xc1\x48\xf9\xc2\x49\xfa\xc3\x49\xfb\xc5\x4b\xfb\xc6\x4d\xfc\ \xc8\x4e\xfd\xc9\x4f\xfd\xca\x50\xfd\xca\x51\xff\xce\x54\x04\x23\ \x9d\x11\x00\x00\x00\x71\x74\x52\x4e\x53\x00\x01\x02\x02\x05\x07\ \x08\x0a\x0b\x0d\x0e\x0e\x0e\x10\x10\x11\x14\x18\x1a\x1b\x1c\x1d\ \x1d\x1e\x24\x24\x28\x2b\x2d\x2e\x2f\x2f\x37\x39\x3b\x3f\x40\x41\ \x45\x48\x49\x49\x4a\x4d\x52\x53\x56\x62\x64\x66\x68\x6d\x7d\x7e\ \x80\x83\x8b\x8c\x8e\x90\x90\x95\xa0\xa5\xa6\xa8\xa8\xaa\xae\xb1\ \xb6\xb8\xbd\xbe\xbe\xc0\xc3\xc8\xcb\xcd\xd3\xd8\xd8\xdb\xde\xe6\ \xe7\xe8\xe8\xe9\xea\xed\xf0\xf1\xf2\xf5\xf5\xf7\xf8\xfa\xfa\xfb\ \xfb\xfb\xfc\xfd\xfd\xfd\xfe\xfe\xfe\xfe\xfe\x22\xeb\xe2\xf5\x00\ \x00\x01\x49\x49\x44\x41\x54\x38\xcb\x63\x60\x00\x03\x59\x8f\xf8\ \x40\x03\x06\xdc\x40\x24\x2e\xa9\x35\x2d\x13\x8f\x0a\xd3\xd4\xfe\ \x49\x93\xd2\x02\x71\x2b\xb0\xcf\x9a\x34\x69\x52\x67\x0e\x6e\x05\ \x8e\x75\x40\x05\x5d\xd5\x94\x29\xe8\x25\xa4\xa0\xac\x89\x80\x82\ 
\xe2\x7a\x84\x02\x01\x42\x0a\xa2\xd5\x70\x2b\xe0\xe7\x03\x12\x09\ \xda\x0c\x0c\x2c\xc2\xd8\x15\x98\x87\x49\x32\x30\x48\x30\x30\x30\ \xba\x06\x60\x57\xc0\xe3\xa4\xae\xe8\x16\xe1\x67\xcc\xe6\xa5\x80\ \xa2\xa0\xa8\xa5\xb8\xbe\x10\xe2\x06\xbd\xbc\xfc\x19\x53\x26\xbb\ \xa0\xb9\x01\xa1\x80\x3d\x76\xea\xbc\x79\xf3\x66\x4d\xd6\xc4\xea\ \x48\x39\x3b\x43\xa5\xc9\x73\x80\x0a\xe6\x65\x58\x00\xd9\x98\x0a\ \x54\xdd\xcd\x54\x26\xcf\x05\x29\x48\xb7\x06\xb2\xb1\x86\x03\x77\ \xe2\x74\xa0\xfc\xec\xc9\xba\x38\x03\xca\x68\x72\xc1\xcc\x69\xd9\ \xde\x8c\x38\x14\xb0\xda\x28\xeb\x04\x65\x47\x59\x72\x3a\x48\x61\ \x57\x60\x12\x23\xc3\xc0\x20\xc8\xc0\xc0\xe4\xe3\x8f\x5d\x81\x98\ \x38\x34\x2e\x38\xe4\xf1\x44\x16\x28\x2e\xf0\xc6\xa6\x04\xba\x3c\ \x6f\x64\x23\x50\x41\x77\xb5\x16\xae\xf4\x62\x9b\xd3\xdf\x51\x5c\ \x39\x21\x37\x9c\x19\x87\x82\x90\xda\xbe\xd2\x92\xe2\x86\xae\x6a\ \x69\x1c\x0a\xe2\xdb\xdb\x8a\x6b\xca\xab\xfa\xab\x35\x70\x28\xf0\ \x6d\x9c\x58\x51\x5c\xda\xd1\x5d\x2d\x84\x43\x81\x7e\x66\xcf\xc4\ \xb6\xbe\xfe\x14\x4f\x9c\xa9\xda\x39\xb3\xb1\xbd\x39\x39\x54\x14\ \x77\xba\xd7\xf7\x8d\x0f\xb6\xe2\xc2\x26\x03\x00\x8f\xb4\x8c\xb5\ \x70\xac\xb2\xb2\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \ \x00\x00\x02\x24\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x03\x00\x00\x00\x44\xa4\x8a\xc6\ \x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x07\xa3\x00\x00\x07\xa3\x01\ \x30\x2f\xb2\xc5\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\ \x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\ \x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\xa2\x50\x4c\x54\ \x45\xff\xff\xff\x28\x38\x4d\x28\x37\x4d\x87\x39\x4f\x6b\x39\x4e\ \xfc\x38\x52\xef\xd5\xc9\x78\x38\x4e\x8a\x39\x4f\x94\x39\x4e\xfc\ \x39\x52\xef\xd8\xcd\xcd\x39\x51\xd6\x39\x51\xed\xea\xda\xfc\x39\ \x52\x26\xb9\x9a\x28\x38\x4c\x4f\xc2\xa6\x8c\xca\xb0\x91\xca\xb1\ \x9c\xcb\xb1\x9f\xcb\xb1\xc5\xca\xb0\xca\xca\xae\xcb\xca\xaf\xce\ \xc9\xae\xce\xc9\xaf\xcf\xca\xaf\xcf\xca\xb0\xd0\xcb\xb1\xd1\xcc\ \xb2\xd2\xcd\xb3\xd7\xd2\xba\xd7\xd3\xbb\xd8\xd3\xbc\xd8\xd4\xbc\ \xd9\xd4\xbd\xd9\xd5\xbd\xd9\xd5\xbe\xda\xd5\xbe\xda\xd6\xbf\xdb\ \xd6\xc0\xdb\xd7\xc1\xdc\xd8\xc3\xde\xda\xc5\xe0\xdc\xc8\xe5\xe2\ \xcf\xe6\xe3\xd0\xe7\xe4\xd2\xe9\xe6\xd4\xeb\xe8\xd7\xed\xea\xda\ \xfc\x39\x52\x19\x34\xb7\x9d\x00\x00\x00\x10\x74\x52\x4e\x53\x00\ \x60\x78\xae\xb0\xb5\xbd\xc0\xc2\xc4\xc8\xcc\xd8\xdf\xe8\xe8\x79\ \xe2\xaf\xf9\x00\x00\x00\xd8\x49\x44\x41\x54\x38\x4f\xad\x90\xc9\ \x0e\x82\x40\x10\x05\x01\x51\x16\x95\xcd\x05\x97\x01\x14\x11\x50\ \x76\x95\xff\xff\x35\x9f\xc6\x43\x93\x00\x26\x84\x3a\xd4\xcc\x74\ \x2a\xa1\x03\xc7\x7d\x10\x4c\xd3\xe4\x39\x02\x8f\x81\x40\xde\xbd\ \xc1\x54\x55\x55\x11\xef\x89\x4a\x98\x60\x20\xe2\x9c\x22\xd0\xeb\ \xba\x96\xf0\x56\x6a\x82\x82\x81\x84\x53\xff\x05\x0b\x59\x96\x97\ \x34\x58\x62\xb0\x20\x41\x27\xe3\x04\xb3\x79\x0f\x33\x04\xda\xab\ \x07\x6d\xb4\x20\x3f\xb5\x90\x93\x20\x3b\x17\x45\x11\xfa\x50\x10\ \x40\x7e\x08\x9d\x33\x1a\xc4\x50\x7a\x87\x92\x04\xba\xa7\x50\x3c\ \x72\xc0\x3c\xcf\x73\x9c\x86\x58\x23\xf0\xcb\xb2\x8c\x2e\xd0\x75\ \x63\x59\xd6\x36\xc2\xcd\xef\xf8\xc4\xca\x30\x8c\x75\xdf\x0e\x43\ \x03\xe6\xba\x2e\xfb\x6a\x67\xdb\xf6\xfe\x7b\x6b\x2e\x59\x55\xd5\ \x2d\x80\xa2\x08\x0a\x6e\x50\xd7\x92\x43\x7f\xd4\xdf\xe0\xc0\x18\ \x3b\x1e\x1b\x3a\xd0\xe0\xf9\x68\xe1\x49\x82\x4e\xc6\x08\xde\xa7\ \x27\x93\xce\xcf\x54\x3a\x2a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\ \x42\x60\x82\ \x00\x00\x02\xb4\ \x89\ 
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x03\x00\x00\x00\x44\xa4\x8a\xc6\ \x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x00\xdd\x00\x00\x00\xdd\x01\ \x70\x53\xa2\x07\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\ \x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\ \x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\xd8\x50\x4c\x54\ \x45\xff\xff\xff\xff\xff\x00\xff\xbf\x80\xff\xdb\x6d\xff\xdf\x80\ \xff\xd1\x74\xff\xd8\x76\xff\xd9\x73\xff\xd5\x75\xff\xd5\x77\xff\ \xd7\x78\xff\xd7\x79\xff\xd5\x79\xfa\xd5\x75\xfa\xd5\x78\xfa\xd6\ \x76\xfb\xd7\x79\xfb\xd3\x78\xfb\xd5\x78\xed\xc4\x6f\xed\xc5\x6d\ \xfc\xd6\x79\xeb\xc2\x6d\xec\xc6\x70\xfc\xd5\x76\xfc\xd6\x78\xfd\ \xd6\x77\xfd\xd4\x77\xfd\xd6\x77\xfd\xd5\x76\xfb\xd5\x78\xfb\xd4\ \x77\xfc\xd5\x78\xfc\xd5\x77\xfc\xd5\x77\xfc\xd5\x77\xfc\xd5\x78\ \xfc\xd5\x77\xfc\xd5\x77\xfb\xd5\x77\xfc\xd5\x77\xed\xc5\x6f\xfc\ \xd5\x77\xed\xc6\x6f\xfc\xd5\x77\xfc\xd5\x77\xfc\xd5\x77\xfc\xd5\ \x77\xfc\xd5\x77\xfc\xd5\x77\xf2\xcc\x72\xfc\xd5\x77\xf3\xcc\x72\ \xf3\xcc\x73\xfc\xd5\x77\xf3\xcd\x72\xfc\xd5\x77\xf3\xcb\x72\xfc\ \xd5\x77\xfc\xd5\x77\xfc\xd5\x77\xfc\xd5\x77\xfc\xd5\x77\xea\xc3\ \x6e\xf2\xcb\x72\xf3\xcc\x72\xf4\xcd\x73\xf9\xd2\x76\xfa\xd3\x76\ \xfb\xd4\x76\xfb\xd4\x77\xfc\xd5\x77\xec\x0a\x60\x8f\x00\x00\x00\ \x3f\x74\x52\x4e\x53\x00\x01\x04\x07\x08\x0b\x0d\x14\x18\x1e\x20\ \x26\x2a\x30\x31\x38\x39\x40\x42\x45\x46\x4a\x4b\x50\x5b\x64\x69\ \x6b\x7c\x7f\x80\x89\x93\x9f\xaa\xb0\xb1\xbd\xc7\xd6\xdb\xdd\xdd\ \xdf\xe4\xe5\xe7\xe8\xec\xee\xf0\xf0\xf1\xf2\xf2\xf3\xf4\xf5\xf5\ \xf6\xf9\xfa\xfc\x92\x18\x52\x21\x00\x00\x01\x03\x49\x44\x41\x54\ \x38\x4f\x8d\xce\xd7\x5a\xc2\x40\x14\x45\xe1\x03\x58\x00\x05\x44\ \x90\x26\x52\x34\x88\x88\x28\x22\xc5\x12\x08\x84\xc9\xac\xf7\x7f\ \x23\x2f\xb0\xf0\x4d\x12\xc7\x7d\xbb\xfe\x8b\x2d\xb2\xb7\x7c\xd3\ \x19\x8d\x9c\x66\x5e\xa2\x97\x6a\xaf\x00\x60\xd5\x4e\x45\xf5\x4c\ \x9f\x9f\xf5\x33\xe1\x9e\xe8\x01\xa8\xc9\x44\x01\xf4\x12\x21\xd0\ \x00\x08\x06\xe5\xf2\x20\x00\x68\x98\x3d\x39\x07\x98\x96\x44\x4a\ \x53\x80\x79\xd2\x00\x39\x00\x16\x15\x91\xca\x02\x80\x9c\x01\xea\ \x00\x04\xc3\x6a\x75\x18\x00\x50\x37\x40\x67\x77\x3f\x98\xcd\x76\ \x9d\x8e\x01\x5a\x18\x6b\x19\xa0\x06\xa0\x95\x52\x4a\x29\x0d\x50\ \x33\x40\xda\x05\xd6\x9e\xe7\x79\x9e\xb7\x06\xdc\xb4\x01\xa4\x0b\ \xa0\xb5\xd6\x5a\x03\x74\xcd\x2e\xd9\xf1\xfe\x83\x71\x36\x04\xa4\ \xb8\xfc\xed\xcb\x62\xb8\x8b\x9c\xdd\x7f\xf7\xbb\x42\x54\x17\x39\ \x7a\x65\xbb\x05\x9e\x0f\xa3\xbb\xc8\x0b\x1b\x1f\x78\x8a\xeb\xff\ \x06\xb7\x16\xf0\x71\x6a\x01\x97\xb1\xfd\x0b\x5c\xd8\xc0\xe3\xb1\ \x05\x70\x6d\x03\x57\x16\xf0\x70\x60\x01\xee\x89\x05\xe0\x58\xc0\ \xfb\x79\x2c\xb8\x61\xe3\xff\xd5\x45\x6a\x6f\xbe\xd9\x3f\x01\xf5\ \xde\x54\x7e\xca\xf7\x18\x1d\x00\x00\x00\x00\x49\x45\x4e\x44\xae\ \x42\x60\x82\ \x00\x00\x03\x2c\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x03\x00\x00\x00\x44\xa4\x8a\xc6\ \x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x00\xe7\x00\x00\x00\xe7\x01\ \xf0\x1b\x58\xb5\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\ \x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\ \x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\xf0\x50\x4c\x54\ \x45\xff\xff\xff\x6d\x6d\x92\x60\x80\x80\x60\x75\x8a\x66\x7a\x8f\ \x66\x77\x88\x65\x7a\x8c\x64\x7a\x89\x64\x7a\x8a\x65\x79\x8a\x64\ \x7a\x89\x63\x79\x8a\x63\x78\x8a\x64\x79\x8a\x64\x79\x8a\x64\x79\ 
\x8b\x64\x79\x8a\x64\x79\x8a\x65\x7a\x8a\x66\x7b\x8c\x68\x7d\x8d\ \x74\x87\x96\x79\x8b\x9a\x7f\x91\x9e\x85\x95\xa3\x85\x96\xa3\x87\ \x97\xa5\x90\x9f\xab\x91\x9f\xab\x92\xa0\xac\x93\xa0\xab\x98\xa5\ \xb1\x98\xa6\xb2\xa0\xab\xb5\xa1\xae\xb8\xa3\xaf\xb9\xa5\xb1\xbb\ \xb4\xb5\xbc\xb4\xbe\xc6\xb5\xbf\xc7\xb9\xc2\xca\xbb\xc4\xcc\xbd\ \xc6\xcd\xbe\xc7\xce\xca\xd2\xd7\xcb\xd3\xd8\xcd\xd4\xd9\xd8\xdd\ \xe2\xd9\xde\xe2\xdb\xe0\xe4\xdc\xe1\xe5\xdd\xe2\xe5\xdf\xe4\xe7\ \xe0\xe5\xe8\xe6\xea\xed\xe7\xeb\xed\xe9\xec\xee\xea\xdd\xdd\xeb\ \xe9\xea\xeb\xee\xf0\xec\xef\xf1\xee\x9b\x91\xee\xf0\xf2\xef\xf2\ \xf4\xf0\xf2\xf4\xf2\xb1\xa9\xf3\xf4\xf6\xf4\xf6\xf7\xf5\xf7\xf8\ \xf8\xf9\xfa\xf9\xfa\xfb\xfb\xfc\xfc\xfc\xeb\xe9\xfd\xfe\xfe\xfe\ \xf6\xf6\xfe\xf8\xf7\xfe\xfe\xfe\xfe\xff\xff\xff\xfe\xfe\xff\xff\ \xff\x7d\x02\xb4\x15\x00\x00\x00\x11\x74\x52\x4e\x53\x00\x07\x08\ \x18\x19\x2d\x49\x84\x97\x98\xc1\xc8\xda\xe3\xf2\xf3\xf5\xd5\xa8\ \x31\x5b\x00\x00\x01\x91\x49\x44\x41\x54\x38\xcb\x85\x53\xe9\x5a\ \x82\x50\x10\xbd\xee\x82\x0a\x8e\x9a\x6b\x8b\x95\xa9\xa4\x52\x02\ \x2e\xe5\x46\x29\x9a\x85\xdd\xf7\x7f\x9b\xe6\x82\x20\x20\x7e\xcd\ \x0f\xc4\x39\x87\x59\xcf\x10\xe2\x5a\x24\xc9\xf1\x59\x51\xcc\xf2\ \x5c\x32\x42\xce\x2d\x9e\x16\xc0\x35\x21\x1d\x0f\xc0\xd1\x54\xde\ \x86\x4a\x25\xfb\x37\x9f\x8a\x7a\xf1\x58\x06\x7d\x85\xe6\x60\x34\ \x9e\xcd\x27\x5a\xbf\x59\xc0\xbf\x99\xd8\x09\x4f\xe4\xd0\x51\xd7\ \x96\x06\xa5\xb2\x4c\xe9\x7e\xa3\xd6\xd1\x91\x4b\xb8\xdf\x23\x5e\ \xe8\x8d\xb7\x14\x4d\x51\xd8\x93\xea\x12\x06\xc9\x1d\x63\x44\x31\ \x7e\x45\x5b\x99\xd4\x6b\x3b\xa5\x82\x59\xec\x3a\x52\xf8\xbd\x66\ \x1c\x81\xe9\xd4\xa1\x0c\x31\x46\xca\xea\x0f\xeb\xef\xad\x1c\xb7\ \x24\x39\x6f\x66\x07\x7b\x61\xdd\xa6\xb1\xbe\xb1\x79\x4e\xa0\xeb\ \x1a\x40\x1a\xe7\x27\x60\x82\x2d\x0d\x21\xb0\x24\x42\x84\x24\x01\ \x9a\x4b\x1a\x4a\x38\x34\x00\x92\x84\x03\x18\x18\xe1\x04\xda\x05\ \xe0\x08\x0f\x30\x72\x3d\xbf\xdf\x3e\x82\x0a\xc0\x93\x2c\xc0\xf8\ \x44\x58\x3c\x75\x3c\x04\x1d\x20\x4b\x44\x28\xcd\x64\x36\xbe\xe7\ \x47\xb4\xfb\xdb\x1b\x77\x10\x8a\x6c\x16\x41\x64\x84\xf9\x89\xf0\ \x70\x77\xfd\x16\x20\x60\x8a\x89\x27\xea\xe7\xfb\xe2\xcb\x9f\x02\ \x8b\xd4\x7c\x5b\xf8\x39\x31\xac\x22\xb1\xcd\xfe\xfe\x02\xa3\xcd\ \xda\x64\x83\xda\xd0\x70\x46\x95\x0d\x8a\x8d\x5a\xa5\xa1\x8c\x57\ \x6b\xd4\xd6\xb2\xf4\x20\xe3\x03\x1f\x46\xd9\x5a\x96\xb5\x6e\x69\ \x47\xcf\xad\x75\x5c\xb7\x25\x18\xe5\x1c\x7f\x71\x04\x63\x4b\x6e\ \x68\x06\xf1\x2b\x57\x72\xb6\x68\x3b\x6b\xea\x11\xad\xd1\xf2\x88\ \xf6\x28\xfb\xda\xf0\x40\x6d\xd9\x63\xfd\x65\x9f\xec\x9d\xc3\x69\ \x74\x55\xdd\x34\x75\xb5\x5d\x0d\x1e\x8e\xe7\xf4\x8a\xc5\xd0\xd3\ \xfb\xff\x78\x2f\x9f\xff\x1f\x2f\x83\xa9\x23\xd5\xf0\x7d\x09\x00\ \x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x04\x0a\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x03\x00\x00\x00\x44\xa4\x8a\xc6\ \x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x08\x5b\x00\x00\x08\x5b\x01\ \xe8\x9f\x75\xd0\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\ \x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\ \x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x83\x50\x4c\x54\ \x45\xff\xff\xff\x55\x55\xaa\x40\x80\x80\x66\x66\x99\xff\xff\xff\ \x49\x6d\x92\xdb\xdb\xff\x55\x55\x8e\xe6\xe6\xe6\xea\xea\xea\x51\ \x5d\x80\x59\x64\x85\x55\x60\x80\x52\x5c\x85\x55\x5e\x84\x58\x61\ \x84\x9e\xa7\xb9\x80\x88\x99\x52\x63\x84\x58\x60\x80\x57\x63\x80\ 
\x55\x60\x81\x54\x5f\x80\x54\x5e\x81\xe7\xee\xee\xe7\xea\xee\xe8\<|fim▁hole|>\xee\x56\x60\x81\x54\x60\x80\x55\x60\x80\xe6\xec\xed\x55\x5f\x80\ \xe7\xec\xee\x56\x60\x80\x56\x60\x80\x55\x5f\x80\xe4\xea\xeb\xe8\ \xec\xed\x55\x60\x80\x78\x82\x9a\x78\x82\x9b\x84\x8e\xa3\x86\x8f\ \xa4\x74\x7e\x97\x9a\xa3\xb3\x77\x80\x99\x78\x81\x99\x55\x60\x80\ \x9a\xa2\xb3\x55\x60\x80\x74\x7d\x97\xa3\xaa\xba\xa4\xac\xbb\x6b\ \x76\x91\x76\x80\x99\xa3\xac\xba\xa7\xaf\xbe\x6b\x75\x90\xe7\xec\ \xed\x67\x71\x8e\x55\x60\x80\x55\x5f\x80\x63\x6f\x8b\x62\x6c\x89\ \xb6\xbd\xc9\x55\x60\x80\x63\x6c\x8a\xb9\xc0\xcb\xba\xc0\xca\xba\ \xc2\xcc\x5e\x68\x86\x5f\x6a\x87\x5d\x67\x86\x5e\x69\x87\x55\x60\ \x80\x5d\x67\x86\x55\x61\x80\x5b\x65\x84\x5d\x68\x87\xc5\xcc\xd4\ \xc6\xcd\xd4\x55\x60\x80\xe7\xec\xed\xc8\xcf\xd6\x55\x5f\x80\xc9\ \xcf\xd6\xcb\xd0\xd7\xe7\xec\xec\x55\x60\x80\x55\x60\x80\xe7\xeb\ \xed\xce\xd4\xda\xcf\xd6\xdc\x55\x60\x80\x55\x60\x80\xd2\xd8\xdd\ \x55\x60\x80\xd6\xda\xe0\x55\x60\x80\x55\x60\x80\xd9\xde\xe3\x55\ \x60\x80\x55\x60\x80\xe8\xec\xed\x55\x60\x80\x56\x60\x80\x55\x5f\ \x80\xe7\xec\xed\x55\x60\x80\x56\x60\x80\xe1\xe6\xe9\xe7\xec\xed\ \x55\x60\x80\x55\x60\x80\x55\x60\x80\x55\x61\x80\x55\x60\x80\xe6\ \xeb\xec\xe6\xeb\xed\x55\x60\x80\x55\x60\x81\xe6\xeb\xed\x55\x60\ \x80\xe7\xec\xed\x3f\x91\xdf\xa4\x00\x00\x00\x7f\x74\x52\x4e\x53\ \x00\x03\x04\x05\x05\x07\x07\x09\x0a\x0c\x16\x17\x18\x19\x1b\x1d\ \x1d\x1e\x1f\x20\x2c\x45\x46\x49\x49\x4a\x4d\x4e\x6e\x71\x74\x78\ \x7d\x82\x90\x91\x93\x93\x95\x98\xb3\xb5\xb9\xbd\xbd\xbd\xbf\xbf\ \xc0\xc0\xc1\xc1\xc2\xc3\xc4\xc4\xc4\xc4\xc5\xc5\xc5\xc5\xc6\xc7\ \xc8\xca\xcb\xcb\xce\xce\xcf\xcf\xcf\xd0\xd0\xd1\xd1\xd4\xd4\xd5\ \xd5\xd6\xd6\xd6\xd7\xd7\xd8\xd8\xda\xdb\xdb\xdb\xdc\xdd\xde\xde\ \xdf\xdf\xe1\xe2\xe4\xe6\xe6\xe8\xe9\xea\xed\xee\xef\xf1\xf1\xf3\ \xf3\xf4\xf4\xf4\xf4\xf5\xf6\xf7\xfb\xfd\xfd\xfd\xfe\xfe\xfe\xe0\ \xf4\x89\xca\x00\x00\x01\x6e\x49\x44\x41\x54\x18\x19\x65\xc1\x09\ \x43\x4c\x61\x14\x06\xe0\x77\x4c\x89\x5c\x5b\x1a\xd9\xab\x4b\x0d\ \x06\xa1\xc5\x12\x5a\xc8\x92\x29\xfb\x90\xad\xe4\x96\x65\xd2\x14\ \x91\xed\xf6\x9e\x9f\x5e\xf3\xdd\xf3\x9d\xee\x34\xcf\x83\x4d\x8d\ \xb9\xb0\xaf\x54\x5e\x9d\x2a\xe4\xdb\xb2\xa8\xd7\x1c\x56\x68\xca\ \xdd\x01\x6a\x65\x3b\x97\x59\xa3\xd2\xd1\x84\x94\x60\x80\x75\x46\ \x02\x98\xd6\x39\x7a\x8b\x8b\xf4\x66\x5b\xa1\x82\x59\x3a\xff\x2f\ \x5e\x9b\x59\x5b\x9b\xb9\x71\x85\x89\xb9\x00\x4e\xd3\x08\x9d\xf8\ \x92\xa8\xfe\x98\xce\x40\x16\x55\x1d\x4c\xf4\x8a\xe9\x65\xa2\x13\ \x1b\x82\x0a\x13\xe3\x62\xc6\x99\x58\x6e\x06\xd0\x4d\x15\x89\x89\ \xa8\x42\x20\x5b\xa6\x8a\xc4\x44\x54\x95\x46\xb4\xd1\x8b\xc4\x44\ \xf4\x72\xc8\xd3\x9b\x17\x33\x4f\x2f\x44\x81\xea\xa1\xa4\x3c\xa3\ \xea\xc3\x14\xd5\x79\x49\x19\xa4\x2a\x61\x95\xea\x9c\xa4\x0c\x52\ \x95\xf1\x95\xea\xe9\x3f\xd9\x74\x8f\xea\x17\x1e\xd1\xbb\x29\xe6\ \x42\x4c\xf5\x04\x05\x7a\x45\xf1\x5e\xc7\xf4\x4e\x23\x4f\xf3\x4a\ \xd4\x2d\x9a\x10\x39\x9a\xeb\x92\x78\xf3\x87\xe6\x20\x32\x9f\x69\ \x9e\x8b\x73\x9b\xe6\x53\x06\x38\x45\xf3\x40\x9c\x49\x9a\x10\xc0\ \xee\x6f\xf4\x5e\x88\x73\x87\xde\xcf\x16\x6c\x38\x46\x35\xf1\x57\ \x9c\xab\x31\xd5\x09\x54\x65\x46\x59\xf5\xbd\xe7\x87\xa8\xfb\x45\ \x3a\x77\xb7\xc1\xd9\x55\x22\x7f\x5f\xfe\x22\x29\x63\x45\x92\xef\ \xf7\x42\xed\x79\x37\xfc\x41\xb6\x18\x7b\xfc\xf1\x00\xcc\xbe\xb3\ \x52\xe7\xcc\x7e\xa4\x1d\x7d\x2b\x35\x5e\x1e\xc6\x16\xdb\xdb\x97\ \xc4\x2c\x1c\x6f\x40\xbd\x9d\x47\xba\x86\xa6\x57\x56\xa6\x87\x4e\ \x1e\xda\x01\xb3\x0e\x29\x11\x78\xcc\x11\x55\x71\x85\x00\x00\x00\ \x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x02\xb6\ \x89\ 
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\ \x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\ \x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x0e\x00\x00\x0b\x0e\ \x01\x40\xbe\xe1\x41\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\ \x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\ \x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\x33\x49\x44\ \x41\x54\x58\x85\xed\xd7\xdb\x8b\x4d\x51\x1c\x07\xf0\xcf\x3a\x33\ \x19\x1a\x73\x89\xc4\x0c\x43\x9a\x1a\x0f\x83\x44\xae\x29\x4a\x22\ \x92\xc4\xbc\xa9\x89\xe4\x92\x9a\x27\xa5\x44\xfe\x01\x79\xf3\xe0\ \xc1\xa5\x5c\x1e\x14\x4f\x3c\x08\xa1\x30\x72\x09\x45\x29\x93\x48\ \x9e\x64\xc4\x18\x4f\xb4\x3c\xec\x3d\x9a\xc6\x39\xce\x3e\x33\x73\ \x3a\x1e\x66\xd5\xaa\xbd\xd6\xef\xfb\x5b\xdf\xef\x5e\xbf\xb5\xbe\ \xbb\x1d\x62\x8c\x2a\xd9\x72\x15\x65\x1f\x13\xf0\x3f\x08\xa8\x1e\ \x49\x72\x08\x61\x0d\xd6\xa2\x1d\x4d\xf8\x88\x57\x38\x1d\x63\xec\ \xc9\xb4\x48\x8c\xb1\xe4\x8e\x85\xb8\x89\x53\x58\x87\x19\xa8\xc2\ \x6c\x74\xe0\x16\x2e\xa2\xbe\xe8\x5a\xc3\x20\x5f\x89\x07\x68\x2f\ \x82\xdb\x8a\xa7\x98\x39\x6a\x02\x30\x0f\xcf\x31\x2d\x23\x7e\x05\ \xee\xa0\x6a\xb4\x04\x5c\xc1\x82\x12\xf0\x0d\x38\x8f\x1e\x3c\xc4\ \x71\xcc\x1d\x8c\xc9\x7c\x0b\x42\x08\x73\xd0\x88\xaa\x10\x42\x5b\ \x08\xa1\xa6\x08\x7e\x03\x6e\xe0\x0d\x8e\xe1\x02\x5a\xd1\x1d\x42\ \xb8\x18\x42\xc8\x65\x3a\x84\xa8\xc5\x11\xbc\xc3\x13\x9c\xc0\x39\ \x74\xe3\x1a\xd6\xe7\xc9\xd9\x84\x4b\x68\xc8\x13\xab\xc7\x33\xdc\ \x2b\x5a\x02\xac\xc2\x0b\xec\xcb\x57\x47\xb4\xe1\x34\xce\x62\x7c\ \x09\xa5\xc9\xe1\x33\x3a\x8b\x1d\xa0\x47\x98\x9c\x61\xc1\xdd\xb8\ \x8e\x5c\x09\x22\xb6\xa3\xa7\x50\xb0\x3e\x7d\xf3\x96\x12\x16\x3c\ \x8a\xc3\xa8\xcd\x88\x1f\x87\xfe\x42\xc1\x83\xd8\x5d\x02\x79\x3b\ \xce\xa4\xdb\xfa\x1d\xfd\x78\x8b\x35\x45\xf2\x7e\x0c\x9d\x68\xc2\ \x16\x89\x9d\x4e\xc8\x48\xbe\x0b\xf7\xb1\x64\xe0\x9c\x48\x2c\x7e\ \x0f\xfa\x24\xb6\x9c\x2f\xaf\x05\x5f\x06\x06\x75\x12\x5b\xbd\x81\ \x43\x58\x94\x91\x3c\xe0\x00\x6a\x0a\xc4\x27\xe2\x13\x76\xe6\x89\ \x9d\x4c\x85\xab\xc2\x5d\x74\x64\xdd\xf2\x52\x3a\x96\xa2\x6f\xc8\ \xdc\xfc\xb4\x4c\xd3\xa1\x0b\x87\xcb\x41\x3e\x88\xf0\x03\x56\xa4\ \xcf\x5d\x29\xf9\xde\x74\xec\x76\x3e\xc3\x18\x65\x01\xdd\xe8\xc5\ \x37\xbc\x37\xc8\x8e\x73\x68\x8c\x31\x7e\x55\xde\x16\x71\x15\x93\ \x62\x8c\xb3\x62\x8c\x2f\x07\x02\xd5\xf8\x15\x42\xa8\x8e\x31\xfe\ \x2c\xa3\x80\x56\xc9\x41\xfc\x8b\x23\x27\xf9\xbc\xae\x2e\x17\x73\ \x08\xa1\x53\xe2\x90\xaf\x0b\x61\x5a\x24\x96\x5b\x57\x86\xda\x2f\ \x97\x18\xd3\xb2\x82\x98\x14\xb8\x11\x8f\xb1\x0d\x53\x47\x48\x3a\ \x0e\x9b\x71\x39\x25\xdf\xf1\x2f\x7c\x18\xb8\x0a\x21\x84\x29\xd8\ \x8f\xc5\x68\x96\x98\xcc\x70\x5a\xb3\xc4\x01\x9f\x60\x5f\x8c\xb1\ \xf7\x5f\xe0\x3f\x02\x2a\xd5\x2a\xfe\x5f\x30\x26\xa0\xe2\x02\x7e\ \x03\xb7\x39\xbc\xed\x20\x33\xf3\x9f\x00\x00\x00\x00\x49\x45\x4e\ \x44\xae\x42\x60\x82\ " qt_resource_name = b"\ \x00\x05\ \x00\x6f\xa6\x53\ \x00\x69\ \x00\x63\x00\x6f\x00\x6e\x00\x73\ \x00\x05\ \x00\x4f\xa6\x53\ \x00\x49\ \x00\x63\x00\x6f\x00\x6e\x00\x73\ \x00\x0f\ \x03\xec\xfb\x67\ \x00\x74\ \x00\x68\x00\x65\x00\x72\x00\x6d\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x0c\ \x07\xb5\x0f\xc7\ \x00\x63\ \x00\x61\x00\x6c\x00\x65\x00\x6e\x00\x64\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x0c\ \x0c\xa8\x9d\xc7\ \x00\x64\ \x00\x6f\x00\x6f\x00\x72\x00\x2d\x00\x6b\x00\x65\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x09\ \x05\x9e\x83\x27\ \x00\x63\ \x00\x6c\x00\x6f\x00\x63\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x08\ 
\x09\xc5\x58\xc7\ \x00\x75\ \x00\x73\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x0a\ \x0b\xb9\x11\x87\ \x00\x63\ \x00\x6c\x00\x6f\x00\x75\x00\x64\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\ " qt_resource_struct = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x10\x00\x02\x00\x00\x00\x06\x00\x00\x00\x03\ \x00\x00\x00\x20\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ \x00\x00\x00\x80\x00\x00\x00\x00\x00\x01\x00\x00\x09\x15\ \x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x04\x35\ \x00\x00\x00\x98\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x45\ \x00\x00\x00\xae\x00\x00\x00\x00\x00\x01\x00\x00\x10\x53\ \x00\x00\x00\x62\x00\x00\x00\x00\x00\x01\x00\x00\x06\x5d\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()<|fim▁end|>
\xeb\xee\xe8\xeb\xeb\x56\x5f\x80\xe6\xed\xed\x56\x61\x80\xe8\xec\
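# The record above is the tail of a pyrcc-generated Qt resource module: the hex
# blobs are embedded PNG icons and qInitResources() registers them under Qt's
# ":/..." resource scheme at import time. A minimal usage sketch, assuming the
# generated module is importable as `resources_rc` and that the .qrc prefix was
# "icons" with an "Icons" folder (both assumptions -- the real paths depend on
# the original .qrc file):
from PyQt5 import QtGui, QtWidgets  # assumed binding; pyrcc also targets PyQt4/PySide

import resources_rc  # hypothetical module name; importing runs qInitResources()

app = QtWidgets.QApplication([])
icon = QtGui.QIcon(":/icons/Icons/clock.png")  # "clock.png" is listed in qt_resource_name above
button = QtWidgets.QPushButton("Clock")
button.setIcon(icon)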
<|file_name|>RoleGraphViewer.java<|end_file_name|><|fim▁begin|>package de.uni.freiburg.iig.telematik.sewol.accesscontrol.rbac.lattice.graphic; import org.apache.commons.collections15.Factory; import edu.uci.ics.jung.graph.Graph; import edu.uci.ics.jung.graph.SparseMultigraph; public class RoleGraphViewer { Graph<Integer, String> g; int nodeCount, edgeCount; Factory<Integer> vertexFactory; Factory<String> edgeFactory; /** Creates a new instance of RoleGraphViewer */ public RoleGraphViewer() { // Graph<V, E> where V is the type of the vertices and E is the type of // the edges g = new SparseMultigraph<Integer, String>(); nodeCount = 0; edgeCount = 0; vertexFactory = new Factory<Integer>() { // My vertex factory public Integer create() {<|fim▁hole|>
 }; edgeFactory = new Factory<String>() { // My edge factory public String create() { return "E" + edgeCount++; } }; } }<|fim▁end|>
return nodeCount++; }
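# A Python analogue of the JUNG factories in the record above, to make the
# completed hole's behavior concrete: each factory returns the current counter
# value and then increments it, so successive vertices get ids 0, 1, 2, ... and
# edges "E0", "E1", ... This sketch is illustrative only; it is not part of the
# sewol project.
class CountingFactories:
    def __init__(self):
        self.node_count = 0
        self.edge_count = 0

    def create_vertex(self):
        # mirrors Java's `return nodeCount++;` (post-increment semantics)
        value = self.node_count
        self.node_count += 1
        return value

    def create_edge(self):
        value = "E" + str(self.edge_count)
        self.edge_count += 1
        return value

factories = CountingFactories()
assert [factories.create_vertex() for _ in range(3)] == [0, 1, 2]
assert factories.create_edge() == "E0"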
<|file_name|>maxSumSub.py<|end_file_name|><|fim▁begin|>def maxSumSub(arr): maxSums = [0]*len(arr) for i in range(len(arr)):<|fim▁hole|> if (arr[j] < arr[i]): s = maxSums[j] + arr[i] if (s > maxS): maxS = s maxSums[i] = maxS return max(maxSums) arr = [1,101,2,3,100,4,5] print maxSumSub(arr) arr = [3,4,5,10] print maxSumSub(arr) arr = [10,5,4,3] print maxSumSub(arr)<|fim▁end|>
Si = arr[i] maxS = Si for j in range(0,i):
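# Assembling the prompt and completion above yields the classic maximum-sum
# increasing subsequence DP. A commented, Python-3-friendly restatement
# (print() instead of the original's Python 2 print statement):
def max_sum_increasing_subsequence(arr):
    # max_sums[i] holds the best sum over increasing subsequences ending at arr[i]
    max_sums = [0] * len(arr)
    for i in range(len(arr)):
        best = arr[i]  # worst case: arr[i] by itself
        for j in range(i):
            if arr[j] < arr[i]:  # arr[i] may extend a subsequence ending at arr[j]
                best = max(best, max_sums[j] + arr[i])
        max_sums[i] = best
    return max(max_sums)

print(max_sum_increasing_subsequence([1, 101, 2, 3, 100, 4, 5]))  # 106 == 1+2+3+100
print(max_sum_increasing_subsequence([3, 4, 5, 10]))              # 22
print(max_sum_increasing_subsequence([10, 5, 4, 3]))              # 10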
<|file_name|>helper.rs<|end_file_name|><|fim▁begin|>/// Turn a failed parse into `None` and a successful parse into `Some`. /// /// - **Syntax:** `option!(THING)` /// - **Output:** `Option<THING>` /// /// ```rust /// extern crate syn; /// #[macro_use] extern crate synom; /// /// use syn::tokens::Bang; /// /// named!(maybe_bang -> Option<Bang>, option!(syn!(Bang))); /// /// # fn main() {} /// ``` #[macro_export] macro_rules! option { ($i:expr, $submac:ident!( $($args:tt)* )) => { match $submac!($i, $($args)*) { ::std::result::Result::Ok((i, o)) => ::std::result::Result::Ok((i, Some(o))), ::std::result::Result::Err(_) => ::std::result::Result::Ok(($i, None)), } }; ($i:expr, $f:expr) => { option!($i, call!($f)); }; } /// Turn a failed parse into an empty vector. The argument parser must itself /// return a vector. /// /// This is often more convenient than `option!(...)` when the argument produces /// a vector. /// /// - **Syntax:** `opt_vec!(THING)` /// - **Output:** `THING`, which must be `Vec<T>` /// /// ```rust /// extern crate syn; /// #[macro_use] extern crate synom; /// /// use syn::{Lifetime, Ty}; /// use syn::delimited::Delimited; /// use syn::tokens::*; /// /// named!(bound_lifetimes -> (Vec<Lifetime>, Ty), tuple!( /// opt_vec!(do_parse!( /// syn!(For) >> /// syn!(Lt) >> /// lifetimes: call!(Delimited::<Lifetime, Comma>::parse_terminated) >> /// syn!(Gt) >> /// (lifetimes.into_vec()) /// )), /// syn!(Ty) /// )); /// /// # fn main() {} /// ``` #[macro_export] macro_rules! opt_vec { ($i:expr, $submac:ident!( $($args:tt)* )) => { match $submac!($i, $($args)*) { ::std::result::Result::Ok((i, o)) => ::std::result::Result::Ok((i, o)), ::std::result::Result::Err(_) => ::std::result::Result::Ok(($i, Vec::new())) } }; } /// Parses nothing and always succeeds. /// /// This can be useful as a fallthrough case in `alt!`. /// /// - **Syntax:** `epsilon!()` /// - **Output:** `()` /// /// ```rust /// extern crate syn; /// #[macro_use] extern crate synom; /// /// use syn::Mutability; /// use synom::tokens::Mut; /// /// named!(mutability -> Mutability, alt!( /// syn!(Mut) => { Mutability::Mutable } /// | /// epsilon!() => { |_| Mutability::Immutable } /// )); /// /// # fn main() {} #[macro_export] macro_rules! epsilon { ($i:expr,) => { ::std::result::Result::Ok(($i, ())) }; } /// Run a parser, binding the result to a name, and then evaluating an /// expression. /// /// Discards the result of the expression and parser. /// /// - **Syntax:** `tap!(NAME : THING => EXPR)` /// - **Output:** `()` /// /// ```rust /// extern crate syn; /// #[macro_use] extern crate synom; /// /// use syn::{Expr, ExprCall}; /// use syn::tokens::RArrow; /// /// named!(expr_with_arrow_call -> Expr, do_parse!( /// mut e: syn!(Expr) >> /// many0!(tap!(arg: tuple!(syn!(RArrow), syn!(Expr)) => { /// e = Expr { /// node: ExprCall { /// func: Box::new(e), /// args: vec![arg.1].into(), /// paren_token: Default::default(), /// }.into(), /// attrs: Vec::new(), /// }; /// })) >> /// (e) /// )); /// /// # fn main() {} /// ``` #[doc(hidden)] #[macro_export] macro_rules! tap { ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => { match $submac!($i, $($args)*) { ::std::result::Result::Ok((i, o)) => { let $name = o; $e; ::std::result::Result::Ok((i, ())) } ::std::result::Result::Err(err) => ::std::result::Result::Err(err), } }; ($i:expr, $name:ident : $f:expr => $e:expr) => { tap!($i, $name: call!($f) => $e); }; } /// Parse a type through the `Synom` trait. 
/// /// This is a convenience macro used to invoke the `Synom::parse` method for a<|fim▁hole|>/// - **Output:** `TYPE` /// /// ```rust /// extern crate syn; /// #[macro_use] extern crate synom; /// /// use syn::Expr; /// use synom::tokens::Dot; /// /// named!(expression -> Expr, syn!(Expr)); /// /// named!(expression_dot -> (Expr, Dot), tuple!(syn!(Expr), syn!(Dot))); /// /// # fn main() {} /// ``` #[macro_export] macro_rules! syn { ($i:expr, $t:ty) => { call!($i, <$t as $crate::Synom>::parse) }; } /// Parse a parenthesized-surrounded subtree. /// /// This macro will invoke a sub-parser inside of all tokens contained in /// parenthesis. The sub-parser is required to consume all tokens within the /// parens or else this parser will return an error. /// /// - **Syntax:** `parens!(SUBPARSER)` /// - **Output:** `(SUBPARSER_RET, Paren)` /// /// ```rust /// extern crate syn; /// #[macro_use] extern crate synom; /// /// use syn::Expr; /// use synom::tokens::Paren; /// /// named!(expr_paren -> (Expr, Paren), parens!(syn!(Expr))); /// /// # fn main() {} /// ``` #[macro_export] macro_rules! parens { ($i:expr, $submac:ident!( $($args:tt)* )) => { $crate::tokens::Paren::parse($i, |i| $submac!(i, $($args)*)) }; ($i:expr, $f:expr) => { parens!($i, call!($f)); }; } /// Same as the `parens` macro, but for brackets. #[macro_export] macro_rules! brackets { ($i:expr, $submac:ident!( $($args:tt)* )) => { $crate::tokens::Bracket::parse($i, |i| $submac!(i, $($args)*)) }; ($i:expr, $f:expr) => { brackets!($i, call!($f)); }; } /// Same as the `parens` macro, but for braces. #[macro_export] macro_rules! braces { ($i:expr, $submac:ident!( $($args:tt)* )) => { $crate::tokens::Brace::parse($i, |i| $submac!(i, $($args)*)) }; ($i:expr, $f:expr) => { braces!($i, call!($f)); }; } /// Same as the `parens` macro, but for none-delimited sequences (groups). #[macro_export] macro_rules! grouped { ($i:expr, $submac:ident!( $($args:tt)* )) => { $crate::tokens::Group::parse($i, |i| $submac!(i, $($args)*)) }; ($i:expr, $f:expr) => { grouped!($i, call!($f)); }; }<|fim▁end|>
/// type, you'll find this in quite a few parsers. This is also the primary way /// to parse punctuation. /// /// - **Syntax:** `syn!(TYPE)`
<|file_name|>ctx.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ flask.ctx ~~~~~~~~~ Implements the objects required to keep the context. :copyright: (c) 2014 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import sys from functools import update_wrapper from werkzeug.exceptions import HTTPException from .globals import _request_ctx_stack, _app_ctx_stack from .module import blueprint_is_module from .signals import appcontext_pushed, appcontext_popped class _AppCtxGlobals(object): """A plain object.""" def get(self, name, default=None): return self.__dict__.get(name, default) def __contains__(self, item): return item in self.__dict__ def __iter__(self): return iter(self.__dict__) def __repr__(self): top = _app_ctx_stack.top if top is not None: return '<flask.g of %r>' % top.app.name return object.__repr__(self) def after_this_request(f): """Executes a function after this request. This is useful to modify response objects. The function is passed the response object and has to return the same or a new one. Example:: @app.route('/') def index(): @after_this_request def add_header(response): response.headers['X-Foo'] = 'Parachute' return response return 'Hello World!' This is more useful if a function other than the view function wants to modify a response. For instance think of a decorator that wants to add some headers without converting the return value into a response object. .. versionadded:: 0.9 """ _request_ctx_stack.top._after_request_functions.append(f) return f def copy_current_request_context(f): """A helper function that decorates a function to retain the current request context. This is useful when working with greenlets. The moment the function is decorated a copy of the request context is created and then pushed when the function is called. Example:: import gevent from flask import copy_current_request_context @app.route('/') def index(): @copy_current_request_context def do_some_work(): # do some work here, it can access flask.request like you # would otherwise in the view function. ... gevent.spawn(do_some_work) return 'Regular response' .. versionadded:: 0.10 """ top = _request_ctx_stack.top if top is None: raise RuntimeError('This decorator can only be used at local scopes ' 'when a request context is on the stack. For instance within ' 'view functions.') reqctx = top.copy() def wrapper(*args, **kwargs): with reqctx: return f(*args, **kwargs) return update_wrapper(wrapper, f) def has_request_context(): """If you have code that wants to test if a request context is there or not this function can be used. For instance, you may want to take advantage of request information if the request object is available, but fail silently if it is unavailable. :: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and has_request_context(): remote_addr = request.remote_addr self.remote_addr = remote_addr Alternatively you can also just test any of the context bound objects (such as :class:`request` or :class:`g` for truthness):: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and request: remote_addr = request.remote_addr self.remote_addr = remote_addr .. versionadded:: 0.7 """ return _request_ctx_stack.top is not None def has_app_context(): """Works like :func:`has_request_context` but for the application context. 
You can also just do a boolean check on the :data:`current_app` object instead. .. versionadded:: 0.9 """ return _app_ctx_stack.top is not None<|fim▁hole|>class AppContext(object): """The application context binds an application object implicitly to the current thread or greenlet, similar to how the :class:`RequestContext` binds request information. The application context is also implicitly created if a request context is created but the application is not on top of the individual application context. """ def __init__(self, app): self.app = app self.url_adapter = app.create_url_adapter(None) self.g = app.app_ctx_globals_class() # Like request context, app contexts can be pushed multiple times # but there a basic "refcount" is enough to track them. self._refcnt = 0 def push(self): """Binds the app context to the current context.""" self._refcnt += 1 if hasattr(sys, 'exc_clear'): sys.exc_clear() _app_ctx_stack.push(self) appcontext_pushed.send(self.app) def pop(self, exc=None): """Pops the app context.""" self._refcnt -= 1 if self._refcnt <= 0: if exc is None: exc = sys.exc_info()[1] self.app.do_teardown_appcontext(exc) rv = _app_ctx_stack.pop() assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ % (rv, self) appcontext_popped.send(self.app) def __enter__(self): self.push() return self def __exit__(self, exc_type, exc_value, tb): self.pop(exc_value) class RequestContext(object): """The request context contains all request relevant information. It is created at the beginning of the request and pushed to the `_request_ctx_stack` and removed at the end of it. It will create the URL adapter and request object for the WSGI environment provided. Do not attempt to use this class directly, instead use :meth:`~flask.Flask.test_request_context` and :meth:`~flask.Flask.request_context` to create this object. When the request context is popped, it will evaluate all the functions registered on the application for teardown execution (:meth:`~flask.Flask.teardown_request`). The request context is automatically popped at the end of the request for you. In debug mode the request context is kept around if exceptions happen so that interactive debuggers have a chance to introspect the data. With 0.4 this can also be forced for requests that did not fail and outside of `DEBUG` mode. By setting ``'flask._preserve_context'`` to `True` on the WSGI environment the context will not pop itself at the end of the request. This is used by the :meth:`~flask.Flask.test_client` for example to implement the deferred cleanup functionality. You might find this helpful for unittests where you need the information from the context local around for a little longer. Make sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in that situation, otherwise your unittests will leak memory. """ def __init__(self, app, environ, request=None): self.app = app if request is None: request = app.request_class(environ) self.request = request self.url_adapter = app.create_url_adapter(self.request) self.flashes = None self.session = None # Request contexts can be pushed multiple times and interleaved with # other request contexts. Now only if the last level is popped we # get rid of them. Additionally if an application context is missing # one is created implicitly so for each level we add this information self._implicit_app_ctx_stack = [] # indicator if the context was preserved. Next time another context # is pushed the preserved context is popped. 
self.preserved = False # remembers the exception for pop if there is one in case the context # preservation kicks in. self._preserved_exc = None # Functions that should be executed after the request on the response # object. These will be called before the regular "after_request" # functions. self._after_request_functions = [] self.match_request() # XXX: Support for deprecated functionality. This is going away with # Flask 1.0 blueprint = self.request.blueprint if blueprint is not None: # better safe than sorry, we don't want to break code that # already worked bp = app.blueprints.get(blueprint) if bp is not None and blueprint_is_module(bp): self.request._is_old_module = True def _get_g(self): return _app_ctx_stack.top.g def _set_g(self, value): _app_ctx_stack.top.g = value g = property(_get_g, _set_g) del _get_g, _set_g def copy(self): """Creates a copy of this request context with the same request object. This can be used to move a request context to a different greenlet. Because the actual request object is the same this cannot be used to move a request context to a different thread unless access to the request object is locked. .. versionadded:: 0.10 """ return self.__class__(self.app, environ=self.request.environ, request=self.request ) def match_request(self): """Can be overridden by a subclass to hook into the matching of the request. """ try: url_rule, self.request.view_args = \ self.url_adapter.match(return_rule=True) self.request.url_rule = url_rule except HTTPException as e: self.request.routing_exception = e def push(self): """Binds the request context to the current context.""" # If an exception occurs in debug mode or if context preservation is # activated under exception situations exactly one context stays # on the stack. The rationale is that you want to access that # information under debug situations. However if someone forgets to # pop that context again we want to make sure that on the next push # it's invalidated, otherwise we run at risk that something leaks # memory. This is usually only a problem in testsuite since this # functionality is not active in production environments. top = _request_ctx_stack.top if top is not None and top.preserved: top.pop(top._preserved_exc) # Before we push the request context we have to ensure that there # is an application context. app_ctx = _app_ctx_stack.top if app_ctx is None or app_ctx.app != self.app: app_ctx = self.app.app_context() app_ctx.push() self._implicit_app_ctx_stack.append(app_ctx) else: self._implicit_app_ctx_stack.append(None) if hasattr(sys, 'exc_clear'): sys.exc_clear() _request_ctx_stack.push(self) # Open the session at the moment that the request context is # available. This allows a custom open_session method to use the # request context (e.g. code that access database information # stored on `g` instead of the appcontext). self.session = self.app.open_session(self.request) if self.session is None: self.session = self.app.make_null_session() def pop(self, exc=None): """Pops the request context and unbinds it by doing that. This will also trigger the execution of functions registered by the :meth:`~flask.Flask.teardown_request` decorator. .. versionchanged:: 0.9 Added the `exc` argument. """ app_ctx = self._implicit_app_ctx_stack.pop() clear_request = False if not self._implicit_app_ctx_stack: self.preserved = False self._preserved_exc = None if exc is None: exc = sys.exc_info()[1] self.app.do_teardown_request(exc) # If this interpreter supports clearing the exception information # we do that now. 
This will only go into effect on Python 2.x, # on 3.x it disappears automatically at the end of the exception # stack. if hasattr(sys, 'exc_clear'): sys.exc_clear() request_close = getattr(self.request, 'close', None) if request_close is not None: request_close() clear_request = True rv = _request_ctx_stack.pop() assert rv is self, 'Popped wrong request context. (%r instead of %r)' \ % (rv, self) # get rid of circular dependencies at the end of the request # so that we don't require the GC to be active. if clear_request: rv.request.environ['werkzeug.request'] = None # Get rid of the app as well if necessary. if app_ctx is not None: app_ctx.pop(exc) def auto_pop(self, exc): if self.request.environ.get('flask._preserve_context') or \ (exc is not None and self.app.preserve_context_on_exception): self.preserved = True self._preserved_exc = exc else: self.pop(exc) def __enter__(self): self.push() return self def __exit__(self, exc_type, exc_value, tb): # do not pop the request stack if we are in debug mode and an # exception happened. This will allow the debugger to still # access the request object in the interactive shell. Furthermore # the context can be force kept alive for the test client. # See flask.testing for how this works. self.auto_pop(exc_value) def __repr__(self): return '<%s \'%s\' [%s] of %s>' % ( self.__class__.__name__, self.request.url, self.request.method, self.app.name, )<|fim▁end|>
<|file_name|>next-tick-gambi.js<|end_file_name|><|fim▁begin|>function getFunction(f,b){ return function myNextTick(){ console.log(f + " " + b);<|fim▁hole|>} process.nextTick(getFunction("foo", "bar"));<|fim▁end|>
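# A short driving sketch for the Flask context machinery defined above; the
# route and header name are invented. app_context() pushes an AppContext,
# test_request_context() pushes a RequestContext (plus an implicit AppContext),
# and after_this_request() registers a response hook on the top request context:
from flask import Flask, after_this_request, has_app_context, has_request_context

app = Flask(__name__)

@app.route("/")
def index():
    @after_this_request
    def add_header(response):
        response.headers["X-Example"] = "1"
        return response
    return "Hello"

with app.app_context():
    assert has_app_context() and not has_request_context()

with app.test_request_context("/"):
    assert has_request_context()  # implies an app context as well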
};
<|file_name|>test.js<|end_file_name|><|fim▁begin|>import _cloneRegExp from './internal/_cloneRegExp'; import _curry2 from './internal/_curry2'; import _isRegExp from './internal/_isRegExp'; import toString from './toString'; /** * Determines whether a given string matches a given regular expression. * * @func * @memberOf R * @since v0.12.0 * @category String * @sig RegExp -> String -> Boolean * @param {RegExp} pattern * @param {String} str * @return {Boolean}<|fim▁hole|> * * R.test(/^x/, 'xyz'); //=> true * R.test(/^y/, 'xyz'); //=> false */ var test = /*#__PURE__*/_curry2(function test(pattern, str) { if (!_isRegExp(pattern)) { throw new TypeError('‘test’ requires a value of type RegExp as its first argument; received ' + toString(pattern)); } return _cloneRegExp(pattern).test(str); }); export default test;<|fim▁end|>
* @see R.match * @example
<|file_name|>subtract.js<|end_file_name|><|fim▁begin|>import { subtract } from '../subtract'; describe('Core.subtract', () => { test('Subtracts the second argument from the first', () => { expect(subtract(10, 8)).toBe(2);<|fim▁hole|>});<|fim▁end|>
});
<|file_name|>Helpers.py<|end_file_name|><|fim▁begin|>__author__ = 'jhala' import types import os.path, time import json import logging import logging.config logging.config.fileConfig('logging.conf') logger = logging.getLogger(__name__) import re appInfo='appinfo.json' ''' Helper Functions ''' ''' get the file as an array of arrays ( header + rows and columns) ''' def fileInfo(fil): fileArr=[] for i in open(fil): fileArr.append(i.strip().split(",")) return fileArr ''' Return the header as an array ''' def getHeader(fileArr): for rowOne in fileArr: return rowOne def fileLastTouchedTime(fileName): mtim= int(os.path.getmtime(fileName)) ctim= int(os.path.getctime(fileName)) tims = [ mtim, ctim] tims.sort() return tims[len(tims)-1] def getImageLocation(): f=open(appInfo,'r') loc=json.load(f) return loc['imageLocation'] def getImageDataLocation(): f=open(appInfo,'r') loc=json.load(f) return loc['imageData'] def getMatLabFeatureExtractScript(): f=open(appInfo,'r') loc=json.load(f) return loc['matlabFeatureExtractScript'] def getMatLabSemanticElementsScript(): f=open(appInfo,'r') loc=json.load(f) return loc['matlabSemanticElementsScript'] def getMatlabSemanticElementsOutputFile(): f=open(appInfo,'r') loc=json.load(f) return loc['matlabSemanticElementsOutputFile'] def removeMatlabSemanticElementsOutputFile(): f=getMatlabSemanticElementsOutputFile() if os.path.exists(f) and os.path.isfile(f): os.remove(f) def getMatlabFeatureOutputFile(): f=open(appInfo,'r') loc=json.load(f) return loc['matlabFeatureOutputFile'] def getTestImageName(): f=open(appInfo,'r') loc=json.load(f) return loc['testImage'] def removeMatlabFeatureOutputFile(): f=getMatlabFeatureOutputFile() if os.path.exists(f) and os.path.isfile(f): os.remove(f) def checkFileNameExists(filName=str): return os.path.exists(filName) and os.path.isfile(filName) def getMainImageFileList(): fileList=[] epoch=time.mktime(time.strptime('1970','%Y')) for root, dirs, files in os.walk(getImageLocation()): #print root #print dirs for fil in files: thisFileName=os.path.join(root, fil) dataFileExists=False imageFileNewerThanDataFile=False dataFileRequiresUpdate=False if isMainImageFile(thisFileName) and checkFileNameExists(thisFileName): mainImageLastTouched=fileLastTouchedTime(thisFileName) expectedDataFileName = os.path.join(getImageDataLocation(), os.path.basename(root)+'_'+fil+'.json') if checkFileNameExists(expectedDataFileName ): dataFileExists=True dataFileLastTouched=fileLastTouchedTime(expectedDataFileName) else: dataFileExists=False dataFileLastTouched=epoch if dataFileExists and ( mainImageLastTouched > dataFileLastTouched) : dataFileRequiresUpdate=True if not dataFileExists:<|fim▁hole|> lcImageExists=False lcImageName = getLCImageName(thisFileName) if lcImageName != None: lcImageExists=True fileList.append({ 'lcImageExists': lcImageExists , 'lcImageName' : lcImageName, 'dataFileRequiresUpdate' : dataFileRequiresUpdate, 'imageFile' : str(thisFileName), 'dataFile' : expectedDataFileName, 'imageLastTouched': mainImageLastTouched, 'dataLastTouched': dataFileLastTouched, 'dataFileExists' : dataFileExists} ) return fileList def isMainImageFile(fileName): if re.search('.jpg$',fileName, flags=re.IGNORECASE) and not re.search('LC.jpg$',fileName, flags=re.IGNORECASE): return True else: return False def getLCImageName(imageFileName): r=re.match("(.*)(.jpg)", imageFileName, flags=re.IGNORECASE) if not r: logger.error("Invalid image file name given" + imageFileName) return None else: lcImageName = r.group(1) + "LC"+ r.group(2) if 
checkFileNameExists(lcImageName): return lcImageName else: logger.error('Image file does not exist: ' +lcImageName) return None<|fim▁end|>
dataFileRequiresUpdate=True
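# The heart of getMainImageFileList() above is a staleness rule for each
# image's JSON sidecar. A standalone restatement with hypothetical paths
# (the real module derives both locations from appinfo.json):
import os

def sidecar_status(image_path, data_path):
    """Return (data_file_exists, requires_update) for an image's JSON sidecar."""
    if not os.path.isfile(data_path):
        return False, True  # a missing sidecar always needs generation
    # mirror fileLastTouchedTime(): take the later of mtime and ctime
    image_touched = max(int(os.path.getmtime(image_path)), int(os.path.getctime(image_path)))
    data_touched = max(int(os.path.getmtime(data_path)), int(os.path.getctime(data_path)))
    return True, image_touched > data_touched

# e.g. sidecar_status("images/set1/photo.jpg", "data/set1_photo.jpg.json")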
<|file_name|>game.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division from sc2reader.utils import Length from sc2reader.events.base import Event from sc2reader.log_utils import loggable from itertools import chain @loggable class GameEvent(Event): """ This is the base class for all game events. The attributes below are universally available. """ def __init__(self, frame, pid): #: The id of the player generating the event. This is 16 for global non-player events. #: Prior to Heart of the Swarm this was the player id. Since HotS it is #: now the user id (uid), we still call it pid for backwards compatibility. You shouldn't #: ever need to use this; use :attr:`player` instead. self.pid = pid #: A reference to the :class:`~sc2reader.objects.Player` object representing #: this player in the replay. Not available for global events (:attr:`is_local` = False) self.player = None #: The frame of the game that this event was recorded at. 16 frames per game second. self.frame = frame #: The second of the game that this event was recorded at. 16 frames per game second. self.second = frame >> 4 #: A flag indicating if it is a local or global event. self.is_local = pid != 16 #: Short cut string for event class name self.name = self.__class__.__name__ def _str_prefix(self): if getattr(self, "pid", 16) == 16: player_name = "Global" elif self.player and not self.player.name: player_name = "Player {0} - ({1})".format( self.player.pid, self.player.play_race ) elif self.player: player_name = self.player.name else: player_name = "no name" return "{0}\t{1:<15} ".format(Length(seconds=int(self.frame / 16)), player_name) def __str__(self): return self._str_prefix() + self.name class GameStartEvent(GameEvent): """ Recorded when the game starts and the frames start to roll. This is a global non-player event. """ def __init__(self, frame, pid, data): super(GameStartEvent, self).__init__(frame, pid) #: ??? self.data = data class PlayerLeaveEvent(GameEvent): """ Recorded when a player leaves the game. """ def __init__(self, frame, pid, data): super(PlayerLeaveEvent, self).__init__(frame, pid) #: ??? self.data = data class UserOptionsEvent(GameEvent): """ This event is recorded for each player at the very beginning of the game before the :class:`GameStartEvent`. 
""" def __init__(self, frame, pid, data): super(UserOptionsEvent, self).__init__(frame, pid) #: self.game_fully_downloaded = data["game_fully_downloaded"] #: self.development_cheats_enabled = data["development_cheats_enabled"] #: self.multiplayer_cheats_enabled = data["multiplayer_cheats_enabled"] #: self.sync_checksumming_enabled = data["sync_checksumming_enabled"] #: self.is_map_to_map_transition = data["is_map_to_map_transition"] #: self.use_ai_beacons = data["use_ai_beacons"] #: Are workers sent to auto-mine on game start self.starting_rally = ( data["starting_rally"] if "starting_rally" in data else None ) #: self.debug_pause_enabled = data["debug_pause_enabled"] #: self.base_build_num = data["base_build_num"] def create_command_event(frame, pid, data): ability_type = data["data"][0] if ability_type == "None": return BasicCommandEvent(frame, pid, data) elif ability_type == "TargetUnit": return TargetUnitCommandEvent(frame, pid, data) elif ability_type == "TargetPoint": return TargetPointCommandEvent(frame, pid, data) elif ability_type == "Data": return DataCommandEvent(frame, pid, data) @loggable class CommandEvent(GameEvent): """ Ability events are generated when ever a player in the game issues a command to a unit or group of units. They are split into three subclasses of ability, each with their own set of associated data. The attributes listed below are shared across all ability event types. See :class:`TargetPointCommandEvent`, :class:`TargetUnitCommandEvent`, and :class:`DataCommandEvent` for individual details. """ def __init__(self, frame, pid, data): super(CommandEvent, self).__init__(frame, pid) #: Flags on the command??? self.flags = data["flags"] #: A dictionary of possible ability flags. Flags are: #: #: * alternate #: * queued #: * preempt #: * smart_click #: * smart_rally #: * subgroup #: * set_autocast, #: * set_autocast_on #: * user #: * data_a #: * data_b #: * data_passenger #: * data_abil_queue_order_id, #: * ai #: * ai_ignore_on_finish #: * is_order #: * script #: * homogenous_interruption, #: * minimap #: * repeat #: * dispatch_to_other_unit #: * target_self #: self.flag = dict( alternate=0x1 & self.flags != 0, queued=0x2 & self.flags != 0, preempt=0x4 & self.flags != 0, smart_click=0x8 & self.flags != 0, smart_rally=0x10 & self.flags != 0, subgroup=0x20 & self.flags != 0, set_autocast=0x40 & self.flags != 0, set_autocast_on=0x80 & self.flags != 0, user=0x100 & self.flags != 0, data_a=0x200 & self.flags != 0, data_passenger=0x200 & self.flags != 0, # alt-name data_b=0x400 & self.flags != 0, data_abil_queue_order_id=0x400 & self.flags != 0, # alt-name ai=0x800 & self.flags != 0, ai_ignore_on_finish=0x1000 & self.flags != 0, is_order=0x2000 & self.flags != 0, script=0x4000 & self.flags != 0, homogenous_interruption=0x8000 & self.flags != 0, minimap=0x10000 & self.flags != 0, repeat=0x20000 & self.flags != 0, dispatch_to_other_unit=0x40000 & self.flags != 0, target_self=0x80000 & self.flags != 0, ) #: Flag marking that the command had ability information self.has_ability = data["ability"] is not None #: Link the the ability group self.ability_link = data["ability"]["ability_link"] if self.has_ability else 0 #: The index of the ability in the ability group self.command_index = ( data["ability"]["ability_command_index"] if self.has_ability else 0 ) #: Additional ability data. 
self.ability_data = ( data["ability"]["ability_command_data"] if self.has_ability else 0 ) #: Unique identifier for the ability self.ability_id = self.ability_link << 5 | self.command_index #: A reference to the ability being used self.ability = None #: A shortcut to the name of the ability being used self.ability_name = "" #: The type of ability, one of: None (no target), TargetPoint, TargetUnit, or Data self.ability_type = data["data"][0] #: The raw data associated with this ability type self.ability_type_data = data["data"][1] #: Other unit id?? self.other_unit_id = data["other_unit_tag"] #: A reference to the other unit self.other_unit = None def __str__(self): string = self._str_prefix() if self.has_ability: string += "Ability ({0:X})".format(self.ability_id) if self.ability: string += " - {0}".format(self.ability.name) else: string += "Right Click" if self.ability_type == "TargetUnit": string += "; Target: {0} [{1:0>8X}]".format( self.target.name, self.target_unit_id ) if self.ability_type in ("TargetPoint", "TargetUnit"): string += "; Location: {0}".format(str(self.location)) return string class BasicCommandEvent(CommandEvent): """ Extends :class:`CommandEvent` This event is recorded for events that have no extra information recorded. Note that like all CommandEvents, the event will be recorded regardless of whether or not the command was successful. """ def __init__(self, frame, pid, data): super(BasicCommandEvent, self).__init__(frame, pid, data) class TargetPointCommandEvent(CommandEvent): """ Extends :class:`CommandEvent` This event is recorded when ever a player issues a command that targets a location and NOT a unit. Commands like Psistorm, Attack Move, Fungal Growth, and EMP fall under this category. Note that like all CommandEvents, the event will be recorded regardless of whether or not the command was successful. """ def __init__(self, frame, pid, data): super(TargetPointCommandEvent, self).__init__(frame, pid, data) #: The x coordinate of the target. Available for TargetPoint and TargetUnit type events. self.x = self.ability_type_data["point"].get("x", 0) / 4096.0 #: The y coordinate of the target. Available for TargetPoint and TargetUnit type events. self.y = self.ability_type_data["point"].get("y", 0) / 4096.0 #: The z coordinate of the target. Available for TargetPoint and TargetUnit type events. self.z = self.ability_type_data["point"].get("z", 0) #: The location of the target. Available for TargetPoint and TargetUnit type events self.location = (self.x, self.y, self.z) class TargetUnitCommandEvent(CommandEvent): """ Extends :class:`CommandEvent` This event is recorded when ever a player issues a command that targets a unit. The location of the target unit at the time of the command is also recorded. Commands like Chronoboost, Transfuse, and Snipe fall under this category. Note that like all CommandEvents, the event will be recorded regardless of whether or not the command was successful. """ def __init__(self, frame, pid, data): super(TargetUnitCommandEvent, self).__init__(frame, pid, data) #: Flags set on the target unit. Available for TargetUnit type events self.target_flags = self.ability_type_data.get("flags", None) #: Timer?? Available for TargetUnit type events. self.target_timer = self.ability_type_data.get("timer", None) #: Unique id of the target unit. Available for TargetUnit type events. #: This id can be 0 when the target unit is shrouded by fog of war. self.target_unit_id = self.ability_type_data.get("unit_tag", None) #: A reference to the targeted unit. 
When the :attr:`target_unit_id` is #: 0 this target unit is a generic, reused fog of war unit of the :attr:`target_unit_type` #: with an id of zero. It should not be confused with a real unit. self.target_unit = None #: Current integer type id of the target unit. Available for TargetUnit type events. self.target_unit_type = self.ability_type_data.get("unit_link", None) #: Integer player id of the controlling player. Available for TargetUnit type events starting in 19595. #: When the targeted unit is under fog of war this id is zero. self.control_player_id = self.ability_type_data.get("control_player_id", None) #: Integer player id of the player paying upkeep. Available for TargetUnit type events. self.upkeep_player_id = self.ability_type_data.get("upkeep_player_id", None) #: The x coordinate of the target. Available for TargetPoint and TargetUnit type events. self.x = self.ability_type_data["point"].get("x", 0) / 4096.0 #: The y coordinate of the target. Available for TargetPoint and TargetUnit type events. self.y = self.ability_type_data["point"].get("y", 0) / 4096.0 #: The z coordinate of the target. Available for TargetPoint and TargetUnit type events. self.z = self.ability_type_data["point"].get("z", 0) #: The location of the target. Available for TargetPoint and TargetUnit type events self.location = (self.x, self.y, self.z) class UpdateTargetPointCommandEvent(TargetPointCommandEvent): """ Extends :class: 'TargetPointCommandEvent' This event is generated when the user changes the point of a unit. Appears to happen when a unit is moving and it is given a new command. It's possible there are other instances of this occurring. """ name = "UpdateTargetPointCommandEvent" class UpdateTargetUnitCommandEvent(TargetUnitCommandEvent): """ Extends :class:`TargetUnitCommandEvent` This event is generated when a TargetUnitCommandEvent is updated, likely due to changing the target unit. It is unclear if this needs to be a separate event from TargetUnitCommandEvent, but for flexibility, it will be treated differently. One example of this event occurring is casting inject on a hatchery while holding shift, and then shift clicking on a second hatchery. """ name = "UpdateTargetUnitCommandEvent" class DataCommandEvent(CommandEvent): """ Extends :class:`CommandEvent` DataCommandEvent are recorded when ever a player issues a command that has no target. Commands like Burrow, SeigeMode, Train XYZ, and Stop fall under this category. Note that like all CommandEvents, the event will be recorded regardless of whether or not the command was successful. """ def __init__(self, frame, pid, data): super(DataCommandEvent, self).__init__(frame, pid, data) #: Other target data. Available for Data type events. self.target_data = self.ability_type_data.get("data", None) @loggable class CommandManagerStateEvent(GameEvent): """ These events indicated that the last :class:`CommandEvent` called has been called again. For example, if you add three SCVs to an empty queue on a Command Center, the first add will be generate a :class:`BasicCommandEvent` and the two subsequent adds will each generate a :class:`CommandManagerStateEvent`. """ def __init__(self, frame, pid, data): super(CommandManagerStateEvent, self).__init__(frame, pid) #: Always 1? self.state = data["state"] #: An index identifying how many events of this type have been called self.sequence = data["sequence"] @loggable class SelectionEvent(GameEvent): """ Selection events are generated when ever the active selection of the player is updated. 
Unlike other game events, these events can also be generated by non-player actions like unit deaths or transformations. Starting in Starcraft 2.0.0, selection events targeting control group buffers are also generated when control group selections are modified by non-player actions. When a player action updates a control group a :class:`ControlGroupEvent` is generated. """ def __init__(self, frame, pid, data): super(SelectionEvent, self).__init__(frame, pid) #: The control group being modified. 10 for active selection self.control_group = data["control_group_index"] #: Deprecated, use control_group self.bank = self.control_group #: ??? self.subgroup_index = data["subgroup_index"] #: The type of mask to apply. One of None, Mask, OneIndices, ZeroIndices self.mask_type = data["remove_mask"][0] #: The data for the mask self.mask_data = data["remove_mask"][1] #: The unit type data for the new units self.new_unit_types = [ ( d["unit_link"], d["subgroup_priority"], d["intra_subgroup_priority"], d["count"], ) for d in data["add_subgroups"] ] #: The unit id data for the new units self.new_unit_ids = data["add_unit_tags"] # This stretches out the unit types and priorities to be zipped with ids. unit_types = chain( *[ [utype] * count for ( utype, subgroup_priority, intra_subgroup_priority, count, ) in self.new_unit_types ] ) unit_subgroup_priorities = chain( *[ [subgroup_priority] * count for ( utype, subgroup_priority, intra_subgroup_priority, count, ) in self.new_unit_types ] ) unit_intra_subgroup_priorities = chain( *[ [intra_subgroup_priority] * count for ( utype, subgroup_priority, intra_subgroup_priority, count, ) in self.new_unit_types ] ) #: The combined type and id information for new units self.new_unit_info = list( zip( self.new_unit_ids, unit_types, unit_subgroup_priorities, unit_intra_subgroup_priorities, ) ) #: A list of references to units added by this selection self.new_units = None #: Deprecated, see new_units self.objects = None def __str__(self): if self.new_units: return GameEvent.__str__(self) + str([str(u) for u in self.new_units]) else: return GameEvent.__str__(self) + str([str(u) for u in self.new_unit_info]) def create_control_group_event(frame, pid, data): update_type = data["control_group_update"] if update_type == 0: return SetControlGroupEvent(frame, pid, data) elif update_type == 1: return AddToControlGroupEvent(frame, pid, data) elif update_type == 2: return GetControlGroupEvent(frame, pid, data) elif update_type == 3: # TODO: What could this be?!? return ControlGroupEvent(frame, pid, data) else: # No idea what this is but we're seeing update_types of 4 and 5 in 3.0 return ControlGroupEvent(frame, pid, data) @loggable class ControlGroupEvent(GameEvent): """ ControlGroup events are recorded when ever a player action modifies or accesses a control group. There are three kinds of events, generated by each of the possible player actions: * :class:`SetControlGroup` - Recorded when a user sets a control group (ctrl+#). * :class:`GetControlGroup` - Recorded when a user retrieves a control group (#). * :class:`AddToControlGroup` - Recorded when a user adds to a control group (shift+ctrl+#) All three events have the same set of data (shown below) but are interpreted differently. See the class entry for details. 
""" def __init__(self, frame, pid, data): super(ControlGroupEvent, self).__init__(frame, pid) #: Index to the control group being modified self.control_group = data["control_group_index"] #: Deprecated, use control_group self.bank = self.control_group #: Deprecated, use control_group self.hotkey = self.control_group #: The type of update being performed, 0 (set),1 (add),2 (get) self.update_type = data["control_group_update"] #: The type of mask to apply. One of None, Mask, OneIndices, ZeroIndices self.mask_type = data["remove_mask"][0] #: The data for the mask self.mask_data = data["remove_mask"][1] class SetControlGroupEvent(ControlGroupEvent): """ Extends :class:`ControlGroupEvent` This event does a straight forward replace of the current control group contents with the player's current selection. This event doesn't have masks set. """ class AddToControlGroupEvent(SetControlGroupEvent): """ Extends :class:`ControlGroupEvent` This event adds the current selection to the control group. """ class GetControlGroupEvent(ControlGroupEvent): """ Extends :class:`ControlGroupEvent` This event replaces the current selection with the contents of the control group. The mask data is used to limit that selection to units that are currently selectable. You might have 1 medivac and 8 marines on the control group but if the 8 marines are inside the medivac they cannot be part of your selection. """ @loggable class CameraEvent(GameEvent): """ Camera events are generated when ever the player camera moves, zooms, or rotates. It does not matter why the camera changed, this event simply records the current state of the camera after changing. """ def __init__(self, frame, pid, data): super(CameraEvent, self).__init__(frame, pid) #: The x coordinate of the center of the camera self.x = (data["target"]["x"] if data["target"] is not None else 0) / 256.0 #: The y coordinate of the center of the camera self.y = (data["target"]["y"] if data["target"] is not None else 0) / 256.0 #: The location of the center of the camera self.location = (self.x, self.y) #: The distance to the camera target ?? self.distance = data["distance"] #: The current pitch of the camera self.pitch = data["pitch"] #: The current yaw of the camera self.yaw = data["yaw"]<|fim▁hole|> self.name, self.x, self.y ) @loggable class ResourceTradeEvent(GameEvent): """ Generated when a player trades resources with another player. But not when fullfulling resource requests. 
""" def __init__(self, frame, pid, data): super(ResourceTradeEvent, self).__init__(frame, pid) #: The id of the player sending the resources self.sender_id = pid #: A reference to the player sending the resources self.sender = None #: The id of the player receiving the resources self.recipient_id = data["recipient_id"] #: A reference to the player receiving the resources self.recipient = None #: An array of resources sent self.resources = data["resources"] #: Amount minerals sent self.minerals = self.resources[0] if len(self.resources) >= 1 else None #: Amount vespene sent self.vespene = self.resources[1] if len(self.resources) >= 2 else None #: Amount terrazine sent self.terrazine = self.resources[2] if len(self.resources) >= 3 else None #: Amount custom resource sent self.custom_resource = self.resources[3] if len(self.resources) >= 4 else None def __str__(self): return self._str_prefix() + " transfer {0} minerals, {1} gas, {2} terrazine, and {3} custom to {4}".format( self.minerals, self.vespene, self.terrazine, self.custom_resource, self.recipient, ) class ResourceRequestEvent(GameEvent): """ Generated when a player creates a resource request. """ def __init__(self, frame, pid, data): super(ResourceRequestEvent, self).__init__(frame, pid) #: An array of resources sent self.resources = data["resources"] #: Amount minerals sent self.minerals = self.resources[0] if len(self.resources) >= 1 else None #: Amount vespene sent self.vespene = self.resources[1] if len(self.resources) >= 2 else None #: Amount terrazine sent self.terrazon = self.resources[2] if len(self.resources) >= 3 else None #: Amount custom resource sent self.custom_resource = self.resources[3] if len(self.resources) >= 4 else None def __str__(self): return ( self._str_prefix() + " requests {0} minerals, {1} gas, {2} terrazine, and {3} custom".format( self.minerals, self.vespene, self.terrazine, self.custom_resource ) ) class ResourceRequestFulfillEvent(GameEvent): """ Generated when a player accepts a resource request. """ def __init__(self, frame, pid, data): super(ResourceRequestFulfillEvent, self).__init__(frame, pid) #: The id of the request being fulfilled self.request_id = data["request_id"] class ResourceRequestCancelEvent(GameEvent): """ Generated when a player cancels their resource request. """ def __init__(self, frame, pid, data): super(ResourceRequestCancelEvent, self).__init__(frame, pid) #: The id of the request being cancelled self.request_id = data["request_id"] class HijackReplayGameEvent(GameEvent): """ Generated when players take over from a replay. """ def __init__(self, frame, pid, data): super(HijackReplayGameEvent, self).__init__(frame, pid) #: The method used. Not sure what 0/1 represent self.method = data["method"] #: Information on the users hijacking the game self.user_infos = data["user_infos"]<|fim▁end|>
def __str__(self): return self._str_prefix() + "{0} at ({1}, {2})".format(
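# The classes above are sc2reader's game-event layer. A consumption sketch --
# load_replay() and replay.game_events reflect the sc2reader public API as I
# understand it, and the replay path is invented:
import sc2reader

replay = sc2reader.load_replay("example.SC2Replay", load_level=4)
for event in replay.game_events:
    # every event carries .name (set from the class name in GameEvent.__init__)
    if event.name == "CameraEvent" and event.player is not None:
        print(event.second, event.player.name, "camera ->", event.location)
    elif event.name == "ResourceTradeEvent":
        print(event.second, event.sender, "sent", event.resources, "to", event.recipient)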
<|file_name|>UsefulConfigParser.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 SnapDisco Pty Ltd, Australia. # All rights reserved. # # This source code is licensed under the terms of the MIT license # found in the "LICENSE" file in the root directory of this source tree. import sys if sys.version_info.major >= 3: from configparser import RawConfigParser else: from ConfigParser import RawConfigParser from .OrderedMultiDict import OrderedMultiDict class UsefulConfigParser(object): """A config parser that sucks less than those in module `ConfigParser`.""" def __init__(self, filenames_to_try=[]): # FUN FACT: In Python 3.2, they spontaneously changed the behaviour of # RawConfigParser so that it no longer considers ';' a comment delimiter # for inline comments. # # Compare: # "Configuration files may include comments, prefixed by specific # characters (# and ;). Comments may appear on their own in an otherwise # empty line, or may be entered in lines holding values or section names. # In the latter case, they need to be preceded by a whitespace character # to be recognized as a comment. (For backwards compatibility, only ; # starts an inline comment, while # does not.)" # -- https://docs.python.org/2/library/configparser.html # vs: # "Comment prefixes are strings that indicate the start of a valid comment # within a config file. comment_prefixes are used only on otherwise empty # lines (optionally indented) whereas inline_comment_prefixes can be used # after every valid value (e.g. section names, options and empty lines as # well). By default inline comments are disabled and '#' and ';' are used # as prefixes for whole line comments. # Changed in version 3.2: In previous versions of configparser behaviour # matched comment_prefixes=('#',';') and inline_comment_prefixes=(';',)." # -- https://docs.python.org/3/library/configparser.html#customizing-parser-behaviour # # Grrr... if sys.version_info.major >= 3: self._cp = RawConfigParser(dict_type=OrderedMultiDict, inline_comment_prefixes=(';',)) else: self._cp = RawConfigParser(dict_type=OrderedMultiDict) if isinstance(filenames_to_try, str): filenames_to_try = [filenames_to_try] self._filenames_to_try = filenames_to_try[:] def read(self, filenames_to_try=[]): if isinstance(filenames_to_try, str): filenames_to_try = [filenames_to_try] self._filenames_to_try.extend(filenames_to_try) return self._cp.read(self._filenames_to_try) def sections(self): return self._cp.sections() def options(self, section_name): ## The client code doesn't need to check in advance that the requested ## section name is present in the config; this function will check ## this automatically, so no exception is raised by RawConfigParser. ## Check that `section_name` is present in the config. ## Otherwise, RawConfigParser will raise ConfigParser.NoSectionError. 
if not self._cp.has_section(section_name): return [] return self._cp.options(section_name) def get(self, section_name, option_name, do_optionxform=True): if do_optionxform: # https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.optionxform option_name = self._cp.optionxform(option_name) if section_name is None: return self._get_optval_in_sections(self.sections(), option_name) elif isinstance(section_name, str): return self._get_optval_in_sections([section_name], option_name) else: return self._get_optval_in_sections(section_name, option_name) def _get_optval_in_sections(self, section_names, option_name): ## The client code doesn't need to check in advance that the requested<|fim▁hole|> optvals = [] for section_name in section_names: ## Check that `section_name` is present in the config. ## Otherwise, RawConfigParser will raise ConfigParser.NoSectionError. if not self._cp.has_section(section_name): continue optvals.extend([optval for optname, optval in self._cp.items(section_name) if optname == option_name]) return optvals def getboolean(self, section_name, option_name, do_optionxform=True): # https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.getboolean return [self._coerce_to_boolean(optval) for optval in self.get(section_name, option_name, do_optionxform)] _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, '0': False, 'no': False, 'false': False, 'off': False} def _coerce_to_boolean(self, optval_str): # 'The accepted values for the option are "1", "yes", "true", and "on", # which cause this method to return True, and "0", "no", "false", and # "off", which cause it to return False. These string values are checked # in a case-insensitive manner. Any other value will cause it to raise # ValueError.' # https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.getboolean ovs_lower = optval_str.lower() if ovs_lower not in self._boolean_states: raise ValueError("Not a boolean: %s" % optval_str) return self._boolean_states[ovs_lower]<|fim▁end|>
## section name(s) are present in the config; this function will check ## this automatically, so no exception is raised by RawConfigParser.
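# Usage sketch for the class above. Unlike RawConfigParser.get(), this get()
# returns a *list* -- one entry per occurrence of the option, which the
# OrderedMultiDict backing makes possible -- and passing None as the section
# scans every section. The file and option names here are invented:
parser = UsefulConfigParser("example.ini")
parser.read()

hosts = parser.get("servers", "host")              # e.g. ["alpha", "beta"] if 'host' repeats
verbose = parser.getboolean("logging", "verbose")  # e.g. [True]
all_hosts = parser.get(None, "host")               # search 'host' across all sections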
<|file_name|>handler_librato_out.go<|end_file_name|><|fim▁begin|>package librato import ( "encoding/json" "errors" "fmt" "strconv" "time" cc "github.com/grokify/commonchat" "github.com/grokify/chathooks/pkg/config" "github.com/grokify/chathooks/pkg/handlers" "github.com/grokify/chathooks/pkg/models" ) const ( DisplayName = "Librato" HandlerKey = "librato" MessageDirection = "out" DocumentationURL = "https://www.runscope.com/docs/api-testing/notifications#webhook" MessageBodyType = models.JSON ) var ( IncludeRecordedAt = false ) func NewHandler() handlers.Handler { return handlers.Handler{MessageBodyType: MessageBodyType, Normalize: Normalize} } func Normalize(cfg config.Configuration, hReq handlers.HandlerRequest) (cc.Message, error) { src, err := LibratoOutMessageFromBytes(hReq.Body) if err != nil { return cc.NewMessage(), err } if src.Clear == "normal" { return NormalizeSourceCleared(cfg, src), nil } return NormalizeSourceTriggered(cfg, src), nil } func NormalizeSourceTriggered(cfg config.Configuration, src LibratoOutMessage) cc.Message { src.Inflate() ccMsg := cc.NewMessage() iconURL, err := cfg.GetAppIconURL(HandlerKey) if err == nil { ccMsg.IconURL = iconURL.String() } ccMsg.Activity = "Alert triggered" if len(src.Alert.Name) > 0 { if len(src.Alert.RunbookURL) > 0 { ccMsg.Title = fmt.Sprintf("Alert [%v](%s) has triggered!", src.Alert.Name, src.Alert.RunbookURL) } else { ccMsg.Title = fmt.Sprintf("Alert %v has triggered!", src.Alert.Name) } } for violationName, violationSet := range src.Violations { n := len(violationSet) for i, violation := range violationSet { violation.Name = violationName violationSuffix := "" if n > 1 { violationSuffix = fmt.Sprintf(" %v", i+1) } ccMsg.AddAttachment(BuildViolationAttachment(src, violation, violationSuffix)) } } return ccMsg } func BuildViolationAttachment(src LibratoOutMessage, violation LibratoOutViolation, violationSuffix string) cc.Attachment { attachment := cc.NewAttachment() condition, errNoCondition := src.GetCondition(violation.ConditionViolated) IncludeRecordedAt = true violationRecordedAtSuffix := "" if IncludeRecordedAt && violation.RecordedAt > 0 { dt := time.Unix(violation.RecordedAt, 0).UTC() violationRecordedAtSuffix = fmt.Sprintf(" recorded at %v", dt.Format(time.RFC1123)) } if errNoCondition == nil { conditionComparison := "above" if float64(violation.Value) < condition.Threshold { conditionComparison = "below" } attachment.AddField(cc.Field{ Title: fmt.Sprintf("Violation%s", violationSuffix), Value: fmt.Sprintf("%s metric `%v` was **%s** threshold %v with value %v%s", violation.Name, violation.Metric, conditionComparison, strconv.FormatFloat(condition.Threshold, 'f', -1, 64), violation.Value, violationRecordedAtSuffix)}) } else { attachment.AddField(cc.Field{ Title: "Violation", Value: fmt.Sprintf("%v: metric `%v` with value %v%s", violation.Name, violation.Metric, violation.Value, violationRecordedAtSuffix)}) } if 1 == 0 { if len(violation.Name) > 0 { attachment.AddField(cc.Field{ Title: "Violation Name", Value: violation.Name, Short: true}) }<|fim▁hole|> Value: violation.Metric, Short: true}) } condition, err := src.GetCondition(violation.ConditionViolated) if err == nil { attachment.AddField(cc.Field{ Title: "Threshold", Value: fmt.Sprintf("%v", condition.Threshold), Short: true}) } if violation.Value > 0.0 { attachment.AddField(cc.Field{ Title: "Value", Value: fmt.Sprintf("%v", violation.Value), Short: true}) } if 1 == 0 { field := cc.Field{} if len(violation.Name) > 0 { field.Title = violation.Name } condition, err := 
src.GetCondition(violation.ConditionViolated) if err == nil { field.Value = fmt.Sprintf("Metric %s was above threshold %v with value %v", violation.Metric, condition.Threshold, violation.Value) } attachment.AddField(field) } if violation.RecordedAt > 0 { dt := time.Unix(violation.RecordedAt, 0).UTC() attachment.AddField(cc.Field{ Title: "Recorded At", Value: dt.Format(time.RFC1123)}) } } return attachment } func NormalizeSourceCleared(cfg config.Configuration, src LibratoOutMessage) cc.Message { ccMsg := cc.NewMessage() iconURL, err := cfg.GetAppIconURL(HandlerKey) if err == nil { ccMsg.IconURL = iconURL.String() } ccMsg.Activity = "Alert cleared" alertName := src.Alert.Name if len(alertName) < 1 { alertName = "Alert" } triggerTime := "" if src.TriggerTime > 0 { dt := time.Unix(src.TriggerTime, 0).UTC() triggerTime = fmt.Sprintf(" at %v", dt.Format(time.RFC1123)) } if len(src.Alert.RunbookURL) > 0 { ccMsg.Title = fmt.Sprintf("[%s](%s) cleared%s", alertName, src.Alert.RunbookURL, triggerTime) } else { ccMsg.Title = fmt.Sprintf("%s cleared%s", alertName, triggerTime) } return ccMsg } type LibratoOutMessage struct { Alert LibratoOutAlert `json:"alert,omitempty"` Account string `json:"account,omitempty"` TriggerTime int64 `json:"trigger_time,omitempty"` Conditions []LibratoOutCondition `json:"conditions,omitempty"` ConditionsMap map[int64]LibratoOutCondition `json:"-,omitempty"` Violations map[string][]LibratoOutViolation `json:"violations,omitempty"` Clear string `json:"clear,omitempty"` } func (msg *LibratoOutMessage) Inflate() { msg.ConditionsMap = map[int64]LibratoOutCondition{} for _, condition := range msg.Conditions { msg.ConditionsMap[condition.Id] = condition } } func (msg *LibratoOutMessage) GetCondition(conditionId int64) (LibratoOutCondition, error) { if condition, ok := msg.ConditionsMap[conditionId]; ok { return condition, nil } return LibratoOutCondition{}, errors.New(fmt.Sprintf("Condition %v not found", conditionId)) } type LibratoOutAlert struct { Id int64 `json:"id,omitempty"` Name string `json:"name,omitempty"` RunbookURL string `json:"runbook_url,omitempty"` Version int64 `json:"version,omitempty"` } type LibratoOutCondition struct { Id int64 `json:"id,omitempty"` Type string `json:"type,omitempty"` Threshold float64 `json:"threshold,omitempty"` Duration int64 `json:"duration,omitempty"` } type LibratoOutViolation struct { Name string Metric string `json:"metric,omitempty"` Value float64 `json:"value,omitempty"` RecordedAt int64 `json:"recorded_at,omitempty"` ConditionViolated int64 `json:"condition_violated,omitempty"` Count int64 `json:"count,omitempty"` Begin int64 `json:"begin,omitempty"` End int64 `json:"end,omitempty"` } func LibratoOutMessageFromBytes(bytes []byte) (LibratoOutMessage, error) { msg := LibratoOutMessage{} err := json.Unmarshal(bytes, &msg) return msg, err }<|fim▁end|>
if len(violation.Metric) > 0 { attachment.AddField(cc.Field{ Title: "Metric",
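For reference, a minimal sketch of the alert payload this handler unmarshals. The key names come from the `json:"..."` struct tags in the Go file above; every value here is invented for illustration.

import json

# Hypothetical Librato-style alert body mirroring LibratoOutMessage,
# LibratoOutAlert, LibratoOutCondition and LibratoOutViolation above.
payload = {
    "alert": {"id": 1, "name": "cpu.high",
              "runbook_url": "https://example.test/runbook", "version": 2},
    "account": "ops@example.test",
    "trigger_time": 1489731895,
    "conditions": [{"id": 7, "type": "above", "threshold": 90.0, "duration": 60}],
    "violations": {
        "web-1": [{"metric": "cpu.percent", "value": 98.5,
                   "recorded_at": 1489731895, "condition_violated": 7}],
    },
    "clear": "",  # "normal" here would route to NormalizeSourceCleared instead
}
body = json.dumps(payload).encode()  # the bytes LibratoOutMessageFromBytes parses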
<|file_name|>_covtype.py<|end_file_name|><|fim▁begin|>"""Forest covertype dataset. A classic dataset for classification benchmarks, featuring categorical and real-valued features. The dataset page is available from UCI Machine Learning Repository https://archive.ics.uci.edu/ml/datasets/Covertype Courtesy of Jock A. Blackard and Colorado State University. """ # Author: Lars Buitinck # Peter Prettenhofer <[email protected]> # License: BSD 3 clause from gzip import GzipFile import logging from os.path import dirname, exists, join from os import remove, makedirs import numpy as np import joblib from . import get_data_home from ._base import _convert_data_dataframe from ._base import _fetch_remote from ._base import RemoteFileMetadata from ..utils import Bunch from ._base import _pkl_filepath from ..utils import check_random_state from ..utils.validation import _deprecate_positional_args # The original data can be found in: # https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz ARCHIVE = RemoteFileMetadata( filename='covtype.data.gz', url='https://ndownloader.figshare.com/files/5976039', checksum=('614360d0257557dd1792834a85a1cdeb' 'fadc3c4f30b011d56afee7ffb5b15771')) logger = logging.getLogger(__name__) # Column names reference: # https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info FEATURE_NAMES = ["Elevation", "Aspect", "Slope", "Horizontal_Distance_To_Hydrology", "Vertical_Distance_To_Hydrology", "Horizontal_Distance_To_Roadways", "Hillshade_9am", "Hillshade_Noon", "Hillshade_3pm", "Horizontal_Distance_To_Fire_Points"] FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)] FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)] TARGET_NAMES = ["Cover_Type"] @_deprecate_positional_args def fetch_covtype(*, data_home=None, download_if_missing=True, random_state=None, shuffle=False, return_X_y=False, as_frame=False): """Load the covertype dataset (classification). Download it if necessary. ================= ============ Classes 7 Samples total 581012 Dimensionality 54 Features int ================= ============ Read more in the :ref:`User Guide <covtype_dataset>`. <|fim▁hole|> Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. shuffle : bool, default=False Whether to shuffle dataset. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.24 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (581012, 54) Each row corresponds to the 54 features in the dataset. 
target : ndarray of shape (581012,) Each value corresponds to one of the 7 forest covertypes with values ranging between 1 to 7. frame : dataframe of shape (581012, 53) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str Description of the forest covertype dataset. feature_names : list The names of the dataset columns. target_names: list The names of the target columns. (data, target) : tuple if ``return_X_y`` is True .. versionadded:: 0.20 """ data_home = get_data_home(data_home=data_home) covtype_dir = join(data_home, "covertype") samples_path = _pkl_filepath(covtype_dir, "samples") targets_path = _pkl_filepath(covtype_dir, "targets") available = exists(samples_path) if download_if_missing and not available: if not exists(covtype_dir): makedirs(covtype_dir) logger.info("Downloading %s" % ARCHIVE.url) archive_path = _fetch_remote(ARCHIVE, dirname=covtype_dir) Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=',') # delete archive remove(archive_path) X = Xy[:, :-1] y = Xy[:, -1].astype(np.int32, copy=False) joblib.dump(X, samples_path, compress=9) joblib.dump(y, targets_path, compress=9) elif not available and not download_if_missing: raise IOError("Data not found and `download_if_missing` is False") try: X, y except NameError: X = joblib.load(samples_path) y = joblib.load(targets_path) if shuffle: ind = np.arange(X.shape[0]) rng = check_random_state(random_state) rng.shuffle(ind) X = X[ind] y = y[ind] module_path = dirname(__file__) with open(join(module_path, 'descr', 'covtype.rst')) as rst_file: fdescr = rst_file.read() frame = None if as_frame: frame, X, y = _convert_data_dataframe(caller_name="fetch_covtype", data=X, target=y, feature_names=FEATURE_NAMES, target_names=TARGET_NAMES) if return_X_y: return X, y return Bunch(data=X, target=y, frame=frame, target_names=TARGET_NAMES, feature_names=FEATURE_NAMES, DESCR=fdescr)<|fim▁end|>
Parameters ---------- data_home : str, default=None
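A short usage sketch for the `fetch_covtype` API documented above; the shapes follow the docstring, and scikit-learn >= 0.24 is assumed for `as_frame`.

from sklearn.datasets import fetch_covtype

# Plain NumPy arrays, shuffled reproducibly.
X, y = fetch_covtype(return_X_y=True, shuffle=True, random_state=0)
print(X.shape, y.shape)  # (581012, 54) (581012,)

# Pandas-backed variant; data and target are combined in bunch.frame.
bunch = fetch_covtype(as_frame=True)
print(bunch.frame.columns[:3].tolist())  # ['Elevation', 'Aspect', 'Slope']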
<|file_name|>diagnostic_led.py<|end_file_name|><|fim▁begin|>import machine import time <|fim▁hole|> with open('config/diagnostic_led.pin', 'r') as fp: invert = False value = int(fp.read()) if value < 0: value = -value invert = True DIAGNOSTIC_LED = machine.Signal(value, machine.Pin.OUT, invert=invert) DIAGNOSTIC_LED.off() except: pass def blink_forever(cycle_period_ms): while True: blink_once(cycle_period_ms) def blink_n(cycle_period_ms, count): i = 0 while i < count: blink_once(cycle_period_ms) i += 1 def blink_once(cycle_period_ms): half_period = cycle_period_ms // 2 if DIAGNOSTIC_LED is not None: DIAGNOSTIC_LED.on() time.sleep_ms(half_period) if DIAGNOSTIC_LED is not None: DIAGNOSTIC_LED.off() time.sleep_ms(half_period)<|fim▁end|>
DIAGNOSTIC_LED = None try:
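A minimal usage sketch for the module above, assuming a MicroPython board and a `config/diagnostic_led.pin` file containing the LED pin number (negative for an active-low LED), as the setup code expects.

import diagnostic_led

# Three slow 1-second heartbeat cycles, then a fast blink that never returns.
diagnostic_led.blink_n(1000, 3)
diagnostic_led.blink_forever(200)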
<|file_name|>categoryLevel.js<|end_file_name|><|fim▁begin|>var CategoryLevel = function(){ 'use strict'; var categorys = {}; this.addCategory = function(_name) { categorys[_name] = []; }; this.addDataToLastCategory = function(_categoryName, _lineData, _className) {<|fim▁hole|><|fim▁end|>
var category = categorys[_categoryName]; }; };
<|file_name|>advisor.spec.js<|end_file_name|><|fim▁begin|>'use strict'; describe('Controller: AdvisorCtrl', function () {<|fim▁hole|> var AdvisorCtrl, scope; // Initialize the controller and a mock scope beforeEach(inject(function ($controller, $rootScope) { scope = $rootScope.$new(); AdvisorCtrl = $controller('AdvisorCtrl', { $scope: scope }); })); it('should ...', function () { expect(1).toEqual(1); }); });<|fim▁end|>
// load the controller's module beforeEach(module('advisorLinkApp'));
<|file_name|>timeout.rs<|end_file_name|><|fim▁begin|>// * This file is part of the uutils coreutils package. // * // * (c) Alex Lyon <[email protected]> // * // * For the full copyright and license information, please view the LICENSE // * file that was distributed with this source code. // spell-checker:ignore (ToDO) tstr sigstr cmdname setpgid sigchld #[macro_use] extern crate uucore; extern crate clap; use clap::{crate_version, App, AppSettings, Arg}; use std::io::ErrorKind; use std::process::{Command, Stdio}; use std::time::Duration; use uucore::display::Quotable; use uucore::error::{UResult, USimpleError}; use uucore::process::ChildExt; use uucore::signals::{signal_by_name_or_value, signal_name_by_value}; use uucore::{format_usage, InvalidEncodingHandling};<|fim▁hole|> const ERR_EXIT_STATUS: i32 = 125; pub mod options { pub static FOREGROUND: &str = "foreground"; pub static KILL_AFTER: &str = "kill-after"; pub static SIGNAL: &str = "signal"; pub static PRESERVE_STATUS: &str = "preserve-status"; pub static VERBOSE: &str = "verbose"; // Positional args. pub static DURATION: &str = "duration"; pub static COMMAND: &str = "command"; } struct Config { foreground: bool, kill_after: Option<Duration>, signal: usize, duration: Duration, preserve_status: bool, verbose: bool, command: Vec<String>, } impl Config { fn from(options: &clap::ArgMatches) -> UResult<Self> { let signal = match options.value_of(options::SIGNAL) { Some(signal_) => { let signal_result = signal_by_name_or_value(signal_); match signal_result { None => { unreachable!("invalid signal {}", signal_.quote()); } Some(signal_value) => signal_value, } } _ => uucore::signals::signal_by_name_or_value("TERM").unwrap(), }; let kill_after = options .value_of(options::KILL_AFTER) .map(|time| uucore::parse_time::from_str(time).unwrap()); let duration = match uucore::parse_time::from_str(options.value_of(options::DURATION).unwrap()) { Ok(duration) => duration, Err(err) => return Err(USimpleError::new(1, err)), }; let preserve_status: bool = options.is_present(options::PRESERVE_STATUS); let foreground = options.is_present(options::FOREGROUND); let verbose = options.is_present(options::VERBOSE); let command = options .values_of(options::COMMAND) .unwrap() .map(String::from) .collect::<Vec<_>>(); Ok(Self { foreground, kill_after, signal, duration, preserve_status, verbose, command, }) } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args = args .collect_str(InvalidEncodingHandling::ConvertLossy) .accept_any(); let app = uu_app(); let matches = app.get_matches_from(args); let config = Config::from(&matches)?; timeout( &config.command, config.duration, config.signal, config.kill_after, config.foreground, config.preserve_status, config.verbose, ) } pub fn uu_app<'a>() -> App<'a> { App::new("timeout") .version(crate_version!()) .about(ABOUT) .override_usage(format_usage(USAGE)) .arg( Arg::new(options::FOREGROUND) .long(options::FOREGROUND) .help("when not running timeout directly from a shell prompt, allow COMMAND to read from the TTY and get TTY signals; in this mode, children of COMMAND will not be timed out") ) .arg( Arg::new(options::KILL_AFTER) .short('k') .takes_value(true)) .arg( Arg::new(options::PRESERVE_STATUS) .long(options::PRESERVE_STATUS) .help("exit with the same status as COMMAND, even when the command times out") ) .arg( Arg::new(options::SIGNAL) .short('s') .long(options::SIGNAL) .help("specify the signal to be sent on timeout; SIGNAL may be a name like 'HUP' or a number; see 'kill -l' for a list of 
signals") .takes_value(true) ) .arg( Arg::new(options::VERBOSE) .short('v') .long(options::VERBOSE) .help("diagnose to stderr any signal sent upon timeout") ) .arg( Arg::new(options::DURATION) .index(1) .required(true) ) .arg( Arg::new(options::COMMAND) .index(2) .required(true) .multiple_occurrences(true) ) .setting(AppSettings::TrailingVarArg) .setting(AppSettings::InferLongArgs) } /// Remove pre-existing SIGCHLD handlers that would make waiting for the child's exit code fail. fn unblock_sigchld() { unsafe { nix::sys::signal::signal( nix::sys::signal::Signal::SIGCHLD, nix::sys::signal::SigHandler::SigDfl, ) .unwrap(); } } /// TODO: Improve exit codes, and make them consistent with the GNU Coreutils exit codes. fn timeout( cmd: &[String], duration: Duration, signal: usize, kill_after: Option<Duration>, foreground: bool, preserve_status: bool, verbose: bool, ) -> UResult<()> { if !foreground { unsafe { libc::setpgid(0, 0) }; } let mut process = Command::new(&cmd[0]) .args(&cmd[1..]) .stdin(Stdio::inherit()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .spawn() .map_err(|err| { let status_code = if err.kind() == ErrorKind::NotFound { // FIXME: not sure which to use 127 } else { // FIXME: this may not be 100% correct... 126 }; USimpleError::new(status_code, format!("failed to execute process: {}", err)) })?; unblock_sigchld(); match process.wait_or_timeout(duration) { Ok(Some(status)) => { let status_code = status.code().unwrap_or_else(|| status.signal().unwrap()); if status_code == 0 { Ok(()) } else { Err(status_code.into()) } } Ok(None) => { if verbose { show_error!( "sending signal {} to command {}", signal_name_by_value(signal).unwrap(), cmd[0].quote() ); } process .send_signal(signal) .map_err(|e| USimpleError::new(ERR_EXIT_STATUS, format!("{}", e)))?; if let Some(kill_after) = kill_after { match process.wait_or_timeout(kill_after) { Ok(Some(status)) => { if preserve_status { let status_code = status.code().unwrap_or_else(|| status.signal().unwrap()); if status_code == 0 { Ok(()) } else { Err(status_code.into()) } } else { Err(124.into()) } } Ok(None) => { if verbose { show_error!("sending signal KILL to command {}", cmd[0].quote()); } process .send_signal(uucore::signals::signal_by_name_or_value("KILL").unwrap()) .map_err(|e| USimpleError::new(ERR_EXIT_STATUS, format!("{}", e)))?; process .wait() .map_err(|e| USimpleError::new(ERR_EXIT_STATUS, format!("{}", e)))?; Err(137.into()) } Err(_) => Err(124.into()), } } else { Err(124.into()) } } Err(_) => { // We're going to return ERR_EXIT_STATUS regardless of // whether `send_signal()` succeeds or fails, so just // ignore the return value. process .send_signal(signal) .map_err(|e| USimpleError::new(ERR_EXIT_STATUS, format!("{}", e)))?; Err(ERR_EXIT_STATUS.into()) } } }<|fim▁end|>
static ABOUT: &str = "Start COMMAND, and kill it if still running after DURATION."; const USAGE: &str = "{} [OPTION] DURATION COMMAND...";
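A hedged Python sketch of the escalation logic the Rust code above implements: wait for the duration, send the chosen signal, optionally wait `kill_after` and escalate to SIGKILL, reporting the conventional statuses (124 on timeout, 137 after KILL). The `--preserve-status` path is omitted for brevity.

import signal
import subprocess

def run_with_timeout(cmd, duration, sig=signal.SIGTERM, kill_after=None):
    proc = subprocess.Popen(cmd)
    try:
        return proc.wait(timeout=duration)  # finished within budget
    except subprocess.TimeoutExpired:
        proc.send_signal(sig)               # polite signal first
        if kill_after is not None:
            try:
                proc.wait(timeout=kill_after)
            except subprocess.TimeoutExpired:
                proc.kill()                 # escalate to SIGKILL
                proc.wait()
                return 137
        return 124                          # conventional timeout status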
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import find_packages from setuptools import setup setup( name='svs', version='1.0.0', description='The InAcademia Simple validation Service allows for the easy validation of affiliation (Student,' 'Faculty, Staff) of a user in Academia', license='Apache 2.0', classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', ], author='Rebecka Gulliksson', author_email='[email protected]', zip_safe=False, url='http://www.inacademia.org', packages=find_packages('src'),<|fim▁hole|> 'data/i18n/locale/*/LC_MESSAGES/*.mo', 'templates/*.mako', 'site/static/*', ], }, message_extractors={ 'src/svs': [ ('**.py', 'python', None), ('templates/**.mako', 'mako', None), ('site/**', 'ignore', None) ] }, install_requires=[ 'satosa==3.3.1', 'Mako', 'gunicorn', 'Werkzeug' ] )<|fim▁end|>
package_dir={'': 'src'}, package_data={ 'svs': [
<|file_name|>nntplib.py<|end_file_name|><|fim▁begin|>"""An NNTP client class based on: - RFC 977: Network News Transfer Protocol - RFC 2980: Common NNTP Extensions - RFC 3977: Network News Transfer Protocol (version 2) Example: >>> from nntplib import NNTP >>> s = NNTP('news') >>> resp, count, first, last, name = s.group('comp.lang.python') >>> print('Group', name, 'has', count, 'articles, range', first, 'to', last) Group comp.lang.python has 51 articles, range 5770 to 5821 >>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last)) >>> resp = s.quit() >>> Here 'resp' is the server response line. Error responses are turned into exceptions. To post an article from a file: >>> f = open(filename, 'rb') # file containing article, including header >>> resp = s.post(f) >>> For descriptions of all methods, read the comments in the code below. Note that all arguments and return values representing article numbers are strings, not numbers, since they are rarely used for calculations. """ # RFC 977 by Brian Kantor and Phil Lapsley. # xover, xgtitle, xpath, date methods by Kevan Heydon # Incompatible changes from the 2.x nntplib: # - all commands are encoded as UTF-8 data (using the "surrogateescape" # error handler), except for raw message data (POST, IHAVE) # - all responses are decoded as UTF-8 data (using the "surrogateescape" # error handler), except for raw message data (ARTICLE, HEAD, BODY) # - the `file` argument to various methods is keyword-only # # - NNTP.date() returns a datetime object # - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object, # rather than a pair of (date, time) strings. # - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples # - NNTP.descriptions() returns a dict mapping group names to descriptions # - NNTP.xover() returns a list of dicts mapping field names (header or metadata) # to field values; each dict representing a message overview. # - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo) # tuple. # - the "internal" methods have been marked private (they now start with # an underscore) # Other changes from the 2.x/3.1 nntplib: # - automatic querying of capabilities at connect # - New method NNTP.getcapabilities() # - New method NNTP.over() # - New helper function decode_header() # - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and # arbitrary iterables yielding lines. # - An extensive test suite :-) # TODO: # - return structured data (GroupInfo etc.) everywhere # - support HDR # Imports import re import socket import collections import datetime import warnings try: import ssl except ImportError: _have_ssl = False else: _have_ssl = True from email.header import decode_header as _email_decode_header from socket import _GLOBAL_DEFAULT_TIMEOUT __all__ = ["NNTP", "NNTPError", "NNTPReplyError", "NNTPTemporaryError", "NNTPPermanentError", "NNTPProtocolError", "NNTPDataError", "decode_header", ] # maximal line length when calling readline(). This is to prevent # reading arbitrary length lines. RFC 3977 limits NNTP line length to # 512 characters, including CRLF. We have selected 2048 just to be on # the safe side. 
_MAXLINE = 2048 # Exceptions raised when an error or invalid response is received class NNTPError(Exception): """Base class for all nntplib exceptions""" def __init__(self, *args): Exception.__init__(self, *args) try: self.response = args[0] except IndexError: self.response = 'No response given' class NNTPReplyError(NNTPError): """Unexpected [123]xx reply""" pass class NNTPTemporaryError(NNTPError): """4xx errors""" pass class NNTPPermanentError(NNTPError): """5xx errors""" pass class NNTPProtocolError(NNTPError): """Response does not begin with [1-5]""" pass class NNTPDataError(NNTPError): """Error in response data""" pass # Standard port used by NNTP servers NNTP_PORT = 119 NNTP_SSL_PORT = 563 # Response numbers that are followed by additional text (e.g. article) _LONGRESP = { '100', # HELP '101', # CAPABILITIES '211', # LISTGROUP (also not multi-line with GROUP) '215', # LIST '220', # ARTICLE '221', # HEAD, XHDR '222', # BODY '224', # OVER, XOVER '225', # HDR '230', # NEWNEWS '231', # NEWGROUPS '282', # XGTITLE } # Default decoded value for LIST OVERVIEW.FMT if not supported _DEFAULT_OVERVIEW_FMT = [ "subject", "from", "date", "message-id", "references", ":bytes", ":lines"] # Alternative names allowed in LIST OVERVIEW.FMT response _OVERVIEW_FMT_ALTERNATIVES = { 'bytes': ':bytes', 'lines': ':lines', } # Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) _CRLF = b'\r\n' GroupInfo = collections.namedtuple('GroupInfo', ['group', 'last', 'first', 'flag']) ArticleInfo = collections.namedtuple('ArticleInfo', ['number', 'message_id', 'lines']) # Helper function(s) def decode_header(header_str): """Takes an unicode string representing a munged header value and decodes it as a (possibly non-ASCII) readable value.""" parts = [] for v, enc in _email_decode_header(header_str): if isinstance(v, bytes): parts.append(v.decode(enc or 'ascii')) else: parts.append(v) return ''.join(parts) def _parse_overview_fmt(lines): """Parse a list of string representing the response to LIST OVERVIEW.FMT and return a list of header/metadata names. Raises NNTPDataError if the response is not compliant (cf. RFC 3977, section 8.4).""" fmt = [] for line in lines: if line[0] == ':': # Metadata name (e.g. ":bytes") name, _, suffix = line[1:].partition(':') name = ':' + name else: # Header name (e.g. "Subject:" or "Xref:full") name, _, suffix = line.partition(':') name = name.lower() name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name) # Should we do something with the suffix? fmt.append(name) defaults = _DEFAULT_OVERVIEW_FMT if len(fmt) < len(defaults): raise NNTPDataError("LIST OVERVIEW.FMT response too short") if fmt[:len(defaults)] != defaults: raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields") return fmt def _parse_overview(lines, fmt, data_process_func=None): """Parse the response to a OVER or XOVER command according to the overview format `fmt`.""" n_defaults = len(_DEFAULT_OVERVIEW_FMT) overview = [] for line in lines: fields = {} article_number, *tokens = line.split('\t') article_number = int(article_number) for i, token in enumerate(tokens): if i >= len(fmt): # XXX should we raise an error? Some servers might not # support LIST OVERVIEW.FMT and still return additional # headers. 
continue field_name = fmt[i] is_metadata = field_name.startswith(':') if i >= n_defaults and not is_metadata: # Non-default header names are included in full in the response # (unless the field is totally empty) h = field_name + ": " if token and token[:len(h)].lower() != h: raise NNTPDataError("OVER/XOVER response doesn't include " "names of additional headers") token = token[len(h):] if token else None fields[fmt[i]] = token overview.append((article_number, fields)) return overview def _parse_datetime(date_str, time_str=None): """Parse a pair of (date, time) strings, and return a datetime object. If only the date is given, it is assumed to be date and time concatenated together (e.g. response to the DATE command). """ if time_str is None: time_str = date_str[-6:] date_str = date_str[:-6] hours = int(time_str[:2]) minutes = int(time_str[2:4]) seconds = int(time_str[4:]) year = int(date_str[:-4]) month = int(date_str[-4:-2]) day = int(date_str[-2:]) # RFC 3977 doesn't say how to interpret 2-char years. Assume that # there are no dates before 1970 on Usenet. if year < 70: year += 2000 elif year < 100: year += 1900 return datetime.datetime(year, month, day, hours, minutes, seconds) def _unparse_datetime(dt, legacy=False): """Format a date or datetime object as a pair of (date, time) strings in the format required by the NEWNEWS and NEWGROUPS commands. If a date object is passed, the time is assumed to be midnight (00h00). The returned representation depends on the legacy flag: * if legacy is False (the default): date has the YYYYMMDD format and time the HHMMSS format * if legacy is True: date has the YYMMDD format and time the HHMMSS format. RFC 3977 compliant servers should understand both formats; therefore, legacy is only needed when talking to old servers. """ if not isinstance(dt, datetime.datetime): time_str = "000000" else: time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt) y = dt.year if legacy: y = y % 100 date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt) else: date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt) return date_str, time_str if _have_ssl: def _encrypt_on(sock, context, hostname): """Wrap a socket in SSL/TLS. Arguments: - sock: Socket to wrap - context: SSL context to use for the encrypted connection Returns: - sock: New, encrypted socket. """ # Generate a default SSL context if none was passed. if context is None: context = ssl._create_stdlib_context() return context.wrap_socket(sock, server_hostname=hostname) # The classes themselves class _NNTPBase: # UTF-8 is the character set for all NNTP commands and responses: they # are automatically encoded (when sending) and decoded (and receiving) # by this class. # However, some multi-line data blocks can contain arbitrary bytes (for # example, latin-1 or utf-16 data in the body of a message). Commands # taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message # data will therefore only accept and produce bytes objects. # Furthermore, since there could be non-compliant servers out there, # we use 'surrogateescape' as the error handler for fault tolerance # and easy round-tripping. This could be useful for some applications # (e.g. NNTP gateways). encoding = 'utf-8' errors = 'surrogateescape' def __init__(self, file, host, readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT): """Initialize an instance. Arguments: - file: file-like object (open for read/write in binary mode) - host: hostname of the server - readermode: if true, send 'mode reader' command after connecting. 
- timeout: timeout (in seconds) used for socket connections readermode is sometimes necessary if you are connecting to an NNTP server on the local machine and intend to call reader-specific commands, such as `group'. If you get unexpected NNTPPermanentErrors, you might need to set readermode. """ self.host = host self.file = file self.debugging = 0 self.welcome = self._getresp() # Inquire about capabilities (RFC 3977). self._caps = None self.getcapabilities() # 'MODE READER' is sometimes necessary to enable 'reader' mode. # However, the order in which 'MODE READER' and 'AUTHINFO' need to # arrive differs between some NNTP servers. If _setreadermode() fails # with an authorization failed error, it will set this to True; # the login() routine will interpret that as a request to try again # after performing its normal function. # Enable only if we're not already in READER mode anyway. self.readermode_afterauth = False if readermode and 'READER' not in self._caps: self._setreadermode() if not self.readermode_afterauth: # Capabilities might have changed after MODE READER self._caps = None self.getcapabilities() # RFC 4642 2.2.2: Both the client and the server MUST know if there is # a TLS session active. A client MUST NOT attempt to start a TLS # session if a TLS session is already active. self.tls_on = False # Log in and encryption setup order is left to subclasses. self.authenticated = False def __enter__(self): return self def __exit__(self, *args): is_connected = lambda: hasattr(self, "file") if is_connected(): try: self.quit() except (OSError, EOFError): pass finally: if is_connected(): self._close() def getwelcome(self): """Get the welcome message from the server (this is read and squirreled away by __init__()). If the response code is 200, posting is allowed; if it 201, posting is not allowed.""" if self.debugging: print('*welcome*', repr(self.welcome)) return self.welcome def getcapabilities(self): """Get the server capabilities, as read by __init__(). If the CAPABILITIES command is not supported, an empty dict is returned.""" if self._caps is None: self.nntp_version = 1 self.nntp_implementation = None try: resp, caps = self.capabilities() except (NNTPPermanentError, NNTPTemporaryError): # Server doesn't support capabilities self._caps = {} else: self._caps = caps if 'VERSION' in caps: # The server can advertise several supported versions, # choose the highest. self.nntp_version = max(map(int, caps['VERSION'])) if 'IMPLEMENTATION' in caps: self.nntp_implementation = ' '.join(caps['IMPLEMENTATION']) return self._caps def set_debuglevel(self, level): """Set the debugging level. Argument 'level' means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 2: also print raw lines read and sent before stripping CR/LF""" self.debugging = level debug = set_debuglevel def _putline(self, line): """Internal: send one line to the server, appending CRLF. The `line` must be a bytes-like object.""" line = line + _CRLF if self.debugging > 1: print('*put*', repr(line)) self.file.write(line) self.file.flush() def _putcmd(self, line): """Internal: send one command to the server (through _putline()). The `line` must be an unicode string.""" if self.debugging: print('*cmd*', repr(line)) line = line.encode(self.encoding, self.errors) self._putline(line) def _getline(self, strip_crlf=True): """Internal: return one line from the server, stripping _CRLF. Raise EOFError if the connection is closed. 
Returns a bytes object.""" line = self.file.readline(_MAXLINE +1) if len(line) > _MAXLINE: raise NNTPDataError('line too long') if self.debugging > 1: print('*get*', repr(line)) if not line: raise EOFError if strip_crlf: if line[-2:] == _CRLF: line = line[:-2] elif line[-1:] in _CRLF: line = line[:-1] return line def _getresp(self): """Internal: get a response from the server. Raise various errors if the response indicates an error. Returns an unicode string.""" resp = self._getline() if self.debugging: print('*resp*', repr(resp)) resp = resp.decode(self.encoding, self.errors) c = resp[:1] if c == '4': raise NNTPTemporaryError(resp) if c == '5': raise NNTPPermanentError(resp) if c not in '123': raise NNTPProtocolError(resp) return resp def _getlongresp(self, file=None): """Internal: get a response plus following text from the server. Raise various errors if the response indicates an error. Returns a (response, lines) tuple where `response` is an unicode string and `lines` is a list of bytes objects. If `file` is a file-like object, it must be open in binary mode. """ openedFile = None try: # If a string was passed then open a file with that name if isinstance(file, (str, bytes)): openedFile = file = open(file, "wb") resp = self._getresp() if resp[:3] not in _LONGRESP: raise NNTPReplyError(resp) lines = [] if file is not None: # XXX lines = None instead? terminators = (b'.' + _CRLF, b'.\n') while 1: line = self._getline(False) if line in terminators: break if line.startswith(b'..'): line = line[1:] file.write(line) else: terminator = b'.' while 1: line = self._getline() if line == terminator: break if line.startswith(b'..'): line = line[1:] lines.append(line) finally: # If this method created the file, then it must close it if openedFile: openedFile.close() return resp, lines def _shortcmd(self, line): """Internal: send a command and get the response. Same return value as _getresp().""" self._putcmd(line) return self._getresp() def _longcmd(self, line, file=None): """Internal: send a command and get the response plus following text. Same return value as _getlongresp().""" self._putcmd(line) return self._getlongresp(file) def _longcmdstring(self, line, file=None): """Internal: send a command and get the response plus following text. Same as _longcmd() and _getlongresp(), except that the returned `lines` are unicode strings rather than bytes objects. """ self._putcmd(line) resp, list = self._getlongresp(file) return resp, [line.decode(self.encoding, self.errors) for line in list] def _getoverviewfmt(self): """Internal: get the overview format. Queries the server if not already done, else returns the cached value.""" try: return self._cachedoverviewfmt except AttributeError: pass try: resp, lines = self._longcmdstring("LIST OVERVIEW.FMT") except NNTPPermanentError: # Not supported by server? fmt = _DEFAULT_OVERVIEW_FMT[:] else: fmt = _parse_overview_fmt(lines) self._cachedoverviewfmt = fmt return fmt def _grouplist(self, lines): # Parse lines into "group last first flag" return [GroupInfo(*line.split()) for line in lines] def capabilities(self): """Process a CAPABILITIES command. Not supported by all servers. 
Return: - resp: server response if successful - caps: a dictionary mapping capability names to lists of tokens (for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] }) """ caps = {} resp, lines = self._longcmdstring("CAPABILITIES") for line in lines: name, *tokens = line.split() caps[name] = tokens return resp, caps def newgroups(self, date, *, file=None): """Process a NEWGROUPS command. Arguments: - date: a date or datetime object Return: - resp: server response if successful - list: list of newsgroup names """ if not isinstance(date, (datetime.date, datetime.date)): raise TypeError( "the date parameter must be a date or datetime object, " "not '{:40}'".format(date.__class__.__name__)) date_str, time_str = _unparse_datetime(date, self.nntp_version < 2) cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str) resp, lines = self._longcmdstring(cmd, file) return resp, self._grouplist(lines) <|fim▁hole|> def newnews(self, group, date, *, file=None): """Process a NEWNEWS command. Arguments: - group: group name or '*' - date: a date or datetime object Return: - resp: server response if successful - list: list of message ids """ if not isinstance(date, (datetime.date, datetime.date)): raise TypeError( "the date parameter must be a date or datetime object, " "not '{:40}'".format(date.__class__.__name__)) date_str, time_str = _unparse_datetime(date, self.nntp_version < 2) cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str) return self._longcmdstring(cmd, file) def list(self, group_pattern=None, *, file=None): """Process a LIST or LIST ACTIVE command. Arguments: - group_pattern: a pattern indicating which groups to query - file: Filename string or file object to store the result in Returns: - resp: server response if successful - list: list of (group, last, first, flag) (strings) """ if group_pattern is not None: command = 'LIST ACTIVE ' + group_pattern else: command = 'LIST' resp, lines = self._longcmdstring(command, file) return resp, self._grouplist(lines) def _getdescriptions(self, group_pattern, return_all): line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$') # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern) if not resp.startswith('215'): # Now the deprecated XGTITLE. This either raises an error # or succeeds with the same output structure as LIST # NEWSGROUPS. resp, lines = self._longcmdstring('XGTITLE ' + group_pattern) groups = {} for raw_line in lines: match = line_pat.search(raw_line.strip()) if match: name, desc = match.group(1, 2) if not return_all: return desc groups[name] = desc if return_all: return resp, groups else: # Nothing found return '' def description(self, group): """Get a description for a single group. If more than one group matches ('group' is a pattern), return the first. If no group matches, return an empty string. This elides the response code from the server, since it can only be '215' or '285' (for xgtitle) anyway. If the response code is needed, use the 'descriptions' method. NOTE: This neither checks for a wildcard in 'group' nor does it check whether the group actually exists.""" return self._getdescriptions(group, False) def descriptions(self, group_pattern): """Get descriptions for a range of groups.""" return self._getdescriptions(group_pattern, True) def group(self, name): """Process a GROUP command. 
Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles - first: first article number - last: last article number - name: the group name """ resp = self._shortcmd('GROUP ' + name) if not resp.startswith('211'): raise NNTPReplyError(resp) words = resp.split() count = first = last = 0 n = len(words) if n > 1: count = words[1] if n > 2: first = words[2] if n > 3: last = words[3] if n > 4: name = words[4].lower() return resp, int(count), int(first), int(last), name def help(self, *, file=None): """Process a HELP command. Argument: - file: Filename string or file object to store the result in Returns: - resp: server response if successful - list: list of strings returned by the server in response to the HELP command """ return self._longcmdstring('HELP', file) def _statparse(self, resp): """Internal: parse the response line of a STAT, NEXT, LAST, ARTICLE, HEAD or BODY command.""" if not resp.startswith('22'): raise NNTPReplyError(resp) words = resp.split() art_num = int(words[1]) message_id = words[2] return resp, art_num, message_id def _statcmd(self, line): """Internal: process a STAT, NEXT or LAST command.""" resp = self._shortcmd(line) return self._statparse(resp) def stat(self, message_spec=None): """Process a STAT command. Argument: - message_spec: article number or message id (if not specified, the current article is selected) Returns: - resp: server response if successful - art_num: the article number - message_id: the message id """ if message_spec: return self._statcmd('STAT {0}'.format(message_spec)) else: return self._statcmd('STAT') def next(self): """Process a NEXT command. No arguments. Return as for STAT.""" return self._statcmd('NEXT') def last(self): """Process a LAST command. No arguments. Return as for STAT.""" return self._statcmd('LAST') def _artcmd(self, line, file=None): """Internal: process a HEAD, BODY or ARTICLE command.""" resp, lines = self._longcmd(line, file) resp, art_num, message_id = self._statparse(resp) return resp, ArticleInfo(art_num, message_id, lines) def head(self, message_spec=None, *, file=None): """Process a HEAD command. Argument: - message_spec: article number or message id - file: filename string or file object to store the headers in Returns: - resp: server response if successful - ArticleInfo: (article number, message id, list of header lines) """ if message_spec is not None: cmd = 'HEAD {0}'.format(message_spec) else: cmd = 'HEAD' return self._artcmd(cmd, file) def body(self, message_spec=None, *, file=None): """Process a BODY command. Argument: - message_spec: article number or message id - file: filename string or file object to store the body in Returns: - resp: server response if successful - ArticleInfo: (article number, message id, list of body lines) """ if message_spec is not None: cmd = 'BODY {0}'.format(message_spec) else: cmd = 'BODY' return self._artcmd(cmd, file) def article(self, message_spec=None, *, file=None): """Process an ARTICLE command. Argument: - message_spec: article number or message id - file: filename string or file object to store the article in Returns: - resp: server response if successful - ArticleInfo: (article number, message id, list of article lines) """ if message_spec is not None: cmd = 'ARTICLE {0}'.format(message_spec) else: cmd = 'ARTICLE' return self._artcmd(cmd, file) def slave(self): """Process a SLAVE command. 
Returns: - resp: server response if successful """ return self._shortcmd('SLAVE') def xhdr(self, hdr, str, *, file=None): """Process an XHDR command (optional server extension). Arguments: - hdr: the header type (e.g. 'subject') - str: an article nr, a message id, or a range nr1-nr2 - file: Filename string or file object to store the result in Returns: - resp: server response if successful - list: list of (nr, value) strings """ pat = re.compile('^([0-9]+) ?(.*)\n?') resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file) def remove_number(line): m = pat.match(line) return m.group(1, 2) if m else line return resp, [remove_number(line) for line in lines] def xover(self, start, end, *, file=None): """Process an XOVER command (optional server extension) Arguments: - start: start of range - end: end of range - file: Filename string or file object to store the result in Returns: - resp: server response if successful - list: list of dicts containing the response fields """ resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end), file) fmt = self._getoverviewfmt() return resp, _parse_overview(lines, fmt) def over(self, message_spec, *, file=None): """Process an OVER command. If the command isn't supported, fall back to XOVER. Arguments: - message_spec: - either a message id, indicating the article to fetch information about - or a (start, end) tuple, indicating a range of article numbers; if end is None, information up to the newest message will be retrieved - or None, indicating the current article number must be used - file: Filename string or file object to store the result in Returns: - resp: server response if successful - list: list of dicts containing the response fields NOTE: the "message id" form isn't supported by XOVER """ cmd = 'OVER' if 'OVER' in self._caps else 'XOVER' if isinstance(message_spec, (tuple, list)): start, end = message_spec cmd += ' {0}-{1}'.format(start, end or '') elif message_spec is not None: cmd = cmd + ' ' + message_spec resp, lines = self._longcmdstring(cmd, file) fmt = self._getoverviewfmt() return resp, _parse_overview(lines, fmt) def xgtitle(self, group, *, file=None): """Process an XGTITLE command (optional server extension) Arguments: - group: group name wildcard (i.e. news.*) Returns: - resp: server response if successful - list: list of (name,title) strings""" warnings.warn("The XGTITLE extension is not actively used, " "use descriptions() instead", DeprecationWarning, 2) line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$') resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file) lines = [] for raw_line in raw_lines: match = line_pat.search(raw_line.strip()) if match: lines.append(match.group(1, 2)) return resp, lines def xpath(self, id): """Process an XPATH command (optional server extension) Arguments: - id: Message id of article Returns: resp: server response if successful path: directory path to article """ warnings.warn("The XPATH extension is not actively used", DeprecationWarning, 2) resp = self._shortcmd('XPATH {0}'.format(id)) if not resp.startswith('223'): raise NNTPReplyError(resp) try: [resp_num, path] = resp.split() except ValueError: raise NNTPReplyError(resp) else: return resp, path def date(self): """Process the DATE command. 
Returns: - resp: server response if successful - date: datetime object """ resp = self._shortcmd("DATE") if not resp.startswith('111'): raise NNTPReplyError(resp) elem = resp.split() if len(elem) != 2: raise NNTPDataError(resp) date = elem[1] if len(date) != 14: raise NNTPDataError(resp) return resp, _parse_datetime(date, None) def _post(self, command, f): resp = self._shortcmd(command) # Raises a specific exception if posting is not allowed if not resp.startswith('3'): raise NNTPReplyError(resp) if isinstance(f, (bytes, bytearray)): f = f.splitlines() # We don't use _putline() because: # - we don't want additional CRLF if the file or iterable is already # in the right format # - we don't want a spurious flush() after each line is written for line in f: if not line.endswith(_CRLF): line = line.rstrip(b"\r\n") + _CRLF if line.startswith(b'.'): line = b'.' + line self.file.write(line) self.file.write(b".\r\n") self.file.flush() return self._getresp() def post(self, data): """Process a POST command. Arguments: - data: bytes object, iterable or file containing the article Returns: - resp: server response if successful""" return self._post('POST', data) def ihave(self, message_id, data): """Process an IHAVE command. Arguments: - message_id: message-id of the article - data: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.""" return self._post('IHAVE {0}'.format(message_id), data) def _close(self): self.file.close() del self.file def quit(self): """Process a QUIT command and close the socket. Returns: - resp: server response if successful""" try: resp = self._shortcmd('QUIT') finally: self._close() return resp def login(self, user=None, password=None, usenetrc=True): if self.authenticated: raise ValueError("Already logged in.") if not user and not usenetrc: raise ValueError( "At least one of `user` and `usenetrc` must be specified") # If no login/password was specified but netrc was requested, # try to get them from ~/.netrc # Presume that if .netrc has an entry, NNRP authentication is required. try: if usenetrc and not user: import netrc credentials = netrc.netrc() auth = credentials.authenticators(self.host) if auth: user = auth[0] password = auth[2] except OSError: pass # Perform NNTP authentication if needed. if not user: return resp = self._shortcmd('authinfo user ' + user) if resp.startswith('381'): if not password: raise NNTPReplyError(resp) else: resp = self._shortcmd('authinfo pass ' + password) if not resp.startswith('281'): raise NNTPPermanentError(resp) # Capabilities might have changed after login self._caps = None self.getcapabilities() # Attempt to send mode reader if it was requested after login. # Only do so if we're not in reader mode already. if self.readermode_afterauth and 'READER' not in self._caps: self._setreadermode() # Capabilities might have changed after MODE READER self._caps = None self.getcapabilities() def _setreadermode(self): try: self.welcome = self._shortcmd('mode reader') except NNTPPermanentError: # Error 5xx, probably 'not implemented' pass except NNTPTemporaryError as e: if e.response.startswith('480'): # Need authorization before 'mode reader' self.readermode_afterauth = True else: raise if _have_ssl: def starttls(self, context=None): """Process a STARTTLS command. Arguments: - context: SSL context to use for the encrypted connection """ # Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if # a TLS session already exists. 
if self.tls_on: raise ValueError("TLS is already enabled.") if self.authenticated: raise ValueError("TLS cannot be started after authentication.") resp = self._shortcmd('STARTTLS') if resp.startswith('382'): self.file.close() self.sock = _encrypt_on(self.sock, context, self.host) self.file = self.sock.makefile("rwb") self.tls_on = True # Capabilities may change after TLS starts up, so ask for them # again. self._caps = None self.getcapabilities() else: raise NNTPError("TLS failed to start.") class NNTP(_NNTPBase): def __init__(self, host, port=NNTP_PORT, user=None, password=None, readermode=None, usenetrc=False, timeout=_GLOBAL_DEFAULT_TIMEOUT): """Initialize an instance. Arguments: - host: hostname to connect to - port: port to connect to (default the standard NNTP port) - user: username to authenticate with - password: password to use with username - readermode: if true, send 'mode reader' command after connecting. - usenetrc: allow loading username and password from ~/.netrc file if not specified explicitly - timeout: timeout (in seconds) used for socket connections readermode is sometimes necessary if you are connecting to an NNTP server on the local machine and intend to call reader-specific commands, such as `group'. If you get unexpected NNTPPermanentErrors, you might need to set readermode. """ self.host = host self.port = port self.sock = socket.create_connection((host, port), timeout) file = None try: file = self.sock.makefile("rwb") _NNTPBase.__init__(self, file, host, readermode, timeout) if user or usenetrc: self.login(user, password, usenetrc) except: if file: file.close() self.sock.close() raise def _close(self): try: _NNTPBase._close(self) finally: self.sock.close() if _have_ssl: class NNTP_SSL(_NNTPBase): def __init__(self, host, port=NNTP_SSL_PORT, user=None, password=None, ssl_context=None, readermode=None, usenetrc=False, timeout=_GLOBAL_DEFAULT_TIMEOUT): """This works identically to NNTP.__init__, except for the change in default port and the `ssl_context` argument for SSL connections. """ self.sock = socket.create_connection((host, port), timeout) file = None try: self.sock = _encrypt_on(self.sock, ssl_context, host) file = self.sock.makefile("rwb") _NNTPBase.__init__(self, file, host, readermode=readermode, timeout=timeout) if user or usenetrc: self.login(user, password, usenetrc) except: if file: file.close() self.sock.close() raise def _close(self): try: _NNTPBase._close(self) finally: self.sock.close() __all__.append("NNTP_SSL") # Test retrieval when run as a script. 
if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description="""\ nntplib built-in demo - display the latest articles in a newsgroup""") parser.add_argument('-g', '--group', default='gmane.comp.python.general', help='group to fetch messages from (default: %(default)s)') parser.add_argument('-s', '--server', default='news.gmane.org', help='NNTP server hostname (default: %(default)s)') parser.add_argument('-p', '--port', default=-1, type=int, help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT)) parser.add_argument('-n', '--nb-articles', default=10, type=int, help='number of articles to fetch (default: %(default)s)') parser.add_argument('-S', '--ssl', action='store_true', default=False, help='use NNTP over SSL') args = parser.parse_args() port = args.port if not args.ssl: if port == -1: port = NNTP_PORT s = NNTP(host=args.server, port=port) else: if port == -1: port = NNTP_SSL_PORT s = NNTP_SSL(host=args.server, port=port) caps = s.getcapabilities() if 'STARTTLS' in caps: s.starttls() resp, count, first, last, name = s.group(args.group) print('Group', name, 'has', count, 'articles, range', first, 'to', last) def cut(s, lim): if len(s) > lim: s = s[:lim - 4] + "..." return s first = str(int(last) - args.nb_articles + 1) resp, overviews = s.xover(first, last) for artnum, over in overviews: author = decode_header(over['from']).split('<', 1)[0] subject = decode_header(over['subject']) lines = int(over[':lines']) print("{:7} {:20} {:42} ({})".format( artnum, cut(author, 20), cut(subject, 42), lines) ) s.quit()<|fim▁end|>
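A quick check of the two-digit-year rule that `_parse_datetime` implements above — years below 70 map to 20xx, 70-99 to 19xx, and four-digit years pass through. The helper is private; it is imported here only for illustration.

from nntplib import _parse_datetime

print(_parse_datetime('20130615120000'))    # 2013-06-15 12:00:00
print(_parse_datetime('990101', '083000'))  # 1999-01-01 08:30:00
print(_parse_datetime('690101', '000000'))  # 2069-01-01 00:00:00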
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals<|fim▁hole|>from appconf import AppConf class BlogConf(AppConf): DISQUS_SHORTNAME = 'django-staticgen' POST_IDENTIFIER = 'current_post' class Meta: prefix = 'blog'<|fim▁end|>
from django.conf import settings # noqa from django.utils.translation import ugettext_lazy as _ # noqa
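django-appconf turns each attribute of the `BlogConf` class above into a prefixed Django setting, with the class value as the default. A sketch of how the values should resolve, assuming Django settings are configured and the class has been imported so AppConf registers it:

from django.conf import settings

settings.BLOG_DISQUS_SHORTNAME  # 'django-staticgen' unless overridden in settings
settings.BLOG_POST_IDENTIFIER   # 'current_post' (the BLOG_ prefix comes from Meta)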
<|file_name|>main.js<|end_file_name|><|fim▁begin|>$(document).ready(function(){ //enable the return time input and dropdown $("#round").change(function() { if(this.checked) { console.log("Return data field open!"); $("#rD").removeClass('ui disabled input').addClass('ui input'); $("#rY").removeClass('ui disabled input').addClass('ui input'); $("#retMonth").removeClass('ui disabled dropdown').addClass('ui dropdown'); } else { console.log("Return data field close!"); $("#rD").removeClass('ui input').addClass('ui disabled input'); $("#rY").removeClass('ui input').addClass('ui disabled input'); $("#retMonth").removeClass('ui dropdown').addClass('ui disabled dropdown'); } }); //check if the input is a valid format function validateForm() { numdays = [31,28,31,30,31,30,31,31,30,31,30,31]; namemonth = ["January","Feburary","March","April","May","June","July","August","September","October","November","December"]; if ($("#dpYear").val() == "" || $("#dpMonth").val() == "" || $("#dpDay").val() == "" || $("#origin").val() == "" || $("#des").val() == "" || $('#num').val() == "" || $("#email").val() == "" || $("#waiting").val() == "") { console.log("not fill in all the blanks") alert("Please fill in all fields"); return false; } if ($("#dpYear").val().length != 4 || $("#dpDay").val().length != 2) { console.log("invalid departure date or year") alert("Please enter valid departure date or year in the format of DD and YYYY.") return false; } if ($("#origin").val().length != 3 || $("#des").val().length != 3 || /^[a-zA-Z]+$/.test($("#origin").val()) == false || /^[a-zA-Z]+$/.test($("#des").val()) == false ) { console.log("invalid input for destination or origin"); alert("Please enter valid airport code.") return false; } if ($("#origin").val() == $("#des").val()) { console.log("same origin and destination"); alert("You cannot enter same value for origin and destination"); return false; } console.log("fields valid!") var today = new Date(); if (parseInt($("#dpYear").val()) < today.getFullYear()) { alert("You cannot check past ticket's value"); return false; } else { if (parseInt($("#dpYear").val()) == today.getFullYear()) { if (parseInt($("#dpMonth").val()) < today.getMonth()+1 ) { alert("You cannot check past ticket's value"); return false; } else { if (parseInt($("#dpMonth").val()) == today.getMonth()+1 ) { if (parseInt($("#dpDay").val()) < today.getDate()) { alert("You cannot check past ticket's value"); return false; } } } } } console.log("departure date valid!") if ($("#round").is(':checked')) { console.log("roundtrip checked!") if ($("#retYear").val() == "" || $("#retMonth").val() == "" || $("#retDay").val() == "" ) { alert("please enter return date"); return false; } if ($("#retYear").val().length != 4 || $("#retDay").val().length != 2) { console.log("invalid return date or year") alert("Please enter valid return date or year in the format of DD and YYYY.") return false; } if (parseInt($("#retYear").val()) < parseInt($("#dpYear").val())) { alert("Return date cannot be before departure date."); return false; } else { if (parseInt($("#retYear").val()) == parseInt($("#dpYear").val())) { if (parseInt($("#retMonth").val()) < parseInt($("#dpMonth").val()) ) { alert("Return date cannot be before departure date."); return false; } else { if (parseInt($("#retMonth").val()) == parseInt($("#dpMonth").val()) ) { if (parseInt($("#retDay").val()) < parseInt($("#dpDay").val())) { alert("Return date cannot be before departure date."); return false; } } } } } } console.log("return date valid!") if ($("#dpMonth").val() 
== "2" && parseInt($("#dpYear".val()))%4 == 0 && parseInt($("#dpYear".val()))%100 != 0) { if (parseInt($("#dpDay".val())) > 29) { alert(namemonth[parseInt($("#dpMonth").val())-1]+" does not have more than 29 days"); return false; } } else { var m = parseInt($("#dpMonth").val()); if ( parseInt($("#dpDay").val()) > numdays[m-1]) { alert(namemonth[m-1]+" does not have more than "+numdays[m-1]+" days"); return false; } } return true; } //send the user data to server //not using the form submit function as the it will not reveive the data $("#sub").click(function() { if (validateForm()) { var rq = {}; rq.origin = $("#origin").val(); rq.destination = $("#des").val(); rq.dpdate = $("#dpYear").val()+'-'+$("#dpMonth").val()+'-'+$("#dpDay").val(); rq.waiting = parseInt(parseFloat($("#waiting").val())*60); rq.num = parseInt($('#num').val()); rq.email = $("#email").val(); rq.round = 0; if ($("#round").is(':checked')) { rq.round = 1; rq.retdate = $("#retYear").val()+'-'+$("#retMonth").val()+'-'+$("#retDay").val(); } console.log("data post to server formed!"); $.ajax({<|fim▁hole|> dataType: 'json', contentType: 'application/json', data: JSON.stringify(rq), success: function(data) { alert("Data goes into our system!"); }, error: function(error) { console.log(error); alert("Unable to send!"); } }); } }); });<|fim▁end|>
type: "POST", url: "/user",
<|file_name|>ovirt_storage_domains.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2016 Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # try: import ovirtsdk4.types as otypes from ovirtsdk4.types import StorageDomainStatus as sdstate except ImportError: pass import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ovirt import ( BaseModule, check_sdk, create_connection, equal, ovirt_full_argument_spec, search_by_name, wait, ) ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: ovirt_storage_domains short_description: Module to manage storage domains in oVirt version_added: "2.3" author: "Ondra Machacek (@machacekondra)" description: - "Module to manage storage domains in oVirt" options: name: description: - "Name of the storage domain to manage." state: description: - "Should the storage domain be present/absent/maintenance/unattached" choices: ['present', 'absent', 'maintenance', 'unattached'] default: present description: description: - "Description of the storage domain." comment: description: - "Comment of the storage domain." data_center: description: - "Data center name where storage domain should be attached." - "This parameter isn't idempotent, it's not possible to change the data center of a storage domain." domain_function: description: - "Function of the storage domain." - "This parameter isn't idempotent, it's not possible to change the domain function of a storage domain." choices: ['data', 'iso', 'export'] default: 'data'<|fim▁hole|>
 - "Host to be used to mount storage." nfs: description: - "Dictionary with values for NFS storage type:" - "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com" - "C(path) - Path of the mount point. E.g.: /path/to/my/data" - "C(version) - NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)." - "C(timeout) - The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535." - "C(retrans) - The number of times to retry a request before attempting further recovery actions. Range 0 to 65535." - "Note that these parameters are not idempotent." iscsi: description: - "Dictionary with values for iSCSI storage type:" - "C(address) - Address of the iSCSI storage server." - "C(port) - Port of the iSCSI storage server." - "C(target) - The target IQN for the storage device." - "C(lun_id) - LUN id." - "C(username) - A CHAP user name for logging into a target." - "C(password) - A CHAP password for logging into a target." - "Note that these parameters are not idempotent." posixfs: description: - "Dictionary with values for PosixFS storage type:" - "C(path) - Path of the mount point. E.g.: /path/to/my/data" - "C(vfs_type) - Virtual File System type." - "C(mount_options) - Option which will be passed when mounting storage." 
- "Note that these parameters are not idempotent." glusterfs: description: - "Dictionary with values for GlusterFS storage type:" - "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com" - "C(path) - Path of the mount point. E.g.: /path/to/my/data" - "C(mount_options) - Option which will be passed when mounting storage." - "Note that these parameters are not idempotent." fcp: description: - "Dictionary with values for fibre channel storage type:" - "C(address) - Address of the fibre channel storage server." - "C(port) - Port of the fibre channel storage server." - "C(lun_id) - LUN id." - "Note that these parameters are not idempotent." destroy: description: - "Logical remove of the storage domain. If I(true) retains the storage domain's data for import." - "This parameter is relevant only when C(state) is I(absent)." format: description: - "If I(True) storage domain will be formatted after removing it from oVirt." - "This parameter is relevant only when C(state) is I(absent)." extends_documentation_fragment: ovirt ''' EXAMPLES = ''' # Examples don't contain auth parameter for simplicity, # look at ovirt_auth module to see how to reuse authentication: # Add data NFS storage domain - ovirt_storage_domains: name: data_nfs host: myhost data_center: mydatacenter nfs: address: 10.34.63.199 path: /path/data # Add data iSCSI storage domain: - ovirt_storage_domains: name: data_iscsi host: myhost data_center: mydatacenter iscsi: target: iqn.2016-08-09.domain-01:nickname lun_id: 1IET_000d0002 address: 10.34.63.204 # Import export NFS storage domain: - ovirt_storage_domains: domain_function: export host: myhost data_center: mydatacenter nfs: address: 10.34.63.199 path: /path/export # Create ISO NFS storage domain - ovirt_storage_domains: name: myiso domain_function: iso host: myhost data_center: mydatacenter nfs: address: 10.34.63.199 path: /path/iso # Remove storage domain - ovirt_storage_domains: state: absent name: mystorage_domain format: true ''' RETURN = ''' id: description: ID of the storage domain which is managed returned: On success if storage domain is found. type: str sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c storage domain: description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt instance at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain." returned: On success if storage domain is found. 
''' class StorageDomainModule(BaseModule): def _get_storage_type(self): for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']: if self._module.params.get(sd_type) is not None: return sd_type def _get_storage(self): for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']: if self._module.params.get(sd_type) is not None: return self._module.params.get(sd_type) def _login(self, storage_type, storage): if storage_type == 'iscsi': hosts_service = self._connection.system_service().hosts_service() host = search_by_name(hosts_service, self._module.params['host']) hosts_service.host_service(host.id).iscsi_login( iscsi=otypes.IscsiDetails( username=storage.get('username'), password=storage.get('password'), address=storage.get('address'), target=storage.get('target'), ), ) def build_entity(self): storage_type = self._get_storage_type() storage = self._get_storage() self._login(storage_type, storage) return otypes.StorageDomain( name=self._module.params['name'], description=self._module.params['description'], comment=self._module.params['comment'], type=otypes.StorageDomainType( self._module.params['domain_function'] ), host=otypes.Host( name=self._module.params['host'], ), storage=otypes.HostStorage( type=otypes.StorageType(storage_type), logical_units=[ otypes.LogicalUnit( id=storage.get('lun_id'), address=storage.get('address'), port=storage.get('port', 3260), target=storage.get('target'), username=storage.get('username'), password=storage.get('password'), ), ] if storage_type in ['iscsi', 'fcp'] else None, mount_options=storage.get('mount_options'), vfs_type=storage.get('vfs_type'), address=storage.get('address'), path=storage.get('path'), nfs_retrans=storage.get('retrans'), nfs_timeo=storage.get('timeout'), nfs_version=otypes.NfsVersion( storage.get('version') ) if storage.get('version') else None, ) if storage_type is not None else None ) def _attached_sds_service(self): # Get data center object of the storage domain: dcs_service = self._connection.system_service().data_centers_service() dc = search_by_name(dcs_service, self._module.params['data_center']) if dc is None: return dc_service = dcs_service.data_center_service(dc.id) return dc_service.storage_domains_service() def _maintenance(self, storage_domain): attached_sds_service = self._attached_sds_service() if attached_sds_service is None: return attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id) attached_sd = attached_sd_service.get() if attached_sd and attached_sd.status != sdstate.MAINTENANCE: if not self._module.check_mode: attached_sd_service.deactivate() self.changed = True wait( service=attached_sd_service, condition=lambda sd: sd.status == sdstate.MAINTENANCE, wait=self._module.params['wait'], timeout=self._module.params['timeout'], ) def _unattach(self, storage_domain): attached_sds_service = self._attached_sds_service() if attached_sds_service is None: return attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id) attached_sd = attached_sd_service.get() if attached_sd and attached_sd.status == sdstate.MAINTENANCE: if not self._module.check_mode: # Detach the storage domain: attached_sd_service.remove() self.changed = True # Wait until storage domain is detached: wait( service=attached_sd_service, condition=lambda sd: sd is None, wait=self._module.params['wait'], timeout=self._module.params['timeout'], ) def pre_remove(self, storage_domain): # Before removing storage domain we need to put it into maintenance state: self._maintenance(storage_domain) # 
Before removing storage domain we need to detach it from data center: self._unattach(storage_domain) def post_create_check(self, sd_id): storage_domain = self._service.service(sd_id).get() self._service = self._attached_sds_service() # If storage domain isn't attached, attach it: attached_sd_service = self._service.service(storage_domain.id) if attached_sd_service.get() is None: self._service.add( otypes.StorageDomain( id=storage_domain.id, ), ) self.changed = True # Wait until storage domain is active: wait( service=attached_sd_service, condition=lambda sd: sd.status == sdstate.ACTIVE, wait=self._module.params['wait'], timeout=self._module.params['timeout'], ) def unattached_pre_action(self, storage_domain): self._service = self._attached_sds_service() self._maintenance(storage_domain) def update_check(self, entity): return ( equal(self._module.params['comment'], entity.comment) and equal(self._module.params['description'], entity.description) ) def failed_state(sd): return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE] def control_state(sd_module): sd = sd_module.search_entity() if sd is None: return sd_service = sd_module._service.service(sd.id) if sd.status == sdstate.LOCKED: wait( service=sd_service, condition=lambda sd: sd.status != sdstate.LOCKED, fail_condition=failed_state, ) if failed_state(sd): raise Exception("Not possible to manage storage domain '%s'." % sd.name) elif sd.status == sdstate.ACTIVATING: wait( service=sd_service, condition=lambda sd: sd.status == sdstate.ACTIVE, fail_condition=failed_state, ) elif sd.status == sdstate.DETACHING: wait( service=sd_service, condition=lambda sd: sd.status == sdstate.UNATTACHED, fail_condition=failed_state, ) elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE: wait( service=sd_service, condition=lambda sd: sd.status == sdstate.MAINTENANCE, fail_condition=failed_state, ) def main(): argument_spec = ovirt_full_argument_spec( state=dict( choices=['present', 'absent', 'maintenance', 'unattached'], default='present', ), name=dict(required=True), description=dict(default=None), comment=dict(default=None), data_center=dict(required=True), domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']), host=dict(default=None), nfs=dict(default=None, type='dict'), iscsi=dict(default=None, type='dict'), posixfs=dict(default=None, type='dict'), glusterfs=dict(default=None, type='dict'), fcp=dict(default=None, type='dict'), destroy=dict(type='bool', default=False), format=dict(type='bool', default=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) check_sdk(module) try: connection = create_connection(module.params.pop('auth')) storage_domains_service = connection.system_service().storage_domains_service() storage_domains_module = StorageDomainModule( connection=connection, module=module, service=storage_domains_service, ) state = module.params['state'] control_state(storage_domains_module) if state == 'absent': ret = storage_domains_module.remove( destroy=module.params['destroy'], format=module.params['format'], host=module.params['host'], ) elif state == 'present': sd_id = storage_domains_module.create()['id'] storage_domains_module.post_create_check(sd_id) ret = storage_domains_module.action( action='activate', action_condition=lambda s: s.status == sdstate.MAINTENANCE, wait_condition=lambda s: s.status == sdstate.ACTIVE, fail_condition=failed_state, ) elif state == 'maintenance': sd_id = storage_domains_module.create()['id'] 
storage_domains_module.post_create_check(sd_id) ret = storage_domains_module.action( action='deactivate', action_condition=lambda s: s.status == sdstate.ACTIVE, wait_condition=lambda s: s.status == sdstate.MAINTENANCE, fail_condition=failed_state, ) elif state == 'unattached': ret = storage_domains_module.create() storage_domains_module.pre_remove( storage_domain=storage_domains_service.service(ret['id']).get() ) ret['changed'] = storage_domains_module.changed module.exit_json(**ret) except Exception as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) finally: connection.close(logout=False) if __name__ == "__main__": main()<|fim▁end|>
aliases: ['type'] host: description:
<|file_name|>checkerboard.py<|end_file_name|><|fim▁begin|># Generates alternating frames of a checkerboard pattern. Q_STARTING_INDEX = 150 UNIVERSE_LIGHTS = 144 #144 for side 1, #116 for side 2 flip = 0 for i in range(1, 201): # 5 seconds * 40 frames/second = 200 frames print "Record Cue " + str(Q_STARTING_INDEX + i) for j in range(1, UNIVERSE_LIGHTS * 3 + 1): # 3 channels / light (channel) value = 255 if flip else 0 flip = not flip<|fim▁hole|><|fim▁end|>
print "C"+ str(j)+ " @ #"+str(value)+";" flip = not flip # switch the checkerboard for the next frame print "Record Stop"
<|file_name|>latex.rs<|end_file_name|><|fim▁begin|>use std::io::IoResult; use collections::HashMap; use backend::Backend; use colors; pub struct LatexBackend { contexts: Vec<~str>, } impl LatexBackend { pub fn new() -> LatexBackend { LatexBackend { contexts: Vec::new(), } } } static HEADER: &'static str = "\ \\usepackage{xcolor} \\usepackage{fancyvrb} \\newcommand{\\VerbBar}{|} \\newcommand{\\VERB}{\\Verb[commandchars=\\\\\\{\\}]} \\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\\\\{\\}} % Add ',fontsize=\\small' for more characters per line \\newenvironment{Shaded}{}{} "; impl Backend for LatexBackend { fn configure(&mut self, _vars: &HashMap<~str, ~str>) -> Result<(), ~str> { Ok(()) } fn header(&mut self, w: &mut Writer) -> IoResult<()> { try!(w.write_str(HEADER)); for (ty, color) in colors::get_colors().iter() { try!(writeln!(w, "\\\\definecolor\\{{}\\}\\{HTML\\}\\{{}\\}", ty, color)); } Ok(()) } fn code_start(&mut self, w: &mut Writer) -> IoResult<()> { try!(w.write_line("\\begin{Shaded}")); try!(w.write_line("\\begin{Highlighting}[]")); Ok(()) } fn code_end(&mut self, w: &mut Writer) -> IoResult<()> { try!(w.write_line("\\end{Highlighting}")); try!(w.write_line("\\end{Shaded}")); Ok(()) } fn start(&mut self, w: &mut Writer, ty: &str) -> IoResult<()> { if ty != "comment" { if colors::get_types().contains(&ty.to_owned()) { try!(write!(w, "\\\\textcolor\\{{}\\}\\{", ty)); } if ty == "attribute" { try!(w.write_str("#")); } } self.contexts.push(ty.to_owned()); Ok(()) } fn end(&mut self, w: &mut Writer, ty: &str) -> IoResult<()> { if ty != "comment" { if ty == "attribute" { try!(w.write_str("]")); } if colors::get_types().contains(&ty.to_owned()) { try!(w.write_str("}")); } } self.contexts.pop(); Ok(()) } fn text(&mut self, w: &mut Writer, text: &str) -> IoResult<()> { fn escape_latex(text: &str) -> ~str { let mut result = StrBuf::new(); let mut escape = false; for c in text.chars() { if escape { result.push_str("\\textbackslash{"); } if c == '{' || c == '}' { result.push_char('\\'); } if c == '\\' { escape = true; } else { result.push_char(c); } if escape && c != '\\' { result.push_char('}'); escape = false; } } result.into_owned() } fn escape_comment(text: &str, has_color: bool) -> ~str { let mut result = StrBuf::new(); let mut first = true; for line in text.lines() { if !first { result.push_str("\n"); } if line.len() > 0 && has_color { result.push_str("\\textcolor{comment}{"); } result.push_str(line); if line.len() > 0 && has_color { result.push_str("}"); } first = false; } let old_len = text.len();<|fim▁hole|> range(0, old_len - new_len).advance(|_| { result.push_char('\n'); true }); result.into_owned() } let context = self.contexts.last().unwrap(); let has_color = colors::get_types().contains(context); let context = context.as_slice(); let text = if context == "comment" { escape_comment(text, has_color) } else { escape_latex(text) }; try!(w.write_str(text)); Ok(()) } }<|fim▁end|>
let text = text.trim_right_chars('\n').to_owned(); let new_len = text.len();
<|file_name|>column-header-additional-actions.spec.ts<|end_file_name|><|fim▁begin|>/// <reference types="Cypress" /> context('Column template', () => { beforeEach(() => { cy.visit('http://127.0.0.1:4201/#/filter-header-template'); }); it('shows "Level" menu, and filter list using checkboxes', () => { cy.get('#table > tbody > tr:nth-child(1) > td:nth-child(3) > div') .contains('Low') .get('#table > thead > tr > th:nth-child(3) > div.ngx-dropdown > a') .click()<|fim▁hole|> .click() .get('#table > tbody > tr:nth-child(1) > td:nth-child(3) > div') .contains('Medium'); }); it('shows "Company" menu, and filter list using input', () => { cy.get('#table > tbody > tr:nth-child(1) > td:nth-child(3) > div') .contains('Low') .get('#table > thead > tr > th:nth-child(4) > div.ngx-dropdown > a') .click() .get('#filterHeaderSearch') .click() .type('iso') .get('#table > tbody > tr:nth-child(1) > td:nth-child(3) > div') .contains('Medium') .get('#table > tbody > tr > td:nth-child(4) > div') .contains('ISOSWITCH'); }); });<|fim▁end|>
.get( '#table > thead > tr > th:nth-child(3) > div.ngx-dropdown > div > div > label:nth-child(3)' ) .contains('Low')
<|file_name|>package.py<|end_file_name|><|fim▁begin|>############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. #<|fim▁hole|># This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Saws(AutotoolsPackage): """The Scientific Application Web server (SAWs) turns any C or C++ scientific or engineering application code into a webserver, allowing one to examine (and even modify) the state of the simulation with any browser from anywhere.""" homepage = "https://bitbucket.org/saws/saws/wiki/Home" version('develop', git='https://bitbucket.org/saws/saws.git', tag='master') version('0.1.0', git='https://bitbucket.org/saws/saws.git', tag='v0.1.0')<|fim▁end|>
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. #
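As a side note, Spack packages like the one above are extended with declarative directives plus build hooks; a hypothetical sketch (the 'debug' variant and '--enable-debug' flag are assumptions, not documented SAWs options):

from spack import *

class Saws(AutotoolsPackage):
    # ... homepage/version directives as above ...
    variant('debug', default=False, description='Build with debug symbols')

    def configure_args(self):
        # AutotoolsPackage hook: extra arguments passed to ./configure
        args = []
        if '+debug' in self.spec:
            args.append('--enable-debug')  # assumed configure flag
        return args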
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # Copyright (c) 2010-2011, Monash e-Research Centre # (Monash University, Australia) # Copyright (c) 2010-2011, VeRSI Consortium # (Victorian eResearch Strategic Initiative, Australia) # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the VeRSI, the VeRSI Consortium members, nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """ __init__.py .. moduleauthor:: Russell Sim <[email protected]> """ import logging from django.conf import settings from django.utils.importlib import import_module from django.core.exceptions import ImproperlyConfigured from django.db.models.signals import post_save from django.core.exceptions import MiddlewareNotUsed logger = logging.getLogger(__name__) class FilterInitMiddleware(object): def __init__(self): from tardis.tardis_portal.models import Dataset_File for f in settings.POST_SAVE_FILTERS: cls = f[0] args = [] kw = {} if len(f) == 2: args = f[1] if len(f) == 3: args = f[1] kw = f[2] hook = self._safe_import(cls, args, kw) # XXX seems to require a strong ref else it won't fire, # could be because some hooks are classes not functions.<|fim▁hole|>
 # disable middleware raise MiddlewareNotUsed() def _safe_import(self, path, args, kw): try: dot = path.rindex('.') except ValueError: raise ImproperlyConfigured('%s isn\'t a filter module' % path) filter_module, filter_classname = path[:dot], path[dot + 1:] try: mod = import_module(filter_module) except ImportError, e: raise ImproperlyConfigured('Error importing filter %s: "%s"' % (filter_module, e)) try: filter_class = getattr(mod, filter_classname) except AttributeError: raise ImproperlyConfigured('Filter module "%s" does not define a "%s" class' % (filter_module, filter_classname)) filter_instance = filter_class(*args, **kw) return filter_instance<|fim▁end|>
post_save.connect(hook, sender=Dataset_File, weak=False) logger.debug('Initialised postsave hook %s' % post_save.receivers)
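The middleware above reads settings.POST_SAVE_FILTERS as tuples of a dotted class path plus optional positional and keyword arguments; a minimal sketch of such entries (the filter class paths here are hypothetical):

# Hypothetical settings entries consumed by FilterInitMiddleware.
POST_SAVE_FILTERS = [
    ("tardis.tardis_portal.filters.exif.EXIFFilter",),       # class path only
    ("tardis.tardis_portal.filters.schema.SchemaFilter",     # path, args, kwargs
     ["http://example.org/schema#1"],
     {"overwrite": False}),
]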
<|file_name|>MemoryMappedFile.cpp<|end_file_name|><|fim▁begin|>/* * Copyright 2014-2020 Real Logic Limited. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _WIN32 #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #else #ifndef NOMINMAX #define NOMINMAX #endif // !NOMINMAX #include <Windows.h> #include "StringUtil.h" #endif #include <string> #include <cstring> #include "util/MemoryMappedFile.h" #include "util/Exceptions.h" #include "util/ScopeUtils.h" namespace aeron { namespace util { #ifdef _WIN32 bool MemoryMappedFile::fill(FileHandle fd, std::size_t size, std::uint8_t value) { std::uint8_t buffer[8196]; memset(buffer, value, m_page_size); DWORD written = 0; while (size >= m_page_size) { if (!::WriteFile(fd.handle, buffer, (DWORD)m_page_size, &written, nullptr)) { return false; } size -= written; } if (size) {<|fim▁hole|> } } return true; } MemoryMappedFile::ptr_t MemoryMappedFile::createNew(const char *filename, std::size_t offset, std::size_t size) { FileHandle fd{}; fd.handle = ::CreateFile( filename, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr); if (INVALID_HANDLE_VALUE == fd.handle) { throw IOException( std::string("failed to create file: ") + filename + " " + toString(GetLastError()), SOURCEINFO); } OnScopeExit tidy( [&]() { if (INVALID_HANDLE_VALUE != fd.handle) { ::CloseHandle(fd.handle); } }); if (!fill(fd, size, 0)) { throw IOException( std::string("failed to write to file: ") + filename + " " + toString(GetLastError()), SOURCEINFO); } auto obj = MemoryMappedFile::ptr_t(new MemoryMappedFile(fd, offset, size, false)); fd.handle = INVALID_HANDLE_VALUE; return obj; } MemoryMappedFile::ptr_t MemoryMappedFile::mapExisting( const char *filename, std::size_t offset, std::size_t size, bool readOnly) { DWORD dwDesiredAccess = readOnly ? 
GENERIC_READ : (GENERIC_READ | GENERIC_WRITE); DWORD dwSharedMode = FILE_SHARE_READ | FILE_SHARE_WRITE; FileHandle fd{}; fd.handle = ::CreateFile( filename, dwDesiredAccess, dwSharedMode, nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); if (INVALID_HANDLE_VALUE == fd.handle) { throw IOException( std::string("failed to open existing file: ") + filename + " " + toString(GetLastError()), SOURCEINFO); } OnScopeExit tidy( [&]() { if (INVALID_HANDLE_VALUE != fd.handle) { ::CloseHandle(fd.handle); } }); auto obj = MemoryMappedFile::ptr_t(new MemoryMappedFile(fd, offset, size, readOnly)); fd.handle = INVALID_HANDLE_VALUE; return obj; } #else bool MemoryMappedFile::fill(FileHandle fd, std::size_t size, std::uint8_t value) { std::unique_ptr<std::uint8_t[]> buffer(new std::uint8_t[m_page_size]); memset(buffer.get(), value, m_page_size); while (size >= m_page_size) { if (static_cast<std::size_t>(::write(fd.handle, buffer.get(), m_page_size)) != m_page_size) { return false; } size -= m_page_size; } if (size) { if (static_cast<std::size_t>(::write(fd.handle, buffer.get(), size)) != size) { return false; } } return true; } MemoryMappedFile::ptr_t MemoryMappedFile::createNew(const char *filename, off_t offset, std::size_t size) { FileHandle fd{}; fd.handle = ::open(filename, O_RDWR | O_CREAT, 0666); if (fd.handle < 0) { throw IOException(std::string("failed to create file: ") + filename, SOURCEINFO); } OnScopeExit tidy( [&]() { close(fd.handle); }); if (!fill(fd, size, 0)) { throw IOException(std::string("failed to write to file: ") + filename, SOURCEINFO); } return MemoryMappedFile::ptr_t(new MemoryMappedFile(fd, offset, size, false)); } MemoryMappedFile::ptr_t MemoryMappedFile::mapExisting( const char *filename, off_t offset, std::size_t length, bool readOnly) { FileHandle fd{}; fd.handle = ::open(filename, (readOnly ? O_RDONLY : O_RDWR), 0666); if (fd.handle < 0) { throw IOException(std::string("failed to open existing file: ") + filename, SOURCEINFO); } OnScopeExit tidy( [&]() { close(fd.handle); }); return MemoryMappedFile::ptr_t(new MemoryMappedFile(fd, offset, length, readOnly)); } #endif MemoryMappedFile::ptr_t MemoryMappedFile::mapExisting(const char *filename, bool readOnly) { return mapExisting(filename, 0, 0, readOnly); } std::uint8_t *MemoryMappedFile::getMemoryPtr() const { return m_memory; } std::size_t MemoryMappedFile::getMemorySize() const { return m_memorySize; } std::size_t MemoryMappedFile::m_page_size = getPageSize(); #ifdef _WIN32 MemoryMappedFile::MemoryMappedFile(FileHandle fd, std::size_t offset, std::size_t length, bool readOnly) { m_file = fd.handle; if (0 == length && 0 == offset) { LARGE_INTEGER fileSize; if (!::GetFileSizeEx(fd.handle, &fileSize)) { cleanUp(); throw IOException( std::string("failed to query size of existing file: ") + toString(GetLastError()), SOURCEINFO); } length = static_cast<std::size_t>(fileSize.QuadPart); } m_memorySize = length; m_memory = doMapping(m_memorySize, fd, offset, readOnly); if (!m_memory) { cleanUp(); throw IOException(std::string("failed to Map Memory: ") + toString(GetLastError()), SOURCEINFO); } } void MemoryMappedFile::cleanUp() { if (m_memory) { ::UnmapViewOfFile(m_memory); m_memory = nullptr; } if (m_mapping) { ::CloseHandle(m_mapping); m_mapping = nullptr; } if (m_file) { ::CloseHandle(m_file); m_file = nullptr; } } MemoryMappedFile::~MemoryMappedFile() { cleanUp(); } uint8_t *MemoryMappedFile::doMapping(std::size_t size, FileHandle fd, std::size_t offset, bool readOnly) { DWORD flProtect = readOnly ? 
PAGE_READONLY : PAGE_READWRITE; m_mapping = ::CreateFileMapping(fd.handle, nullptr, flProtect, 0, static_cast<DWORD>(size), nullptr); if (nullptr == m_mapping) { return nullptr; } DWORD dwDesiredAccess = readOnly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS; void *memory = (LPTSTR)::MapViewOfFile(m_mapping, dwDesiredAccess, 0, static_cast<DWORD>(offset), size); return static_cast<std::uint8_t *>(memory); } std::size_t MemoryMappedFile::getPageSize() noexcept { SYSTEM_INFO system_info; ::GetSystemInfo(&system_info); return static_cast<std::size_t>(system_info.dwPageSize); } std::int64_t MemoryMappedFile::getFileSize(const char *filename) { WIN32_FILE_ATTRIBUTE_DATA fad; if (::GetFileAttributesEx(filename, GetFileExInfoStandard, &fad) == 0) { return -1; } LARGE_INTEGER size; size.HighPart = fad.nFileSizeHigh; size.LowPart = fad.nFileSizeLow; return size.QuadPart; } #else MemoryMappedFile::MemoryMappedFile(FileHandle fd, off_t offset, std::size_t length, bool readOnly) { if (0 == length && 0 == offset) { struct stat statInfo{}; ::fstat(fd.handle, &statInfo); length = static_cast<std::size_t>(statInfo.st_size); } m_memorySize = length; m_memory = doMapping(m_memorySize, fd, static_cast<std::size_t>(offset), readOnly); } MemoryMappedFile::~MemoryMappedFile() { if (m_memory && m_memorySize) { ::munmap(m_memory, m_memorySize); } } std::uint8_t *MemoryMappedFile::doMapping(std::size_t length, FileHandle fd, std::size_t offset, bool readOnly) { void *memory = ::mmap( nullptr, length, readOnly ? PROT_READ : (PROT_READ | PROT_WRITE), MAP_SHARED, fd.handle, static_cast<off_t>(offset)); if (MAP_FAILED == memory) { throw IOException("failed to Memory Map file", SOURCEINFO); } return static_cast<std::uint8_t *>(memory); } std::size_t MemoryMappedFile::getPageSize() noexcept { return static_cast<std::size_t>(::getpagesize()); } std::int64_t MemoryMappedFile::getFileSize(const char *filename) { struct stat statInfo{}; if (::stat(filename, &statInfo) < 0) { return -1; } return statInfo.st_size; } #endif }}<|fim▁end|>
if (!::WriteFile(fd.handle, buffer, (DWORD)size, &written, nullptr)) { return false;
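For comparison, the create-then-map pattern used by createNew() above — zero-fill the file to length, map it shared, and let the mapping outlive the descriptor — can be sketched with Python's standard mmap module (POSIX semantics assumed):

import mmap
import os

def create_new(filename, size):
    fd = os.open(filename, os.O_RDWR | os.O_CREAT, 0o666)
    try:
        os.write(fd, b"\x00" * size)  # zero-fill, like fill(fd, size, 0)
        return mmap.mmap(fd, size, flags=mmap.MAP_SHARED,
                         prot=mmap.PROT_READ | mmap.PROT_WRITE)
    finally:
        os.close(fd)  # on POSIX the mapping remains valid after close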
<|file_name|>Masonry.d.ts<|end_file_name|><|fim▁begin|>import { PureComponent, Validator, Requireable } from 'react'; import { CellMeasurerCacheInterface, KeyMapper, MeasuredCellParent } from './CellMeasurer'; import { GridCellRenderer } from './Grid'; import { IndexRange } from '../../index'; /** * Specifies the number of milliseconds during which to disable pointer events while a scroll is in progress. * This improves performance and makes scrolling smoother. */ export const DEFAULT_SCROLLING_RESET_TIME_INTERVAL = 150; export type OnCellsRenderedCallback = (params: IndexRange) => void; export type OnScrollCallback = (params: { clientHeight: number; scrollHeight: number; scrollTop: number }) => void; export type MasonryCellProps = { index: number; isScrolling: boolean; key: React.Key; parent: MeasuredCellParent; style?: React.CSSProperties; }; export type CellRenderer = (props: MasonryCellProps) => React.ReactNode; export type MasonryProps = { autoHeight: boolean; cellCount: number; cellMeasurerCache: CellMeasurerCacheInterface; cellPositioner: Positioner; cellRenderer: CellRenderer; className?: string; height: number; id?: string; keyMapper?: KeyMapper; onCellsRendered?: OnCellsRenderedCallback; onScroll?: OnScrollCallback; overscanByPixels?: number; role?: string; scrollingResetTimeInterval?: number; style?: React.CSSProperties; tabIndex?: number | null; width: number; /** * PLEASE NOTE * The [key: string]: any; line is here on purpose * This is due to the need of force re-render of PureComponent * Check the following link if you want to know more * https://github.com/bvaughn/react-virtualized#pass-thru-props */ [key: string]: any; }; export type MasonryState = { isScrolling: boolean; scrollTop: number; }; /** * This component efficiently displays arbitrarily positioned cells using windowing techniques. * Cell position is determined by an injected `cellPositioner` property. * Windowing is vertical; this component does not support horizontal scrolling. * * Rendering occurs in two phases: * 1) First pass uses estimated cell sizes (provided by the cache) to determine how many cells to measure in a batch. * Batch size is chosen using a fast, naive layout algorithm that stacks images in order until the viewport has been filled. * After measurement is complete (componentDidMount or componentDidUpdate) this component evaluates positioned cells * in order to determine if another measurement pass is required (eg if actual cell sizes were less than estimated sizes). * All measurements are permanently cached (keyed by `keyMapper`) for performance purposes. * 2) Second pass uses the external `cellPositioner` to lay out cells.<|fim▁hole|>
 * If the layout is invalidated due to eg a resize, cached positions can be cleared using `recomputeCellPositions()`. * * Animation constraints: * Simple animations are supported (eg translate/slide into place on initial reveal). * More complex animations are not (eg flying from one position to another on resize). * * Layout constraints: * This component supports multi-column layout. * The height of each item may vary. * The width of each item must not exceed the width of the column it is "in". * The left position of all items within a column must align. * (Items may not span multiple columns.) 
*/ export class Masonry extends PureComponent<MasonryProps, MasonryState> { static defaultProps: { autoHeight: false; keyMapper: identity; onCellsRendered: noop; onScroll: noop; overscanByPixels: 20; role: 'grid'; scrollingResetTimeInterval: typeof DEFAULT_SCROLLING_RESET_TIME_INTERVAL; style: emptyObject; tabIndex: 0; }; clearCellPositions(): void; // HACK This method signature was intended for Grid invalidateCellSizeAfterRender(params: { rowIndex: number }): void; recomputeCellPositions(): void; static getDerivedStateFromProps(nextProps: MasonryProps, prevState: MasonryState): MasonryState | null; } export default Masonry; export type emptyObject = {}; export type identity = <T>(value: T) => T; export type noop = () => void; export type Position = { left: number; top: number; }; export type createCellPositionerParams = { cellMeasurerCache: CellMeasurerCacheInterface; columnCount: number; columnWidth: number; spacer?: number; }; export type resetParams = { columnCount: number; columnWidth: number; spacer?: number; }; export type Positioner = ((index: number) => Position) & { reset: (params: resetParams) => void; }; export const createCellPositioner: (params: createCellPositionerParams) => Positioner;<|fim▁end|>
* At this time the positioner has access to cached size measurements for all cells. * The positions it returns are cached by Masonry for fast access later. * Phase one is repeated if the user scrolls beyond the current layout's bounds.
<|file_name|>p2ptransportchannel_unittest.cc<|end_file_name|><|fim▁begin|>/* * libjingle * Copyright 2009 Google Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "talk/base/fakenetwork.h" #include "talk/base/firewallsocketserver.h" #include "talk/base/gunit.h" #include "talk/base/helpers.h" #include "talk/base/logging.h" #include "talk/base/natserver.h" #include "talk/base/natsocketfactory.h" #include "talk/base/physicalsocketserver.h" #include "talk/base/proxyserver.h" #include "talk/base/socketaddress.h" #include "talk/base/thread.h" #include "talk/base/virtualsocketserver.h" #include "talk/p2p/base/p2ptransportchannel.h" #include "talk/p2p/base/testrelayserver.h" #include "talk/p2p/base/teststunserver.h" #include "talk/p2p/client/basicportallocator.h" using talk_base::SocketAddress; static const int kDefaultTimeout = 1000; static const int kOnlyLocalPorts = cricket::PORTALLOCATOR_DISABLE_STUN | cricket::PORTALLOCATOR_DISABLE_RELAY | cricket::PORTALLOCATOR_DISABLE_TCP; // Addresses on the public internet. static const SocketAddress kPublicAddrs[2] = { SocketAddress("11.11.11.11", 0), SocketAddress("22.22.22.22", 0) }; // For configuring multihomed clients. static const SocketAddress kAlternateAddrs[2] = { SocketAddress("11.11.11.101", 0), SocketAddress("22.22.22.202", 0) }; // Addresses for HTTP proxy servers. static const SocketAddress kHttpsProxyAddrs[2] = { SocketAddress("11.11.11.1", 443), SocketAddress("22.22.22.1", 443) }; // Addresses for SOCKS proxy servers. static const SocketAddress kSocksProxyAddrs[2] = { SocketAddress("11.11.11.1", 1080), SocketAddress("22.22.22.1", 1080) }; // Internal addresses for NAT boxes. static const SocketAddress kNatAddrs[2] = { SocketAddress("192.168.1.1", 0), SocketAddress("192.168.2.1", 0) }; // Private addresses inside the NAT private networks. static const SocketAddress kPrivateAddrs[2] = { SocketAddress("192.168.1.11", 0), SocketAddress("192.168.2.22", 0) }; // For cascaded NATs, the internal addresses of the inner NAT boxes. static const SocketAddress kCascadedNatAddrs[2] = { SocketAddress("192.168.10.1", 0), SocketAddress("192.168.20.1", 0) }; // For cascaded NATs, private addresses inside the inner private networks. 
static const SocketAddress kCascadedPrivateAddrs[2] = { SocketAddress("192.168.10.11", 0), SocketAddress("192.168.20.22", 0) }; // The address of the public STUN server. static const SocketAddress kStunAddr("99.99.99.1", cricket::STUN_SERVER_PORT); // The addresses for the public relay server. static const SocketAddress kRelayUdpIntAddr("99.99.99.2", 5000); static const SocketAddress kRelayUdpExtAddr("99.99.99.3", 5001); static const SocketAddress kRelayTcpIntAddr("99.99.99.2", 5002); static const SocketAddress kRelayTcpExtAddr("99.99.99.3", 5003); static const SocketAddress kRelaySslTcpIntAddr("99.99.99.2", 5004); static const SocketAddress kRelaySslTcpExtAddr("99.99.99.3", 5005); // Based on ICE_UFRAG_LENGTH static const char* kIceUfrag[4] = {"TESTICEUFRAG0000", "TESTICEUFRAG0001", "TESTICEUFRAG0002", "TESTICEUFRAG0003"}; // Based on ICE_PWD_LENGTH static const char* kIcePwd[4] = {"TESTICEPWD00000000000000", "TESTICEPWD00000000000001", "TESTICEPWD00000000000002", "TESTICEPWD00000000000003"}; static const int kTiebreaker1 = 11111; static const int kTiebreaker2 = 22222; // This test simulates 2 P2P endpoints that want to establish connectivity // with each other over various network topologies and conditions, which can be // specified in each individual test. // A virtual network (via VirtualSocketServer) along with virtual firewalls and // NATs (via Firewall/NATSocketServer) are used to simulate the various network // conditions. We can configure the IP addresses of the endpoints, // block various types of connectivity, or add arbitrary levels of NAT. // We also run a STUN server and a relay server on the virtual network to allow // our typical P2P mechanisms to do their thing. // For each case, we expect the P2P stack to eventually settle on a specific // form of connectivity to the other side. The test checks that the P2P // negotiation successfully establishes connectivity within a certain time, // and that the result is what we expect. // Note that this class is a base class for use by other tests, which will provide // specialized test behavior. 
class P2PTransportChannelTestBase : public testing::Test, public talk_base::MessageHandler, public sigslot::has_slots<> { public: P2PTransportChannelTestBase() : main_(talk_base::Thread::Current()), pss_(new talk_base::PhysicalSocketServer), vss_(new talk_base::VirtualSocketServer(pss_.get())), nss_(new talk_base::NATSocketServer(vss_.get())), ss_(new talk_base::FirewallSocketServer(nss_.get())), ss_scope_(ss_.get()), stun_server_(main_, kStunAddr), relay_server_(main_, kRelayUdpIntAddr, kRelayUdpExtAddr, kRelayTcpIntAddr, kRelayTcpExtAddr, kRelaySslTcpIntAddr, kRelaySslTcpExtAddr), socks_server1_(ss_.get(), kSocksProxyAddrs[0], ss_.get(), kSocksProxyAddrs[0]), socks_server2_(ss_.get(), kSocksProxyAddrs[1], ss_.get(), kSocksProxyAddrs[1]) { ep1_.role_ = cricket::ROLE_CONTROLLING; ep2_.role_ = cricket::ROLE_CONTROLLED; ep1_.allocator_.reset(new cricket::BasicPortAllocator( &ep1_.network_manager_, kStunAddr, kRelayUdpIntAddr, kRelayTcpIntAddr, kRelaySslTcpIntAddr)); ep2_.allocator_.reset(new cricket::BasicPortAllocator( &ep2_.network_manager_, kStunAddr, kRelayUdpIntAddr, kRelayTcpIntAddr, kRelaySslTcpIntAddr)); } protected: enum Config { OPEN, // Open to the Internet NAT_FULL_CONE, // NAT, no filtering NAT_ADDR_RESTRICTED, // NAT, must send to an addr to recv NAT_PORT_RESTRICTED, // NAT, must send to an addr+port to recv NAT_SYMMETRIC, // NAT, endpoint-dependent bindings NAT_DOUBLE_CONE, // Double NAT, both cone NAT_SYMMETRIC_THEN_CONE, // Double NAT, symmetric outer, cone inner BLOCK_UDP, // Firewall, UDP in/out blocked BLOCK_UDP_AND_INCOMING_TCP, // Firewall, UDP in/out and TCP in blocked BLOCK_ALL_BUT_OUTGOING_HTTP, // Firewall, only TCP out on 80/443 PROXY_HTTPS, // All traffic through HTTPS proxy PROXY_SOCKS, // All traffic through SOCKS proxy NUM_CONFIGS }; struct Result { Result(const std::string& lt, const std::string& lp, const std::string& rt, const std::string& rp, const std::string& lt2, const std::string& lp2, const std::string& rt2, const std::string& rp2, int wait) : local_type(lt), local_proto(lp), remote_type(rt), remote_proto(rp), local_type2(lt2), local_proto2(lp2), remote_type2(rt2), remote_proto2(rp2), connect_wait(wait) { } std::string local_type; std::string local_proto; std::string remote_type; std::string remote_proto; std::string local_type2; std::string local_proto2; std::string remote_type2; std::string remote_proto2; int connect_wait; }; struct ChannelData { bool CheckData(const char* data, int len) { bool ret = false; if (!ch_packets_.empty()) { std::string packet = ch_packets_.front(); ret = (packet == std::string(data, len)); ch_packets_.pop_front(); } return ret; } std::string name_; // TODO - Currently not used. 
std::list<std::string> ch_packets_; talk_base::scoped_ptr<cricket::P2PTransportChannel> ch_; }; struct Endpoint { Endpoint() : signaling_delay_(0), tiebreaker_(0), role_conflict_(false), protocol_type_(cricket::ICEPROTO_GOOGLE) {} bool HasChannel(cricket::TransportChannel* ch) { return (ch == cd1_.ch_.get() || ch == cd2_.ch_.get()); } ChannelData* GetChannelData(cricket::TransportChannel* ch) { if (!HasChannel(ch)) return NULL; if (cd1_.ch_.get() == ch) return &cd1_; else return &cd2_; } void SetSignalingDelay(int delay) { signaling_delay_ = delay; } void SetRole(cricket::TransportRole role) { role_ = role; } cricket::TransportRole role() { return role_; } void SetIceProtocolType(cricket::IceProtocolType type) { protocol_type_ = type; } cricket::IceProtocolType protocol_type() { return protocol_type_; } void SetTiebreaker(uint64 tiebreaker) { tiebreaker_ = tiebreaker; } uint64 GetTiebreaker() { return tiebreaker_; } void OnRoleConflict(bool role_conflict) { role_conflict_ = role_conflict; } bool role_conflict() { return role_conflict_; } talk_base::FakeNetworkManager network_manager_; talk_base::scoped_ptr<cricket::PortAllocator> allocator_; ChannelData cd1_; ChannelData cd2_; int signaling_delay_; cricket::TransportRole role_; uint64 tiebreaker_; bool role_conflict_; cricket::IceProtocolType protocol_type_; }; struct CandidateData : public talk_base::MessageData { CandidateData(cricket::TransportChannel* ch, const cricket::Candidate& c) : channel(ch), candidate(c) { } cricket::TransportChannel* channel; cricket::Candidate candidate; }; ChannelData* GetChannelData(cricket::TransportChannel* channel) { if (ep1_.HasChannel(channel)) return ep1_.GetChannelData(channel); else return ep2_.GetChannelData(channel); } void CreateChannels(int num) { ep1_.cd1_.ch_.reset(CreateChannel( 0, cricket::ICE_CANDIDATE_COMPONENT_DEFAULT, kIceUfrag[0], kIcePwd[0])); ep2_.cd1_.ch_.reset(CreateChannel( 1, cricket::ICE_CANDIDATE_COMPONENT_DEFAULT, kIceUfrag[1], kIcePwd[1])); if (num == 2) { ep1_.cd2_.ch_.reset(CreateChannel( 0, cricket::ICE_CANDIDATE_COMPONENT_DEFAULT, kIceUfrag[2], kIcePwd[2])); ep2_.cd2_.ch_.reset(CreateChannel( 1, cricket::ICE_CANDIDATE_COMPONENT_DEFAULT, kIceUfrag[3], kIcePwd[3])); } } cricket::P2PTransportChannel* CreateChannel(int endpoint, int component, const std::string& ice_ufrag, const std::string& ice_pwd) { cricket::P2PTransportChannel* channel = new cricket::P2PTransportChannel( component, NULL, GetAllocator(endpoint)); channel->SignalRequestSignaling.connect( this, &P2PTransportChannelTestBase::OnChannelRequestSignaling); channel->SignalCandidateReady.connect(this, &P2PTransportChannelTestBase::OnCandidate);<|fim▁hole|> this, &P2PTransportChannelTestBase::OnReadPacket); channel->SignalRoleConflict.connect( this, &P2PTransportChannelTestBase::OnRoleConflict); channel->SetIceProtocolType(GetEndpoint(endpoint)->protocol_type()); channel->SetIceUfrag(ice_ufrag); channel->SetIcePwd(ice_pwd); channel->SetRole(GetEndpoint(endpoint)->role()); channel->SetTiebreaker(GetEndpoint(endpoint)->GetTiebreaker()); channel->Connect(); return channel; } void DestroyChannels() { ep1_.cd1_.ch_.reset(); ep2_.cd1_.ch_.reset(); ep1_.cd2_.ch_.reset(); ep2_.cd2_.ch_.reset(); } cricket::P2PTransportChannel* ep1_ch1() { return ep1_.cd1_.ch_.get(); } cricket::P2PTransportChannel* ep1_ch2() { return ep1_.cd2_.ch_.get(); } cricket::P2PTransportChannel* ep2_ch1() { return ep2_.cd1_.ch_.get(); } cricket::P2PTransportChannel* ep2_ch2() { return ep2_.cd2_.ch_.get(); } // Common results. 
static const Result kLocalUdpToLocalUdp; static const Result kLocalUdpToStunUdp; static const Result kStunUdpToLocalUdp; static const Result kStunUdpToStunUdp; static const Result kLocalUdpToRelayUdp; static const Result kLocalTcpToLocalTcp; static void SetUpTestCase() { // Ensure the RNG is inited. talk_base::InitRandom(NULL, 0); } talk_base::NATSocketServer* nat() { return nss_.get(); } talk_base::FirewallSocketServer* fw() { return ss_.get(); } Endpoint* GetEndpoint(int endpoint) { if (endpoint == 0) { return &ep1_; } else if (endpoint == 1) { return &ep2_; } else { return NULL; } } cricket::PortAllocator* GetAllocator(int endpoint) { return GetEndpoint(endpoint)->allocator_.get(); } void AddAddress(int endpoint, const SocketAddress& addr) { GetEndpoint(endpoint)->network_manager_.AddInterface(addr); } void RemoveAddress(int endpoint, const SocketAddress& addr) { GetEndpoint(endpoint)->network_manager_.RemoveInterface(addr); } void SetProxy(int endpoint, talk_base::ProxyType type) { talk_base::ProxyInfo info; info.type = type; info.address = (type == talk_base::PROXY_HTTPS) ? kHttpsProxyAddrs[endpoint] : kSocksProxyAddrs[endpoint]; GetAllocator(endpoint)->set_proxy("unittest/1.0", info); } void SetAllocatorFlags(int endpoint, int flags) { GetAllocator(endpoint)->set_flags(flags); } void SetSignalingDelay(int endpoint, int delay) { GetEndpoint(endpoint)->SetSignalingDelay(delay); } void SetIceProtocol(int endpoint, cricket::IceProtocolType type) { GetEndpoint(endpoint)->SetIceProtocolType(type); } void SetIceRole(int endpoint, cricket::TransportRole role) { GetEndpoint(endpoint)->SetRole(role); } void SetTiebreaker(int endpoint, uint64 tiebreaker) { GetEndpoint(endpoint)->SetTiebreaker(tiebreaker); } bool GetRoleConflict(int endpoint) { return GetEndpoint(endpoint)->role_conflict(); } void Test(const Result& expected) { int32 connect_start = talk_base::Time(), connect_time; // Create the channels and wait for them to connect. CreateChannels(1); EXPECT_TRUE_WAIT_MARGIN(ep1_ch1() != NULL && ep2_ch1() != NULL && ep1_ch1()->readable() && ep1_ch1()->writable() && ep2_ch1()->readable() && ep2_ch1()->writable(), expected.connect_wait, 1000); connect_time = talk_base::TimeSince(connect_start); if (connect_time < expected.connect_wait) { LOG(LS_INFO) << "Connect time: " << connect_time << " ms"; } else { LOG(LS_INFO) << "Connect time: " << "TIMEOUT (" << expected.connect_wait << " ms)"; } // Allow a few turns of the crank for the best connections to emerge. // This may take up to 2 seconds. if (ep1_ch1()->best_connection() && ep2_ch1()->best_connection()) { int32 converge_start = talk_base::Time(), converge_time; int converge_wait = 2000; EXPECT_TRUE_WAIT_MARGIN( LocalCandidate(ep1_ch1())->type() == expected.local_type && LocalCandidate(ep1_ch1())->protocol() == expected.local_proto && RemoteCandidate(ep1_ch1())->type() == expected.remote_type && RemoteCandidate(ep1_ch1())->protocol() == expected.remote_proto, converge_wait, converge_wait); // Also do EXPECT_EQ on each part so that failures are more verbose. EXPECT_EQ(expected.local_type, LocalCandidate(ep1_ch1())->type()); EXPECT_EQ(expected.local_proto, LocalCandidate(ep1_ch1())->protocol()); EXPECT_EQ(expected.remote_type, RemoteCandidate(ep1_ch1())->type()); EXPECT_EQ(expected.remote_proto, RemoteCandidate(ep1_ch1())->protocol()); /* TODO - Enable ep2 candidates check. // Checking for best connection candidates information at remote. 
EXPECT_TRUE_WAIT_MARGIN( LocalCandidate(ep2_ch1())->type() == expected.local_type2 && LocalCandidate(ep2_ch1())->protocol() == expected.local_proto2 && RemoteCandidate(ep2_ch1())->type() == expected.remote_type2 && RemoteCandidate(ep2_ch1())->protocol() == expected.remote_proto2, converge_wait - talk_base::TimeSince(converge_start), converge_wait - talk_base::TimeSince(converge_start)); // For verbose EXPECT_EQ(expected.local_type2, LocalCandidate(ep2_ch1())->type()); EXPECT_EQ(expected.local_proto2, LocalCandidate(ep2_ch1())->protocol()); EXPECT_EQ(expected.remote_type2, RemoteCandidate(ep2_ch1())->type()); EXPECT_EQ(expected.remote_proto2, RemoteCandidate(ep2_ch1())->protocol()); */ converge_time = talk_base::TimeSince(converge_start); if (converge_time < converge_wait) { LOG(LS_INFO) << "Converge time: " << converge_time << " ms"; } else { LOG(LS_INFO) << "Converge time: " << "TIMEOUT (" << converge_wait << " ms)"; } } // Try sending some data to other end. TestSendRecv(1); // Destroy the channels, and wait for them to be fully cleaned up. DestroyChannels(); } void TestSendRecv(int channels) { for (int i = 0; i < 10; ++i) { const char* data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; int len = static_cast<int>(strlen(data)); // local_channel1 <==> remote_channel1 EXPECT_EQ_WAIT(len, SendData(ep1_ch1(), data, len), 1000); EXPECT_TRUE_WAIT(CheckDataOnChannel(ep2_ch1(), data, len), 1000); EXPECT_EQ_WAIT(len, SendData(ep2_ch1(), data, len), 1000); EXPECT_TRUE_WAIT(CheckDataOnChannel(ep1_ch1(), data, len), 1000); if (channels == 2 && ep1_ch2() && ep2_ch2()) { // local_channel2 <==> remote_channel2 EXPECT_EQ_WAIT(len, SendData(ep1_ch2(), data, len), 1000); EXPECT_TRUE_WAIT(CheckDataOnChannel(ep2_ch2(), data, len), 1000); EXPECT_EQ_WAIT(len, SendData(ep2_ch2(), data, len), 1000); EXPECT_TRUE_WAIT(CheckDataOnChannel(ep1_ch2(), data, len), 1000); } } } void OnChannelRequestSignaling(cricket::TransportChannelImpl* channel) { channel->OnSignalingReady(); } // We pass the candidates directly to the other side. void OnCandidate(cricket::TransportChannelImpl* ch, const cricket::Candidate& c) { main_->PostDelayed(GetEndpoint(ch)->signaling_delay_, this, 0, new CandidateData(ch, c)); } void OnMessage(talk_base::Message* msg) { talk_base::scoped_ptr<CandidateData> data( static_cast<CandidateData*>(msg->pdata)); cricket::P2PTransportChannel* rch = GetRemoteChannel(data->channel); const cricket::Candidate& c = data->candidate; LOG(LS_INFO) << "Candidate(" << data->channel->component() << "->" << rch->component() << "): " << c.type() << ", " << c.protocol() << ", " << c.address().ToString() << ", " << c.username() << ", " << c.generation(); rch->OnCandidate(c); } void OnReadPacket(cricket::TransportChannel* channel, const char* data, size_t len, int flags) { std::list<std::string>& packets = GetPacketList(channel); packets.push_front(std::string(data, len)); } void OnRoleConflict(cricket::TransportChannelImpl* channel) { GetEndpoint(channel)->OnRoleConflict(true); cricket::TransportRole new_role = GetEndpoint(channel)->role() == cricket::ROLE_CONTROLLING ? 
cricket::ROLE_CONTROLLED : cricket::ROLE_CONTROLLING; channel->SetRole(new_role); } int SendData(cricket::TransportChannel* channel, const char* data, size_t len) { return channel->SendPacket(data, len, 0); } bool CheckDataOnChannel(cricket::TransportChannel* channel, const char* data, int len) { return GetChannelData(channel)->CheckData(data, len); } static const cricket::Candidate* LocalCandidate( cricket::P2PTransportChannel* ch) { return (ch && ch->best_connection()) ? &ch->best_connection()->local_candidate() : NULL; } static const cricket::Candidate* RemoteCandidate( cricket::P2PTransportChannel* ch) { return (ch && ch->best_connection()) ? &ch->best_connection()->remote_candidate() : NULL; } Endpoint* GetEndpoint(cricket::TransportChannel* ch) { if (ep1_.HasChannel(ch)) { return &ep1_; } else if (ep2_.HasChannel(ch)) { return &ep2_; } else { return NULL; } } cricket::P2PTransportChannel* GetRemoteChannel( cricket::TransportChannel* ch) { if (ch == ep1_ch1()) return ep2_ch1(); else if (ch == ep1_ch2()) return ep2_ch2(); else if (ch == ep2_ch1()) return ep1_ch1(); else if (ch == ep2_ch2()) return ep1_ch2(); else return NULL; } std::list<std::string>& GetPacketList(cricket::TransportChannel* ch) { return GetChannelData(ch)->ch_packets_; } private: talk_base::Thread* main_; talk_base::scoped_ptr<talk_base::PhysicalSocketServer> pss_; talk_base::scoped_ptr<talk_base::VirtualSocketServer> vss_; talk_base::scoped_ptr<talk_base::NATSocketServer> nss_; talk_base::scoped_ptr<talk_base::FirewallSocketServer> ss_; talk_base::SocketServerScope ss_scope_; cricket::TestStunServer stun_server_; cricket::TestRelayServer relay_server_; talk_base::SocksProxyServer socks_server1_; talk_base::SocksProxyServer socks_server2_; Endpoint ep1_; Endpoint ep2_; }; // The tests have only a few outcomes, which we predefine. const P2PTransportChannelTestBase::Result P2PTransportChannelTestBase:: kLocalUdpToLocalUdp("local", "udp", "local", "udp", "local", "udp", "local", "udp", 1000); const P2PTransportChannelTestBase::Result P2PTransportChannelTestBase:: kLocalUdpToStunUdp("local", "udp", "stun", "udp", "stun", "udp", "local", "udp", 1000); const P2PTransportChannelTestBase::Result P2PTransportChannelTestBase:: kStunUdpToLocalUdp("stun", "udp", "local", "udp", "local", "udp", "stun", "udp", 1000); const P2PTransportChannelTestBase::Result P2PTransportChannelTestBase:: kStunUdpToStunUdp("stun", "udp", "stun", "udp", "stun", "udp", "stun", "udp", 1000); const P2PTransportChannelTestBase::Result P2PTransportChannelTestBase:: kLocalUdpToRelayUdp("local", "udp", "relay", "udp", "local", "udp", "relay", "udp", 2000); const P2PTransportChannelTestBase::Result P2PTransportChannelTestBase:: kLocalTcpToLocalTcp("local", "tcp", "local", "tcp", "local", "tcp", "local", "tcp", 3000); // Test the matrix of all the connectivity types we expect to see in the wild. // Just test every combination of the configs in the Config enum. 
class P2PTransportChannelTest : public P2PTransportChannelTestBase {
 protected:
  static const Result* kMatrix[NUM_CONFIGS][NUM_CONFIGS];
  void ConfigureEndpoints(Config config1, Config config2) {
    ConfigureEndpoint(0, config1);
    ConfigureEndpoint(1, config2);
  }
  void ConfigureEndpoint(int endpoint, Config config) {
    switch (config) {
      case OPEN:
        AddAddress(endpoint, kPublicAddrs[endpoint]);
        break;
      case NAT_FULL_CONE:
      case NAT_ADDR_RESTRICTED:
      case NAT_PORT_RESTRICTED:
      case NAT_SYMMETRIC:
        AddAddress(endpoint, kPrivateAddrs[endpoint]);
        // Add a single NAT of the desired type
        nat()->AddTranslator(kPublicAddrs[endpoint], kNatAddrs[endpoint],
            static_cast<talk_base::NATType>(config - NAT_FULL_CONE))->
            AddClient(kPrivateAddrs[endpoint]);
        break;
      case NAT_DOUBLE_CONE:
      case NAT_SYMMETRIC_THEN_CONE:
        AddAddress(endpoint, kCascadedPrivateAddrs[endpoint]);
        // Add two cascaded NATs of the desired types
        nat()->AddTranslator(kPublicAddrs[endpoint], kNatAddrs[endpoint],
            (config == NAT_DOUBLE_CONE) ?
                talk_base::NAT_OPEN_CONE : talk_base::NAT_SYMMETRIC)->
            AddTranslator(kPrivateAddrs[endpoint], kCascadedNatAddrs[endpoint],
                talk_base::NAT_OPEN_CONE)->
            AddClient(kCascadedPrivateAddrs[endpoint]);
        break;
      case BLOCK_UDP:
      case BLOCK_UDP_AND_INCOMING_TCP:
      case BLOCK_ALL_BUT_OUTGOING_HTTP:
      case PROXY_HTTPS:
      case PROXY_SOCKS:
        AddAddress(endpoint, kPublicAddrs[endpoint]);
        // Block all UDP
        fw()->AddRule(false, talk_base::FP_UDP, talk_base::FD_ANY,
                      kPublicAddrs[endpoint]);
        if (config == BLOCK_UDP_AND_INCOMING_TCP) {
          // Block TCP inbound to the endpoint
          fw()->AddRule(false, talk_base::FP_TCP, SocketAddress(),
                        kPublicAddrs[endpoint]);
        } else if (config == BLOCK_ALL_BUT_OUTGOING_HTTP) {
          // Block all TCP to/from the endpoint except 80/443 out
          fw()->AddRule(true, talk_base::FP_TCP, kPublicAddrs[endpoint],
                        SocketAddress(talk_base::IPAddress(INADDR_ANY), 80));
          fw()->AddRule(true, talk_base::FP_TCP, kPublicAddrs[endpoint],
                        SocketAddress(talk_base::IPAddress(INADDR_ANY), 443));
          fw()->AddRule(false, talk_base::FP_TCP, talk_base::FD_ANY,
                        kPublicAddrs[endpoint]);
        } else if (config == PROXY_HTTPS) {
          // Block all TCP to/from the endpoint except to the proxy server
          fw()->AddRule(true, talk_base::FP_TCP, kPublicAddrs[endpoint],
                        kHttpsProxyAddrs[endpoint]);
          fw()->AddRule(false, talk_base::FP_TCP, talk_base::FD_ANY,
                        kPublicAddrs[endpoint]);
          SetProxy(endpoint, talk_base::PROXY_HTTPS);
        } else if (config == PROXY_SOCKS) {
          // Block all TCP to/from the endpoint except to the proxy server
          fw()->AddRule(true, talk_base::FP_TCP, kPublicAddrs[endpoint],
                        kSocksProxyAddrs[endpoint]);
          fw()->AddRule(false, talk_base::FP_TCP, talk_base::FD_ANY,
                        kPublicAddrs[endpoint]);
          SetProxy(endpoint, talk_base::PROXY_SOCKS5);
        }
        break;
      default:
        break;
    }
  }
};

// Shorthands for use in the test matrix.
#define LULU &kLocalUdpToLocalUdp
#define LUSU &kLocalUdpToStunUdp
#define SULU &kStunUdpToLocalUdp
#define SUSU &kStunUdpToStunUdp
#define LURU &kLocalUdpToRelayUdp
#define LTLT &kLocalTcpToLocalTcp
// TODO: Enable these once TestRelayServer can accept external TCP.
#define LTRT NULL
#define LSRS NULL

// Test matrix. Originator behavior defined by rows, receiver by columns.
// TODO: Fix NULLs caused by lack of TCP support in NATSocket.
// TODO: Fix NULLs caused by no HTTP proxy support.
// TODO: Rearrange rows/columns from best to worst.
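// For example, kMatrix[OPEN][NAT_FULL_CONE] is LUSU (kLocalUdpToStunUdp):
// an open originator is expected to settle on its local UDP candidate,
// paired with the cone-NATed receiver's STUN-derived UDP candidate.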
const P2PTransportChannelTest::Result* P2PTransportChannelTest::kMatrix[NUM_CONFIGS][NUM_CONFIGS] = { // OPEN CONE ADDR PORT SYMM 2CON SCON !UDP !TCP HTTP PRXH PRXS /*OP*/ {LULU, LUSU, LULU, LULU, LULU, LUSU, LULU, LTLT, LTLT, LSRS, NULL, LTLT}, /*CO*/ {LULU, LUSU, LUSU, SUSU, SUSU, LUSU, SUSU, NULL, NULL, LSRS, NULL, LTRT}, /*AD*/ {LULU, LUSU, LULU, SUSU, SUSU, LUSU, SUSU, NULL, NULL, LSRS, NULL, LTRT}, /*PO*/ {LULU, LUSU, SUSU, SUSU, LURU, LUSU, LURU, NULL, NULL, LSRS, NULL, LTRT}, /*SY*/ {LULU, LUSU, SUSU, LURU, LURU, LUSU, LURU, NULL, NULL, LSRS, NULL, LTRT}, /*2C*/ {LULU, LUSU, LUSU, SUSU, SUSU, LUSU, SUSU, NULL, NULL, LSRS, NULL, LTRT}, /*SC*/ {LULU, LUSU, SUSU, LURU, LURU, LUSU, LURU, NULL, NULL, LSRS, NULL, LTRT}, /*!U*/ {LTLT, NULL, NULL, NULL, NULL, NULL, NULL, LTLT, LTLT, LSRS, NULL, LTRT}, /*!T*/ {LTRT, NULL, NULL, NULL, NULL, NULL, NULL, LTLT, LTRT, LSRS, NULL, LTRT}, /*HT*/ {LSRS, LSRS, LSRS, LSRS, LSRS, LSRS, LSRS, LSRS, LSRS, LSRS, NULL, LSRS}, /*PR*/ {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, /*PR*/ {LTRT, LTRT, LTRT, LTRT, LTRT, LTRT, LTRT, LTRT, LTRT, LSRS, NULL, LTRT}, }; // The actual tests that exercise all the various configurations. // Test names are of the form P2PTransportChannelTest_TestOPENToNAT_FULL_CONE #define P2P_TEST_DECLARATION(x, y, z) \ TEST_F(P2PTransportChannelTest, z##Test##x##To##y) { \ ConfigureEndpoints(x, y); \ if (kMatrix[x][y] != NULL) \ Test(*kMatrix[x][y]); \ else \ LOG(LS_WARNING) << "Not yet implemented"; \ } #define P2P_TEST(x, y) \ P2P_TEST_DECLARATION(x, y,) #define FLAKY_P2P_TEST(x, y) \ P2P_TEST_DECLARATION(x, y, DISABLED_) #define P2P_TEST_SET(x) \ P2P_TEST(x, OPEN) \ P2P_TEST(x, NAT_FULL_CONE) \ P2P_TEST(x, NAT_ADDR_RESTRICTED) \ P2P_TEST(x, NAT_PORT_RESTRICTED) \ P2P_TEST(x, NAT_SYMMETRIC) \ P2P_TEST(x, NAT_DOUBLE_CONE) \ P2P_TEST(x, NAT_SYMMETRIC_THEN_CONE) \ P2P_TEST(x, BLOCK_UDP) \ P2P_TEST(x, BLOCK_UDP_AND_INCOMING_TCP) \ P2P_TEST(x, BLOCK_ALL_BUT_OUTGOING_HTTP) \ P2P_TEST(x, PROXY_HTTPS) \ P2P_TEST(x, PROXY_SOCKS) #define FLAKY_P2P_TEST_SET(x) \ P2P_TEST(x, OPEN) \ P2P_TEST(x, NAT_FULL_CONE) \ FLAKY_P2P_TEST(x, NAT_ADDR_RESTRICTED) \ P2P_TEST(x, NAT_PORT_RESTRICTED) \ P2P_TEST(x, NAT_SYMMETRIC) \ P2P_TEST(x, NAT_DOUBLE_CONE) \ P2P_TEST(x, NAT_SYMMETRIC_THEN_CONE) \ P2P_TEST(x, BLOCK_UDP) \ P2P_TEST(x, BLOCK_UDP_AND_INCOMING_TCP) \ P2P_TEST(x, BLOCK_ALL_BUT_OUTGOING_HTTP) \ P2P_TEST(x, PROXY_HTTPS) \ P2P_TEST(x, PROXY_SOCKS) P2P_TEST_SET(OPEN) P2P_TEST_SET(NAT_FULL_CONE) FLAKY_P2P_TEST_SET(NAT_ADDR_RESTRICTED) FLAKY_P2P_TEST_SET(NAT_PORT_RESTRICTED) FLAKY_P2P_TEST_SET(NAT_SYMMETRIC) P2P_TEST_SET(NAT_DOUBLE_CONE) FLAKY_P2P_TEST_SET(NAT_SYMMETRIC_THEN_CONE) P2P_TEST_SET(BLOCK_UDP) P2P_TEST_SET(BLOCK_UDP_AND_INCOMING_TCP) P2P_TEST_SET(BLOCK_ALL_BUT_OUTGOING_HTTP) P2P_TEST_SET(PROXY_HTTPS) P2P_TEST_SET(PROXY_SOCKS) // Test the operation of GetStats. 
TEST_F(P2PTransportChannelTest, GetStats) {
  ConfigureEndpoints(OPEN, OPEN);
  CreateChannels(1);
  EXPECT_TRUE_WAIT_MARGIN(ep1_ch1()->readable() && ep1_ch1()->writable() &&
                          ep2_ch1()->readable() && ep2_ch1()->writable(),
                          1000, 1000);
  TestSendRecv(1);
  cricket::ConnectionInfos infos;
  ASSERT_TRUE(ep1_ch1()->GetStats(&infos));
  ASSERT_EQ(1U, infos.size());
  EXPECT_TRUE(infos[0].new_connection);
  EXPECT_TRUE(infos[0].best_connection);
  EXPECT_TRUE(infos[0].readable);
  EXPECT_TRUE(infos[0].writable);
  EXPECT_FALSE(infos[0].timeout);
  EXPECT_EQ(10 * 36U, infos[0].sent_total_bytes);
  EXPECT_EQ(10 * 36U, infos[0].recv_total_bytes);
  EXPECT_GT(infos[0].rtt, 0U);
  DestroyChannels();
}

// Test that we properly handle getting a STUN error due to slow signaling.
TEST_F(P2PTransportChannelTest, SlowSignaling) {
  ConfigureEndpoints(OPEN, NAT_SYMMETRIC);
  // Make signaling from the callee take 1000ms, so that the initial STUN pings
  // from the callee beat the signaling, and so the caller responds with an
  // unknown username error. We should just eat that and carry on; mishandling
  // this will instead cause all the callee's connections to be discarded.
  SetSignalingDelay(1, 1000);
  CreateChannels(1);
  const cricket::Connection* best_connection = NULL;
  // Wait until the callee's connections are created.
  WAIT((best_connection = ep2_ch1()->best_connection()) != NULL, 1000);
  // Wait to see if they get culled; they shouldn't.
  WAIT(ep2_ch1()->best_connection() != best_connection, 1000);
  EXPECT_TRUE(ep2_ch1()->best_connection() == best_connection);
  DestroyChannels();
}

// Test that a host behind NAT cannot be reached when incoming_only
// is set to true.
TEST_F(P2PTransportChannelTest, IncomingOnlyBlocked) {
  ConfigureEndpoints(NAT_FULL_CONE, OPEN);
  SetAllocatorFlags(0, kOnlyLocalPorts);
  CreateChannels(1);
  ep1_ch1()->set_incoming_only(true);

  // Pump for 1 second and verify that the channels are not connected.
  talk_base::Thread::Current()->ProcessMessages(1000);

  EXPECT_FALSE(ep1_ch1()->readable());
  EXPECT_FALSE(ep1_ch1()->writable());
  EXPECT_FALSE(ep2_ch1()->readable());
  EXPECT_FALSE(ep2_ch1()->writable());

  DestroyChannels();
}

// Test that a peer behind NAT can connect to a peer that has
// incoming_only flag set.
TEST_F(P2PTransportChannelTest, IncomingOnlyOpen) {
  ConfigureEndpoints(OPEN, NAT_FULL_CONE);
  SetAllocatorFlags(0, kOnlyLocalPorts);
  CreateChannels(1);
  ep1_ch1()->set_incoming_only(true);

  EXPECT_TRUE_WAIT_MARGIN(ep1_ch1() != NULL && ep2_ch1() != NULL &&
                          ep1_ch1()->readable() && ep1_ch1()->writable() &&
                          ep2_ch1()->readable() && ep2_ch1()->writable(),
                          1000, 1000);

  DestroyChannels();
}

// Test what happens when we have 2 users behind the same NAT. This can lead
// to interesting behavior because the STUN server will only give out the
// address of the outermost NAT.
class P2PTransportChannelSameNatTest : public P2PTransportChannelTestBase { protected: void ConfigureEndpoints(Config nat_type, Config config1, Config config2) { ASSERT(nat_type >= NAT_FULL_CONE && nat_type <= NAT_SYMMETRIC); talk_base::NATSocketServer::Translator* outer_nat = nat()->AddTranslator(kPublicAddrs[0], kNatAddrs[0], static_cast<talk_base::NATType>(nat_type - NAT_FULL_CONE)); ConfigureEndpoint(outer_nat, 0, config1); ConfigureEndpoint(outer_nat, 1, config2); } void ConfigureEndpoint(talk_base::NATSocketServer::Translator* nat, int endpoint, Config config) { ASSERT(config <= NAT_SYMMETRIC); if (config == OPEN) { AddAddress(endpoint, kPrivateAddrs[endpoint]); nat->AddClient(kPrivateAddrs[endpoint]); } else { AddAddress(endpoint, kCascadedPrivateAddrs[endpoint]); nat->AddTranslator(kPrivateAddrs[endpoint], kCascadedNatAddrs[endpoint], static_cast<talk_base::NATType>(config - NAT_FULL_CONE))->AddClient( kCascadedPrivateAddrs[endpoint]); } } }; TEST_F(P2PTransportChannelSameNatTest, TestConesBehindSameCone) { ConfigureEndpoints(NAT_FULL_CONE, NAT_FULL_CONE, NAT_FULL_CONE); Test(kLocalUdpToStunUdp); } // Test what happens when we have multiple available pathways. // In the future we will try different RTTs and configs for the different // interfaces, so that we can simulate a user with Ethernet and VPN networks. class P2PTransportChannelMultihomedTest : public P2PTransportChannelTestBase { }; // Test that we can establish connectivity when both peers are multihomed. TEST_F(P2PTransportChannelMultihomedTest, TestBasic) { AddAddress(0, kPublicAddrs[0]); AddAddress(0, kAlternateAddrs[0]); AddAddress(1, kPublicAddrs[1]); AddAddress(1, kAlternateAddrs[1]); Test(kLocalUdpToLocalUdp); } // Test that we can quickly switch links if an interface goes down. TEST_F(P2PTransportChannelMultihomedTest, TestFailover) { AddAddress(0, kPublicAddrs[0]); AddAddress(1, kPublicAddrs[1]); AddAddress(1, kAlternateAddrs[1]); // Use only local ports for simplicity. SetAllocatorFlags(0, kOnlyLocalPorts); SetAllocatorFlags(1, kOnlyLocalPorts); // Create channels and let them go writable, as usual. CreateChannels(1); EXPECT_TRUE_WAIT(ep1_ch1()->readable() && ep1_ch1()->writable() && ep2_ch1()->readable() && ep2_ch1()->writable(), 1000); EXPECT_TRUE( ep1_ch1()->best_connection() && ep2_ch1()->best_connection() && LocalCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[0]) && RemoteCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[1])); // Blackhole any traffic to or from the public addrs. LOG(LS_INFO) << "Failing over..."; fw()->AddRule(false, talk_base::FP_ANY, talk_base::FD_ANY, kPublicAddrs[1]); // We should detect loss of connectivity within 5 seconds or so. EXPECT_TRUE_WAIT(!ep1_ch1()->writable(), 7000); // We should switch over to use the alternate addr immediately // when we lose writability. EXPECT_TRUE_WAIT( ep1_ch1()->best_connection() && ep2_ch1()->best_connection() && LocalCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[0]) && RemoteCandidate(ep1_ch1())->address().EqualIPs(kAlternateAddrs[1]), 3000); DestroyChannels(); } // Test that we can switch links in a coordinated fashion. TEST_F(P2PTransportChannelMultihomedTest, TestDrain) { AddAddress(0, kPublicAddrs[0]); AddAddress(1, kPublicAddrs[1]); // Use only local ports for simplicity. SetAllocatorFlags(0, kOnlyLocalPorts); SetAllocatorFlags(1, kOnlyLocalPorts); // Create channels and let them go writable, as usual. 
  CreateChannels(1);
  EXPECT_TRUE_WAIT(ep1_ch1()->readable() && ep1_ch1()->writable() &&
                   ep2_ch1()->readable() && ep2_ch1()->writable(),
                   1000);
  EXPECT_TRUE(
      ep1_ch1()->best_connection() && ep2_ch1()->best_connection() &&
      LocalCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[0]) &&
      RemoteCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[1]));

  // Remove the public interface, add the alternate interface, and allocate
  // a new generation of candidates for the new interface (via Connect()).
  LOG(LS_INFO) << "Draining...";
  AddAddress(1, kAlternateAddrs[1]);
  RemoveAddress(1, kPublicAddrs[1]);
  ep2_ch1()->Connect();

  // We should switch over to use the alternate address after
  // an exchange of pings.
  EXPECT_TRUE_WAIT(
      ep1_ch1()->best_connection() && ep2_ch1()->best_connection() &&
      LocalCandidate(ep1_ch1())->address().EqualIPs(kPublicAddrs[0]) &&
      RemoteCandidate(ep1_ch1())->address().EqualIPs(kAlternateAddrs[1]),
      3000);

  DestroyChannels();
}

TEST_F(P2PTransportChannelTest, TestBundleAllocatorToBundleAllocator) {
  AddAddress(0, kPublicAddrs[0]);
  AddAddress(1, kPublicAddrs[1]);
  SetAllocatorFlags(0, cricket::PORTALLOCATOR_ENABLE_BUNDLE);
  SetAllocatorFlags(1, cricket::PORTALLOCATOR_ENABLE_BUNDLE);

  CreateChannels(2);

  EXPECT_TRUE_WAIT(ep1_ch1()->readable() && ep1_ch1()->writable() &&
                   ep2_ch1()->readable() && ep2_ch1()->writable(),
                   1000);
  EXPECT_TRUE(ep1_ch1()->best_connection() && ep2_ch1()->best_connection());

  EXPECT_FALSE(ep1_ch2()->readable());
  EXPECT_FALSE(ep1_ch2()->writable());
  EXPECT_FALSE(ep2_ch2()->readable());
  EXPECT_FALSE(ep2_ch2()->writable());

  TestSendRecv(1);  // Only 1 channel is writable per Endpoint.
  DestroyChannels();
}

TEST_F(P2PTransportChannelTest, TestBundleAllocatorToNonBundleAllocator) {
  AddAddress(0, kPublicAddrs[0]);
  AddAddress(1, kPublicAddrs[1]);
  // Enable the BUNDLE flag on one side.
  SetAllocatorFlags(0, cricket::PORTALLOCATOR_ENABLE_BUNDLE);

  CreateChannels(2);

  EXPECT_TRUE_WAIT(ep1_ch1()->readable() && ep1_ch1()->writable() &&
                   ep2_ch1()->readable() && ep2_ch1()->writable(),
                   1000);
  EXPECT_TRUE_WAIT(ep1_ch2()->readable() && ep1_ch2()->writable() &&
                   ep2_ch2()->readable() && ep2_ch2()->writable(),
                   1000);

  EXPECT_TRUE(ep1_ch1()->best_connection() && ep2_ch1()->best_connection());
  EXPECT_TRUE(ep1_ch2()->best_connection() && ep2_ch2()->best_connection());

  TestSendRecv(2);
  DestroyChannels();
}

TEST_F(P2PTransportChannelTest, TestIceRoleConflict) {
  AddAddress(0, kPublicAddrs[0]);
  AddAddress(1, kPublicAddrs[1]);

  SetIceProtocol(0, cricket::ICEPROTO_RFC5245);
  SetTiebreaker(0, kTiebreaker1);  // Default EP1 is in controlling state.

  SetIceProtocol(1, cricket::ICEPROTO_RFC5245);
  SetIceRole(1, cricket::ROLE_CONTROLLING);
  SetTiebreaker(1, kTiebreaker2);

  // Create channels with both channels' roles set to CONTROLLING.
  CreateChannels(1);
  // Since both channels were initiated in the controlling state and channel2
  // has the higher tiebreaker value, channel1 should receive SignalRoleConflict.
  EXPECT_TRUE_WAIT(GetRoleConflict(0), 1000);

  EXPECT_TRUE_WAIT(ep1_ch1()->readable() && ep1_ch1()->writable() &&
                   ep2_ch1()->readable() && ep2_ch1()->writable(),
                   1000);

  EXPECT_TRUE(ep1_ch1()->best_connection() && ep2_ch1()->best_connection());

  TestSendRecv(1);
}<|fim▁end|>
channel->SignalReadPacket.connect(
<|file_name|>IWDeveloper.java<|end_file_name|><|fim▁begin|>package com.idega.development.presentation; import com.idega.idegaweb.IWBundle; import com.idega.idegaweb.IWMainApplication; import com.idega.presentation.IWContext; import com.idega.presentation.Image; import com.idega.presentation.Layer; import com.idega.presentation.PresentationObject; import com.idega.presentation.Table; import com.idega.presentation.text.HorizontalRule; import com.idega.presentation.text.Text; import com.idega.presentation.ui.IFrame; import com.idega.repository.data.RefactorClassRegistry; /** * Title: idega Framework * Description: * Copyright: Copyright (c) 2001 * Company: idega * @author <a href=mailto:"[email protected]">Tryggvi Larusson</a> * @version 1.0 */ public class IWDeveloper extends com.idega.presentation.app.IWApplication { private static final String localizerParameter = "iw_localizer"; private static final String localeswitcherParameter = "iw_localeswitcher"; private static final String bundleCreatorParameter = "iw_bundlecreator"; private static final String bundleComponentManagerParameter = "iw_bundlecompmanager"; private static final String applicationPropertiesParameter = "iw_application_properties_setter"; private static final String bundlesPropertiesParameter = "iw_bundle_properties_setter"; public static final String actionParameter = "iw_developer_action"; public static final String dbPoolStatusViewerParameter = "iw_poolstatus_viewer"; public static final String updateManagerParameter = "iw_update_manager"; public static final String frameName = "iwdv_rightFrame"; public static final String PARAMETER_CLASS_NAME = "iwdv_class_name"; public IWDeveloper() { super("idegaWeb Developer"); add(IWDeveloper.IWDevPage.class); super.setResizable(true); super.setScrollbar(true); super.setScrolling(1, true); super.setWidth(800); super.setHeight(600); //super.setOnLoad("moveTo(0,0);"); } public static class IWDevPage extends com.idega.presentation.ui.Window { public IWDevPage() { this.setStatus(true); } <|fim▁hole|> private Table mainTable; private Table objectTable; private IFrame rightFrame; private int count = 1; public void main(IWContext iwc) throws Exception { IWBundle iwbCore = getBundle(iwc); if (iwc.isIE()) { getParentPage().setBackgroundColor("#B0B29D"); } Layer topLayer = new Layer(Layer.DIV); topLayer.setZIndex(3); topLayer.setPositionType(Layer.FIXED); topLayer.setTopPosition(0); topLayer.setLeftPosition(0); topLayer.setBackgroundColor("#0E2456"); topLayer.setWidth(Table.HUNDRED_PERCENT); topLayer.setHeight(25); add(topLayer); Table headerTable = new Table(); headerTable.setCellpadding(0); headerTable.setCellspacing(0); headerTable.setWidth(Table.HUNDRED_PERCENT); headerTable.setAlignment(2,1,Table.HORIZONTAL_ALIGN_RIGHT); topLayer.add(headerTable); Image idegaweb = iwbCore.getImage("/editorwindow/idegaweb.gif","idegaWeb"); headerTable.add(idegaweb,1,1); Text adminTitle = new Text("idegaWeb Developer"); adminTitle.setStyleAttribute("color:#FFFFFF;font-family:Arial,Helvetica,sans-serif;font-size:12px;font-weight:bold;margin-right:5px;"); headerTable.add(adminTitle,2,1); Layer leftLayer = new Layer(Layer.DIV); leftLayer.setZIndex(2); leftLayer.setPositionType(Layer.FIXED); leftLayer.setTopPosition(25); leftLayer.setLeftPosition(0); leftLayer.setPadding(5); leftLayer.setBackgroundColor("#B0B29D"); leftLayer.setWidth(180); leftLayer.setHeight(Table.HUNDRED_PERCENT); add(leftLayer); DeveloperList list = new DeveloperList(); leftLayer.add(list); Layer rightLayer = new Layer(Layer.DIV); 
rightLayer.setZIndex(1); rightLayer.setPositionType(Layer.ABSOLUTE); rightLayer.setTopPosition(25); rightLayer.setPadding(5); if (iwc.isIE()) { rightLayer.setBackgroundColor("#FFFFFF"); rightLayer.setWidth(Table.HUNDRED_PERCENT); rightLayer.setHeight(Table.HUNDRED_PERCENT); rightLayer.setLeftPosition(180); } else { rightLayer.setLeftPosition(190); } add(rightLayer); if (iwc.isParameterSet(PARAMETER_CLASS_NAME)) { String className = IWMainApplication.decryptClassName(iwc.getParameter(PARAMETER_CLASS_NAME)); PresentationObject obj = (PresentationObject) RefactorClassRegistry.getInstance().newInstance(className, this.getClass()); rightLayer.add(obj); } else { rightLayer.add(new Localizer()); } } } public static Table getTitleTable(String displayString, Image image) { Table titleTable = new Table(1, 2); titleTable.setCellpadding(0); titleTable.setCellspacing(0); titleTable.setWidth("100%"); Text headline = getText(displayString); headline.setFontSize(Text.FONT_SIZE_14_HTML_4); headline.setFontColor("#0E2456"); if (image != null) { image.setHorizontalSpacing(5); titleTable.add(image, 1, 1); } titleTable.add(headline, 1, 1); titleTable.add(new HorizontalRule("100%", 2, "color: #FF9310", true), 1, 2); return titleTable; } public static Table getTitleTable(String displayString) { return getTitleTable(displayString, null); } public static Table getTitleTable(Class classToUse, Image image) { return getTitleTable(classToUse.getName().substring(classToUse.getName().lastIndexOf(".") + 1), image); } public static Table getTitleTable(Class classToUse) { return getTitleTable(classToUse, null); } public static Text getText(String text) { Text T = new Text(text); T.setBold(); T.setFontFace(Text.FONT_FACE_VERDANA); T.setFontSize(Text.FONT_SIZE_10_HTML_2); return T; } }<|fim▁end|>
<|file_name|>0007_auto_20160530_1233.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-30 12:33 from __future__ import unicode_literals<|fim▁hole|>from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('todo', '0006_auto_20160530_1210'), ] operations = [ migrations.AlterField( model_name='todo', name='category', field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='todo.Category'), preserve_default=False, ), ]<|fim▁end|>
<|file_name|>tabsscontrollers.js<|end_file_name|><|fim▁begin|>angular.module('tabss.controllers', []) .controller('TabssCtrl', function($scope,$state) { $scope.gotoTabs =function(){ $state.go('tabs') } $scope.gotoNearby =function(){ $state.go('app.nearby') } $scope.gotoEditProfile =function(){ $state.go('editprofile')<|fim▁hole|> $state.go('qrcode') } $scope.gotoHome =function(){ $state.go('app.home') } $scope.gotoAddFavorite =function(){ $state.go('addfavorite') } $scope.gotoFavoriteMenu1 =function(){ $state.go('favoritemenu1') } $scope.gotoCoffeeShop1 =function(){ $state.go('coffeeshop1') } $scope.gotoNewsPromotion =function(){ $state.go('newspromotion') } $scope.gotoNews =function(){ $state.go('news') } })<|fim▁end|>
} $scope.gotoQRCode =function(){
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15677

//! Simple getopt alternative.
//!
//! Construct a vector of options, either by using `reqopt`, `optopt`, and `optflag`
//! or by building them from components yourself, and pass them to `getopts`,
//! along with a vector of actual arguments (not including `argv[0]`). You'll
//! either get a failure code back, or a match. You'll have to verify whether
//! the number of 'free' arguments in the match is what you expect. Use `opt_*`
//! accessors to get argument values out of the matches object.
//!
//! Single-character options are expected to appear on the command line with a
//! single preceding dash; multiple-character options are expected to be
//! preceded by two dashes. Options that expect an argument accept their
//! argument following either a space or an equals sign. Single-character
//! options don't require the space.
//!
//! # Example
//!
//! The following example shows simple command line parsing for an application
//! that requires an input file to be specified, accepts an optional output
//! file name following `-o`, and accepts both `-h` and `--help` as optional flags.
//!
//! ```{.rust}
//! extern crate getopts;
//! use getopts::{optopt,optflag,getopts,OptGroup};
//! use std::os;
//!
//! fn do_work(inp: &str, out: Option<String>) {
//!     println!("{}", inp);
//!     match out {
//!         Some(x) => println!("{}", x),
//!         None => println!("No Output"),
//!     }
//! }
//!
//! fn print_usage(program: &str, _opts: &[OptGroup]) {
//!     println!("Usage: {} [options]", program);
//!     println!("-o\t\tOutput");
//!     println!("-h --help\tUsage");
//! }
//!
//! fn main() {
//!     let args: Vec<String> = os::args();
//!
//!     let program = args[0].clone();
//!
//!     let opts = [
//!         optopt("o", "", "set output file name", "NAME"),
//!         optflag("h", "help", "print this help menu")
//!     ];
//!     let matches = match getopts(args.tail(), opts) {
//!         Ok(m) => { m }
//!         Err(f) => { fail!(f.to_string()) }
//!     };
//!     if matches.opt_present("h") {
//!         print_usage(program.as_slice(), opts);
//!         return;
//!     }
//!     let output = matches.opt_str("o");
//!     let input = if !matches.free.is_empty() {
//!         matches.free[0].clone()
//!     } else {
//!         print_usage(program.as_slice(), opts);
//!         return;
//!     };
//!     do_work(input.as_slice(), output);
//! }
//! ```

#![crate_name = "getopts"]
#![experimental]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/master/",
       html_playground_url = "http://play.rust-lang.org/")]
#![feature(globs, phase)]
#![feature(import_shadowing)]
#![deny(missing_doc)]

#[cfg(test)] extern crate debug;
#[cfg(test)] #[phase(plugin, link)] extern crate log;

use std::cmp::PartialEq;
use std::fmt;
use std::result::{Err, Ok};
use std::result;
use std::string::String;

/// Name of an option. Either a string or a single char.
#[deriving(Clone, PartialEq, Eq)] pub enum Name { /// A string representing the long name of an option. /// For example: "help" Long(String), /// A char representing the short name of an option. /// For example: 'h' Short(char), } /// Describes whether an option has an argument. #[deriving(Clone, PartialEq, Eq)] pub enum HasArg { /// The option requires an argument. Yes, /// The option takes no argument. No, /// The option argument is optional. Maybe, } /// Describes how often an option may occur. #[deriving(Clone, PartialEq, Eq)] pub enum Occur { /// The option occurs once. Req, /// The option occurs at most once. Optional, /// The option occurs zero or more times. Multi, } /// A description of a possible option. #[deriving(Clone, PartialEq, Eq)] pub struct Opt { /// Name of the option pub name: Name, /// Whether it has an argument pub hasarg: HasArg, /// How often it can occur pub occur: Occur, /// Which options it aliases pub aliases: Vec<Opt>, } /// One group of options, e.g., both -h and --help, along with /// their shared description and properties. #[deriving(Clone, PartialEq, Eq)] pub struct OptGroup { /// Short Name of the `OptGroup` pub short_name: String, /// Long Name of the `OptGroup` pub long_name: String, /// Hint pub hint: String, /// Description pub desc: String, /// Whether it has an argument pub hasarg: HasArg, /// How often it can occur pub occur: Occur } /// Describes whether an option is given at all or has a value. #[deriving(Clone, PartialEq, Eq)] enum Optval { Val(String), Given, } /// The result of checking command line arguments. Contains a vector /// of matches and a vector of free strings. #[deriving(Clone, PartialEq, Eq)] pub struct Matches { /// Options that matched opts: Vec<Opt>, /// Values of the Options that matched vals: Vec<Vec<Optval>>, /// Free string fragments pub free: Vec<String>, } /// The type returned when the command line does not conform to the /// expected format. Use the `Show` implementation to output detailed /// information. #[deriving(Clone, PartialEq, Eq)] pub enum Fail_ { /// The option requires an argument but none was passed. ArgumentMissing(String), /// The passed option is not declared among the possible options. UnrecognizedOption(String), /// A required option is not present. OptionMissing(String), /// A single occurrence option is being used multiple times. OptionDuplicated(String), /// There's an argument being passed to a non-argument option. UnexpectedArgument(String), } /// The type of failure that occurred. #[deriving(PartialEq, Eq)] #[allow(missing_doc)] pub enum FailType { ArgumentMissing_, UnrecognizedOption_, OptionMissing_, OptionDuplicated_, UnexpectedArgument_, } /// The result of parsing a command line with a set of options. pub type Result = result::Result<Matches, Fail_>; impl Name { fn from_str(nm: &str) -> Name { if nm.len() == 1u { Short(nm.char_at(0u)) } else { Long(nm.to_string()) } } fn to_string(&self) -> String { match *self { Short(ch) => ch.to_string(), Long(ref s) => s.to_string() } } } impl OptGroup { /// Translate OptGroup into Opt. /// (Both short and long names correspond to different Opts). pub fn long_to_short(&self) -> Opt { let OptGroup { short_name: short_name, long_name: long_name, hasarg: hasarg, occur: occur, .. 
} = (*self).clone(); match (short_name.len(), long_name.len()) { (0,0) => fail!("this long-format option was given no name"), (0,_) => Opt { name: Long((long_name)), hasarg: hasarg, occur: occur, aliases: Vec::new() }, (1,0) => Opt { name: Short(short_name.as_slice().char_at(0)), hasarg: hasarg, occur: occur, aliases: Vec::new() }, (1,_) => Opt { name: Long((long_name)), hasarg: hasarg, occur: occur, aliases: vec!( Opt { name: Short(short_name.as_slice().char_at(0)), hasarg: hasarg, occur: occur, aliases: Vec::new() } ) }, (_,_) => fail!("something is wrong with the long-form opt") } } } impl Matches { fn opt_vals(&self, nm: &str) -> Vec<Optval> { match find_opt(self.opts.as_slice(), Name::from_str(nm)) { Some(id) => self.vals[id].clone(), None => fail!("No option '{}' defined", nm) } } fn opt_val(&self, nm: &str) -> Option<Optval> { let vals = self.opt_vals(nm); if vals.is_empty() { None } else { Some(vals[0].clone()) } } /// Returns true if an option was matched. pub fn opt_present(&self, nm: &str) -> bool { !self.opt_vals(nm).is_empty() } /// Returns the number of times an option was matched. pub fn opt_count(&self, nm: &str) -> uint { self.opt_vals(nm).len() } /// Returns true if any of several options were matched. pub fn opts_present(&self, names: &[String]) -> bool { for nm in names.iter() { match find_opt(self.opts.as_slice(), Name::from_str(nm.as_slice())) { Some(id) if !self.vals[id].is_empty() => return true, _ => (), }; } false } /// Returns the string argument supplied to one of several matching options or `None`. pub fn opts_str(&self, names: &[String]) -> Option<String> { for nm in names.iter() { match self.opt_val(nm.as_slice()) { Some(Val(ref s)) => return Some(s.clone()), _ => () } } None } /// Returns a vector of the arguments provided to all matches of the given /// option. /// /// Used when an option accepts multiple values. pub fn opt_strs(&self, nm: &str) -> Vec<String> { let mut acc: Vec<String> = Vec::new(); let r = self.opt_vals(nm); for v in r.iter() { match *v { Val(ref s) => acc.push((*s).clone()), _ => () } } acc } /// Returns the string argument supplied to a matching option or `None`. pub fn opt_str(&self, nm: &str) -> Option<String> { let vals = self.opt_vals(nm); if vals.is_empty() { return None::<String>; } match vals[0] { Val(ref s) => Some((*s).clone()), _ => None } } /// Returns the matching string, a default, or none. /// /// Returns none if the option was not present, `def` if the option was /// present but no argument was provided, and the argument if the option was /// present and an argument was provided. pub fn opt_default(&self, nm: &str, def: &str) -> Option<String> { let vals = self.opt_vals(nm); if vals.is_empty() { return None; } match vals[0] { Val(ref s) => Some((*s).clone()), _ => Some(def.to_string()) } } } fn is_arg(arg: &str) -> bool { arg.len() > 1 && arg.as_bytes()[0] == b'-' } fn find_opt(opts: &[Opt], nm: Name) -> Option<uint> { // Search main options. let pos = opts.iter().position(|opt| opt.name == nm); if pos.is_some() { return pos } // Search in aliases. for candidate in opts.iter() { if candidate.aliases.iter().position(|opt| opt.name == nm).is_some() { return opts.iter().position(|opt| opt.name == candidate.name); } } None } /// Create a long option that is required and takes an argument. 
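///
/// # Example
///
/// A short sketch (the option name and argument value here are illustrative,
/// mirroring `test_reqopt` in this crate's test module):
///
/// ```{.rust}
/// extern crate getopts;
/// use getopts::{reqopt,getopts};
///
/// fn main() {
///     // "test" is a required option taking an argument.
///     let opts = [
///         reqopt("t", "test", "testing", "TEST")
///     ];
///     let args = vec!("--test=20".to_string());
///     let matches = match getopts(args.as_slice(), opts) {
///         Ok(m) => m,
///         Err(f) => fail!(f.to_string())
///     };
///     assert_eq!(matches.opt_str("test").unwrap(), "20".to_string());
/// }
/// ```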
pub fn reqopt(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: hint.to_string(), desc: desc.to_string(), hasarg: Yes, occur: Req } } /// Create a long option that is optional and takes an argument. pub fn optopt(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: hint.to_string(), desc: desc.to_string(), hasarg: Yes, occur: Optional } } /// Create a long option that is optional and does not take an argument. pub fn optflag(short_name: &str, long_name: &str, desc: &str) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: "".to_string(), desc: desc.to_string(), hasarg: No, occur: Optional } } /// Create a long option that can occur more than once and does not /// take an argument. pub fn optflagmulti(short_name: &str, long_name: &str, desc: &str) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: "".to_string(), desc: desc.to_string(), hasarg: No, occur: Multi } } /// Create a long option that is optional and takes an optional argument. pub fn optflagopt(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: hint.to_string(), desc: desc.to_string(), hasarg: Maybe, occur: Optional } } /// Create a long option that is optional, takes an argument, and may occur /// multiple times. pub fn optmulti(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: hint.to_string(), desc: desc.to_string(), hasarg: Yes, occur: Multi } } /// Create a generic option group, stating all parameters explicitly pub fn opt(short_name: &str, long_name: &str, desc: &str, hint: &str, hasarg: HasArg, occur: Occur) -> OptGroup { let len = short_name.len(); assert!(len == 1 || len == 0); OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), hint: hint.to_string(), desc: desc.to_string(), hasarg: hasarg, occur: occur } } impl Fail_ { /// Convert a `Fail_` enum into an error string. #[deprecated="use `Show` (`{}` format specifier)"] pub fn to_err_msg(self) -> String { self.to_string() } } impl fmt::Show for Fail_ { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ArgumentMissing(ref nm) => { write!(f, "Argument to option '{}' missing.", *nm) } UnrecognizedOption(ref nm) => { write!(f, "Unrecognized option: '{}'.", *nm) } OptionMissing(ref nm) => { write!(f, "Required option '{}' missing.", *nm) } OptionDuplicated(ref nm) => { write!(f, "Option '{}' given more than once.", *nm) } UnexpectedArgument(ref nm) => { write!(f, "Option '{}' does not take an argument.", *nm) } } } } /// Parse command line arguments according to the provided options. /// /// On success returns `Ok(Matches)`. Use methods such as `opt_present` /// `opt_str`, etc. to interrogate results. 
Returns `Err(Fail_)` on /// failure: use the `Show` implementation of `Fail_` to display /// information about it. pub fn getopts(args: &[String], optgrps: &[OptGroup]) -> Result { let opts: Vec<Opt> = optgrps.iter().map(|x| x.long_to_short()).collect(); let n_opts = opts.len(); fn f(_x: uint) -> Vec<Optval> { return Vec::new(); } let mut vals = Vec::from_fn(n_opts, f); let mut free: Vec<String> = Vec::new(); let l = args.len(); let mut i = 0; while i < l { let cur = args[i].clone(); let curlen = cur.len(); if !is_arg(cur.as_slice()) { free.push(cur); } else if cur.as_slice() == "--" { let mut j = i + 1; while j < l { free.push(args[j].clone()); j += 1; } break; } else { let mut names; let mut i_arg = None; if cur.as_bytes()[1] == b'-' { let tail = cur.as_slice().slice(2, curlen); let tail_eq: Vec<&str> = tail.split('=').collect(); if tail_eq.len() <= 1 { names = vec!(Long(tail.to_string())); } else { names = vec!(Long(tail_eq[0].to_string())); i_arg = Some(tail_eq[1].to_string()); } } else { let mut j = 1; names = Vec::new(); while j < curlen { let range = cur.as_slice().char_range_at(j); let opt = Short(range.ch); /* In a series of potential options (eg. -aheJ), if we see one which takes an argument, we assume all subsequent characters make up the argument. This allows options such as -L/usr/local/lib/foo to be interpreted correctly */ let opt_id = match find_opt(opts.as_slice(), opt.clone()) { Some(id) => id, None => return Err(UnrecognizedOption(opt.to_string())) }; names.push(opt); let arg_follows = match opts[opt_id].hasarg { Yes | Maybe => true, No => false }; if arg_follows && range.next < curlen { i_arg = Some(cur.as_slice() .slice(range.next, curlen).to_string()); break; } j = range.next; } } let mut name_pos = 0; for nm in names.iter() { name_pos += 1; let optid = match find_opt(opts.as_slice(), (*nm).clone()) { Some(id) => id, None => return Err(UnrecognizedOption(nm.to_string())) }; match opts[optid].hasarg { No => { if name_pos == names.len() && !i_arg.is_none() { return Err(UnexpectedArgument(nm.to_string())); } vals.get_mut(optid).push(Given); } Maybe => { if !i_arg.is_none() { vals.get_mut(optid) .push(Val((i_arg.clone()) .unwrap())); } else if name_pos < names.len() || i + 1 == l || is_arg(args[i + 1].as_slice()) { vals.get_mut(optid).push(Given); } else { i += 1; vals.get_mut(optid).push(Val(args[i].clone())); } } Yes => { if !i_arg.is_none() { vals.get_mut(optid).push(Val(i_arg.clone().unwrap())); } else if i + 1 == l { return Err(ArgumentMissing(nm.to_string())); } else { i += 1; vals.get_mut(optid).push(Val(args[i].clone())); } } } } } i += 1; } i = 0u; while i < n_opts { let n = vals[i].len(); let occ = opts[i].occur; if occ == Req { if n == 0 { return Err(OptionMissing(opts[i].name.to_string())); } } if occ != Multi { if n > 1 { return Err(OptionDuplicated(opts[i].name.to_string())); } } i += 1; } Ok(Matches { opts: opts, vals: vals, free: free }) } /// Derive a usage message from a set of long options. 
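///
/// # Example
///
/// A sketch with made-up option names; the brief line is printed first,
/// followed by one aligned row per option:
///
/// ```{.rust}
/// extern crate getopts;
/// use getopts::{optopt,optflag,usage};
///
/// fn main() {
///     let opts = [
///         optopt("o", "", "set output file name", "NAME"),
///         optflag("h", "help", "print this help menu")
///     ];
///     print!("{}", usage("Usage: program [options]", opts));
/// }
/// ```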
pub fn usage(brief: &str, opts: &[OptGroup]) -> String {
    let desc_sep = format!("\n{}", " ".repeat(24));

    let mut rows = opts.iter().map(|optref| {
        let OptGroup{short_name: short_name,
                     long_name: long_name,
                     hint: hint,
                     desc: desc,
                     hasarg: hasarg,
                     ..} = (*optref).clone();

        let mut row = " ".repeat(4);

        // short option
        match short_name.len() {
            0 => {}
            1 => {
                row.push('-');
                row.push_str(short_name.as_slice());
                row.push(' ');
            }
            _ => fail!("the short name should only be 1 ascii char long"),
        }

        // long option
        match long_name.len() {
            0 => {}
            _ => {
                row.push_str("--");
                row.push_str(long_name.as_slice());
                row.push(' ');
            }
        }

        // arg
        match hasarg {
            No => {}
            Yes => row.push_str(hint.as_slice()),
            Maybe => {
                row.push('[');
                row.push_str(hint.as_slice());
                row.push(']');
            }
        }

        // FIXME: #5516 should be graphemes not codepoints
        // here we just need to indent the start of the description
        let rowlen = row.as_slice().char_len();
        if rowlen < 24 {
            for _ in range(0, 24 - rowlen) {
                row.push(' ');
            }
        } else {
            row.push_str(desc_sep.as_slice())
        }

        // Normalize desc to contain words separated by one space character
        let mut desc_normalized_whitespace = String::new();
        for word in desc.as_slice().words() {
            desc_normalized_whitespace.push_str(word);
            desc_normalized_whitespace.push(' ');
        }

        // FIXME: #5516 should be graphemes not codepoints
        let mut desc_rows = Vec::new();
        each_split_within(desc_normalized_whitespace.as_slice(), 54, |substr| {
            desc_rows.push(substr.to_string());
            true
        });

        // FIXME: #5516 should be graphemes not codepoints
        // wrapped description
        row.push_str(desc_rows.connect(desc_sep.as_slice()).as_slice());

        row
    });

    format!("{}\n\nOptions:\n{}\n", brief,
            rows.collect::<Vec<String>>().connect("\n"))
}

fn format_option(opt: &OptGroup) -> String {
    let mut line = String::new();

    if opt.occur != Req {
        line.push('[');
    }

    // Use short_name if possible, but fall back to long_name.
    if opt.short_name.len() > 0 {
        line.push('-');
        line.push_str(opt.short_name.as_slice());
    } else {
        line.push_str("--");
        line.push_str(opt.long_name.as_slice());
    }

    if opt.hasarg != No {
        line.push(' ');
        if opt.hasarg == Maybe {
            line.push('[');
        }
        line.push_str(opt.hint.as_slice());
        if opt.hasarg == Maybe {
            line.push(']');
        }
    }

    if opt.occur != Req {
        line.push(']');
    }
    if opt.occur == Multi {
        line.push_str("..");
    }

    line
}

/// Derive a short one-line usage summary from a set of long options.
pub fn short_usage(program_name: &str, opts: &[OptGroup]) -> String {
    let mut line = format!("Usage: {} ", program_name);
    line.push_str(opts.iter()
                      .map(format_option)
                      .collect::<Vec<String>>()
                      .connect(" ")
                      .as_slice());
    line
}

/// Splits a string into substrings with possibly internal whitespace,
/// each of them at most `lim` bytes long. The substrings have leading and trailing
/// whitespace removed, and are only cut at whitespace boundaries.
///
/// Note: Function was moved here from `std::str` because this module is the only place that
/// uses it, and because it was too specific for a general string function.
///
/// # Failure:
///
/// Fails during iteration if the string contains a non-whitespace
/// sequence longer than the limit.
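// Illustrative behavior, taken from test_split_within below: with lim == 15,
// "\nMary had a little lamb\nLittle lamb\n" is emitted as the three pieces
// "Mary had a", "little lamb" and "Little lamb".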
fn each_split_within<'a>(ss: &'a str, lim: uint, it: |&'a str| -> bool) -> bool { // Just for fun, let's write this as a state machine: enum SplitWithinState { A, // leading whitespace, initial state B, // words C, // internal and trailing whitespace } enum Whitespace { Ws, // current char is whitespace Cr // current char is not whitespace } enum LengthLimit { UnderLim, // current char makes current substring still fit in limit OverLim // current char makes current substring no longer fit in limit } let mut slice_start = 0; let mut last_start = 0; let mut last_end = 0; let mut state = A; let mut fake_i = ss.len(); let mut lim = lim; let mut cont = true; // if the limit is larger than the string, lower it to save cycles if lim >= fake_i { lim = fake_i; } let machine: |&mut bool, (uint, char)| -> bool = |cont, (i, c)| { let whitespace = if ::std::char::is_whitespace(c) { Ws } else { Cr }; let limit = if (i - slice_start + 1) <= lim { UnderLim } else { OverLim }; state = match (state, whitespace, limit) { (A, Ws, _) => { A } (A, Cr, _) => { slice_start = i; last_start = i; B } (B, Cr, UnderLim) => { B } (B, Cr, OverLim) if (i - last_start + 1) > lim => fail!("word starting with {} longer than limit!", ss.slice(last_start, i + 1)), (B, Cr, OverLim) => { *cont = it(ss.slice(slice_start, last_end)); slice_start = last_start; B } (B, Ws, UnderLim) => { last_end = i; C } (B, Ws, OverLim) => { last_end = i; *cont = it(ss.slice(slice_start, last_end)); A } (C, Cr, UnderLim) => { last_start = i; B } (C, Cr, OverLim) => { *cont = it(ss.slice(slice_start, last_end)); slice_start = i; last_start = i; last_end = i; B } (C, Ws, OverLim) => { *cont = it(ss.slice(slice_start, last_end)); A } (C, Ws, UnderLim) => { C } }; *cont }; ss.char_indices().all(|x| machine(&mut cont, x)); // Let the automaton 'run out' by supplying trailing whitespace while cont && match state { B | C => true, A => false } { machine(&mut cont, (fake_i, ' ')); fake_i += 1; } return cont; } #[test] fn test_split_within() { fn t(s: &str, i: uint, u: &[String]) { let mut v = Vec::new(); each_split_within(s, i, |s| { v.push(s.to_string()); true }); assert!(v.iter().zip(u.iter()).all(|(a,b)| a == b)); } t("", 0, []); t("", 15, []); t("hello", 15, ["hello".to_string()]); t("\nMary had a little lamb\nLittle lamb\n", 15, [ "Mary had a".to_string(), "little lamb".to_string(), "Little lamb".to_string() ]); t("\nMary had a little lamb\nLittle lamb\n", ::std::uint::MAX, ["Mary had a little lamb\nLittle lamb".to_string()]); } #[cfg(test)] mod tests { use super::*; use std::result::{Err, Ok}; use std::result; fn check_fail_type(f: Fail_, ft: FailType) { match f { ArgumentMissing(_) => assert!(ft == ArgumentMissing_), UnrecognizedOption(_) => assert!(ft == UnrecognizedOption_), OptionMissing(_) => assert!(ft == OptionMissing_), OptionDuplicated(_) => assert!(ft == OptionDuplicated_), UnexpectedArgument(_) => assert!(ft == UnexpectedArgument_) } } // Tests for reqopt #[test] fn test_reqopt() { let long_args = vec!("--test=20".to_string()); let opts = vec!(reqopt("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(m.opt_present("test")); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!(m.opt_present("t")); assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); } _ => { fail!("test_reqopt failed (long arg)"); } } let short_args = vec!("-t".to_string(), "20".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Ok(ref m) => { 
assert!((m.opt_present("test"))); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!((m.opt_present("t"))); assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); } _ => { fail!("test_reqopt failed (short arg)"); } } } #[test] fn test_reqopt_missing() { let args = vec!("blah".to_string()); let opts = vec!(reqopt("t", "test", "testing", "TEST")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, OptionMissing_), _ => fail!() } } #[test] fn test_reqopt_no_arg() { let long_args = vec!("--test".to_string()); let opts = vec!(reqopt("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, ArgumentMissing_), _ => fail!() } let short_args = vec!("-t".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Err(f) => check_fail_type(f, ArgumentMissing_), _ => fail!() } } #[test] fn test_reqopt_multi() { let args = vec!("--test=20".to_string(), "-t".to_string(), "30".to_string()); let opts = vec!(reqopt("t", "test", "testing", "TEST")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, OptionDuplicated_), _ => fail!() } } // Tests for optopt #[test] fn test_optopt() { let long_args = vec!("--test=20".to_string()); let opts = vec!(optopt("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(m.opt_present("test")); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!((m.opt_present("t"))); assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); } _ => fail!() } let short_args = vec!("-t".to_string(), "20".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Ok(ref m) => { assert!((m.opt_present("test"))); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!((m.opt_present("t"))); assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); } _ => fail!() } } #[test] fn test_optopt_missing() { let args = vec!("blah".to_string()); let opts = vec!(optopt("t", "test", "testing", "TEST")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(!m.opt_present("test")); assert!(!m.opt_present("t")); } _ => fail!() } } #[test] fn test_optopt_no_arg() { let long_args = vec!("--test".to_string()); let opts = vec!(optopt("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, ArgumentMissing_), _ => fail!() } let short_args = vec!("-t".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Err(f) => check_fail_type(f, ArgumentMissing_), _ => fail!() } } #[test] fn test_optopt_multi() { let args = vec!("--test=20".to_string(), "-t".to_string(), "30".to_string()); let opts = vec!(optopt("t", "test", "testing", "TEST")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, OptionDuplicated_), _ => fail!() } } // Tests for optflag #[test] fn test_optflag() { let long_args = vec!("--test".to_string()); let opts = vec!(optflag("t", "test", "testing")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(m.opt_present("test")); assert!(m.opt_present("t")); } _ => fail!() } let short_args = vec!("-t".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Ok(ref m) => { assert!(m.opt_present("test")); assert!(m.opt_present("t")); } _ => fail!() } } #[test] fn test_optflag_missing() { let args = vec!("blah".to_string()); let 
opts = vec!(optflag("t", "test", "testing")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(!m.opt_present("test")); assert!(!m.opt_present("t")); } _ => fail!() } } #[test] fn test_optflag_long_arg() { let args = vec!("--test=20".to_string()); let opts = vec!(optflag("t", "test", "testing")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Err(f) => { check_fail_type(f, UnexpectedArgument_); } _ => fail!() } } #[test] fn test_optflag_multi() { let args = vec!("--test".to_string(), "-t".to_string()); let opts = vec!(optflag("t", "test", "testing")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, OptionDuplicated_), _ => fail!() } } #[test] fn test_optflag_short_arg() { let args = vec!("-t".to_string(), "20".to_string()); let opts = vec!(optflag("t", "test", "testing")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { // The next variable after the flag is just a free argument assert!(m.free[0] == "20".to_string()); } _ => fail!() } } // Tests for optflagmulti #[test] fn test_optflagmulti_short1() { let args = vec!("-v".to_string()); let opts = vec!(optflagmulti("v", "verbose", "verbosity")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert_eq!(m.opt_count("v"), 1); } _ => fail!() } } #[test] fn test_optflagmulti_short2a() { let args = vec!("-v".to_string(), "-v".to_string()); let opts = vec!(optflagmulti("v", "verbose", "verbosity")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert_eq!(m.opt_count("v"), 2); } _ => fail!() } } #[test] fn test_optflagmulti_short2b() { let args = vec!("-vv".to_string()); let opts = vec!(optflagmulti("v", "verbose", "verbosity")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert_eq!(m.opt_count("v"), 2); } _ => fail!() } } #[test] fn test_optflagmulti_long1() { let args = vec!("--verbose".to_string()); let opts = vec!(optflagmulti("v", "verbose", "verbosity")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert_eq!(m.opt_count("verbose"), 1); } _ => fail!() } } #[test] fn test_optflagmulti_long2() { let args = vec!("--verbose".to_string(), "--verbose".to_string()); let opts = vec!(optflagmulti("v", "verbose", "verbosity")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert_eq!(m.opt_count("verbose"), 2); } _ => fail!() } } #[test] fn test_optflagmulti_mix() { let args = vec!("--verbose".to_string(), "-v".to_string(), "-vv".to_string(), "verbose".to_string()); let opts = vec!(optflagmulti("v", "verbose", "verbosity")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert_eq!(m.opt_count("verbose"), 4); assert_eq!(m.opt_count("v"), 4); } _ => fail!() } } // Tests for optmulti #[test] fn test_optmulti() { let long_args = vec!("--test=20".to_string()); let opts = vec!(optmulti("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!((m.opt_present("test"))); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!((m.opt_present("t"))); assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); } _ => fail!() } let short_args = vec!("-t".to_string(), "20".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Ok(ref m) => { assert!((m.opt_present("test"))); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!((m.opt_present("t"))); 
assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); } _ => fail!() } } #[test] fn test_optmulti_missing() { let args = vec!("blah".to_string()); let opts = vec!(optmulti("t", "test", "testing", "TEST")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(!m.opt_present("test")); assert!(!m.opt_present("t")); } _ => fail!() } } #[test] fn test_optmulti_no_arg() { let long_args = vec!("--test".to_string()); let opts = vec!(optmulti("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, ArgumentMissing_), _ => fail!() } let short_args = vec!("-t".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Err(f) => check_fail_type(f, ArgumentMissing_), _ => fail!() } } #[test] fn test_optmulti_multi() { let args = vec!("--test=20".to_string(), "-t".to_string(), "30".to_string()); let opts = vec!(optmulti("t", "test", "testing", "TEST")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(m.opt_present("test")); assert_eq!(m.opt_str("test").unwrap(), "20".to_string()); assert!(m.opt_present("t")); assert_eq!(m.opt_str("t").unwrap(), "20".to_string()); let pair = m.opt_strs("test"); assert!(pair[0] == "20".to_string()); assert!(pair[1] == "30".to_string()); } _ => fail!() } } #[test] fn test_unrecognized_option() { let long_args = vec!("--untest".to_string()); let opts = vec!(optmulti("t", "test", "testing", "TEST")); let rs = getopts(long_args.as_slice(), opts.as_slice()); match rs { Err(f) => check_fail_type(f, UnrecognizedOption_), _ => fail!() } let short_args = vec!("-u".to_string()); match getopts(short_args.as_slice(), opts.as_slice()) { Err(f) => check_fail_type(f, UnrecognizedOption_), _ => fail!() } } #[test] fn test_combined() { let args = vec!("prog".to_string(), "free1".to_string(), "-s".to_string(), "20".to_string(), "free2".to_string(), "--flag".to_string(), "--long=30".to_string(), "-f".to_string(), "-m".to_string(), "40".to_string(), "-m".to_string(), "50".to_string(), "-n".to_string(), "-A B".to_string(), "-n".to_string(), "-60 70".to_string()); let opts = vec!(optopt("s", "something", "something", "SOMETHING"), optflag("", "flag", "a flag"), reqopt("", "long", "hi", "LONG"), optflag("f", "", "another flag"), optmulti("m", "", "mmmmmm", "YUM"), optmulti("n", "", "nothing", "NOTHING"), optopt("", "notpresent", "nothing to see here", "NOPE")); let rs = getopts(args.as_slice(), opts.as_slice()); match rs { Ok(ref m) => { assert!(m.free[0] == "prog".to_string()); assert!(m.free[1] == "free1".to_string()); assert_eq!(m.opt_str("s").unwrap(), "20".to_string()); assert!(m.free[2] == "free2".to_string()); assert!((m.opt_present("flag"))); assert_eq!(m.opt_str("long").unwrap(), "30".to_string()); assert!((m.opt_present("f"))); let pair = m.opt_strs("m"); assert!(pair[0] == "40".to_string()); assert!(pair[1] == "50".to_string()); let pair = m.opt_strs("n"); assert!(pair[0] == "-A B".to_string()); assert!(pair[1] == "-60 70".to_string()); assert!((!m.opt_present("notpresent"))); } _ => fail!() } } #[test] fn test_multi() { let opts = vec!(optopt("e", "", "encrypt", "ENCRYPT"), optopt("", "encrypt", "encrypt", "ENCRYPT"), optopt("f", "", "flag", "FLAG")); let args_single = vec!("-e".to_string(), "foo".to_string()); let matches_single = &match getopts(args_single.as_slice(), opts.as_slice()) { result::Ok(m) => m, result::Err(_) => fail!() }; assert!(matches_single.opts_present(["e".to_string()])); 
assert!(matches_single.opts_present(["encrypt".to_string(), "e".to_string()])); assert!(matches_single.opts_present(["e".to_string(), "encrypt".to_string()])); assert!(!matches_single.opts_present(["encrypt".to_string()])); assert!(!matches_single.opts_present(["thing".to_string()])); assert!(!matches_single.opts_present([])); assert_eq!(matches_single.opts_str(["e".to_string()]).unwrap(), "foo".to_string()); assert_eq!(matches_single.opts_str(["e".to_string(), "encrypt".to_string()]).unwrap(), "foo".to_string()); assert_eq!(matches_single.opts_str(["encrypt".to_string(), "e".to_string()]).unwrap(), "foo".to_string()); let args_both = vec!("-e".to_string(), "foo".to_string(), "--encrypt".to_string(), "foo".to_string()); let matches_both = &match getopts(args_both.as_slice(), opts.as_slice()) { result::Ok(m) => m, result::Err(_) => fail!() }; assert!(matches_both.opts_present(["e".to_string()])); assert!(matches_both.opts_present(["encrypt".to_string()])); assert!(matches_both.opts_present(["encrypt".to_string(), "e".to_string()])); assert!(matches_both.opts_present(["e".to_string(), "encrypt".to_string()])); assert!(!matches_both.opts_present(["f".to_string()])); assert!(!matches_both.opts_present(["thing".to_string()])); assert!(!matches_both.opts_present([])); assert_eq!(matches_both.opts_str(["e".to_string()]).unwrap(), "foo".to_string()); assert_eq!(matches_both.opts_str(["encrypt".to_string()]).unwrap(), "foo".to_string()); assert_eq!(matches_both.opts_str(["e".to_string(), "encrypt".to_string()]).unwrap(), "foo".to_string()); assert_eq!(matches_both.opts_str(["encrypt".to_string(), "e".to_string()]).unwrap(), "foo".to_string()); } #[test] fn test_nospace() { let args = vec!("-Lfoo".to_string(), "-M.".to_string()); let opts = vec!(optmulti("L", "", "library directory", "LIB"), optmulti("M", "", "something", "MMMM")); let matches = &match getopts(args.as_slice(), opts.as_slice()) { result::Ok(m) => m, result::Err(_) => fail!() }; assert!(matches.opts_present(["L".to_string()])); assert_eq!(matches.opts_str(["L".to_string()]).unwrap(), "foo".to_string()); assert!(matches.opts_present(["M".to_string()])); assert_eq!(matches.opts_str(["M".to_string()]).unwrap(), ".".to_string()); } #[test] fn test_nospace_conflict() { let args = vec!("-vvLverbose".to_string(), "-v".to_string() ); let opts = vec!(optmulti("L", "", "library directory", "LIB"), optflagmulti("v", "verbose", "Verbose")); let matches = &match getopts(args.as_slice(), opts.as_slice()) { result::Ok(m) => m, result::Err(e) => fail!( "{}", e ) }; assert!(matches.opts_present(["L".to_string()])); assert_eq!(matches.opts_str(["L".to_string()]).unwrap(), "verbose".to_string()); assert!(matches.opts_present(["v".to_string()])); assert_eq!(3, matches.opt_count("v")); } #[test] fn test_long_to_short() { let mut short = Opt { name: Long("banana".to_string()), hasarg: Yes, occur: Req, aliases: Vec::new(), }; short.aliases = vec!(Opt { name: Short('b'), hasarg: Yes, occur: Req, aliases: Vec::new() }); let verbose = reqopt("b", "banana", "some bananas", "VAL"); assert!(verbose.long_to_short() == short); } #[test] fn test_aliases_long_and_short() {<|fim▁hole|> let args = vec!("-a".to_string(), "--apple".to_string(), "-a".to_string()); let matches = getopts(args.as_slice(), opts.as_slice()).unwrap(); assert_eq!(3, matches.opt_count("a")); assert_eq!(3, matches.opt_count("apple")); } #[test] fn test_usage() { let optgroups = vec!( reqopt("b", "banana", "Desc", "VAL"), optopt("a", "012345678901234567890123456789", "Desc", "VAL"), optflag("k", 
"kiwi", "Desc"), optflagopt("p", "", "Desc", "VAL"), optmulti("l", "", "Desc", "VAL")); let expected = "Usage: fruits Options: -b --banana VAL Desc -a --012345678901234567890123456789 VAL Desc -k --kiwi Desc -p [VAL] Desc -l VAL Desc ".to_string(); let generated_usage = usage("Usage: fruits", optgroups.as_slice()); debug!("expected: <<{}>>", expected); debug!("generated: <<{}>>", generated_usage); assert_eq!(generated_usage, expected); } #[test] fn test_usage_description_wrapping() { // indentation should be 24 spaces // lines wrap after 78: or rather descriptions wrap after 54 let optgroups = vec!( optflag("k", "kiwi", "This is a long description which won't be wrapped..+.."), // 54 optflag("a", "apple", "This is a long description which _will_ be wrapped..+..")); let expected = "Usage: fruits Options: -k --kiwi This is a long description which won't be wrapped..+.. -a --apple This is a long description which _will_ be wrapped..+.. ".to_string(); let usage = usage("Usage: fruits", optgroups.as_slice()); debug!("expected: <<{}>>", expected); debug!("generated: <<{}>>", usage); assert!(usage == expected) } #[test] fn test_usage_description_multibyte_handling() { let optgroups = vec!( optflag("k", "k\u2013w\u2013", "The word kiwi is normally spelled with two i's"), optflag("a", "apple", "This \u201Cdescription\u201D has some characters that could \ confuse the line wrapping; an apple costs 0.51€ in some parts of Europe.")); let expected = "Usage: fruits Options: -k --k–w– The word kiwi is normally spelled with two i's -a --apple This “description” has some characters that could confuse the line wrapping; an apple costs 0.51€ in some parts of Europe. ".to_string(); let usage = usage("Usage: fruits", optgroups.as_slice()); debug!("expected: <<{}>>", expected); debug!("generated: <<{}>>", usage); assert!(usage == expected) } #[test] fn test_short_usage() { let optgroups = vec!( reqopt("b", "banana", "Desc", "VAL"), optopt("a", "012345678901234567890123456789", "Desc", "VAL"), optflag("k", "kiwi", "Desc"), optflagopt("p", "", "Desc", "VAL"), optmulti("l", "", "Desc", "VAL")); let expected = "Usage: fruits -b VAL [-a VAL] [-k] [-p [VAL]] [-l VAL]..".to_string(); let generated_usage = short_usage("fruits", optgroups.as_slice()); debug!("expected: <<{}>>", expected); debug!("generated: <<{}>>", generated_usage); assert_eq!(generated_usage, expected); } }<|fim▁end|>
let opts = vec!( optflagmulti("a", "apple", "Desc"));
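The record above tests libgetopts aliasing: one flag registered under both a short and a long name, where occurrences of either spelling accumulate in a single counter. A rough Python analogue of that semantics, with argparse standing in for the old Rust API (illustrative only, not part of the dataset):

```python
# Illustrative only -- argparse stands in for libgetopts' optflagmulti.
import argparse

parser = argparse.ArgumentParser(prog="fruits")
# One flag with two aliases; each occurrence of either spelling bumps one counter.
parser.add_argument("-a", "--apple", action="count", default=0, help="Desc")

ns = parser.parse_args(["-a", "--apple", "-a"])
assert ns.apple == 3   # mirrors assert_eq!(3, matches.opt_count("a"))
```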
<|file_name|>index_spec.ts<|end_file_name|><|fim▁begin|>/**<|fim▁hole|> * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { SchematicTestRunner, UnitTestTree } from '@angular-devkit/schematics/testing'; import { Schema as ApplicationOptions } from '../application/schema'; import { Schema as WorkspaceOptions } from '../workspace/schema'; import { Schema as ServiceWorkerOptions } from './schema'; describe('Service Worker Schematic', () => { const schematicRunner = new SchematicTestRunner( '@schematics/angular', require.resolve('../collection.json'), ); const defaultOptions: ServiceWorkerOptions = { project: 'bar', target: 'build', }; let appTree: UnitTestTree; const workspaceOptions: WorkspaceOptions = { name: 'workspace', newProjectRoot: 'projects', version: '6.0.0', }; const appOptions: ApplicationOptions = { name: 'bar', inlineStyle: false, inlineTemplate: false, routing: false, skipTests: false, skipPackageJson: false, }; beforeEach(async () => { appTree = await schematicRunner.runSchematicAsync('workspace', workspaceOptions).toPromise(); appTree = await schematicRunner .runSchematicAsync('application', appOptions, appTree) .toPromise(); }); it('should add `serviceWorker` option to build target', async () => { const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const configText = tree.readContent('/angular.json'); const buildConfig = JSON.parse(configText).projects.bar.architect.build; expect(buildConfig.options.serviceWorker).toBeTrue(); }); it('should add the necessary dependency', async () => { const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/package.json'); const pkg = JSON.parse(pkgText); const version = pkg.dependencies['@angular/core']; expect(pkg.dependencies['@angular/service-worker']).toEqual(version); }); it('should import ServiceWorkerModule', async () => { const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/src/app/app.module.ts'); expect(pkgText).toMatch(/import \{ ServiceWorkerModule \} from '@angular\/service-worker'/); }); it('should import environment', async () => { const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/src/app/app.module.ts'); expect(pkgText).toMatch(/import \{ environment \} from '\.\.\/environments\/environment'/); }); it('should add the SW import to the NgModule imports', async () => { const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/src/app/app.module.ts'); expect(pkgText).toMatch( new RegExp( "(\\s+)ServiceWorkerModule\\.register\\('ngsw-worker\\.js', \\{\\n" + '\\1 enabled: environment\\.production,\\n' + '\\1 // Register the ServiceWorker as soon as the app is stable\\n' + '\\1 // or after 30 seconds \\(whichever comes first\\)\\.\\n' + "\\1 registrationStrategy: 'registerWhenStable:30000'\\n" + '\\1}\\)', ), ); }); it('should add the SW import to the NgModule imports with aliased environment', async () => { const moduleContent = ` import { BrowserModule } from '@angular/platform-browser'; import { NgModule } from '@angular/core'; import { AppComponent } from './app.component'; import 
{ environment as env } from '../environments/environment'; @NgModule({ declarations: [ AppComponent ], imports: [ BrowserModule ], bootstrap: [AppComponent] }) export class AppModule {} `; appTree.overwrite('/projects/bar/src/app/app.module.ts', moduleContent); const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/src/app/app.module.ts'); expect(pkgText).toMatch( new RegExp( "(\\s+)ServiceWorkerModule\\.register\\('ngsw-worker\\.js', \\{\\n" + '\\1 enabled: env\\.production,\\n' + '\\1 // Register the ServiceWorker as soon as the app is stable\\n' + '\\1 // or after 30 seconds \\(whichever comes first\\)\\.\\n' + "\\1 registrationStrategy: 'registerWhenStable:30000'\\n" + '\\1}\\)', ), ); }); it('should add the SW import to the NgModule imports with existing environment', async () => { const moduleContent = ` import { BrowserModule } from '@angular/platform-browser'; import { NgModule } from '@angular/core'; import { AppComponent } from './app.component'; import { environment } from '../environments/environment'; @NgModule({ declarations: [ AppComponent ], imports: [ BrowserModule ], bootstrap: [AppComponent] }) export class AppModule {} `; appTree.overwrite('/projects/bar/src/app/app.module.ts', moduleContent); const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/src/app/app.module.ts'); expect(pkgText).toMatch( new RegExp( "(\\s+)ServiceWorkerModule\\.register\\('ngsw-worker\\.js', \\{\\n" + '\\1 enabled: environment\\.production,\\n' + '\\1 // Register the ServiceWorker as soon as the app is stable\\n' + '\\1 // or after 30 seconds \\(whichever comes first\\)\\.\\n' + "\\1 registrationStrategy: 'registerWhenStable:30000'\\n" + '\\1}\\)', ), ); }); it('should put the ngsw-config.json file in the project root', async () => { const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const path = '/projects/bar/ngsw-config.json'; expect(tree.exists(path)).toEqual(true); const { projects } = JSON.parse(tree.readContent('/angular.json')); expect(projects.bar.architect.build.options.ngswConfigPath).toBe( 'projects/bar/ngsw-config.json', ); }); it('should add $schema in ngsw-config.json with correct relative path', async () => { const pathToNgswConfigSchema = 'node_modules/@angular/service-worker/config/schema.json'; const name = 'foo'; const rootAppOptions: ApplicationOptions = { ...appOptions, name, projectRoot: '', }; const rootSWOptions: ServiceWorkerOptions = { ...defaultOptions, project: name, }; const rootAppTree = await schematicRunner .runSchematicAsync('application', rootAppOptions, appTree) .toPromise(); const treeInRoot = await schematicRunner .runSchematicAsync('service-worker', rootSWOptions, rootAppTree) .toPromise(); const pkgTextInRoot = treeInRoot.readContent('/ngsw-config.json'); const configInRoot = JSON.parse(pkgTextInRoot); expect(configInRoot.$schema).toBe(`./${pathToNgswConfigSchema}`); const treeNotInRoot = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgTextNotInRoot = treeNotInRoot.readContent('/projects/bar/ngsw-config.json'); const configNotInRoot = JSON.parse(pkgTextNotInRoot); expect(configNotInRoot.$schema).toBe(`../../${pathToNgswConfigSchema}`); }); it('should add root assets RegExp', async () => { const tree = await schematicRunner 
.runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/ngsw-config.json'); const config = JSON.parse(pkgText); expect(config.assetGroups[1].resources.files).toContain( '/*.(svg|cur|jpg|jpeg|png|apng|webp|avif|gif|otf|ttf|woff|woff2)', ); }); it('should add resourcesOutputPath to root assets when specified', async () => { const config = JSON.parse(appTree.readContent('/angular.json')); config.projects.bar.architect.build.options.resourcesOutputPath = 'outDir'; appTree.overwrite('/angular.json', JSON.stringify(config)); const tree = await schematicRunner .runSchematicAsync('service-worker', defaultOptions, appTree) .toPromise(); const pkgText = tree.readContent('/projects/bar/ngsw-config.json'); const ngswConfig = JSON.parse(pkgText); expect(ngswConfig.assetGroups[1].resources.files).toContain( '/outDir/*.(svg|cur|jpg|jpeg|png|apng|webp|avif|gif|otf|ttf|woff|woff2)', ); }); it('should generate ngsw-config.json in root when the application is at root level', async () => { const name = 'foo'; const rootAppOptions: ApplicationOptions = { ...appOptions, name, projectRoot: '', }; const rootSWOptions: ServiceWorkerOptions = { ...defaultOptions, project: name, }; let tree = await schematicRunner .runSchematicAsync('application', rootAppOptions, appTree) .toPromise(); tree = await schematicRunner .runSchematicAsync('service-worker', rootSWOptions, tree) .toPromise(); expect(tree.exists('/ngsw-config.json')).toBe(true); const { projects } = JSON.parse(tree.readContent('/angular.json')); expect(projects.foo.architect.build.options.ngswConfigPath).toBe('ngsw-config.json'); }); });<|fim▁end|>
* @license * Copyright Google LLC All Rights Reserved.
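The `$schema` test in the spec above expects a relative path from the generated ngsw-config.json back to node_modules: `./...` for a root-level app and `../../...` for one under `projects/bar`. The same computation sketched with Python's os.path.relpath (POSIX path separators assumed; the schematic itself prepends the leading `./` in the root case):

```python
# Illustrative sketch of the relative-$schema expectation, POSIX paths assumed.
import os.path

schema = "node_modules/@angular/service-worker/config/schema.json"

assert os.path.relpath(schema, ".") == schema                        # app at workspace root
assert os.path.relpath(schema, "projects/bar") == "../../" + schema  # app under projects/bar
```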
<|file_name|>r3-p2-divide-an-island.cpp<|end_file_name|><|fim▁begin|>/***************************************************************/ /** **/ /** Leonardo Haddad nº 7295361 **/ /** Desafios de Programação - Round 3 Professora Cris **/ /** Problema 2 - Divide an Island! Curso: BCC **/ /** **/ /***************************************************************/ #include <cstdio> #include <cstring> #include <cmath> #include <algorithm> #define MAX_DIST 6000 #define debug_off 1 #define error 1e-9 using namespace std; int xA, yA, xB, yB, xC, yC; int xAtoB, yAtoB, xBtoC, yBtoC, xCtoA, yCtoA; double abLength, bcLength, caLength, fullLength, abPercentage, bcPercentage, caPercentage; double totalArea; #ifdef debug_on /* print arguments */ void printArgs () { printf("Arguments:\n"); printf(" A = %d %d;\n",xA,yA); printf(" B = %d %d;\n",xB,yB); printf(" C = %d %d;\n",xC,yC); } #endif <|fim▁hole|>{ if (normalizedValue < abPercentage) { return xA + xAtoB * (normalizedValue / abPercentage); } else if (normalizedValue < abPercentage + bcPercentage) { return xB + xBtoC * ((normalizedValue - abPercentage) / bcPercentage); } else { return xC + xCtoA * ((normalizedValue - abPercentage - bcPercentage) / caPercentage); } } double normalizedToY (double normalizedValue) { if (normalizedValue < abPercentage) { return yA + yAtoB * (normalizedValue / abPercentage); } else if (normalizedValue < abPercentage + bcPercentage) { return yB + yBtoC * ((normalizedValue - abPercentage) / bcPercentage); } else { return yC + yCtoA * ((normalizedValue - abPercentage - bcPercentage) / caPercentage); } } double areaFromPointsViaHeronFormula (double pxA, double pyA, double pxB, double pyB, double pxC, double pyC) { double abL = sqrt((pxB-pxA)*(pxB-pxA)+(pyB-pyA)*(pyB-pyA)); double bcL = sqrt((pxC-pxB)*(pxC-pxB)+(pyC-pyB)*(pyC-pyB)); double caL = sqrt((pxA-pxC)*(pxA-pxC)+(pyA-pyC)*(pyA-pyC)); double semiPer = (abL + bcL + caL) / 2; return sqrt(semiPer * (semiPer - abL) * (semiPer - bcL) * (semiPer - caL)); } double areaFromPointsViaCrossProduct (double pxA, double pyA, double pxB, double pyB, double pxC, double pyC) { double abL = sqrt((pxB-pxA)*(pxB-pxA)+(pyB-pyA)*(pyB-pyA)); double bcL = sqrt((pxC-pxB)*(pxC-pxB)+(pyC-pyB)*(pyC-pyB)); double caL = sqrt((pxA-pxC)*(pxA-pxC)+(pyA-pyC)*(pyA-pyC)); double angle = acos((abL*abL+caL*caL-bcL*bcL) / (2*abL*caL)); return (0.5 * abL * caL * sin(angle)); } double areaFromPoints (double pxA, double pyA, double pxB, double pyB, double pxC, double pyC) { //return areaFromPointsViaHeronFormula(pxA,pyA,pxB,pyB,pxC,pyC); return areaFromPointsViaCrossProduct(pxA,pyA,pxB,pyB,pxC,pyC); } double normalizedToArea (double normalizedValue) { double otherEdge = normalizedValue + 0.5; if (otherEdge > 1.0) otherEdge = otherEdge - 1.0; if (normalizedValue == 0) { return areaFromPoints(xB,yB,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else if (normalizedValue < abPercentage) { if (otherEdge < abPercentage+bcPercentage) { return areaFromPoints(xB,yB,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else if (otherEdge <= abPercentage+bcPercentage+caPercentage) { return areaFromPoints(xA,yA,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else printf("error (if1)!\n"); } else if (normalizedValue == abPercentage) { return 
areaFromPoints(xC,yC,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else if (normalizedValue < abPercentage+bcPercentage) { if (otherEdge < abPercentage) { return areaFromPoints(xB,yB,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else if (otherEdge <= abPercentage+bcPercentage+caPercentage) { return areaFromPoints(xC,yC,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else printf("error (if2)!\n"); } else if (normalizedValue == abPercentage+bcPercentage) { return areaFromPoints(xA,yA,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else if (normalizedValue < abPercentage+bcPercentage+caPercentage) { if (otherEdge < abPercentage) { return areaFromPoints(xA,yA,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else if (otherEdge < abPercentage+bcPercentage) { return areaFromPoints(xC,yC,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else printf("error (if3)!\n"); } else if (normalizedValue == abPercentage+bcPercentage+caPercentage) { return areaFromPoints(xB,yB,normalizedToX(normalizedValue),normalizedToY(normalizedValue),normalizedToX(otherEdge),normalizedToY(otherEdge)); } else printf("error (final)!\n"); return -1; } void bruteForceSolution () { int precision; double attempt, dist, from, to, area, normalizedValueA, normalizedValueB; totalArea = areaFromPoints(xA,yA,xB,yB,xC,yC); #ifdef debug_on printf(" totalArea: %.15f\n",totalArea); #endif dist = MAX_DIST; normalizedValueA = -1; precision = 4; from = 0; to = 0.5; while (precision < 15) { attempt = from; while (attempt < to) { area = normalizedToArea(attempt); if (abs(area - totalArea/2) < dist) { dist = abs(area - totalArea/2); normalizedValueA = attempt; } #ifdef debug_on printf(" %.15f: (%.2f %.2f) ; area = %.15f\n",attempt,normalizedToX(attempt),normalizedToY(attempt),area); #endif attempt = attempt + pow(10,-precision); } from = normalizedValueA - pow(10,-precision); to = normalizedValueA + pow(10,-precision); if (from < 0) from = 0; if (to > 0.5) to = 0.5; precision++; } normalizedValueB = normalizedValueA + 0.5; if (normalizedValueB >= 1.0) normalizedValueB = normalizedValueB - 1.0; if (dist < error) printf("YES\n%.15f %.15f\n%.15f %.15f\n", normalizedToX(normalizedValueA), normalizedToY(normalizedValueA), normalizedToX(normalizedValueB), normalizedToY(normalizedValueB)); else printf("NO\n"); } int main () { double semiPerimeter, linearFactorA, linearFactorB, ansXa, ansYa, ansXb, ansYb; bool finished; #ifdef debug_on printf("\n| P2 - Divide an Island! 
|\n"); #endif /* read program arguments */ scanf("%d %d %d %d %d %d", &xA, &yA, &xB, &yB, &xC, &yC); #ifdef debug_on printArgs(); printf("\nProblem Solution:\n"); #endif /* solution */ xAtoB = xB - xA; yAtoB = yB - yA; xBtoC = xC - xB; yBtoC = yC - yB; xCtoA = xA - xC; yCtoA = yA - yC; abLength = sqrt(xAtoB*xAtoB + yAtoB*yAtoB); bcLength = sqrt(xBtoC*xBtoC + yBtoC*yBtoC); caLength = sqrt(xCtoA*xCtoA + yCtoA*yCtoA); fullLength = abLength + bcLength + caLength; semiPerimeter = fullLength / 2; abPercentage = abLength / fullLength; bcPercentage = bcLength / fullLength; caPercentage = caLength / fullLength; //bruteForceSolution(); finished = false; linearFactorA = (semiPerimeter - sqrt(semiPerimeter * semiPerimeter - 2 * abLength * bcLength)) / 2; linearFactorB = (semiPerimeter + sqrt(semiPerimeter * semiPerimeter - 2 * abLength * bcLength)) / 2; if(linearFactorA <= abLength + error && linearFactorB <= bcLength + error) { ansXa = xB - xAtoB * (linearFactorA / abLength); ansYa = yB - yAtoB * (linearFactorA / abLength); ansXb = xB + xBtoC * (linearFactorB / bcLength); ansYb = yB + yBtoC * (linearFactorB / bcLength); printf("YES\n%.15f %.15f\n%.15f %.15f\n", ansXa, ansYa, ansXb, ansYb); finished = true; } if(!finished && linearFactorB <= abLength + error && linearFactorA <= bcLength + error) { ansXa = xB - xAtoB * (linearFactorB / abLength); ansYa = yB - yAtoB * (linearFactorB / abLength); ansXb = xB + xBtoC * (linearFactorA / bcLength); ansYb = yB + yBtoC * (linearFactorA / bcLength); printf("YES\n%.15f %.15f\n%.15f %.15f\n", ansXa, ansYa, ansXb, ansYb); finished = true; } linearFactorA = (semiPerimeter - sqrt(semiPerimeter * semiPerimeter - 2 * bcLength * caLength)) / 2; linearFactorB = (semiPerimeter + sqrt(semiPerimeter * semiPerimeter - 2 * bcLength * caLength)) / 2; if(!finished && linearFactorA <= bcLength + error && linearFactorB <= caLength + error) { ansXa = xC - xBtoC * (linearFactorA / bcLength); ansYa = yC - yBtoC * (linearFactorA / bcLength); ansXb = xC + xCtoA * (linearFactorB / caLength); ansYb = yC + yCtoA * (linearFactorB / caLength); printf("YES\n%.15f %.15f\n%.15f %.15f\n", ansXa, ansYa, ansXb, ansYb); finished = true; } if(!finished && linearFactorB <= bcLength + error && linearFactorA <= caLength + error) { ansXa = xC - xBtoC * (linearFactorB / bcLength); ansYa = yC - yBtoC * (linearFactorB / bcLength); ansXb = xC + xCtoA * (linearFactorA / caLength); ansYb = yC + yCtoA * (linearFactorA / caLength); printf("YES\n%.15f %.15f\n%.15f %.15f\n", ansXa, ansYa, ansXb, ansYb); finished = true; } linearFactorA = (semiPerimeter - sqrt(semiPerimeter * semiPerimeter - 2 * caLength * abLength)) / 2; linearFactorB = (semiPerimeter + sqrt(semiPerimeter * semiPerimeter - 2 * caLength * abLength)) / 2; if(!finished && linearFactorA <= caLength + error && linearFactorB <= abLength + error) { ansXa = xA - xCtoA * (linearFactorA / caLength); ansYa = yA - yCtoA * (linearFactorA / caLength); ansXb = xA + xAtoB * (linearFactorB / abLength); ansYb = yA + yAtoB * (linearFactorB / abLength); printf("YES\n%.15f %.15f\n%.15f %.15f\n", ansXa, ansYa, ansXb, ansYb); finished = true; } if(!finished && linearFactorB <= caLength + error && linearFactorA <= abLength + error) { ansXa = xA - xCtoA * (linearFactorB / caLength); ansYa = yA - yCtoA * (linearFactorB / caLength); ansXb = xA + xAtoB * (linearFactorA / abLength); ansYb = yA + yAtoB * (linearFactorA / abLength); printf("YES\n%.15f %.15f\n%.15f %.15f\n", ansXa, ansYa, ansXb, ansYb); finished = true; } if (!finished) { printf("NO\n"); } #ifdef debug_on 
printf("\n--------------------\n"); #endif return 0; } /* 1647. Divide an Island! Time limit: 1.0 second Memory limit: 64 MB Url: http://acm.timus.ru/problem.aspx?space=1&num=1647 A desert island Robinson Crusoe and his companion Friday live on has a shape of a non-degenerate triangle which vertices are points (x1, y1), (x2, y2), (x3, y3). Once Robinson and Friday fell aboard and decided to divide the island into two equal parts by choosing two points on the island coast and connecting them with a line segment. These parts were to have the same area and shore length. Robinson failed to choose these points. Can you do it for him? Input: The only line of the input contains space-separated integers x1, y1, x2, y2, x3, y3, not exceeding 2000 in absolute value. Output: If there is a line segment ST, which divides the island into two parts of the same area and shore length, output “YES” on the first line of the output, S coordinates on the second line, and T coordinates of the third line. S and T should be located on the island shore. Coordinates should be accurate within 10−9. If there is no such line segment, output “NO” on a single line. Samples: Input: 0 0 10 0 0 10 Output: YES 0 0 5 5 Input: 0 3 4 0 3 4 Output: YES 1.741248277008306 3.580416092336102 3.445803840397070 0.415647119702198 */<|fim▁end|>
double normalizedToX (double normalizedValue)
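The closed form in main() above follows from two constraints. Equal shore length makes the two cut distances p and q, measured along the two sides from their shared vertex, satisfy p + q = s (the semiperimeter); equal area measured from that vertex gives p * q = side1 * side2 / 2. So p and q are the roots of t^2 - s*t + side1*side2/2 = 0, which is exactly linearFactorA/linearFactorB. A quick numeric check of those relations on the first sample triangle (two sides of length 10 meeting at the right angle), not part of the dataset:

```python
# Illustrative check of the quadratic; only Vieta's relations are asserted.
import math

def cut_points(side1, side2, s):
    disc = s * s - 2.0 * side1 * side2
    if disc < 0:
        return None                      # this pair of sides admits no bisecting cut
    r = math.sqrt(disc)
    return (s - r) / 2.0, (s + r) / 2.0

s = (10 + math.hypot(10, 10) + 10) / 2.0   # triangle (0,0) (10,0) (0,10)
p, q = cut_points(10, 10, s)
assert math.isclose(p + q, s)
assert math.isclose(p * q, 10 * 10 / 2)    # each half gets area 25 of the total 50
```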
<|file_name|>WiselyConfiguration.java<|end_file_name|><|fim▁begin|>package cn.aezo.spring.base.annotation.combineannotation; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.Configuration; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Created by smalle on 2017/6/11. */ @Target(ElementType.TYPE)<|fim▁hole|>@Configuration @ComponentScan public @interface WiselyConfiguration { String[] value() default {}; }<|fim▁end|>
@Retention(RetentionPolicy.RUNTIME) @Documented
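The record above builds a Spring "combined annotation": @WiselyConfiguration carries @Configuration and @ComponentScan, plus the retention metadata supplied in the completion, so one marker implies several. Python has no meta-annotations; the closest everyday idiom is composing class decorators, sketched here with invented stand-ins (none of these names come from the record):

```python
# Illustrative only -- configuration/component_scan are invented stand-ins.
def compose(*decorators):
    def combined(cls):
        for deco in reversed(decorators):   # apply bottom-up, like stacked decorators
            cls = deco(cls)
        return cls
    return combined

def configuration(cls):
    cls.is_configuration = True
    return cls

def component_scan(cls):
    cls.scan_packages = True
    return cls

wisely_configuration = compose(configuration, component_scan)

@wisely_configuration
class AppConfig: ...

assert AppConfig.is_configuration and AppConfig.scan_packages
```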
<|file_name|>32-bridge-in.py<|end_file_name|><|fim▁begin|># This example is designed to be paired with example file 31-bridge-out.py # Run the two with DIFFERENT DEVICE TOKENS. # (They can be either in same "project" or separate projects as set at phone. Just use different tokens.) # This "in" bridge receives data directly from other RPi. # Our display shows incoming messages. # Our LED on gpio 21 is controlled by button at other end. import gpiozero as GPIO from PiBlynk import Blynk from mytoken import * blynk = Blynk(token2) # <<<<<<<<<<<<<<<<<<<< USE DIFFERENT TOKED FROM OTHER END !!! <|fim▁hole|># gpio (incoming) write def gpioOut_h(val, pin, gpioObj): gpioObj.value = val # control the LED print("Incoming GPIO OUT command:", pin, val) # set up the RPi LED or other outputs and connect to generic gpioOut function above ledR = GPIO.LED(21) # gpiozero led objects blynk.add_digital_hw_pin(21, None, gpioOut_h, ledR) #----------------------------------------- # Listen for anything coming in V61. Just print it def virt_in_h(val, pin, st): print("Incoming on VP:", pin, val) blynk.add_virtual_pin(61, write=virt_in_h) # we place a LISTEN for incoming writes on V61 def cnct_cb(): print ("Connected: ") print("Waiting for incoming messages ...") blynk.on_connect(cnct_cb) ###################################################################################### blynk.run() ###################################################################################### #At APP: # Nothing<|fim▁end|>
#-----------------------------------------------
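The suffix in the record above wires callbacks to pins (add_digital_hw_pin, add_virtual_pin) and lets blynk.run() dispatch incoming writes. PiBlynk's real internals are not shown in the record; a toy registry just makes the callback shape, (value, pin, state), explicit:

```python
# Illustrative only -- not the PiBlynk implementation.
class PinRegistry:
    def __init__(self):
        self._virtual = {}

    def add_virtual_pin(self, pin, write=None):
        self._virtual[pin] = write

    def dispatch_write(self, pin, value):
        handler = self._virtual.get(pin)
        if handler:
            handler(value, pin, None)       # same (val, pin, st) shape as virt_in_h

reg = PinRegistry()
reg.add_virtual_pin(61, write=lambda val, pin, st: print("Incoming on VP:", pin, val))
reg.dispatch_write(61, "-60 70")
```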
<|file_name|>opt.rs<|end_file_name|><|fim▁begin|>// ------------------------------------------------------------------------------------------------- // Rick, a Rust intercal compiler. Save your souls! // // Copyright (c) 2015 Georg Brandl // // This program is free software; you can redistribute it and/or modify it under the terms of the // GNU General Public License as published by the Free Software Foundation; either version 2 of the // License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without // even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // // You should have received a copy of the GNU General Public License along with this program; // if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. // ------------------------------------------------------------------------------------------------- /// Optimizes INTERCAL code to look a little less like what your dog left on the carpet. /// /// The optimizer gets the whole program and does several passes. /// /// * constant folding: just reduces (sub)expressions involving no variables /// * expressions: looks for common patterns of INTERCAL operator expressions /// and replaces them by equivalent expressions involving native Rust operators /// * constant output (can be disabled): if the program neither uses random numbers /// nor takes any input, its output must be constant - the optimizer generates /// this output using the Eval interpreter and replaces the program by a single /// Print instruction (if your program does not terminate, you'd better disable /// this pass with the -F option) /// * abstain check: marks all statements that cannot be ABSTAINed from, so that /// the code generator can skip emitting guards for them /// * var check: marks all variables that cannot be IGNOREd, so that the code /// generator can use unchecked assignments /// /// The patterns recognized by the expression optimizer are pretty random. They /// were selected to optimize performance of the `tpk.i` example program, and /// could be expanded a lot. But at that point it's probably better to take the /// route of C-INTERCAL and use a DSL for generic pattern matching. use std::collections::BTreeMap; use std::io::Cursor; use std::u16; use ast::{ Program, Stmt, StmtBody, Expr, Var, VarInfo, VType, Abstain }; use eval; use stdops::{ mingle, select, and_16, and_32, or_16, or_32, xor_16, xor_32 }; pub struct Optimizer { program: Program, allow_const_out: bool, } fn n(i: u32) -> Box<Expr> { box Expr::Num(VType::I32, i) } impl Optimizer { pub fn new(program: Program, allow_const_out: bool) -> Optimizer { Optimizer { program: program, allow_const_out: allow_const_out } } pub fn optimize(self) -> Program { let mut program = self.program; program = Optimizer::opt_constant_fold(program); program = Optimizer::opt_expressions(program); if self.allow_const_out { program = Optimizer::opt_const_output(program); } program = Optimizer::opt_abstain_check(program); program = Optimizer::opt_var_check(program); program } /// Fold expressions with literal constants, of which there are typically a lot /// since you can't have 32-bit literals. 
pub fn opt_constant_fold(mut program: Program) -> Program { for stmt in &mut program.stmts { match stmt.body { StmtBody::Calc(_, ref mut expr) => Optimizer::fold(expr), StmtBody::Resume(ref mut expr) => Optimizer::fold(expr), StmtBody::Forget(ref mut expr) => Optimizer::fold(expr), _ => { } } } program } fn fold(expr: &mut Expr) { let mut result = None; match *expr { Expr::Mingle(ref mut vx, ref mut wx) => { Optimizer::fold(vx); Optimizer::fold(wx); if let box Expr::Num(_, v) = *vx { if let box Expr::Num(_, w) = *wx { if v <= (u16::MAX as u32) && w <= (u16::MAX as u32) { let z = mingle(v, w); result = Some(*n(z)); } } } } Expr::Select(_, ref mut vx, ref mut wx) => { Optimizer::fold(vx); Optimizer::fold(wx); if let box Expr::Num(_, v) = *vx { if let box Expr::Num(_, w) = *wx { let z = select(v, w); result = Some(*n(z)); } } } Expr::And(_, ref mut vx) => { Optimizer::fold(vx); if let box Expr::Num(vtype, v) = *vx { result = Some(match vtype { VType::I16 => Expr::Num(vtype, and_16(v)), VType::I32 => Expr::Num(vtype, and_32(v)), }); } } Expr::Or(_, ref mut vx) => { Optimizer::fold(vx); if let box Expr::Num(vtype, v) = *vx { result = Some(match vtype { VType::I16 => Expr::Num(vtype, or_16(v)), VType::I32 => Expr::Num(vtype, or_32(v)), }); } } Expr::Xor(_, ref mut vx) => { Optimizer::fold(vx); if let box Expr::Num(vtype, v) = *vx { result = Some(match vtype { VType::I16 => Expr::Num(vtype, xor_16(v)), VType::I32 => Expr::Num(vtype, xor_32(v)), }); } } _ => {} } if let Some(result) = result { *expr = result; } } /// Optimize expressions. pub fn opt_expressions(mut program: Program) -> Program { for stmt in &mut program.stmts { //println!("\n\n{}", stmt.props.srcline); match stmt.body { StmtBody::Calc(_, ref mut expr) => Optimizer::opt_expr(expr), StmtBody::Resume(ref mut expr) => Optimizer::opt_expr(expr), StmtBody::Forget(ref mut expr) => Optimizer::opt_expr(expr), _ => { } } } program } fn opt_expr(expr: &mut Expr) { //println!("optimizing {}", expr); let mut result = None; match *expr { Expr::Select(_, ref mut vx, ref mut wx) => { Optimizer::opt_expr(vx); Optimizer::opt_expr(wx); match *wx { // Select(UnOP(Mingle(x, y)), 0x55555555) = BinOP(x, y) box Expr::Num(_, 0x55555555) => { match *vx { box Expr::And(_, box Expr::Mingle(ref m1, ref m2)) => { result = Some(Expr::RsAnd(m1.clone(), m2.clone())); } box Expr::Or(_, box Expr::Mingle(ref m1, ref m2)) => { result = Some(Expr::RsOr(m1.clone(), m2.clone())); } box Expr::Xor(_, box Expr::Mingle(ref m1, ref m2)) => { result = Some(Expr::RsXor(m1.clone(), m2.clone())); } _ => { } } } // Select(x, N) is a shift & mask if N has to "inside" zeros // in binary notation box Expr::Num(_, i) if i.count_zeros() == i.leading_zeros() + i.trailing_zeros() => { if i.trailing_zeros() == 0 { result = Some(Expr::RsAnd(vx.clone(), n(i))); } else if i.leading_zeros() == 0 { result = Some(Expr::RsRshift(vx.clone(), n(i.trailing_zeros()))); } else { result = Some(Expr::RsAnd( box Expr::RsRshift(vx.clone(), n(i.trailing_zeros())), n((1 << i.count_ones()) - 1))); } } // Select(Mingle(x, 0), 0x2AAAAAAB) -> (x << 1) & 0xFFFF box Expr::Num(_, 0x2AAAAAAB) => { if let box Expr::Mingle(ref m1, box Expr::Num(_, 0)) = *vx { result = Some(Expr::RsAnd( box Expr::RsLshift(m1.clone(), n(1)), n(0xFFFF))); } } _ => { } } } Expr::Mingle(ref mut vx, ref mut wx) => { Optimizer::opt_expr(vx); Optimizer::opt_expr(wx); // (x ~ 0xA..A) OP (y ~ 0xA..A) $ (x ~ 0x5..5) OP (y ~ 0x5..5) // -> (x OP y) in 32-bit if let box Expr::RsAnd(box Expr::Select(_, ref ax, box Expr::Num(_, 0xAAAAAAAA)), box 
Expr::Select(_, ref bx, box Expr::Num(_, 0xAAAAAAAA))) = *vx { if let box Expr::RsAnd(box Expr::Select(_, ref cx, box Expr::Num(_, 0x55555555)), box Expr::Select(_, ref dx, box Expr::Num(_, 0x55555555))) = *wx { if *ax == *cx && *bx == *dx { result = Some(Expr::RsAnd(ax.clone(), bx.clone())); } } } if let box Expr::RsOr(box Expr::Select(_, ref ax, box Expr::Num(_, 0xAAAAAAAA)), box Expr::Select(_, ref bx, box Expr::Num(_, 0xAAAAAAAA))) = *vx { if let box Expr::RsOr(box Expr::Select(_, ref cx, box Expr::Num(_, 0x55555555)), box Expr::Select(_, ref dx, box Expr::Num(_, 0x55555555))) = *wx { if *ax == *cx && *bx == *dx { result = Some(Expr::RsOr(ax.clone(), bx.clone())); } } } if let box Expr::RsXor(box Expr::Select(_, ref ax, box Expr::Num(_, 0xAAAAAAAA)), box Expr::Select(_, ref bx, box Expr::Num(_, 0xAAAAAAAA))) = *vx { if let box Expr::RsXor(box Expr::Select(_, ref cx, box Expr::Num(_, 0x55555555)), box Expr::Select(_, ref dx, box Expr::Num(_, 0x55555555))) = *wx { if *ax == *cx && *bx == *dx { result = Some(Expr::RsXor(ax.clone(), bx.clone())); } } } // (x ~ 0xA..A) OP y1 $ (x ~ 0x5..5) OP y2 // -> (x OP (y1 << 16 | y2)) in 32-bit if let box Expr::RsAnd(box Expr::Select(_, ref ax, box Expr::Num(_, 0xAAAAAAAA)), box Expr::Num(_, bn)) = *vx { if let box Expr::RsAnd(box Expr::Select(_, ref cx, box Expr::Num(_, 0x55555555)), box Expr::Num(_, dn)) = *wx { if *ax == *cx { result = Some(Expr::RsAnd(ax.clone(), n((bn << 16) | dn))); } } } if let box Expr::RsOr(box Expr::Select(_, ref ax, box Expr::Num(_, 0xAAAAAAAA)), box Expr::Num(_, bn)) = *vx { if let box Expr::RsOr(box Expr::Select(_, ref cx, box Expr::Num(_, 0x55555555)), box Expr::Num(_, dn)) = *wx { if *ax == *cx { result = Some(Expr::RsOr(ax.clone(), n((bn << 16) | dn))); } } } if let box Expr::RsXor(box Expr::Select(_, ref ax, box Expr::Num(_, 0xAAAAAAAA)), box Expr::Num(_, bn)) = *vx { if let box Expr::RsXor(box Expr::Select(_, ref cx, box Expr::Num(_, 0x55555555)), box Expr::Num(_, dn)) = *wx { if *ax == *cx { result = Some(Expr::RsXor(ax.clone(), n((bn << 16) | dn))); } } } // (x != y) $ (z != w) -> ((x != y) << 1) | (z != w) if let box Expr::RsNotEqual(..) = *vx { if let box Expr::RsNotEqual(..) 
= *wx { result = Some(Expr::RsOr(box Expr::RsLshift(vx.clone(), n(1)), wx.clone())); } } } Expr::And(_, ref mut vx) | Expr::Or(_, ref mut vx) | Expr::Xor(_, ref mut vx) => { Optimizer::opt_expr(vx); } Expr::RsNot(ref mut vx) => { Optimizer::opt_expr(vx); } Expr::RsAnd(ref mut vx, ref mut wx) => { Optimizer::opt_expr(vx); Optimizer::opt_expr(wx); // (x ~ x) & 1 -> x != 0 if let box Expr::Select(_, ref sx, ref tx) = *vx { if *sx == *tx { if let box Expr::Num(_, 1) = *wx { result = Some(Expr::RsNotEqual(sx.clone(), n(0))); } } } // ?(x $ 1) & 3 -> 1 + (x & 1) if let box Expr::Xor(_, box Expr::Mingle(ref mx, box Expr::Num(_, 1))) = *vx { if let box Expr::Num(_, 3) = *wx { result = Some(Expr::RsPlus(n(1), box Expr::RsAnd(mx.clone(), n(1)))); } } // ?(x $ 2) & 3 -> 2 - (x & 1) if let box Expr::Xor(_, box Expr::Mingle(ref mx, box Expr::Num(_, 2))) = *vx { if let box Expr::Num(_, 3) = *wx { result = Some(Expr::RsMinus(n(2), box Expr::RsAnd(mx.clone(), n(1)))); } } // x & 0xFFFFFFFF has no effect if let box Expr::Num(_, 0xFFFFFFFF) = *wx { result = Some(*vx.clone()); } // Select(UnOP(Mingle(x, y)), 1) = BinOP(x & 1, y & 1) if let box Expr::Num(_, 1) = *wx { match *vx { box Expr::And(_, box Expr::Mingle(ref m1, ref m2)) => { result = Some(Expr::RsAnd( box Expr::RsAnd(m1.clone(), n(1)), box Expr::RsAnd(m2.clone(), n(1)))); } box Expr::Or(_, box Expr::Mingle(ref m1, ref m2)) => { result = Some(Expr::RsOr( box Expr::RsAnd(m1.clone(), n(1)), box Expr::RsAnd(m2.clone(), n(1)))); } box Expr::Xor(_, box Expr::Mingle(ref m1, ref m2)) => { result = Some(Expr::RsXor( box Expr::RsAnd(m1.clone(), n(1)), box Expr::RsAnd(m2.clone(), n(1)))); } _ => { } } } // ((x & y) & y) -> second & has no effect if let box Expr::RsAnd(_, ref v2x) = *vx { if *v2x == *wx { result = Some(*vx.clone()); } } // ((x != y) & 1) -> & has no effect if let box Expr::RsNotEqual(..) = *vx { if let box Expr::Num(_, 1) = *wx { result = Some(*vx.clone()); } } } Expr::RsXor(ref mut vx, ref mut wx) => { Optimizer::opt_expr(vx); Optimizer::opt_expr(wx); if let box Expr::Num(_, 0xFFFFFFFF) = *wx { result = Some(Expr::RsNot(vx.clone())); } else if let box Expr::Num(_, 0xFFFFFFFF) = *vx { result = Some(Expr::RsNot(wx.clone())); } } Expr::RsOr(ref mut vx, ref mut wx) | Expr::RsRshift(ref mut vx, ref mut wx) | Expr::RsLshift(ref mut vx, ref mut wx) | // Expr::RsEqual(ref mut vx, ref mut wx) | Expr::RsNotEqual(ref mut vx, ref mut wx) | Expr::RsMinus(ref mut vx, ref mut wx) | Expr::RsPlus(ref mut vx, ref mut wx) => { Optimizer::opt_expr(vx); Optimizer::opt_expr(wx); } Expr::Num(..) | Expr::Var(..) => { } } if let Some(mut result) = result {<|fim▁hole|> } /// Cleverly check for programs that don't take input and always produce the /// same output; reduce them to a Print statement. pub fn opt_const_output(program: Program) -> Program { let mut possible = true; let mut prev_lbl = 0; for stmt in &program.stmts { // if we have a statement with %, no chance if stmt.props.chance < 100 { // except if it is one of the stdlibs itself if !(program.added_syslib && prev_lbl == 1901) { if !(program.added_floatlib && (prev_lbl == 5401 || prev_lbl == 5402)) { possible = false; break; } } } match stmt.body { // if we accept input, bail out StmtBody::WriteIn(..) => { possible = false; break; } // if we call one of the stdlib random routines, bail out StmtBody::DoNext(n) if ((n == 1900 || n == 1910 || n == 5400) && prev_lbl != 1911) => { possible = false; break; } _ => { } } prev_lbl = stmt.props.label; } if !possible { return program; } // we can do it! 
evaluate the program and replace all statements let out = Vec::new(); let mut cursor = Cursor::new(out); if let Err(_) = eval::Eval::new(&program, &mut cursor, false, false).eval() { // if eval fails, don't pretend to do anything. return program; } Program { stmts: vec![Stmt::new_with(StmtBody::Print(cursor.into_inner())), Stmt::new_with(StmtBody::GiveUp)], labels: BTreeMap::new(), stmt_types: vec![Abstain::Label(0)], var_info: (vec![], vec![], vec![], vec![]), uses_complex_comefrom: false, added_syslib: false, added_floatlib: false, bugline: 2 } } /// Set "can_abstain" to false for all statements that can't be abstained from. pub fn opt_abstain_check(mut program: Program) -> Program { let mut can_abstain = vec![false; program.stmts.len()]; for stmt in &program.stmts { match stmt.body { StmtBody::Abstain(_, ref whats) | StmtBody::Reinstate(ref whats) => { for what in whats { if let &Abstain::Label(lbl) = what { let idx = program.labels[&lbl]; can_abstain[idx as usize] = true; } else { for (i, stype) in program.stmt_types.iter().enumerate() { if stype == what { can_abstain[i] = true; } } } } } _ => { } } } for (stmt, can_abstain) in program.stmts.iter_mut().zip(can_abstain) { if stmt.body != StmtBody::GiveUp { stmt.can_abstain = can_abstain; } } program } /// Determine "can_ignore" and "can_stash" for variables. pub fn opt_var_check(mut program: Program) -> Program { fn reset(vis: &mut Vec<VarInfo>) { for vi in vis { vi.can_stash = false; vi.can_ignore = false; } } reset(&mut program.var_info.0); reset(&mut program.var_info.1); reset(&mut program.var_info.2); reset(&mut program.var_info.3); for stmt in &program.stmts { match stmt.body { StmtBody::Stash(ref vars) | StmtBody::Retrieve(ref vars) => { for var in vars { match *var { Var::I16(n) => program.var_info.0[n].can_stash = true, Var::I32(n) => program.var_info.1[n].can_stash = true, Var::A16(n, _) => program.var_info.2[n].can_stash = true, Var::A32(n, _) => program.var_info.3[n].can_stash = true, } } } StmtBody::Ignore(ref vars) | StmtBody::Remember(ref vars) => { for var in vars { match *var { Var::I16(n) => program.var_info.0[n].can_ignore = true, Var::I32(n) => program.var_info.1[n].can_ignore = true, Var::A16(n, _) => program.var_info.2[n].can_ignore = true, Var::A32(n, _) => program.var_info.3[n].can_ignore = true, } } } _ => { } } } program } }<|fim▁end|>
Optimizer::opt_expr(&mut result); // XXX will this always terminate? *expr = result; }
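The rewrites in opt.rs lean on INTERCAL's mingle ($) and select (~) operators from stdops. Reference versions, with bit conventions assumed to match C-INTERCAL (mingle interleaves two 16-bit values with the first operand in the higher, odd positions; select packs the masked bits rightward), make rules like `(x ~ x) & 1 -> x != 0` and the "no inside zeros" shift-and-mask case checkable:

```python
# Illustrative reference implementations, not the stdops.rs code itself.
def mingle(a, b):                       # two 16-bit values -> one 32-bit value
    out = 0
    for i in range(16):
        out |= ((a >> i) & 1) << (2 * i + 1)
        out |= ((b >> i) & 1) << (2 * i)
    return out

def select(value, mask):                # keep value's bits where mask is 1, pack right
    out, shift = 0, 0
    for i in range(32):
        if (mask >> i) & 1:
            out |= ((value >> i) & 1) << shift
            shift += 1
    return out

x16, x32 = 0xBEEF, 0xDEADBEEF
assert select(mingle(x16, x16), 0x55555555) == x16     # even positions carry operand 2
assert select(x32, 0xFFFF0000) == x32 >> 16            # mask with no inside zeros
assert (select(x32, x32) & 1) == 1 and x32 != 0        # the (x ~ x) & 1 rewrite
```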
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import mptt.fields import uchicagohvz.overwrite_fs from django.conf import settings import django.utils.timezone import uchicagohvz.game.models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Award', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ('points', models.FloatField(help_text=b'Can be negative, e.g. to penalize players')), ('code', models.CharField(help_text=b'leave blank for automatic (re-)generation', max_length=255, blank=True)), ('redeem_limit', models.IntegerField(help_text=b'Maximum number of players that can redeem award via code entry (set to 0 for awards to be added by moderators only)')), ('redeem_type', models.CharField(max_length=1, choices=[(b'H', b'Humans only'), (b'Z', b'Zombies only'), (b'A', b'All players')])), ], ), migrations.CreateModel( name='Game', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ('registration_date', models.DateTimeField()), ('start_date', models.DateTimeField()), ('end_date', models.DateTimeField()), ('rules', models.FileField(storage=uchicagohvz.overwrite_fs.OverwriteFileSystemStorage(), upload_to=uchicagohvz.game.models.gen_rules_filename)), ('picture', models.FileField(storage=uchicagohvz.overwrite_fs.OverwriteFileSystemStorage(), null=True, upload_to=uchicagohvz.game.models.gen_pics_filename, blank=True)), ('color', models.CharField(default=b'#FFFFFF', max_length=64)), ('flavor', models.TextField(default=b'', max_length=6000)), ], options={ 'ordering': ['-start_date'], }, ), migrations.CreateModel( name='HighValueDorm', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('dorm', models.CharField(max_length=4, choices=[(b'BJ', b'Burton-Judson Courts'), (b'IH', b'International House'), (b'MAX', b'Max Palevsky'), (b'NC', b'North Campus'), (b'SH', b'Snell-Hitchcock'), (b'SC', b'South Campus'), (b'ST', b'Stony Island'), (b'OFF', b'Off campus')])), ('start_date', models.DateTimeField()), ('end_date', models.DateTimeField()), ('points', models.IntegerField(default=3)), ('game', models.ForeignKey(to='game.Game')), ], ), migrations.CreateModel( name='HighValueTarget', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('start_date', models.DateTimeField()), ('end_date', models.DateTimeField()), ('kill_points', models.IntegerField(default=3, help_text=b'# of points zombies receive for killing this HVT')), ('award_points', models.IntegerField(default=0, help_text=b'# of points the HVT earns if he/she survives for the entire duration')), ], ), migrations.CreateModel( name='Kill', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('points', models.IntegerField(default=1)), ('notes', models.TextField(blank=True)), ('lat', models.FloatField(null=True, verbose_name=b'latitude', blank=True)), ('lng', models.FloatField(null=True, verbose_name=b'longitude', blank=True)), ('lft', 
models.PositiveIntegerField(editable=False, db_index=True)), ('rght', models.PositiveIntegerField(editable=False, db_index=True)), ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)), ('level', models.PositiveIntegerField(editable=False, db_index=True)), ('hvd', models.ForeignKey(related_name='kills', on_delete=django.db.models.deletion.SET_NULL, verbose_name=b'High-value Dorm', blank=True, to='game.HighValueDorm', null=True)), ('hvt', models.OneToOneField(related_name='kill', null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='game.HighValueTarget', verbose_name=b'High-value target')), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Mission', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=63)), ('description', models.CharField(max_length=255)), ('summary', models.TextField(default=b'', max_length=6000)), ('zombies_win', models.BooleanField(default=False)), ('awards', models.ManyToManyField(help_text=b'Awards associated with this mission.', related_name='missions', to='game.Award', blank=True)), ('game', models.ForeignKey(related_name='missions', to='game.Game')), ], ), migrations.CreateModel( name='MissionPicture', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('picture', models.FileField(storage=uchicagohvz.overwrite_fs.OverwriteFileSystemStorage(), upload_to=uchicagohvz.game.models.gen_pics_filename)), ('lat', models.FloatField(null=True, verbose_name=b'latitude', blank=True)), ('lng', models.FloatField(null=True, verbose_name=b'longitude', blank=True)), ('game', models.ForeignKey(related_name='pictures', to='game.Game')), ], ), migrations.CreateModel( name='New_Squad', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128)), ('game', models.ForeignKey(related_name='new_squads', to='game.Game')), ], ), migrations.CreateModel( name='Player', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('active', models.BooleanField(default=False)), ('bite_code', models.CharField(help_text=b'leave blank for automatic (re-)generation', max_length=255, blank=True)), ('dorm', models.CharField(max_length=4, choices=[(b'BJ', b'Burton-Judson Courts'), (b'IH', b'International House'), (b'MAX', b'Max Palevsky'), (b'NC', b'North Campus'), (b'SH', b'Snell-Hitchcock'), (b'SC', b'South Campus'), (b'ST', b'Stony Island'), (b'OFF', b'Off campus')])), ('major', models.CharField(help_text=b'autopopulates from LDAP', max_length=255, blank=True)), ('human', models.BooleanField(default=True)), ('opt_out_hvt', models.BooleanField(default=False)), ('gun_requested', models.BooleanField(default=False)), ('renting_gun', models.BooleanField(default=False)), ('gun_returned', models.BooleanField(default=False)), ('last_words', models.CharField(max_length=255, blank=True)), ('lead_zombie', models.BooleanField(default=False)), ('delinquent_gun', models.BooleanField(default=False)), ('game', models.ForeignKey(related_name='players', to='game.Game')), ('new_squad', models.ForeignKey(related_name='players', blank=True, to='game.New_Squad', null=True)), ], options={ 'ordering': ['-game__start_date', 'user__username', 'user__last_name', 'user__first_name'], }, ), migrations.CreateModel( name='Squad', fields=[ ('id', models.AutoField(verbose_name='ID', 
serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=128)), ('game', models.ForeignKey(related_name='squads', to='game.Game')), ], ), migrations.AddField(<|fim▁hole|> model_name='player', name='squad', field=models.ForeignKey(related_name='players', blank=True, to='game.Squad', null=True), ), migrations.AddField( model_name='player', name='user', field=models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='missionpicture', name='players', field=models.ManyToManyField(help_text=b'Players in this picture.', related_name='pictures', to='game.Player', blank=True), ), migrations.AddField( model_name='kill', name='killer', field=models.ForeignKey(related_name='+', to='game.Player'), ), migrations.AddField( model_name='kill', name='parent', field=mptt.fields.TreeForeignKey(related_name='children', blank=True, editable=False, to='game.Kill', null=True), ), migrations.AddField( model_name='kill', name='victim', field=models.ForeignKey(related_name='+', to='game.Player'), ), migrations.AddField( model_name='highvaluetarget', name='player', field=models.OneToOneField(related_name='hvt', to='game.Player'), ), migrations.AddField( model_name='award', name='game', field=models.ForeignKey(related_name='+', to='game.Game'), ), migrations.AddField( model_name='award', name='players', field=models.ManyToManyField(help_text=b'Players that should receive this award.', related_name='awards', to='game.Player', blank=True), ), migrations.AlterUniqueTogether( name='squad', unique_together=set([('game', 'name')]), ), migrations.AlterUniqueTogether( name='player', unique_together=set([('game', 'bite_code'), ('user', 'game')]), ), migrations.AlterUniqueTogether( name='new_squad', unique_together=set([('game', 'name')]), ), migrations.AlterUniqueTogether( name='mission', unique_together=set([('game', 'name')]), ), migrations.AlterUniqueTogether( name='kill', unique_together=set([('parent', 'killer', 'victim')]), ), migrations.AlterUniqueTogether( name='highvaluedorm', unique_together=set([('game', 'dorm')]), ), migrations.AlterUniqueTogether( name='award', unique_together=set([('game', 'name'), ('game', 'code')]), ), ]<|fim▁end|>
<|file_name|>v2_invest_optimize_only_gas_and_storage.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ General description ------------------- This example shows how to perform a capacity optimization for an energy system with storage. The following energy system is modeled: input/output bgas bel | | | | | | | | wind(FixedSource) |------------------>| | | | | | pv(FixedSource) |------------------>| | | | | | gas_resource |--------->| | | (Commodity) | | | | | | | | demand(Sink) |<------------------| | | | | | | | | | pp_gas(Transformer) |<---------| | | |------------------>| | | | | | storage(Storage) |<------------------| | |------------------>| | The example exists in four variations. The following parameters describe the main setting for the optimization variation 2: - optimize gas_resource and storage - set installed capacities for wind and pv - set investment cost for storage - set gas price for kWh Results show a higher renewable energy share than in variation 1 (78% compared to 51%) due to preinstalled renewable capacities. Storage is not installed as the gas resource is cheaper. Have a look at different parameter settings. There are four variations of this example in the same folder. Installation requirements ------------------------- This example requires the version v0.4.x of oemof. Install by: pip install 'oemof.solph>=0.4,<0.5' """ __copyright__ = "oemof developer group" __license__ = "GPLv3" ############################################################################### # Imports ############################################################################### # Default logger of oemof from oemof.tools import logger from oemof.tools import economics from oemof import solph import logging import os import pandas as pd import pprint as pp number_timesteps = 8760 ########################################################################## # Initialize the energy system and read/calculate necessary parameters ########################################################################## logger.define_logging() logging.info("Initialize the energy system") date_time_index = pd.date_range("1/1/2012", periods=number_timesteps, freq="H") energysystem = solph.EnergySystem(timeindex=date_time_index) # Read data file full_filename = os.path.join(os.getcwd(), "storage_investment.csv") data = pd.read_csv(full_filename, sep=",") price_gas = 0.04 # If the period is one year the equivalent periodical costs (epc) of an # investment are equal to the annuity. Use oemof's economic tools. 
epc_storage = economics.annuity(capex=1000, n=20, wacc=0.05) ########################################################################## # Create oemof objects ########################################################################## logging.info("Create oemof objects") # create natural gas bus bgas = solph.Bus(label="natural_gas") # create electricity bus bel = solph.Bus(label="electricity") energysystem.add(bgas, bel) # create excess component for the electricity bus to allow overproduction excess = solph.Sink(label="excess_bel", inputs={bel: solph.Flow()}) # create source object representing the natural gas commodity (annual limit) gas_resource = solph.Source( label="rgas", outputs={bgas: solph.Flow(variable_costs=price_gas)} ) # create fixed source object representing wind power plants wind = solph.Source( label="wind", outputs={bel: solph.Flow(fix=data["wind"], nominal_value=1000000)}, ) # create fixed source object representing pv power plants pv = solph.Source( label="pv", outputs={bel: solph.Flow(fix=data["pv"], nominal_value=600000)} ) # create simple sink object representing the electrical demand demand = solph.Sink( label="demand", inputs={bel: solph.Flow(fix=data["demand_el"], nominal_value=1)}, ) # create simple transformer object representing a gas power plant pp_gas = solph.Transformer( label="pp_gas", inputs={bgas: solph.Flow()}, outputs={bel: solph.Flow(nominal_value=10e10, variable_costs=0)}, conversion_factors={bel: 0.58}, )<|fim▁hole|> # create storage object representing a battery storage = solph.components.GenericStorage( label="storage", inputs={bel: solph.Flow(variable_costs=0.0001)}, outputs={bel: solph.Flow()}, loss_rate=0.00, initial_storage_level=0, invest_relation_input_capacity=1 / 6, invest_relation_output_capacity=1 / 6, inflow_conversion_factor=1, outflow_conversion_factor=0.8, investment=solph.Investment(ep_costs=epc_storage), ) energysystem.add(excess, gas_resource, wind, pv, demand, pp_gas, storage) ########################################################################## # Optimise the energy system ########################################################################## logging.info("Optimise the energy system") # initialise the operational model om = solph.Model(energysystem) # if tee_switch is true solver messages will be displayed logging.info("Solve the optimization problem") om.solve(solver="cbc", solve_kwargs={"tee": True}) ########################################################################## # Check and plot the results ########################################################################## # check if the new result object is working for custom components results = solph.processing.results(om) custom_storage = solph.views.node(results, "storage") electricity_bus = solph.views.node(results, "electricity") meta_results = solph.processing.meta_results(om) pp.pprint(meta_results) my_results = electricity_bus["scalars"] # installed capacity of storage in GWh my_results["storage_invest_GWh"] = ( results[(storage, None)]["scalars"]["invest"] / 1e6 ) # resulting renewable energy share my_results["res_share"] = ( 1 - results[(pp_gas, bel)]["sequences"].sum() / results[(bel, demand)]["sequences"].sum() ) pp.pprint(my_results)<|fim▁end|>
<|file_name|>dev_dodobas.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from .dev import * # noqa INSTALLED_APPS += ( 'django_extensions', ) DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'NAME': 'healthsites_dev', 'USER': '', 'PASSWORD': '', 'HOST': 'localhost', # Set to empty string for default. 'PORT': '', } } LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { # define output formats 'verbose': { 'format': ( '%(levelname)s %(name)s %(asctime)s %(module)s %(process)d ' '%(thread)d %(message)s') }, 'simple': { 'format': ( '%(name)s %(levelname)s %(filename)s L%(lineno)s: ' '%(message)s') }, }, 'handlers': { # console output 'console': { 'class': 'logging.StreamHandler', 'formatter': 'simple', 'level': 'DEBUG', }, # 'logfile': { # 'class': 'logging.FileHandler', # 'filename': '/tmp/app-dev.log', # 'formatter': 'simple', # 'level': 'DEBUG', # }<|fim▁hole|> 'handlers': ['console'], 'level': 'INFO', # switch to DEBUG to show actual SQL }, # example app logger 'localities': { 'level': 'DEBUG', 'handlers': ['console'], # propagate is True by default, which proppagates logs upstream 'propagate': False } }, # root logger # non handled logs will propagate to the root logger 'root': { 'handlers': ['console'], 'level': 'WARNING' } }<|fim▁end|>
}, 'loggers': { 'django.db.backends': {
<|file_name|>angular-cookies.js<|end_file_name|><|fim▁begin|>/** * @license AngularJS v1.3.0-beta.11 * (c) 2010-2014 Google, Inc. http://angularjs.org * License: MIT */ (function(window, angular, undefined) {'use strict'; /** * @ngdoc module * @name ngCookies * @description * * # ngCookies * * The `ngCookies` module provides a convenient wrapper for reading and writing browser cookies. * * * <div doc-module-components="ngCookies"></div> * * See {@link ngCookies.$cookies `$cookies`} and * {@link ngCookies.$cookieStore `$cookieStore`} for usage. */ angular.module('ngCookies', ['ng']). /** * @ngdoc service * @name $cookies * * @description * Provides read/write access to browser's cookies. * * Only a simple Object is exposed and by adding or removing properties to/from this object, new * cookies are created/deleted at the end of current $eval. * The object's properties can only be strings. * * Requires the {@link ngCookies `ngCookies`} module to be installed. * * @example * * ```js * function ExampleController($cookies) { * // Retrieving a cookie * var favoriteCookie = $cookies.myFavorite; * // Setting a cookie * $cookies.myFavorite = 'oatmeal'; * } * ``` */ factory('$cookies', ['$rootScope', '$browser', function ($rootScope, $browser) { var cookies = {}, lastCookies = {}, lastBrowserCookies, runEval = false, copy = angular.copy, isUndefined = angular.isUndefined; //creates a poller fn that copies all cookies from the $browser to service & inits the service $browser.addPollFn(function() { var currentCookies = $browser.cookies(); if (lastBrowserCookies != currentCookies) { //relies on browser.cookies() impl lastBrowserCookies = currentCookies; copy(currentCookies, lastCookies); copy(currentCookies, cookies); if (runEval) $rootScope.$apply(); } })(); runEval = true; //at the end of each eval, push cookies //TODO: this should happen before the "delayed" watches fire, because if some cookies are not // strings or browser refuses to store some cookies, we update the model in the push fn. $rootScope.$watch(push); return cookies; /** * Pushes all the cookies from the service to the browser and verifies if all cookies were * stored. */ function push() { var name, value, browserCookies, updated; //delete any cookies deleted in $cookies for (name in lastCookies) { if (isUndefined(cookies[name])) { $browser.cookies(name, undefined); } } //update all cookies updated in $cookies for(name in cookies) { value = cookies[name]; if (!angular.isString(value)) { value = '' + value; cookies[name] = value; } if (value !== lastCookies[name]) { $browser.cookies(name, value); updated = true; } } //verify what was actually stored if (updated){ updated = false; browserCookies = $browser.cookies(); for (name in cookies) { if (cookies[name] !== browserCookies[name]) { //delete or reset all cookies that the browser dropped from $cookies if (isUndefined(browserCookies[name])) { delete cookies[name]; } else { cookies[name] = browserCookies[name]; } updated = true; } } } } }]). /** * @ngdoc service * @name $cookieStore * @requires $cookies<|fim▁hole|> * @description * Provides a key-value (string-object) storage, that is backed by session cookies. * Objects put or retrieved from this storage are automatically serialized or * deserialized by angular's toJson/fromJson. * * Requires the {@link ngCookies `ngCookies`} module to be installed. 
* * @example * * ```js * function ExampleController($cookieStore) { * // Put cookie * $cookieStore.put('myFavorite','oatmeal'); * // Get cookie * var favoriteCookie = $cookieStore.get('myFavorite'); * // Removing a cookie * $cookieStore.remove('myFavorite'); * } * ``` */ factory('$cookieStore', ['$cookies', function($cookies) { return { /** * @ngdoc method * @name $cookieStore#get * * @description * Returns the value of given cookie key * * @param {string} key Id to use for lookup. * @returns {Object} Deserialized cookie value. */ get: function(key) { var value = $cookies[key]; return value ? angular.fromJson(value) : value; }, /** * @ngdoc method * @name $cookieStore#put * * @description * Sets a value for given cookie key * * @param {string} key Id for the `value`. * @param {Object} value Value to be stored. */ put: function(key, value) { $cookies[key] = angular.toJson(value); }, /** * @ngdoc method * @name $cookieStore#remove * * @description * Remove given cookie * * @param {string} key Id of the key-value pair to delete. */ remove: function(key) { delete $cookies[key]; } }; }]); })(window, window.angular);<|fim▁end|>
*
<|file_name|>consensus.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. use util::*; use io::{IoHandler, IoContext, IoChannel}; use ethcore::client::{BlockChainClient, Client}; use ethcore::service::ClientIoMessage; use ethcore::spec::Spec; use ethcore::miner::MinerService; use ethcore::transaction::*; use ethcore::account_provider::AccountProvider; use ethkey::{KeyPair, Secret}; use super::helpers::*; use SyncConfig; struct TestIoHandler { client: Arc<Client>, } impl IoHandler<ClientIoMessage> for TestIoHandler { fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) { match *net_message { ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) { panic!("Invalid message received: {}", e); }, _ => {} // ignore other messages } } } fn new_tx(secret: &Secret, nonce: U256) -> PendingTransaction { let signed = Transaction { nonce: nonce.into(), gas_price: 0.into(), gas: 21000.into(), action: Action::Call(Address::default()), value: 0.into(), data: Vec::new(), }.sign(secret, None); PendingTransaction::new(signed, None) } #[test] fn authority_round() { let s0 = KeyPair::from_secret_slice(&"1".sha3()).unwrap(); let s1 = KeyPair::from_secret_slice(&"0".sha3()).unwrap(); let ap = Arc::new(AccountProvider::transient_provider()); ap.insert_account(s0.secret().clone(), "").unwrap(); ap.insert_account(s1.secret().clone(), "").unwrap(); let mut net = TestNet::with_spec_and_accounts(2, SyncConfig::default(), Spec::new_test_round, Some(ap)); let mut net = &mut *net; let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(0).chain.clone() }); let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(1).chain.clone() }); // Push transaction to both clients. Only one of them gets lucky to produce a block. 
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap(); net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap(); net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain)); net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain)); net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); // exchange statuses net.sync(); // Trigger block proposal net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into())).unwrap(); net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into())).unwrap(); // Sync a block net.sync(); assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1); assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1); net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into())).unwrap(); net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into())).unwrap(); // Move to next proposer step net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); net.sync(); assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2); assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2); // Fork the network net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into())).unwrap(); net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into())).unwrap(); net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); let ci0 = net.peer(0).chain.chain_info();<|fim▁hole|> assert_eq!(ci1.best_block_number, 3); assert!(ci0.best_block_hash != ci1.best_block_hash); // Reorg to the correct one. net.sync(); let ci0 = net.peer(0).chain.chain_info(); let ci1 = net.peer(1).chain.chain_info(); assert_eq!(ci0.best_block_number, 3); assert_eq!(ci1.best_block_number, 3); assert_eq!(ci0.best_block_hash, ci1.best_block_hash); } #[test] fn tendermint() { let s0 = KeyPair::from_secret_slice(&"1".sha3()).unwrap(); let s1 = KeyPair::from_secret_slice(&"0".sha3()).unwrap(); let ap = Arc::new(AccountProvider::transient_provider()); ap.insert_account(s0.secret().clone(), "").unwrap(); ap.insert_account(s1.secret().clone(), "").unwrap(); let mut net = TestNet::with_spec_and_accounts(2, SyncConfig::default(), Spec::new_test_tendermint, Some(ap)); let mut net = &mut *net; let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(0).chain.clone() }); let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(1).chain.clone() }); // Push transaction to both clients. Only one of them issues a proposal. 
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap(); trace!(target: "poa", "Peer 0 is {}.", s0.address()); net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap(); trace!(target: "poa", "Peer 1 is {}.", s1.address()); net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain)); net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain)); net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); // Exchange statuses net.sync(); // Propose net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into())).unwrap(); net.sync(); // Propose timeout, synchronous for now net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); // Prevote, precommit and commit net.sync(); assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1); assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1); net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into())).unwrap(); // Commit timeout net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); // Propose net.sync(); // Propose timeout net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); // Prevote, precommit and commit net.sync(); assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2); assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2); net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into())).unwrap(); net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into())).unwrap(); // Peers get disconnected. // Commit net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); // Propose net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into())).unwrap(); net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into())).unwrap(); // Send different prevotes net.sync(); // Prevote timeout net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); // Precommit and commit net.sync(); // Propose timeout net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); net.sync(); let ci0 = net.peer(0).chain.chain_info(); let ci1 = net.peer(1).chain.chain_info(); assert_eq!(ci0.best_block_number, 3); assert_eq!(ci1.best_block_number, 3); assert_eq!(ci0.best_block_hash, ci1.best_block_hash); }<|fim▁end|>
let ci1 = net.peer(1).chain.chain_info(); assert_eq!(ci0.best_block_number, 3);
<|file_name|>shh.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package api import (<|fim▁hole|> "github.com/ghostnetwrk/ghostnet/eth" "github.com/ghostnetwrk/ghostnet/rpc/codec" "github.com/ghostnetwrk/ghostnet/rpc/shared" "github.com/ghostnetwrk/ghostnet/xeth" ) const ( ShhApiVersion = "1.0" ) var ( // mapping between methods and handlers shhMapping = map[string]shhhandler{ "shh_version": (*shhApi).Version, "shh_post": (*shhApi).Post, "shh_hasIdentity": (*shhApi).HasIdentity, "shh_newIdentity": (*shhApi).NewIdentity, "shh_newFilter": (*shhApi).NewFilter, "shh_uninstallFilter": (*shhApi).UninstallFilter, "shh_getMessages": (*shhApi).GetMessages, "shh_getFilterChanges": (*shhApi).GetFilterChanges, } ) func newWhisperOfflineError(method string) error { return shared.NewNotAvailableError(method, "whisper offline") } // net callback handler type shhhandler func(*shhApi, *shared.Request) (interface{}, error) // shh api provider type shhApi struct { xeth *xeth.XEth ethereum *eth.Ethereum methods map[string]shhhandler codec codec.ApiCoder } // create a new whisper api instance func NewShhApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *shhApi { return &shhApi{ xeth: xeth, ethereum: eth, methods: shhMapping, codec: coder.New(nil), } } // collection with supported methods func (self *shhApi) Methods() []string { methods := make([]string, len(self.methods)) i := 0 for k := range self.methods { methods[i] = k i++ } return methods } // Execute given request func (self *shhApi) Execute(req *shared.Request) (interface{}, error) { if callback, ok := self.methods[req.Method]; ok { return callback(self, req) } return nil, shared.NewNotImplementedError(req.Method) } func (self *shhApi) Name() string { return shared.ShhApiName } func (self *shhApi) ApiVersion() string { return ShhApiVersion } func (self *shhApi) Version(req *shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { return nil, newWhisperOfflineError(req.Method) } return w.Version(), nil } func (self *shhApi) Post(req *shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { return nil, newWhisperOfflineError(req.Method) } args := new(WhisperMessageArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err } err := w.Post(args.Payload, args.To, args.From, args.Topics, args.Priority, args.Ttl) if err != nil { return false, err } return true, nil } func (self *shhApi) HasIdentity(req *shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { return nil, newWhisperOfflineError(req.Method) } args := new(WhisperIdentityArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err } return w.HasIdentity(args.Identity), nil } func (self *shhApi) NewIdentity(req 
*shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { return nil, newWhisperOfflineError(req.Method) } return w.NewIdentity(), nil } func (self *shhApi) NewFilter(req *shared.Request) (interface{}, error) { args := new(WhisperFilterArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err } id := self.xeth.NewWhisperFilter(args.To, args.From, args.Topics) return newHexNum(big.NewInt(int64(id)).Bytes()), nil } func (self *shhApi) UninstallFilter(req *shared.Request) (interface{}, error) { args := new(FilterIdArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err } return self.xeth.UninstallWhisperFilter(args.Id), nil } func (self *shhApi) GetFilterChanges(req *shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { return nil, newWhisperOfflineError(req.Method) } // Retrieve all the new messages arrived since the last request args := new(FilterIdArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err } return self.xeth.WhisperMessagesChanged(args.Id), nil } func (self *shhApi) GetMessages(req *shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { return nil, newWhisperOfflineError(req.Method) } // Retrieve all the cached messages matching a specific, existing filter args := new(FilterIdArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err } return self.xeth.WhisperMessages(args.Id), nil }<|fim▁end|>
"math/big"
<|file_name|>python.py<|end_file_name|><|fim▁begin|>""" Python environments and packages ================================ This module provides tools for using Python `virtual environments`_ and installing Python packages using the `pip`_ installer. .. _virtual environments: http://www.virtualenv.org/ .. _pip: http://www.pip-installer.org/ """ from __future__ import with_statement from contextlib import contextmanager from distutils.version import StrictVersion as V from pipes import quote import os import posixpath import re from fabric.api import cd, hide, prefix, run, settings, sudo from fabric.utils import puts from fabtools.files import is_file from fabtools.utils import abspath, download, run_as_root GET_PIP_URL = 'https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py' def is_pip_installed(version=None, pip_cmd='pip'): """ Check if `pip`_ is installed. .. _pip: http://www.pip-installer.org/ """ with settings(hide('running', 'warnings', 'stderr', 'stdout'), warn_only=True): res = run('%(pip_cmd)s --version 2>/dev/null' % locals()) if res.failed: return False if version is None: return res.succeeded else: m = re.search(r'pip (?P<version>.*) from', res) if m is None: return False installed = m.group('version') if V(installed) < V(version): puts("pip %s found (version >= %s required)" % (installed, version)) return False else: return True def install_pip(python_cmd='python', use_sudo=True): """ Install the latest version of `pip`_, using the given Python interpreter. :: import fabtools if not fabtools.python.is_pip_installed(): fabtools.python.install_pip() .. note:: pip is automatically installed inside a virtualenv, so there is no need to install it yourself in this case. .. _pip: http://www.pip-installer.org/ """ with cd('/tmp'): download(GET_PIP_URL) command = '%(python_cmd)s get-pip.py' % locals() if use_sudo: run_as_root(command, pty=False) else: run(command, pty=False) run('rm -f get-pip.py') def is_installed(package, pip_cmd='pip'): """ Check if a Python package is installed (using pip). Package names are case insensitive. Example:: from fabtools.python import virtualenv import fabtools with virtualenv('/path/to/venv'): fabtools.python.install('Flask') assert fabtools.python.is_installed('flask') .. _pip: http://www.pip-installer.org/ """ with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True): res = run('%(pip_cmd)s freeze' % locals()) packages = [line.split('==')[0].lower() for line in res.splitlines()] return (package.lower() in packages) def install(packages, upgrade=False, download_cache=None, allow_external=None, allow_unverified=None, quiet=False, pip_cmd='pip', use_sudo=False, user=None, exists_action=None): """ Install Python package(s) using `pip`_. Package names are case insensitive. Starting with version 1.5, pip no longer scrapes insecure external urls by default and no longer installs externally hosted files by default. Use ``allow_external=['foo', 'bar']`` or ``allow_unverified=['bar', 'baz']`` to change these behaviours for specific packages. Examples:: import fabtools # Install a single package fabtools.python.install('package', use_sudo=True) # Install a list of packages fabtools.python.install(['pkg1', 'pkg2'], use_sudo=True) .. 
_pip: http://www.pip-installer.org/ """ if isinstance(packages, basestring):<|fim▁hole|> allow_external = [] elif allow_external == True: allow_external = packages if allow_unverified in (None, False): allow_unverified = [] elif allow_unverified == True: allow_unverified = packages options = [] if upgrade: options.append('--upgrade') if download_cache: options.append('--download-cache="%s"' % download_cache) if quiet: options.append('--quiet') for package in allow_external: options.append('--allow-external="%s"' % package) for package in allow_unverified: options.append('--allow-unverified="%s"' % package) if exists_action: options.append('--exists-action=%s' % exists_action) options = ' '.join(options) packages = ' '.join(packages) command = '%(pip_cmd)s install %(options)s %(packages)s' % locals() if use_sudo: sudo(command, user=user, pty=False) else: run(command, pty=False) def install_requirements(filename, upgrade=False, download_cache=None, allow_external=None, allow_unverified=None, quiet=False, pip_cmd='pip', use_sudo=False, user=None, exists_action=None): """ Install Python packages from a pip `requirements file`_. :: import fabtools fabtools.python.install_requirements('project/requirements.txt') .. _requirements file: http://www.pip-installer.org/en/latest/requirements.html """ if allow_external is None: allow_external = [] if allow_unverified is None: allow_unverified = [] options = [] if upgrade: options.append('--upgrade') if download_cache: options.append('--download-cache="%s"' % download_cache) for package in allow_external: options.append('--allow-external="%s"' % package) for package in allow_unverified: options.append('--allow-unverified="%s"' % package) if quiet: options.append('--quiet') if exists_action: options.append('--exists-action=%s' % exists_action) options = ' '.join(options) command = '%(pip_cmd)s install %(options)s -r %(filename)s' % locals() if use_sudo: sudo(command, user=user, pty=False) else: run(command, pty=False) def create_virtualenv(directory, system_site_packages=False, venv_python=None, use_sudo=False, user=None, clear=False, prompt=None, virtualenv_cmd='virtualenv'): """ Create a Python `virtual environment`_. :: import fabtools fabtools.python.create_virtualenv('/path/to/venv') .. _virtual environment: http://www.virtualenv.org/ """ options = ['--quiet'] if system_site_packages: options.append('--system-site-packages') if venv_python: options.append('--python=%s' % quote(venv_python)) if clear: options.append('--clear') if prompt: options.append('--prompt=%s' % quote(prompt)) options = ' '.join(options) directory = quote(directory) command = '%(virtualenv_cmd)s %(options)s %(directory)s' % locals() if use_sudo: sudo(command, user=user) else: run(command) def virtualenv_exists(directory): """ Check if a Python `virtual environment`_ exists. .. _virtual environment: http://www.virtualenv.org/ """ return is_file(posixpath.join(directory, 'bin', 'python')) @contextmanager def virtualenv(directory, local=False): """ Context manager to activate an existing Python `virtual environment`_. :: from fabric.api import run from fabtools.python import virtualenv with virtualenv('/path/to/virtualenv'): run('python -V') .. _virtual environment: http://www.virtualenv.org/ """ path_mod = os.path if local else posixpath # Build absolute path to the virtualenv activation script venv_path = abspath(directory) activate_path = path_mod.join(venv_path, 'bin', 'activate') # Source the activation script with prefix('. %s' % quote(activate_path)): yield<|fim▁end|>
packages = [packages] if allow_external in (None, False):
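A hedged usage sketch assembled from the fabtools docstrings in the record above; the virtualenv path and requirements filename are illustrative assumptions, not from the source, and it targets the Fabric 1.x API that this module already imports from.

from fabric.api import task
from fabtools import python  # the module shown in the record above

@task
def bootstrap():
    # '/srv/app/venv' and 'requirements.txt' are hypothetical paths.
    if not python.is_pip_installed():
        python.install_pip()  # downloads get-pip.py and runs it as root by default
    if not python.virtualenv_exists('/srv/app/venv'):
        python.create_virtualenv('/srv/app/venv')
    with python.virtualenv('/srv/app/venv'):  # sources bin/activate for wrapped commands
        python.install_requirements('requirements.txt')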
<|file_name|>checker.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # coding=utf-8 # Simple Steam profile checker Telegram bot # Copyright (c) 2017 EasyCoding Team # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from html import unescape from re import sub from urllib.request import Request as request, urlopen from xml.dom import minidom class SteamChecker: @staticmethod def striptags(gtstr, gtrep=''): """ Strip HTML tags from string. :param gtstr: String to strip tags :param gtrep: Replacement for tags :return: String without HTML tags """ return sub('<[^<]+?>', gtrep, unescape(gtstr)) def __fetchxml(self): """ Format query to API, fetch results and return them as string. :return: API check results """ apiuri = 'https://check.team-fortress.su/api.php?action=check&token=%s&id=%s' % (self.__token, self.__id) req = request(apiuri, data=None, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:52.0.0)' 'Gecko/20100101 Firefox/52.0.0'}) with urlopen(req) as xmlres: return xmlres.read().decode('utf-8') @property def sitestatus(self): """ TEAM-FORTRESS.SU user friendly status of checked user profile. :return: TEAM-FORTRESS.SU check results """ # Set dictionary with API return codes... stv = { '1': 'гарант', '2': 'в белом списке', '3': 'в чёрном списке', '4': 'нет в базе', '5': 'в чёрном списке аукциона', '6': 'сотрудник сайта', '7': 'донатер', '8': 'ненадёжный' } # Return result using dictionary... return stv[self.__sitestatus] @property def vacstatus(self): """ VAC status of checked user profile. :return: VAC status """ stv = { '0': 'чист', '1': 'забанен' } return stv[self.__vacstatus] @property def f2pstatus(self): """ Free-to-Play status (has no purchased games) of checked user profile. :return: Free-to-Play status """ stv = { '0': 'нет', '1': 'да' } return stv[self.__f2pstatus] @property def tradestatus(self): """ Current trade status of checked user profile. :return: Trade status """ stv = { '0': 'нет ограничений', '1': 'заблокирована', '2': 'испытательный срок' } return stv[self.__tradestatus] @property def gamebanstatus(self): """ Current game bans on checked user profile. :return: Game bans status and their count """ return 'нет' if self.__gamebans == '0' else 'есть (%s)' % self.__gamebans @property def description(self): """ Formatted custom description of checked user profile. :return: Custom description with markup """ return '`%s`' % self.striptags(self.__description, ' ') if self.__description else '*отсутствует.*' def __init__(self, tid, token): """ Main SteamChecker constructor. :param tid: Profile link, username or SteamID :param token: API token """ # Setting token and unique identifier to pseudo-private properties... self.__id = tid self.__token = token # Fetching XML from API... rxml = self.__fetchxml() # Parsing received XML... xmlp = minidom.parseString(rxml) # Checking API result... 
if xmlp.getElementsByTagName('qstatus')[0].firstChild.data != 'OK': raise Exception('Incorrect API return code') # Setting public fields... self.steamid32 = xmlp.getElementsByTagName('steamID')[0].firstChild.data self.steamid64 = xmlp.getElementsByTagName('steamID64')[0].firstChild.data self.steamidv3 = xmlp.getElementsByTagName('steamIDv3')[0].firstChild.data self.nickname = xmlp.getElementsByTagName('nickname')[0].firstChild.data self.avatar = xmlp.getElementsByTagName('avatar')[0].firstChild.data self.permalink = xmlp.getElementsByTagName('permalink')[0].firstChild.data self.srstatus = self.striptags(xmlp.getElementsByTagName('steamrep')[0].firstChild.data) # Setting private fields... self.__sitestatus = xmlp.getElementsByTagName('sitestatus')[0].firstChild.data self.__vacstatus = xmlp.getElementsByTagName('isbanned')[0].firstChild.data self.__f2pstatus = xmlp.getElementsByTagName('isf2p')[0].firstChild.data self.__tradestatus = xmlp.getElementsByTagName('istrbanned')[0].firstChild.data self.__premium = xmlp.getElementsByTagName('ispremium')[0].firstChild.data<|fim▁hole|> self.__description = dcs.data if dcs else ''<|fim▁end|>
self.__gamebans = xmlp.getElementsByTagName('gamebans')[0].firstChild.data # Fetching custom description... dcs = xmlp.getElementsByTagName('customdescr')[0].firstChild
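An illustrative driver for the SteamChecker class above; it is ours, the profile id and API token are placeholders, and the constructor performs the network fetch, so a real token is required.

# Placeholder credentials; check.team-fortress.su expects a real API token.
checker = SteamChecker('gabelogannewell', 'YOUR-API-TOKEN')
print(checker.nickname, checker.permalink)
print('VAC:', checker.vacstatus)      # resolved through the Russian status tables
print('Trade:', checker.tradestatus)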
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod char_indexing; /// Defines potential patterns used to match against a password pub mod patterns; use self::patterns::*; use crate::frequency_lists::DictionaryType; use char_indexing::{CharIndexable, CharIndexableStr}; use fancy_regex::Regex as FancyRegex; use itertools::Itertools; use regex::Regex; use std::collections::HashMap; /// A match of a predictable pattern in the password. #[derive(Debug, Clone, PartialEq, Default)] #[cfg_attr(feature = "builder", derive(Builder))] #[cfg_attr(feature = "builder", builder(default))] #[cfg_attr(feature = "ser", derive(Serialize))] pub struct Match { /// Beginning of the match. pub i: usize, /// End of the match. pub j: usize, /// Token that has been matched. pub token: String, /// Pattern type and details used to detect this match. #[cfg_attr(feature = "ser", serde(flatten))] pub pattern: MatchPattern, /// Estimated number of tries for guessing the match. pub guesses: Option<u64>, } impl Match { /// Get the range of the index of the chars that are included in the match. pub fn range_inclusive(&self) -> std::ops::RangeInclusive<usize> { self.i..=self.j } } #[allow(clippy::implicit_hasher)] pub(crate) fn omnimatch(password: &str, user_inputs: &HashMap<String, usize>) -> Vec<Match> { let mut matches: Vec<Match> = MATCHERS .iter() .flat_map(|x| x.get_matches(password, user_inputs)) .collect(); matches.sort_unstable_by(|a, b| { let range1 = a.range_inclusive(); let range2 = b.range_inclusive(); range1 .start() .cmp(range2.start()) .then_with(|| range1.end().cmp(range2.end())) }); matches } lazy_static! { static ref L33T_TABLE: HashMap<char, Vec<char>> = { let mut table = HashMap::with_capacity(12); table.insert('a', vec!['4', '@']); table.insert('b', vec!['8']); table.insert('c', vec!['(', '{', '[', '<']); table.insert('e', vec!['3']); table.insert('g', vec!['6', '9']); table.insert('i', vec!['1', '!', '|']); table.insert('l', vec!['1', '|', '7']); table.insert('o', vec!['0']); table.insert('s', vec!['$', '5']); table.insert('t', vec!['+', '7']); table.insert('x', vec!['%']); table.insert('z', vec!['2']); table }; static ref GRAPHS: HashMap<&'static str, &'static HashMap<char, Vec<Option<&'static str>>>> = { let mut table = HashMap::with_capacity(4); table.insert("qwerty", &*super::adjacency_graphs::QWERTY); table.insert("dvorak", &*super::adjacency_graphs::DVORAK); table.insert("keypad", &*super::adjacency_graphs::KEYPAD); table.insert("mac_keypad", &*super::adjacency_graphs::MAC_KEYPAD); table }; } trait Matcher: Send + Sync { fn get_matches(&self, password: &str, user_inputs: &HashMap<String, usize>) -> Vec<Match>; } lazy_static! 
{ static ref MATCHERS: [Box<dyn Matcher>; 8] = [ Box::new(DictionaryMatch {}), Box::new(ReverseDictionaryMatch {}), Box::new(L33tMatch {}), Box::new(SpatialMatch {}), Box::new(RepeatMatch {}), Box::new(SequenceMatch {}), Box::new(RegexMatch {}), Box::new(DateMatch {}), ]; } struct DictionaryMatch {} impl Matcher for DictionaryMatch { fn get_matches(&self, password: &str, user_inputs: &HashMap<String, usize>) -> Vec<Match> { let password_lower_string = password.to_lowercase(); let password_lower = CharIndexableStr::from(password_lower_string.as_str()); let do_trials = move |matches: &mut Vec<Match>, password: &str, dictionary_name: DictionaryType, ranked_dict: &HashMap<&str, usize>| { let len = password.chars().count(); for i in 0..len { for j in i..len { let word = password_lower.char_index(i..j + 1); if let Some(rank) = ranked_dict.get(word).cloned() { let pattern = MatchPattern::Dictionary(DictionaryPattern { matched_word: word.to_string(), rank, dictionary_name, ..DictionaryPattern::default() }); matches.push(Match { pattern, i, j, token: password.chars().take(j + 1).skip(i).collect(), ..Match::default() }); } } } }; let mut matches = Vec::new(); for (dictionary_name, ranked_dict) in super::frequency_lists::RANKED_DICTIONARIES.iter() { do_trials(&mut matches, password, *dictionary_name, ranked_dict); } do_trials( &mut matches, password, DictionaryType::UserInputs, &user_inputs.iter().map(|(x, &i)| (x.as_str(), i)).collect(), ); matches } } struct ReverseDictionaryMatch {} impl Matcher for ReverseDictionaryMatch { fn get_matches(&self, password: &str, user_inputs: &HashMap<String, usize>) -> Vec<Match> { let reversed_password = password.chars().rev().collect::<String>(); (DictionaryMatch {}) .get_matches(&reversed_password, user_inputs) .into_iter() .map(|mut m| { // Reverse token back m.token = m.token.chars().rev().collect(); if let MatchPattern::Dictionary(ref mut pattern) = m.pattern { pattern.reversed = true; } let old_i = m.i; m.i = password.chars().count() - 1 - m.j; m.j = password.chars().count() - 1 - old_i; m }) .collect() } } struct L33tMatch {} impl Matcher for L33tMatch { fn get_matches(&self, password: &str, user_inputs: &HashMap<String, usize>) -> Vec<Match> { let mut matches = Vec::new(); for sub in enumerate_l33t_replacements(&relevant_l33t_subtable(password)) { if sub.is_empty() { break; } let subbed_password = translate(password, &sub); for mut m4tch in (DictionaryMatch {}).get_matches(&subbed_password, user_inputs) { let token = password .chars() .take(m4tch.j + 1) .skip(m4tch.i) .collect::<String>(); { let pattern = if let MatchPattern::Dictionary(ref mut pattern) = m4tch.pattern { pattern } else { unreachable!() }; if token.to_lowercase() == pattern.matched_word { // Only return the matches that contain an actual substitution continue; } let match_sub: HashMap<char, char> = sub .clone() .into_iter() .filter(|&(subbed_chr, _)| token.contains(subbed_chr)) .collect(); m4tch.token = token; pattern.l33t = true; pattern.sub_display = Some( match_sub .iter() .map(|(k, v)| format!("{} -> {}", k, v)) .join(", "), ); pattern.sub = Some(match_sub); } matches.push(m4tch); } } matches .into_iter() .filter(|x| !x.token.is_empty()) .collect() } } fn translate(string: &str, chr_map: &HashMap<char, char>) -> String { string .chars() .map(|c| *chr_map.get(&c).unwrap_or(&c)) .collect() } fn relevant_l33t_subtable(password: &str) -> HashMap<char, Vec<char>> { let password_chars: Vec<char> = password.chars().collect(); let mut subtable: HashMap<char, Vec<char>> = HashMap::new(); for 
(letter, subs) in L33T_TABLE.iter() { let relevant_subs: Vec<char> = subs .iter() .filter(|&x| password_chars.contains(x)) .cloned() .collect(); if !relevant_subs.is_empty() { subtable.insert(*letter, relevant_subs); } } subtable } fn enumerate_l33t_replacements(table: &HashMap<char, Vec<char>>) -> Vec<HashMap<char, char>> { /// Recursive function that does the work fn helper( table: &HashMap<char, Vec<char>>, subs: Vec<Vec<(char, char)>>, remaining_keys: &[char], ) -> Vec<Vec<(char, char)>> { if remaining_keys.is_empty() { return subs; } let (first_key, rest_keys) = remaining_keys.split_first().unwrap(); let mut next_subs: Vec<Vec<(char, char)>> = Vec::new(); for l33t_chr in &table[first_key] { for sub in &subs { let mut dup_l33t_index = None; for (i, item) in sub.iter().enumerate() { if item.0 == *l33t_chr { dup_l33t_index = Some(i); break; } } if let Some(idx) = dup_l33t_index { let mut sub_alternative = sub.clone(); sub_alternative.remove(idx); sub_alternative.push((*l33t_chr, *first_key)); next_subs.push(sub.clone()); next_subs.push(sub_alternative); } else { let mut sub_extension = sub.clone(); sub_extension.push((*l33t_chr, *first_key)); next_subs.push(sub_extension); } } } helper( table, next_subs .into_iter() .map(|x| x.iter().unique().cloned().collect()) .collect(), rest_keys, ) } helper( table, vec![vec![]], table.keys().cloned().collect::<Vec<char>>().as_slice(), ) .into_iter() .map(|sub| sub.into_iter().collect::<HashMap<char, char>>()) .collect() } struct SpatialMatch {} impl Matcher for SpatialMatch { fn get_matches(&self, password: &str, _user_inputs: &HashMap<String, usize>) -> Vec<Match> { GRAPHS .iter() .flat_map(|(graph_name, graph)| spatial_match_helper(password, graph, graph_name)) .collect() } } const SHIFTED_CHARS: [char; 49] = [ '[', '~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+', 'Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', '{', '}', '|', 'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', ':', '"', 'Z', 'X', 'C', 'V', 'B', 'N', 'M', '<', '>', '?', ']', ]; fn spatial_match_helper( password: &str, graph: &HashMap<char, Vec<Option<&str>>>, graph_name: &str, ) -> Vec<Match> { let mut matches = Vec::new(); let password_len = password.chars().count(); if password_len <= 2 { return matches; } let mut i = 0; while i < password_len - 1 { let mut j = i + 1; let mut last_direction = None; let mut turns = 0; let mut shifted_count = if ["qwerty", "dvorak"].contains(&graph_name) && SHIFTED_CHARS.contains(&password.chars().nth(i).unwrap()) { 1 } else { 0 }; loop { let prev_char = password.chars().nth(j - 1).unwrap(); let mut found = false; let found_direction; let mut cur_direction = -1; let adjacents = graph.get(&prev_char).cloned().unwrap_or_else(Vec::new); // consider growing pattern by one character if j hasn't gone over the edge. if j < password_len { let cur_char = password.chars().nth(j).unwrap(); for adj in adjacents { cur_direction += 1; if let Some(adj) = adj { if let Some(adj_position) = adj.find(cur_char) { found = true; found_direction = cur_direction; if adj_position == 1 { // index 1 in the adjacency means the key is shifted, // 0 means unshifted: A vs a, % vs 5, etc. // for example, 'q' is adjacent to the entry '2@'. // @ is shifted w/ index 1, 2 is unshifted. shifted_count += 1; } if last_direction != Some(found_direction) { // adding a turn is correct even in the initial case when last_direction is null: // every spatial pattern starts with a turn. 
turns += 1; last_direction = Some(found_direction); } break; } } } } if found { // if the current pattern continued, extend j and try to grow again j += 1; } else { // otherwise push the pattern discovered so far, if any... if j - i > 2 { // Don't consider length 1 or 2 chains let pattern = MatchPattern::Spatial(SpatialPattern { graph: graph_name.to_string(), turns, shifted_count, }); matches.push(Match { pattern, i, j: j - 1, token: password.chars().take(j).skip(i).collect(), ..Match::default() }); } i = j; break; } } } matches } struct RepeatMatch {} impl Matcher for RepeatMatch { fn get_matches(&self, password: &str, user_inputs: &HashMap<String, usize>) -> Vec<Match> { lazy_static! { static ref GREEDY_REGEX: FancyRegex = FancyRegex::new(r"(.+)\1+").unwrap(); static ref LAZY_REGEX: FancyRegex = FancyRegex::new(r"(.+?)\1+").unwrap(); static ref LAZY_ANCHORED_REGEX: FancyRegex = FancyRegex::new(r"^(.+?)\1+$").unwrap(); } let mut matches = Vec::new(); let mut last_index = 0; let char_indexable_password = CharIndexableStr::from(password); let char_count = password.chars().count(); while last_index < char_count { let token = char_indexable_password.char_index(last_index..char_count); let greedy_matches = GREEDY_REGEX.captures(token).unwrap(); if greedy_matches.is_none() { break; } let lazy_matches = LAZY_REGEX.captures(token).unwrap(); let greedy_matches = greedy_matches.unwrap(); let lazy_matches = lazy_matches.unwrap(); let m4tch; let base_token = if greedy_matches.get(0).unwrap().as_str().chars().count() > lazy_matches.get(0).unwrap().as_str().chars().count() { // greedy beats lazy for 'aabaab' // greedy: [aabaab, aab] // lazy: [aa, a] m4tch = greedy_matches; // greedy's repeated string might itself be repeated, eg. // aabaab in aabaabaabaab. // run an anchored lazy match on greedy's repeated string // to find the shortest repeated string LAZY_ANCHORED_REGEX .captures(m4tch.get(0).unwrap().as_str()) .unwrap() .unwrap() .get(1) .unwrap() .as_str() .to_string() } else { // lazy beats greedy for 'aaaaa' // greedy: [aaaa, aa] // lazy: [aaaaa, a] m4tch = lazy_matches; m4tch.get(1).unwrap().as_str().to_string() }; let m = m4tch.get(0).unwrap(); let (i, j) = ( last_index + token[..m.start()].chars().count(), last_index + token[..m.end()].chars().count() - 1, ); // recursively match and score the base string let base_analysis = super::scoring::most_guessable_match_sequence( &base_token, &omnimatch(&base_token, user_inputs), false, ); let base_matches = base_analysis.sequence; let base_guesses = base_analysis.guesses; let pattern = MatchPattern::Repeat(RepeatPattern { repeat_count: m4tch.get(0).unwrap().as_str().chars().count() / base_token.chars().count(), base_token, base_guesses, base_matches, }); matches.push(Match { pattern, i, j, token: m4tch.get(0).unwrap().as_str().to_string(), ..Match::default() }); last_index = j + 1; } matches } } const MAX_DELTA: i32 = 5; /// Identifies sequences by looking for repeated differences in unicode codepoint. /// this allows skipping, such as 9753, and also matches some extended unicode sequences /// such as Greek and Cyrillic alphabets. /// /// for example, consider the input 'abcdb975zy' /// /// password: a b c d b 9 7 5 z y /// index: 0 1 2 3 4 5 6 7 8 9 /// delta: 1 1 1 -2 -41 -2 -2 69 1 /// /// expected result: /// `[(i, j, delta), ...] 
= [(0, 3, 1), (5, 7, -2), (8, 9, 1)]` struct SequenceMatch {} impl Matcher for SequenceMatch { fn get_matches(&self, password: &str, _user_inputs: &HashMap<String, usize>) -> Vec<Match> { fn update(i: usize, j: usize, delta: i32, password: &str, matches: &mut Vec<Match>) { let delta_abs = delta.abs(); if (j - i > 1 || delta_abs == 1) && (0 < delta_abs && delta_abs <= MAX_DELTA) { let token = password.chars().take(j + 1).skip(i).collect::<String>(); let first_chr = token.chars().next().unwrap(); let (sequence_name, sequence_space) = if first_chr.is_lowercase() { ("lower", 26) } else if first_chr.is_uppercase() { ("upper", 26) } else if first_chr.is_digit(10) { ("digits", 10) } else { // conservatively stick with roman alphabet size. // (this could be improved) ("unicode", 26) }; let pattern = MatchPattern::Sequence(SequencePattern { sequence_name, sequence_space, ascending: delta > 0, }); matches.push(Match { pattern, i, j, token, ..Match::default() }); } } let mut matches = Vec::new(); let password_len = password.chars().count(); if password_len <= 1 { return matches; } let mut i = 0; let mut j; let mut last_delta = 0; for k in 1..password_len { let delta = password.chars().nth(k).unwrap() as i32 - password.chars().nth(k - 1).unwrap() as i32; if last_delta == 0 { last_delta = delta; } if last_delta == delta { continue; } j = k - 1; update(i, j, last_delta, password, &mut matches); i = j; last_delta = delta; } update(i, password_len - 1, last_delta, password, &mut matches); matches } } struct RegexMatch {} impl Matcher for RegexMatch { fn get_matches(&self, password: &str, _user_inputs: &HashMap<String, usize>) -> Vec<Match> { let mut matches = Vec::new(); for (&name, regex) in REGEXES.iter() { for capture in regex.captures_iter(password) { let m = capture.get(0).unwrap(); let pattern = MatchPattern::Regex(RegexPattern { regex_name: name, regex_match: capture .iter() .map(|x| x.unwrap().as_str().to_string()) .collect(), }); let (i, j) = ( password[..m.start()].chars().count(), password[..m.end()].chars().count() - 1, ); matches.push(Match { pattern, token: m.as_str().to_string(), i, j, ..Match::default() }); } } matches } } lazy_static! { static ref REGEXES: HashMap<&'static str, Regex> = { let mut table = HashMap::with_capacity(1); table.insert("recent_year", Regex::new(r"19[0-9]{2}|20[0-9]{2}").unwrap()); table }; } /// a "date" is recognized as: /// any 3-tuple that starts or ends with a 2- or 4-digit year, /// with 2 or 0 separator chars (1.1.91 or 1191), /// maybe zero-padded (01-01-91 vs 1-1-91), /// a month between 1 and 12, /// a day between 1 and 31. /// /// note: this isn't true date parsing in that "feb 31st" is allowed, /// this doesn't check for leap years, etc. /// /// recipe: /// start with regex to find maybe-dates, then attempt to map the integers /// onto month-day-year to filter the maybe-dates into dates. /// finally, remove matches that are substrings of other matches to reduce noise. /// /// note: instead of using a lazy or greedy regex to find many dates over the full string, /// this uses a ^...$ regex against every substring of the password -- less performant but leads /// to every possible date match. 
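/// illustrative walkthrough (ours, not from the original comment): for the substring
/// '1/1/91', the separator regex captures (1, '/', 1, '/', 91) and the two separators
/// match; `map_ints_to_ymd(1, 1, 91)` finds no four-digit year, maps (1, 1) onto
/// day/month, and `two_to_four_digit_year` promotes 91 to 1991, yielding the
/// candidate date (1991, 1, 1).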
struct DateMatch {} impl Matcher for DateMatch { fn get_matches(&self, password: &str, _user_inputs: &HashMap<String, usize>) -> Vec<Match> { let mut matches = Vec::new(); let char_indexable = CharIndexableStr::from(password); let password_len = password.chars().count(); // dates without separators are between length 4 '1191' and 8 '11111991' if password_len < 4 { return matches; } for i in 0..(password_len - 3) { for j in (i + 3)..(i + 8) { if j >= password_len { break; } let token_str = char_indexable.char_index(i..j + 1); if !MAYBE_DATE_NO_SEPARATOR_REGEX.is_match(token_str) { continue; } let token = CharIndexableStr::from(token_str); let mut candidates = Vec::new(); for &(k, l) in &DATE_SPLITS[&token.char_count()] { let ymd = map_ints_to_ymd( token.char_index(0..k).parse().unwrap(), token.char_index(k..l).parse().unwrap(), token.char_index(l..j + 1).parse().unwrap(), ); if let Some(ymd) = ymd { candidates.push(ymd); } } if candidates.is_empty() { continue; } // at this point: different possible ymd mappings for the same i,j substring. // match the candidate date that likely takes the fewest guesses: a year closest to 2000. // (scoring.REFERENCE_YEAR). // // ie, considering '111504', prefer 11-15-04 to 1-1-1504 // (interpreting '04' as 2004) let metric = |candidate: &(i32, i8, i8)| { (candidate.0 - *super::scoring::REFERENCE_YEAR).abs() }; let best_candidate = candidates.iter().min_by_key(|&c| metric(c)).unwrap(); let pattern = MatchPattern::Date(DatePattern { separator: String::new(), year: best_candidate.0, month: best_candidate.1, day: best_candidate.2, }); matches.push(Match { pattern, token: token_str.to_string(), i, j, ..Match::default() }); } } // dates with separators are between length 6 '1/1/91' and 10 '11/11/1991' if password_len >= 6 { for i in 0..(password_len - 5) { for j in (i + 5)..(i + 10) { if j >= password_len { break; } let token = char_indexable.char_index(i..j + 1); let (ymd, separator) = { let captures = MAYBE_DATE_WITH_SEPARATOR_REGEX.captures(token); if captures.is_none() { continue; } let captures = captures.unwrap(); if captures[2] != captures[4] { // Original code uses regex backreferences, Rust doesn't support these. 
// Need to manually test that group 2 and 4 are the same continue; } ( map_ints_to_ymd( captures[1].parse().unwrap(), captures[3].parse().unwrap(), captures[5].parse().unwrap(), ), captures[2].to_string(), ) }; if let Some(ymd) = ymd { let pattern = MatchPattern::Date(DatePattern { separator, year: ymd.0, month: ymd.1, day: ymd.2, }); matches.push(Match { pattern, token: token.to_string(), i, j, ..Match::default() }); } } } } matches .iter() .filter(|&x| !matches.iter().any(|y| *x != *y && y.i <= x.i && y.j >= x.j)) .cloned() .collect() } } /// Takes three ints and returns them in a (y, m, d) tuple fn map_ints_to_ymd(first: u16, second: u16, third: u16) -> Option<(i32, i8, i8)> { // given a 3-tuple, discard if: // middle int is over 31 (for all ymd formats, years are never allowed in the middle) // middle int is zero // any int is over the max allowable year // any int is over two digits but under the min allowable year // 2 ints are over 31, the max allowable day // 2 ints are zero // all ints are over 12, the max allowable month if second > 31 || second == 0 { return None; } let mut over_12 = 0; let mut over_31 = 0; let mut zero = 0; for &i in &[first, second, third] { if 99 < i && i < DATE_MIN_YEAR || i > DATE_MAX_YEAR { return None; } if i > 31 { over_31 += 1; } if i > 12 { over_12 += 1; } if i == 0 { zero += 1; } } if over_31 >= 2 || over_12 == 3 || zero >= 2 { return None; } // first look for a four digit year: yyyy + daymonth or daymonth + yyyy let possible_year_splits = &[(third, first, second), (first, second, third)]; for &(year, second, third) in possible_year_splits { if DATE_MIN_YEAR <= year && year <= DATE_MAX_YEAR { let dm = map_ints_to_md(second, third); if let Some(dm) = dm { return Some((i32::from(year), dm.0, dm.1)); } else { // for a candidate that includes a four-digit year, // when the remaining ints don't match to a day and month, // it is not a date. return None; } } } // given no four-digit year, two digit years are the most flexible int to match, so // try to parse a day-month out of (first, second) or (second, first) for &(year, second, third) in possible_year_splits { let dm = map_ints_to_md(second, third); if let Some(dm) = dm { let year = two_to_four_digit_year(year); return Some((i32::from(year), dm.0, dm.1)); } } None } /// Takes two ints and returns them in a (m, d) tuple fn map_ints_to_md(first: u16, second: u16) -> Option<(i8, i8)> { for &(d, m) in &[(first, second), (second, first)] { if (1..=31).contains(&d) && (1..=12).contains(&m) { return Some((m as i8, d as i8)); } } None } fn two_to_four_digit_year(year: u16) -> u16 { if year > 99 { year } else if year > 50 { // 87 -> 1987 year + 1900 } else { // 15 -> 2015 year + 2000 } } const DATE_MIN_YEAR: u16 = 1000; const DATE_MAX_YEAR: u16 = 2050; lazy_static! 
{ static ref DATE_SPLITS: HashMap<usize, Vec<(usize, usize)>> = { let mut table = HashMap::with_capacity(5); // for length-4 strings, eg 1191 or 9111, two ways to split: // 1 1 91 (2nd split starts at index 1, 3rd at index 2) // 91 1 1 table.insert(4, vec![(1, 2), (2, 3)]); // 1 11 91 // 11 1 91 table.insert(5, vec![(1, 3), (2, 3)]); // 1 1 1991 // 11 11 91 // 1991 1 1 table.insert(6, vec![(1, 2), (2, 4), (4, 5)]); // 1 11 1991 // 11 1 1991 // 1991 1 11 // 1991 11 1 table.insert(7, vec![(1, 3), (2, 3), (4, 5), (4, 6)]); // 11 11 1991 // 1991 11 11 table.insert(8, vec![(2, 4), (4, 6)]); table }; static ref MAYBE_DATE_NO_SEPARATOR_REGEX: Regex = Regex::new(r"^[0-9]{4,8}$").unwrap(); static ref MAYBE_DATE_WITH_SEPARATOR_REGEX: Regex = Regex::new(r"^([0-9]{1,4})([\s/\\_.-])([0-9]{1,2})([\s/\\_.-])([0-9]{1,4})$").unwrap(); } #[cfg(test)] mod tests { use crate::matching; use crate::matching::patterns::*; use crate::matching::Matcher; use std::collections::HashMap; #[test] fn test_translate() { let chr_map = vec![('a', 'A'), ('b', 'B')] .into_iter() .collect::<HashMap<char, char>>(); let test_data = [ ("a", chr_map.clone(), "A"), ("c", chr_map.clone(), "c"), ("ab", chr_map.clone(), "AB"), ("abc", chr_map.clone(), "ABc"), ("aa", chr_map.clone(), "AA"), ("abab", chr_map.clone(), "ABAB"), ("", chr_map, ""), ("", HashMap::new(), ""), ("abc", HashMap::new(), "abc"), ]; for &(string, ref map, result) in &test_data { assert_eq!(matching::translate(string, map), result); } } #[test] fn test_dictionary_matches_words_that_contain_other_words() { let matches = (matching::DictionaryMatch {}).get_matches("motherboard", &HashMap::new()); let patterns = ["mother", "motherboard", "board"]; let ijs = [(0, 5), (0, 10), (6, 10)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; } } #[test] fn test_dictionary_matches_multiple_words_when_they_overlap() { let matches = (matching::DictionaryMatch {}).get_matches("1abcdef12", &HashMap::new()); let patterns = ["1abcdef", "abcdef12"]; let ijs = [(0, 6), (1, 8)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; } } #[test] fn test_dictionary_ignores_uppercasing() { let matches = (matching::DictionaryMatch {}).get_matches("BoaRdZ", &HashMap::new()); let patterns = ["BoaRd"]; let ijs = [(0, 4)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; } } #[test] fn test_dictionary_identifies_words_surrounded_by_non_words() { let matches = (matching::DictionaryMatch {}).get_matches("asdf1234&*", &HashMap::new()); let patterns = ["asdf", "asdf1234"]; let ijs = [(0, 3), (0, 7)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; } } #[test] fn test_dictionary_matches_user_inputs() { use 
crate::frequency_lists::DictionaryType; let user_inputs = [("bejeebus".to_string(), 1)] .iter() .cloned() .collect::<HashMap<String, usize>>(); let matches = (matching::DictionaryMatch {}).get_matches("bejeebus", &user_inputs); let patterns = ["bejeebus"]; let ijs = [(0, 7)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.dictionary_name, DictionaryType::UserInputs); } } #[test] fn test_dictionary_matches_against_reversed_words() { let matches = (matching::ReverseDictionaryMatch {}).get_matches("rehtom", &HashMap::new()); let patterns = ["rehtom"]; let ijs = [(0, 5)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.reversed, true); } } #[test] fn test_reduces_l33t_table_to_only_relevant_substitutions() { let test_data = vec![ ("", HashMap::new()), ("a", HashMap::new()), ("4", vec![('a', vec!['4'])].into_iter().collect()), ("4@", vec![('a', vec!['4', '@'])].into_iter().collect()), ( "4({60", vec![ ('a', vec!['4']), ('c', vec!['(', '{']), ('g', vec!['6']), ('o', vec!['0']), ] .into_iter() .collect(), ), ]; for (pw, expected) in test_data { assert_eq!(matching::relevant_l33t_subtable(pw), expected); } } #[test] fn test_enumerates_sets_of_l33t_subs_a_password_might_be_using() { let test_data = vec![ (HashMap::new(), vec![HashMap::new()]), ( vec![('a', vec!['@'])].into_iter().collect(), vec![vec![('@', 'a')].into_iter().collect()], ), ( vec![('a', vec!['@', '4'])].into_iter().collect(), vec![ vec![('@', 'a')].into_iter().collect(), vec![('4', 'a')].into_iter().collect(), ], ), ( vec![('a', vec!['@', '4']), ('c', vec!['('])] .into_iter() .collect(), vec![ vec![('@', 'a'), ('(', 'c')].into_iter().collect(), vec![('4', 'a'), ('(', 'c')].into_iter().collect(), ], ), ]; for (table, subs) in test_data { assert_eq!(matching::enumerate_l33t_replacements(&table), subs); } } #[test] fn test_dictionary_matches_against_l33t_words() { let matches = (matching::L33tMatch {}).get_matches("m0th3r", &HashMap::new()); let patterns = ["m0th3r"]; let ijs = [(0, 5)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.l33t, true); } } #[test] fn test_dictionary_matches_overlapping_l33ted_words() { let matches = (matching::L33tMatch {}).get_matches("p@ssw0rd", &HashMap::new()); let patterns = ["p@ss", "@ssw0rd"]; let ijs = [(0, 3), (1, 7)]; for (k, &pattern) in patterns.iter().enumerate() { let m = matches.iter().find(|m| m.token == *pattern).unwrap(); let (i, j) = ijs[k]; assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Dictionary(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.l33t, true); } } #[test] fn test_doesnt_match_when_multiple_l33t_subs_needed_for_same_letter() { let matches = (matching::L33tMatch {}).get_matches("p4@ssword", &HashMap::new()); assert!(!matches.iter().any(|m| &m.token == "p4@ssword")); } #[test] fn 
test_doesnt_match_single_character_l33ted_words() { let matches = (matching::L33tMatch {}).get_matches("4 ( @", &HashMap::new()); assert!(matches.is_empty()); } #[test] fn test_doesnt_match_1_and_2_char_spatial_patterns() { for password in &["", "/", "qw", "*/"] { let result = (matching::SpatialMatch {}).get_matches(password, &HashMap::new()); assert!(!result.into_iter().any(|m| m.token == *password)); } } #[test] fn test_matches_spatial_patterns_surrounded_by_non_spatial_patterns() { let password = "6tfGHJ"; let m = (matching::SpatialMatch {}) .get_matches(password, &HashMap::new()) .into_iter() .find(|m| m.token == *password) .unwrap(); let p = if let MatchPattern::Spatial(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.graph, "qwerty".to_string()); assert_eq!(p.turns, 2); assert_eq!(p.shifted_count, 3); } #[test] fn test_matches_pattern_as_a_keyboard_pattern() { let test_data = vec![ ("12345", "qwerty", 1, 0), ("@WSX", "qwerty", 1, 4), ("6tfGHJ", "qwerty", 2, 3), ("hGFd", "qwerty", 1, 2), ("/;p09876yhn", "qwerty", 3, 0), ("Xdr%", "qwerty", 1, 2), ("159-", "keypad", 1, 0), ("*84", "keypad", 1, 0), ("/8520", "keypad", 1, 0), ("369", "keypad", 1, 0), ("/963.", "mac_keypad", 1, 0), ("*-632.0214", "mac_keypad", 9, 0), ("aoEP%yIxkjq:", "dvorak", 4, 5), (";qoaOQ:Aoq;a", "dvorak", 11, 4), ]; for (password, keyboard, turns, shifts) in test_data { let matches = (matching::SpatialMatch {}).get_matches(password, &HashMap::new()); let m = matches .into_iter() .find(|m| { if let MatchPattern::Spatial(ref p) = m.pattern { if m.token == *password && p.graph == keyboard { return true; } }; false }) .unwrap(); let p = if let MatchPattern::Spatial(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.turns, turns); assert_eq!(p.shifted_count, shifts); } } #[test] fn test_doesnt_match_len_1_sequences() { for &password in &["", "a", "1"] { assert_eq!( (matching::SequenceMatch {}).get_matches(password, &HashMap::new()), Vec::new() ); } } #[test] fn test_matches_overlapping_sequences() { let password = "abcbabc"; let matches = (matching::SequenceMatch {}).get_matches(password, &HashMap::new()); for &(pattern, i, j, ascending) in &[ ("abc", 0, 2, true), ("cba", 2, 4, false), ("abc", 4, 6, true), ] { let m = matches .iter() .find(|m| m.token == *pattern && m.i == i && m.j == j) .unwrap(); let p = if let MatchPattern::Sequence(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.ascending, ascending); } } #[test] fn test_matches_embedded_sequence_patterns() { let password = "!jihg22"; let matches = (matching::SequenceMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| &m.token == "jihg").unwrap(); let p = if let MatchPattern::Sequence(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.sequence_name, "lower"); assert_eq!(p.ascending, false); } #[test] fn test_matches_pattern_as_sequence() { let test_data = [ ("ABC", "upper", true), ("CBA", "upper", false), ("PQR", "upper", true), ("RQP", "upper", false), ("XYZ", "upper", true), ("ZYX", "upper", false), ("abcd", "lower", true), ("dcba", "lower", false), ("jihg", "lower", false), ("wxyz", "lower", true), ("zxvt", "lower", false), ("0369", "digits", true), ("97531", "digits", false), ]; for &(pattern, name, is_ascending) in &test_data { let matches = (matching::SequenceMatch {}).get_matches(pattern, &HashMap::new()); let m = matches.iter().find(|m| m.token == *pattern).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, pattern.len() - 
1); let p = if let MatchPattern::Sequence(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.sequence_name, name); assert_eq!(p.ascending, is_ascending); } } #[test] fn test_doesnt_match_len_1_repeat_patterns() { for &password in &["", "#"] { assert_eq!( (matching::RepeatMatch {}).get_matches(password, &HashMap::new()), Vec::new() ); } } #[test] fn test_matches_embedded_repeat_patterns() { let password = "y4@&&&&&u%7"; let (i, j) = (3, 7); let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| &m.token == "&&&&&").unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, "&".to_string()); } #[test] fn test_repeats_with_base_character() { for len in 3..13 { for &chr in &['a', 'Z', '4', '&'] { let password = (0..len).map(|_| chr).collect::<String>(); let matches = (matching::RepeatMatch {}).get_matches(&password, &HashMap::new()); let m = matches .iter() .find(|m| { if let MatchPattern::Repeat(ref p) = m.pattern { if p.base_token == format!("{}", chr) { return true; } }; false }) .unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, len - 1); } } } #[test] fn test_multiple_adjacent_repeats() { let password = "BBB1111aaaaa@@@@@@"; let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let test_data = [ ("BBB", 0, 2), ("1111", 3, 6), ("aaaaa", 7, 11), ("@@@@@@", 12, 17), ]; for &(pattern, i, j) in &test_data { let m = matches.iter().find(|m| m.token == pattern).unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, pattern[0..1].to_string()); } } #[test] fn test_multiple_non_adjacent_repeats() { let password = "2818BBBbzsdf1111@*&@!aaaaaEUDA@@@@@@1729"; let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let test_data = [ ("BBB", 4, 6), ("1111", 12, 15), ("aaaaa", 21, 25), ("@@@@@@", 30, 35), ]; for &(pattern, i, j) in &test_data { let m = matches.iter().find(|m| m.token == pattern).unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, pattern[0..1].to_string()); } } #[test] fn test_multiple_character_repeats() { let password = "abab"; let (i, j) = (0, 3); let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| m.token == *password).unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, "ab".to_string()); } #[test] fn test_matches_longest_repeat() { let password = "aabaab"; let (i, j) = (0, 5); let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| m.token == *password).unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, "aab".to_string()); } #[test] fn test_identifies_simplest_repeat() { let password = "abababab"; let (i, j) = (0, 7); let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| m.token == *password).unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref 
p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, "ab".to_string()); } #[test] fn test_identifies_repeat_with_multibyte_utf8() { let password = "x\u{1F431}\u{1F436}\u{1F431}\u{1F436}"; let (i, j) = (1, 4); let matches = (matching::RepeatMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| m.token == password[1..]).unwrap(); assert_eq!(m.i, i); assert_eq!(m.j, j); let p = if let MatchPattern::Repeat(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.base_token, "\u{1F431}\u{1F436}".to_string()); } #[test] fn test_regex_matching() { let test_data = [("1922", "recent_year"), ("2017", "recent_year")]; for &(pattern, name) in &test_data { let matches = (matching::RegexMatch {}).get_matches(pattern, &HashMap::new()); let m = matches.iter().find(|m| m.token == *pattern).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, pattern.len() - 1); let p = if let MatchPattern::Regex(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.regex_name, name); } } #[test] fn test_date_matching_with_various_separators() { let separators = ["", " ", "-", "/", "\\", "_", "."]; for sep in &separators { let password = format!("13{}2{}1921", sep, sep); let matches = (matching::DateMatch {}).get_matches(&password, &HashMap::new()); let m = matches.iter().find(|m| m.token == password).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, password.len() - 1); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, 1921); assert_eq!(p.month, 2); assert_eq!(p.day, 13); assert_eq!(p.separator, sep.to_string()); } } #[test] fn test_date_matches_year_closest_to_reference_year() { use chrono::{Datelike, Local}; let password = format!("1115{}", Local::today().year() % 100); let matches = (matching::DateMatch {}).get_matches(&password, &HashMap::new()); let m = matches.iter().find(|m| m.token == password).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, password.len() - 1); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, Local::today().year()); assert_eq!(p.month, 11); assert_eq!(p.day, 15); assert_eq!(p.separator, "".to_string()); } #[test] fn test_date_matches() { let test_data = [(1, 1, 1999), (11, 8, 2000), (9, 12, 2005), (22, 11, 1551)]; for &(day, month, year) in &test_data { let password = format!("{}{}{}", year, month, day); let matches = (matching::DateMatch {}).get_matches(&password, &HashMap::new()); let m = matches.iter().find(|m| m.token == password).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, password.len() - 1); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, year); assert_eq!(p.separator, "".to_string()); }<|fim▁hole|> let m = matches.iter().find(|m| m.token == password).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, password.len() - 1); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, year); assert_eq!(p.separator, ".".to_string()); } } #[test] fn test_matching_zero_padded_dates() { let password = "02/02/02"; let matches = (matching::DateMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| m.token == password).unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, password.len() - 1); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, 2002); assert_eq!(p.month, 
2); assert_eq!(p.day, 2); assert_eq!(p.separator, "/".to_string()); } #[test] fn test_matching_embedded_dates() { let password = "a1/1/91!"; let matches = (matching::DateMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| &m.token == "1/1/91").unwrap(); assert_eq!(m.i, 1); assert_eq!(m.j, password.len() - 2); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, 1991); assert_eq!(p.month, 1); assert_eq!(p.day, 1); assert_eq!(p.separator, "/".to_string()); } #[test] fn test_matching_overlapping_dates() { let password = "12/20/1991.12.20"; let matches = (matching::DateMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| &m.token == "12/20/1991").unwrap(); assert_eq!(m.i, 0); assert_eq!(m.j, 9); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, 1991); assert_eq!(p.month, 12); assert_eq!(p.day, 20); assert_eq!(p.separator, "/".to_string()); let m = matches.iter().find(|m| &m.token == "1991.12.20").unwrap(); assert_eq!(m.i, 6); assert_eq!(m.j, password.len() - 1); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, 1991); assert_eq!(p.month, 12); assert_eq!(p.day, 20); assert_eq!(p.separator, ".".to_string()); } #[test] fn test_matches_dates_padded_by_non_ambiguous_digits() { let password = "912/20/919"; let matches = (matching::DateMatch {}).get_matches(password, &HashMap::new()); let m = matches.iter().find(|m| &m.token == "12/20/91").unwrap(); assert_eq!(m.i, 1); assert_eq!(m.j, password.len() - 2); let p = if let MatchPattern::Date(ref p) = m.pattern { p } else { panic!("Wrong match pattern") }; assert_eq!(p.year, 1991); assert_eq!(p.month, 12); assert_eq!(p.day, 20); assert_eq!(p.separator, "/".to_string()); } #[test] fn test_omnimatch() { assert_eq!(matching::omnimatch("", &HashMap::new()), Vec::new()); let password = "r0sebudmaelstrom11/20/91aaaa"; let expected = [ ("dictionary", 0, 6), ("dictionary", 7, 15), ("date", 16, 23), ("repeat", 24, 27), ]; let matches = matching::omnimatch(password, &HashMap::new()); for &(pattern_name, i, j) in &expected { assert!(matches .iter() .any(|m| m.pattern.variant() == pattern_name && m.i == i && m.j == j)); } } }<|fim▁end|>
for &(day, month, year) in &test_data { let password = format!("{}.{}.{}", year, month, day); let matches = (matching::DateMatch {}).get_matches(&password, &HashMap::new());
<|file_name|>modelinstance.py<|end_file_name|><|fim▁begin|>import numpy as np from menpo.base import Targetable, Vectorizable from menpo.model import MeanInstanceLinearModel from menpofit.differentiable import DP def similarity_2d_instance_model(shape): r""" A MeanInstanceLinearModel that encodes all possible 2D similarity transforms of a 2D shape (of n_points). Parameters ---------- shape : 2D :class:`menpo.shape.Shape` Returns ------- model : `menpo.model.linear.MeanInstanceLinearModel` Model with four components, linear combinations of which represent the original shape under a similarity transform. The model is exhaustive (that is, all possible similarity transforms can be expressed in the model). """ shape_vector = shape.as_vector() components = np.zeros((4, shape_vector.shape[0])) components[0, :] = shape_vector # Comp. 1 - just the shape rotated_ccw = shape.points[:, ::-1].copy() # flip x,y -> y,x rotated_ccw[:, 0] = -rotated_ccw[:, 0] # negate (old) y components[1, :] = rotated_ccw.flatten() # C2 - the shape rotated 90 degs components[2, ::2] = 1 # Tx components[3, 1::2] = 1 # Ty return MeanInstanceLinearModel(components, shape_vector, shape) class ModelInstance(Targetable, Vectorizable, DP): r"""A instance of a :map:`InstanceBackedModel`. This class describes an instance produced from one of Menpo's :map:`InstanceBackedModel`. The actual instance provided by the model can be found at self.target. This class is targetable, and so :meth:`set_target` can be used to update the target - this will produce the closest possible instance the Model can produce to the target and set the weights accordingly. Parameters ---------- model : :map:`InstanceBackedModel` The generative model that instances will be taken from """ def __init__(self, model): self.model = model self._target = None # set all weights to 0 (yielding the mean, first call to # from_vector_inplace() or set_target() will update this) self._weights = np.zeros(self.model.n_active_components) self._sync_target_from_state() @property def n_weights(self): r""" The number of parameters in the linear model. :type: int """ return self.model.n_active_components @property def weights(self): r""" In this simple :map:`ModelInstance` the weights are just the weights of the model. """ return self._weights @property def target(self): return self._target def _target_setter(self, new_target): r""" Called by the Targetable framework when set_target() is called. This method **ONLY SETS THE NEW TARGET** it does no synchronisation logic (for that, see _sync_state_from_target()) """ self._target = new_target def _new_target_from_state(self): r""" Return the appropriate target for the parameters provided. Subclasses can override this. Returns ------- new_target: model instance """ return self.model.instance(self.weights) def _sync_state_from_target(self): # 1. Find the optimum parameters and set them self._weights = self._weights_for_target(self.target) # 2. Find the closest target the model can reproduce and trigger an # update of our transform self._target_setter(self._new_target_from_state()) def _weights_for_target(self, target): r""" Return the appropriate model weights for target provided. Subclasses can override this. 
Parameters ---------- target: model instance The target that the statistical model will try to reproduce Returns ------- weights: (P,) ndarray Weights of the statistical model that generate the closest instance to the requested target """ return self.model.project(target) def _as_vector(self): r""" Return the current parameters of this transform - this is the just the linear model's weights Returns ------- params : (`n_parameters`,) ndarray The vector of parameters """ return self.weights def from_vector_inplace(self, vector): r""" Updates this :map:`ModelInstance` from it's vectorized form (in this case, simply the weights on the linear model) """ self._weights = vector self._sync_target_from_state() class PDM(ModelInstance, DP): r"""Specialization of :map:`ModelInstance` for use with spatial data. """ @property def n_dims(self): r""" The number of dimensions of the spatial instance of the model :type: int """ return self.model.template_instance.n_dims def d_dp(self, points): """ Returns the Jacobian of the PCA model reshaped to have the standard Jacobian shape: n_points x n_params x n_dims which maps to n_features x n_components x n_dims on the linear model Returns ------- jacobian : (n_features, n_components, n_dims) ndarray The Jacobian of the model in the standard Jacobian shape. """ d_dp = self.model.components.reshape(self.model.n_active_components, -1, self.n_dims) return d_dp.swapaxes(0, 1) # TODO: document me class GlobalPDM(PDM): r"""<|fim▁hole|> # Start the global_transform as an identity (first call to # from_vector_inplace() or set_target() will update this) mean = model.mean() self.global_transform = global_transform_cls(mean, mean) super(GlobalPDM, self).__init__(model) @property def n_global_parameters(self): r""" The number of parameters in the `global_transform` :type: int """ return self.global_transform.n_parameters @property def global_parameters(self): r""" The parameters for the global transform. :type: (`n_global_parameters`,) ndarray """ return self.global_transform.as_vector() def _new_target_from_state(self): r""" Return the appropriate target for the model weights provided, accounting for the effect of the global transform Returns ------- new_target: :class:`menpo.shape.PointCloud` A new target for the weights provided """ return self.global_transform.apply(self.model.instance(self.weights)) def _weights_for_target(self, target): r""" Return the appropriate model weights for target provided, accounting for the effect of the global transform. Note that this method updates the global transform to be in the correct state. 
Parameters ---------- target: :class:`menpo.shape.PointCloud` The target that the statistical model will try to reproduce Returns ------- weights: (P,) ndarray Weights of the statistical model that generate the closest PointCloud to the requested target """ self._update_global_transform(target) projected_target = self.global_transform.pseudoinverse().apply(target) # now we have the target in model space, project it to recover the # weights new_weights = self.model.project(projected_target) # TODO investigate the impact of this, could be problematic # the model can't perfectly reproduce the target we asked for - # reset the global_transform.target to what it CAN produce #refined_target = self._target_for_weights(new_weights) #self.global_transform.target = refined_target return new_weights def _update_global_transform(self, target): self.global_transform.set_target(target) def _as_vector(self): r""" Return the current parameters of this transform - this is the just the linear model's weights Returns ------- params : (`n_parameters`,) ndarray The vector of parameters """ return np.hstack([self.global_parameters, self.weights]) def from_vector_inplace(self, vector): # First, update the global transform global_parameters = vector[:self.n_global_parameters] self._update_global_weights(global_parameters) # Now extract the weights, and let super handle the update weights = vector[self.n_global_parameters:] PDM.from_vector_inplace(self, weights) def _update_global_weights(self, global_weights): r""" Hook that allows for overriding behavior when the global weights are set. Default implementation simply asks global_transform to update itself from vector. """ self.global_transform.from_vector_inplace(global_weights) def d_dp(self, points): # d_dp is always evaluated at the mean shape points = self.model.mean().points # compute dX/dp # dX/dq is the Jacobian of the global transform evaluated at the # current target # (n_points, n_global_params, n_dims) dX_dq = self._global_transform_d_dp(points) # by application of the chain rule dX/db is the Jacobian of the # model transformed by the linear component of the global transform # (n_points, n_weights, n_dims) dS_db = PDM.d_dp(self, []) # (n_points, n_dims, n_dims) dX_dS = self.global_transform.d_dx(points) # (n_points, n_weights, n_dims) dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db) # dX/dp is simply the concatenation of the previous two terms # (n_points, n_params, n_dims) return np.hstack((dX_dq, dX_db)) def _global_transform_d_dp(self, points): return self.global_transform.d_dp(points) # TODO: document me class OrthoPDM(GlobalPDM): r""" """ def __init__(self, model, global_transform_cls): # 1. Construct similarity model from the mean of the model self.similarity_model = similarity_2d_instance_model(model.mean()) # 2. Orthonormalize model and similarity model model_cpy = model.copy() model_cpy.orthonormalize_against_inplace(self.similarity_model) self.similarity_weights = self.similarity_model.project( model_cpy.mean()) super(OrthoPDM, self).__init__(model_cpy, global_transform_cls) @property def global_parameters(self): r""" The parameters for the global transform. 
:type: (`n_global_parameters`,) ndarray """ return self.similarity_weights def _update_global_transform(self, target): self.similarity_weights = self.similarity_model.project(target) self._update_global_weights(self.similarity_weights) def _update_global_weights(self, global_weights): self.similarity_weights = global_weights new_target = self.similarity_model.instance(global_weights) self.global_transform.set_target(new_target) def _global_transform_d_dp(self, points): return self.similarity_model.components.reshape( self.n_global_parameters, -1, self.n_dims).swapaxes(0, 1)<|fim▁end|>
""" def __init__(self, model, global_transform_cls):
<|file_name|>dtrun.py<|end_file_name|><|fim▁begin|># # Copyright (C) 2001 Andrew T. Csillag <[email protected]> # # You may distribute under the terms of either the GNU General # Public License or the SkunkWeb License, as specified in the # README file. # import os import DT import sys import time import marshal import stat def phfunc(name, obj): marshal.dump(obj, open(name,'w'))<|fim▁hole|> if __name__=='__main__': bt = time.time() fname=sys.argv[1] mtime=os.stat(fname)[stat.ST_MTIME] cform=sys.argv[1]+'.dtcc' try: cmtime=os.stat(cform)[stat.ST_MTIME] comp_form=marshal.load(open(cform)) except: comp_form=None cmtime=-1 d=DT.DT(open(fname).read(), fname, comp_form, mtime, cmtime, lambda x, y=cform: phfunc(y, x)) class dumb: pass ns=dumb() text = d(ns) et = time.time() print text print 'elapsed time:', et - bt<|fim▁end|>
<|file_name|>regions-outlives-nominal-type-region-rev.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that a nominal type (like `Foo<'a>`) outlives `'b` if its // arguments (like `'a`) outlive `'b`.<|fim▁hole|> #![allow(dead_code)] mod rev_variant_struct_region { struct Foo<'a> { x: fn(&'a i32), } trait Trait<'a, 'b> { type Out; } impl<'a, 'b> Trait<'a, 'b> for usize { type Out = &'a Foo<'b>; //~ ERROR reference has a longer lifetime } } fn main() { }<|fim▁end|>
// // Rule OutlivesNominalType from RFC 1214.
<|file_name|>drawing_skyline.py<|end_file_name|><|fim▁begin|>import collections import functools from typing import List from test_framework import generic_test from test_framework.test_utils import enable_executor_hook Rect = collections.namedtuple('Rect', ('left', 'right', 'height')) def compute_skyline(buildings: List[Rect]) -> List[Rect]: # TODO - you fill in here. return [] @enable_executor_hook def compute_skyline_wrapper(executor, buildings): buildings = [Rect(*x) for x in buildings] <|fim▁hole|> if __name__ == '__main__': exit( generic_test.generic_test_main('drawing_skyline.py', 'drawing_skyline.tsv', compute_skyline_wrapper))<|fim▁end|>
result = executor.run(functools.partial(compute_skyline, buildings)) return [(x.left, x.right, x.height) for x in result]
<|file_name|>quick_test.py<|end_file_name|><|fim▁begin|>import sys import os from ..data.molecular_species import molecular_species from ..data.reaction_mechanism_class import reaction_mechanism from ..data.condition_class import condition from ..data.reagent import reagent from ..data.puzzle_class import puzzle from ..data.solution_class import solution def name(class_obj): return class_obj.__name__ # depends on JSON base class for class_being_tested in [molecular_species, condition, reaction_mechanism, reagent, puzzle, solution]: system_output = sys.stdout # store stdout sys.stdout = open(os.getcwd() + "/testing_result_" + name(class_being_tested) + ".txt", "w") # pipe to file test_result = class_being_tested.test() sys.stdout.close() # close file sys.stdout = system_output #replace stdout<|fim▁hole|> if test_result: print("PASSED", name(class_being_tested), sep=" ") else: print("FAILED", name(class_being_tested), sep=" ")<|fim▁end|>
<|file_name|>plugin-compat.js<|end_file_name|><|fim▁begin|>let hookTypes; const callStyles = { sync: 'applyPlugins', syncWaterfall: 'applyPluginsWaterfall', syncBail: 'applyPluginsBailResult', sync_map: 'applyPlugins', asyncWaterfall: 'applyPluginsAsyncWaterfall', asyncParallel: 'applyPluginsParallel', asyncSerial: 'applyPluginsAsync', }; const camelToDash = camel => camel.replace(/_/g, '--').replace(/[A-Z]/g, c => `-${c.toLowerCase()}`); const knownPluginRegistrations = { Compilation: { needAdditionalPass: ['sync', []], succeedModule: ['sync', ['module']], buildModule: ['sync', ['module']],<|fim▁hole|> Compiler: { afterCompile: ['asyncSerial', ['compilation']], afterEnvironment: ['sync', []], afterPlugins: ['sync', []], afterResolvers: ['sync', []], compilation: ['sync', ['compilation', 'params']], emit: ['asyncSerial', ['compilation']], make: ['asyncParallel', ['compilation']], watchRun: ['asyncSerial', ['watcher']], run: ['asyncSerial', ['compiler']], }, NormalModuleFactory: { createModule: ['syncBail', ['data']], parser: ['sync_map', ['parser', 'parserOptions']], resolver: ['syncWaterfall', ['nextResolver']], }, ContextModuleFactory: { afterResolve: ['asyncWaterfall', ['data']], }, }; exports.register = (tapable, name, style, args) => { if (tapable.hooks) { if (!hookTypes) { const Tapable = require('tapable'); hookTypes = { sync: Tapable.SyncHook, syncWaterfall: Tapable.SyncWaterfallHook, syncBail: Tapable.SyncBailHook, asyncWaterfall: Tapable.AsyncWaterfallHook, asyncParallel: Tapable.AsyncParallelHook, asyncSerial: Tapable.AsyncSeriesHook, asyncSeries: Tapable.AsyncSeriesHook, }; } if (!tapable.hooks[name]) { tapable.hooks[name] = new hookTypes[style](args); } } else { if (!tapable.__hardSource_hooks) { tapable.__hardSource_hooks = {}; } if (!tapable.__hardSource_hooks[name]) { tapable.__hardSource_hooks[name] = { name, dashName: camelToDash(name), style, args, async: style.startsWith('async'), map: style.endsWith('_map'), }; } if (!tapable.__hardSource_proxy) { tapable.__hardSource_proxy = {}; } if (!tapable.__hardSource_proxy[name]) { if (tapable.__hardSource_hooks[name].map) { const _forCache = {}; tapable.__hardSource_proxy[name] = { _forCache, for: key => { let hook = _forCache[key]; if (hook) { return hook; } _forCache[key] = { tap: (...args) => exports.tapFor(tapable, name, key, ...args), tapPromise: (...args) => exports.tapPromiseFor(tapable, name, key, ...args), call: (...args) => exports.callFor(tapable, name, key, ...args), promise: (...args) => exports.promiseFor(tapable, name, key, ...args), }; return _forCache[key]; }, tap: (...args) => exports.tapFor(tapable, name, ...args), tapPromise: (...args) => exports.tapPromiseFor(tapable, name, ...args), call: (...args) => exports.callFor(tapable, name, ...args), promise: (...args) => exports.promiseFor(tapable, name, ...args), }; } else { tapable.__hardSource_proxy[name] = { tap: (...args) => exports.tap(tapable, name, ...args), tapPromise: (...args) => exports.tapPromise(tapable, name, ...args), call: (...args) => exports.call(tapable, name, args), promise: (...args) => exports.promise(tapable, name, args), }; } } } }; exports.tap = (tapable, name, reason, callback) => { if (tapable.hooks) { tapable.hooks[name].tap(reason, callback); } else { if (!tapable.__hardSource_hooks || !tapable.__hardSource_hooks[name]) { const registration = knownPluginRegistrations[tapable.constructor.name][name]; exports.register(tapable, name, registration[0], registration[1]); } const dashName = tapable.__hardSource_hooks[name].dashName; if 
(tapable.__hardSource_hooks[name].async) { tapable.plugin(dashName, (...args) => { const cb = args.pop(); cb(null, callback(...args)); }); } else { tapable.plugin(dashName, callback); } } }; exports.tapPromise = (tapable, name, reason, callback) => { if (tapable.hooks) { tapable.hooks[name].tapPromise(reason, callback); } else { if (!tapable.__hardSource_hooks || !tapable.__hardSource_hooks[name]) { const registration = knownPluginRegistrations[tapable.constructor.name][name]; exports.register(tapable, name, registration[0], registration[1]); } const dashName = tapable.__hardSource_hooks[name].dashName; tapable.plugin(dashName, (...args) => { const cb = args.pop(); return callback(...args).then(value => cb(null, value), cb); }); } }; exports.tapAsync = (tapable, name, reason, callback) => { if (tapable.hooks) { tapable.hooks[name].tapAsync(reason, callback); } else { if (!tapable.__hardSource_hooks || !tapable.__hardSource_hooks[name]) { const registration = knownPluginRegistrations[tapable.constructor.name][name]; exports.register(tapable, name, registration[0], registration[1]); } const dashName = tapable.__hardSource_hooks[name].dashName; tapable.plugin(dashName, callback); } }; exports.call = (tapable, name, args) => { if (tapable.hooks) { const hook = tapable.hooks[name]; return hook.call(...args); } else { const dashName = tapable.__hardSource_hooks[name].dashName; const style = tapable.__hardSource_hooks[name].style; return tapable[callStyles[style]](...[dashName].concat(args)); } }; exports.promise = (tapable, name, args) => { if (tapable.hooks) { const hook = tapable.hooks[name]; return hook.promise(...args); } else { const dashName = tapable.__hardSource_hooks[name].dashName; const style = tapable.__hardSource_hooks[name].style; return new Promise((resolve, reject) => { tapable[callStyles[style]]( ...[dashName].concat(args, (err, value) => { if (err) { reject(err); } else { resolve(value); } }), ); }); } }; exports.tapFor = (tapable, name, key, reason, callback) => { if (tapable.hooks) { tapable.hooks[name].for(key).tap(reason, callback); } else { exports.tap(tapable, name, reason, callback); } }; exports.tapPromiseFor = (tapable, name, key, reason, callback) => { if (tapable.hooks) { tapable.hooks[name].for(key).tapPromise(reason, callback); } else { exports.tapPromise(tapable, name, reason, callback); } }; exports.callFor = (tapable, name, key, args) => { if (tapable.hooks) { tapable.hooks[name].for(key).call(...args); } else { exports.call(tapable, name, args); } }; exports.promiseFor = (tapable, name, key, args) => { if (tapable.hooks) { tapable.hooks[name].for(key).promise(...args); } else { exports.promise(tapable, name, args); } }; exports.hooks = tapable => { if (tapable.hooks) { return tapable.hooks; } if (!tapable.__hardSource_proxy) { tapable.__hardSource_proxy = {}; } const registrations = knownPluginRegistrations[tapable.constructor.name]; if (registrations) { for (const name in registrations) { const registration = registrations[name]; exports.register(tapable, name, registration[0], registration[1]); } } return tapable.__hardSource_proxy; };<|fim▁end|>
seal: ['sync', []], },
<|file_name|>template.go<|end_file_name|><|fim▁begin|>package export import ( "fmt" "os" "github.com/spf13/cobra" "github.com/elastic/beats/libbeat/cmd/instance" "github.com/elastic/beats/libbeat/paths" "github.com/elastic/beats/libbeat/template" ) <|fim▁hole|> Short: "Export index template to stdout", Run: func(cmd *cobra.Command, args []string) { version, _ := cmd.Flags().GetString("es.version") index, _ := cmd.Flags().GetString("index") b, err := instance.NewBeat(name, idxPrefix, beatVersion) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing beat: %s\n", err) os.Exit(1) } err = b.Init() if err != nil { fmt.Fprintf(os.Stderr, "Error initializing beat: %s\n", err) os.Exit(1) } cfg := template.DefaultConfig if b.Config.Template.Enabled() { err = b.Config.Template.Unpack(&cfg) if err != nil { fmt.Fprintf(os.Stderr, "Error getting template settings: %+v", err) os.Exit(1) } } tmpl, err := template.New(b.Info.Version, index, version, cfg) if err != nil { fmt.Fprintf(os.Stderr, "Error generating template: %+v", err) os.Exit(1) } fieldsPath := paths.Resolve(paths.Config, cfg.Fields) templateString, err := tmpl.Load(fieldsPath) if err != nil { fmt.Fprintf(os.Stderr, "Error generating template: %+v", err) os.Exit(1) } _, err = os.Stdout.WriteString(templateString.StringToPrint() + "\n") if err != nil { fmt.Fprintf(os.Stderr, "Error writing template: %+v", err) os.Exit(1) } }, } genTemplateConfigCmd.Flags().String("es.version", beatVersion, "Elasticsearch version") genTemplateConfigCmd.Flags().String("index", idxPrefix, "Base index name") return genTemplateConfigCmd }<|fim▁end|>
func GenTemplateConfigCmd(name, idxPrefix, beatVersion string) *cobra.Command { genTemplateConfigCmd := &cobra.Command{ Use: "template",
<|file_name|>elephant.py<|end_file_name|><|fim▁begin|>import numpy as np import pylab # elephant parameters p1, p2, p3, p4 = (50 - 30j, 18 + 8j, 12 - 10j, -14 - 60j )<|fim▁hole|>def fourier(t, C): f = np.zeros(t.shape) A, B = C.real, C.imag for k in range(len(C)): f = f + A[k]*np.cos(k*t) + B[k]*np.sin(k*t) return f def elephant(t, p1, p2, p3, p4, p5): npar = 6 Cx = np.zeros((npar,), dtype='complex') Cy = np.zeros((npar,), dtype='complex') Cx[1] = p1.real*1j Cx[2] = p2.real*1j Cx[3] = p3.real Cx[5] = p4.real Cy[1] = p4.imag + p1.imag*1j Cy[2] = p2.imag*1j Cy[3] = p3.imag*1j x = np.append(fourier(t,Cx), [-p5.imag]) y = np.append(fourier(t,Cy), [p5.imag]) return x,y x, y = elephant(np.linspace(0,2*np.pi,1000), p1, p2, p3, p4, p5) pylab.plot(y,-x,'.') pylab.show()<|fim▁end|>
p5 = 40 + 20j # eyepiece
<|file_name|>DBImportFirstPage.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 2005-2010, G. Weirich and Elexis * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * G. Weirich - initial implementation * D. Lutz - adapted for importing data from other databases * *******************************************************************************/ package ch.elexis.core.ui.wizards; import org.eclipse.jface.resource.ImageDescriptor; import org.eclipse.jface.wizard.WizardPage; import org.eclipse.swt.SWT; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.List; import org.eclipse.swt.widgets.Text; import org.eclipse.ui.forms.widgets.Form; import org.eclipse.ui.forms.widgets.FormToolkit; import org.eclipse.ui.forms.widgets.TableWrapData; import org.eclipse.ui.forms.widgets.TableWrapLayout; import ch.elexis.core.ui.UiDesk; import ch.elexis.core.ui.icons.ImageSize; import ch.elexis.core.ui.icons.Images; import ch.rgw.tools.JdbcLink; import ch.rgw.tools.StringTool; public class DBImportFirstPage extends WizardPage { List dbTypes; Text server, dbName; String defaultUser, defaultPassword; JdbcLink j = null; static final String[] supportedDB = new String[] { "mySQl", "PostgreSQL", "H2", "ODBC" //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ }; static final int MYSQL = 0; static final int POSTGRESQL = 1; static final int ODBC = 3; static final int H2 = 2; public DBImportFirstPage(String pageName){ super(Messages.DBImportFirstPage_connection, Messages.DBImportFirstPage_typeOfDB, Images.IMG_LOGO.getImageDescriptor(ImageSize._75x66_TitleDialogIconSize)); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ setMessage(Messages.DBImportFirstPage_selectType + Messages.DBImportFirstPage_enterNameODBC); //$NON-NLS-1$ setDescription(Messages.DBImportFirstPage_theDesrciption); //$NON-NLS-1$ } public DBImportFirstPage(String pageName, String title, ImageDescriptor titleImage){ super(pageName, title, titleImage); // TODO Auto-generated constructor stub } public void createControl(Composite parent){ DBImportWizard wiz = (DBImportWizard) getWizard(); FormToolkit tk = UiDesk.getToolkit(); Form form = tk.createForm(parent); form.setText(Messages.DBImportFirstPage_Connection); //$NON-NLS-1$ Composite body = form.getBody(); body.setLayout(new TableWrapLayout()); tk.createLabel(body, Messages.DBImportFirstPage_EnterType); //$NON-NLS-1$ dbTypes = new List(body, SWT.BORDER); dbTypes.setItems(supportedDB); dbTypes.addSelectionListener(new SelectionAdapter() { @Override public void widgetSelected(SelectionEvent e){ int it = dbTypes.getSelectionIndex(); switch (it) { case MYSQL: case POSTGRESQL: server.setEnabled(true); dbName.setEnabled(true); defaultUser = ""; //$NON-NLS-1$ defaultPassword = ""; //$NON-NLS-1$ break; case H2: server.setEnabled(false); dbName.setEnabled(true); defaultUser = "sa"; defaultPassword = ""; break; case ODBC: server.setEnabled(false);<|fim▁hole|> defaultUser = "sa"; //$NON-NLS-1$ defaultPassword = ""; //$NON-NLS-1$ break; default: break; } DBImportSecondPage sec = (DBImportSecondPage) getNextPage(); sec.name.setText(defaultUser); sec.pwd.setText(defaultPassword); } }); tk.adapt(dbTypes, true, true); 
tk.createLabel(body, Messages.DBImportFirstPage_serverAddress); //$NON-NLS-1$ server = tk.createText(body, "", SWT.BORDER); //$NON-NLS-1$ TableWrapData twr = new TableWrapData(TableWrapData.FILL_GRAB); server.setLayoutData(twr); tk.createLabel(body, Messages.DBImportFirstPage_databaseName); //$NON-NLS-1$ dbName = tk.createText(body, "", SWT.BORDER); //$NON-NLS-1$ TableWrapData twr2 = new TableWrapData(TableWrapData.FILL_GRAB); dbName.setLayoutData(twr2); if (wiz.preset != null && wiz.preset.length > 1) { int idx = StringTool.getIndex(supportedDB, wiz.preset[0]); if (idx < dbTypes.getItemCount()) { dbTypes.select(idx); } server.setText(wiz.preset[1]); dbName.setText(wiz.preset[2]); } setControl(form); } }<|fim▁end|>
dbName.setEnabled(true);
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup, find_packages import re import os import sys def get_version(package): """ Return package version as listed in `__version__` in `init.py`. """ init_py = open(os.path.join(package, '__init__.py')).read() return re.search( "^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1) package = 'iosfu' version = get_version(package) if sys.argv[-1] == 'publish': os.system("python setup.py sdist upload") args = {'version': version} print("You probably want to also tag the version now:") print(" git tag -a %(version)s -m 'version %(version)s'" % args) print(" git push --tags")<|fim▁hole|> sys.exit() setup( name='iosfu', version=version, url='http://github.com/fmartingr/iosfu', license='MIT', description='iOS Forensics Utility', author='Felipe Martin', author_email='[email protected]', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=open('requirements.txt').read().split('\n'), classifiers=[ 'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'Intended Audience :: Other Audience' 'Operating System :: OS Independent', 'Programming Language :: Python ;; 2.7', 'Programming Language :: Python ;; 3.3', 'Topic :: Security', ] )<|fim▁end|>
<|file_name|>NodeNotebook.py<|end_file_name|><|fim▁begin|># vim: set fileencoding=utf-8 : # ***********************IMPORTANT NMAP LICENSE TERMS************************ # * * # * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is * # * also a registered trademark of Insecure.Com LLC. This program is free * # * software; you may redistribute and/or modify it under the terms of the * # * GNU General Public License as published by the Free Software * # * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS * # * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, * # * modify, and redistribute this software under certain conditions. If * # * you wish to embed Nmap technology into proprietary software, we sell * # * alternative licenses (contact [email protected]). Dozens of software * # * vendors already license Nmap technology such as host discovery, port * # * scanning, OS detection, version detection, and the Nmap Scripting * # * Engine. * # * * # * Note that the GPL places important restrictions on "derivative works", * # * yet it does not provide a detailed definition of that term. To avoid * # * misunderstandings, we interpret that term as broadly as copyright law * # * allows. For example, we consider an application to constitute a * # * derivative work for the purpose of this license if it does any of the * # * following with any software or content covered by this license * # * ("Covered Software"): * # * * # * o Integrates source code from Covered Software. * # * * # * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db * # * or nmap-service-probes. * # * * # * o Is designed specifically to execute Covered Software and parse the * # * results (as opposed to typical shell or execution-menu apps, which will * # * execute anything you tell them to). * # * * # * o Includes Covered Software in a proprietary executable installer. The * # * installers produced by InstallShield are an example of this. Including * # * Nmap with other software in compressed or archival form does not * # * trigger this provision, provided appropriate open source decompression * # * or de-archiving software is widely available for no charge. For the * # * purposes of this license, an installer is considered to include Covered * # * Software even if it actually retrieves a copy of Covered Software from * # * another source during runtime (such as by downloading it from the * # * Internet). * # * * # * o Links (statically or dynamically) to a library which does any of the * # * above. * # * * # * o Executes a helper program, module, or script to do any of the above. * # * * # * This list is not exclusive, but is meant to clarify our interpretation * # * of derived works with some common examples. Other people may interpret * # * the plain GPL differently, so we consider this a special exception to * # * the GPL that we apply to Covered Software. Works which meet any of * # * these conditions must conform to all of the terms of this license, * # * particularly including the GPL Section 3 requirements of providing * # * source code and allowing free redistribution of the work as a whole. * # * * # * As another special exception to the GPL terms, Insecure.Com LLC grants * # * permission to link the code of this program with any version of the * # * OpenSSL library which is distributed under a license identical to that * # * listed in the included docs/licenses/OpenSSL.txt file, and distribute * # * linked combinations including the two. 
* # * * # * Any redistribution of Covered Software, including any derived works, * # * must obey and carry forward all of the terms of this license, including * # * obeying all GPL rules and restrictions. For example, source code of * # * the whole work must be provided and free redistribution must be * # * allowed. All GPL references to "this License", are to be treated as * # * including the terms and conditions of this license text as well. * # * * # * Because this license imposes special exceptions to the GPL, Covered * # * Work may not be combined (even as part of a larger work) with plain GPL * # * software. The terms, conditions, and exceptions of this license must * # * be included as well. This license is incompatible with some other open * # * source licenses as well. In some cases we can relicense portions of * # * Nmap or grant special permissions to use it in other open source * # * software. Please contact [email protected] with any such requests. * # * Similarly, we don't incorporate incompatible open source software into * # * Covered Software without special permission from the copyright holders. * # * * # * If you have any questions about the licensing restrictions on using * # * Nmap in other works, are happy to help. As mentioned above, we also * # * offer alternative license to integrate Nmap into proprietary * # * applications and appliances. These contracts have been sold to dozens * # * of software vendors, and generally include a perpetual license as well * # * as providing for priority support and updates. They also fund the * # * continued development of Nmap. Please email [email protected] for further * # * information. * # * * # * If you have received a written license agreement or contract for * # * Covered Software stating terms other than these, you may choose to use * # * and redistribute Covered Software under those terms instead of these. * # * * # * Source is provided to this software because we believe users have a * # * right to know exactly what a program is going to do before they run it. * # * This also allows you to audit the software for security holes (none * # * have been found so far). * # * * # * Source code also allows you to port Nmap to new platforms, fix bugs, * # * and add new features. You are highly encouraged to send your changes * # * to the [email protected] mailing list for possible incorporation into the * # * main distribution. By sending these changes to Fyodor or one of the * # * Insecure.Org development mailing lists, or checking them into the Nmap * # * source code repository, it is understood (unless you specify otherwise) * # * that you are offering the Nmap Project (Insecure.Com LLC) the * # * unlimited, non-exclusive right to reuse, modify, and relicense the * # * code. Nmap will always be available Open Source, but this is important * # * because the inability to relicense code has caused devastating problems * # * for other Free Software projects (such as KDE and NASM). We also * # * occasionally relicense the code to third parties as discussed above. * # * If you wish to specify special license conditions of your * # * contributions, just say so when you send them. * # * * # * This program is distributed in the hope that it will be useful, but * # * WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the Nmap * # * license file for more details (it's in a COPYING file included with * # * Nmap, and also available from https://svn.nmap.org/nmap/COPYING * # * * # ***************************************************************************/ import gtk import pango import gobject from radialnet.bestwidgets.boxes import * from radialnet.bestwidgets.expanders import BWExpander from radialnet.bestwidgets.labels import * from radialnet.bestwidgets.textview import * import zenmapCore.I18N PORTS_HEADER = [ _('Port'), _('Protocol'), _('State'), _('Service'), _('Method')] EXTRAPORTS_HEADER = [_('Count'), _('State'), _('Reasons')] SERVICE_COLORS = {'open': '#ffd5d5', 'closed': '#d5ffd5', 'filtered': '#ffffd5', 'unfiltered': '#ffd5d5', 'open|filtered': '#ffd5d5', 'closed|filtered': '#d5ffd5'} UNKNOWN_SERVICE_COLOR = '#d5d5d5' TRACE_HEADER = [_('TTL'), _('RTT'), _('IP'), _('Hostname')] TRACE_TEXT = _( "Traceroute on port <b>%s/%s</b> totalized <b>%d</b> known hops.") NO_TRACE_TEXT = _("No traceroute information available.") HOP_COLOR = {'known': '#ffffff', 'unknown': '#cccccc'} SYSTEM_ADDRESS_TEXT = "[%s] %s" OSMATCH_HEADER = ['%', _('Name'), _('DB Line')] OSCLASS_HEADER = ['%', _('Vendor'), _('Type'), _('Family'), _('Version')] USED_PORTS_TEXT = "%d/%s %s" TCP_SEQ_NOTE = _("""\ <b>*</b> TCP sequence <i>index</i> equal to %d and <i>difficulty</i> is "%s".\ """) def get_service_color(state): color = SERVICE_COLORS.get(state) if color is None: color = UNKNOWN_SERVICE_COLOR return color class NodeNotebook(gtk.Notebook): """ """ def __init__(self, node): """ """ gtk.Notebook.__init__(self) self.set_tab_pos(gtk.POS_TOP) self.__node = node self.__create_widgets() def __create_widgets(self): """ """ # create body elements self.__services_page = ServicesPage(self.__node) self.__system_page = SystemPage(self.__node) self.__trace_page = TraceroutePage(self.__node) # packing notebook elements self.append_page(self.__system_page, BWLabel(_('General'))) self.append_page(self.__services_page, BWLabel(_('Services'))) self.append_page(self.__trace_page, BWLabel(_('Traceroute'))) class ServicesPage(gtk.Notebook): """ """ def __init__(self, node): """ """ gtk.Notebook.__init__(self) self.set_border_width(6) self.set_tab_pos(gtk.POS_TOP) self.__node = node self.__font = pango.FontDescription('Monospace') self.__create_widgets() def __create_widgets(self): """ """ self.__cell = gtk.CellRendererText() # texteditor widgets self.__texteditor = BWTextEditor() self.__texteditor.bw_modify_font(self.__font) self.__texteditor.bw_set_editable(False) self.__texteditor.set_border_width(0) self.__select_combobox = gtk.combo_box_new_text() self.__select_combobox.connect('changed', self.__change_text_value) self.__viewer = BWVBox(spacing=6) self.__viewer.set_border_width(6) self.__viewer.bw_pack_start_noexpand_nofill(self.__select_combobox) self.__viewer.bw_pack_start_expand_fill(self.__texteditor) self.__text = list() # ports information number_of_ports = len(self.__node.get_info('ports')) self.__ports_label = BWLabel(_('Ports (%s)') % number_of_ports) self.__ports_scroll = BWScrolledWindow() self.__ports_store = gtk.TreeStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN) self.__ports_treeview = gtk.TreeView(self.__ports_store) for port in self.__node.get_info('ports'): color = get_service_color(port['state']['state']) service_name = port['service'].get('name', _('<unknown>')) service_method = port['service'].get('method', 
_('<none>')) reference = self.__ports_store.append(None, [port['id'], port['protocol'], port['state']['state'], service_name, service_method, color, True]) for key in port['state']: self.__ports_store.append(reference, [port['id'], 'state', key, port['state'][key], '', 'white', True]) for key in port['service']: if key in ['servicefp']: text = _('[%d] service: %s') % (port['id'], key) self.__select_combobox.append_text(text) self.__text.append(port['service'][key]) value = _('<special field>') else: value = port['service'][key] self.__ports_store.append(reference, [port['id'], 'service', key, value, '', 'white', True]) #for script in port['scripts']: # text = _('[%d] script: %s') % (port['id'], script['id']) # self.__select_combobox.append_text(text) # self.__text.append(script['output']) # # self.__ports_store.append(reference, # [port['id'], # 'script', # 'id', # script['id'], # _('<special field>'), # 'white', # True])<|fim▁hole|> self.__ports_column = list() for i in range(len(PORTS_HEADER)): column = gtk.TreeViewColumn(PORTS_HEADER[i], self.__cell, text=i) self.__ports_column.append(column) self.__ports_column[i].set_reorderable(True) self.__ports_column[i].set_resizable(True) self.__ports_column[i].set_sort_column_id(i) self.__ports_column[i].set_attributes(self.__cell, text=i, background=5, editable=6) self.__ports_treeview.append_column(self.__ports_column[i]) self.__ports_scroll.add_with_viewport(self.__ports_treeview) # extraports information number_of_xports = 0 self.__xports_scroll = BWScrolledWindow() self.__xports_store = gtk.TreeStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN) self.__xports_treeview = gtk.TreeView(self.__xports_store) for xports in self.__node.get_info('extraports'): color = get_service_color(xports['state']) number_of_xports += xports['count'] reference = self.__xports_store.append( None, [xports['count'], xports['state'], ", ".join(xports['reason']), color, True]) for xreason in xports['all_reason']: self.__xports_store.append(reference, [xreason['count'], xports['state'], xreason['reason'], 'white', True]) self.__xports_column = list() for i in range(len(EXTRAPORTS_HEADER)): column = gtk.TreeViewColumn(EXTRAPORTS_HEADER[i], self.__cell, text=i) self.__xports_column.append(column) self.__xports_column[i].set_reorderable(True) self.__xports_column[i].set_resizable(True) self.__xports_column[i].set_sort_column_id(i) self.__xports_column[i].set_attributes(self.__cell, text=i, background=3, editable=4) self.__xports_treeview.append_column(self.__xports_column[i]) xports_label_text = _('Extraports (%s)') % number_of_xports self.__xports_label = BWLabel(xports_label_text) self.__xports_scroll.add_with_viewport(self.__xports_treeview) self.append_page(self.__ports_scroll, self.__ports_label) self.append_page(self.__xports_scroll, self.__xports_label) self.append_page(self.__viewer, BWLabel(_('Special fields'))) if len(self.__text) > 0: self.__select_combobox.set_active(0) def __change_text_value(self, widget): """ """ id = self.__select_combobox.get_active() self.__texteditor.bw_set_text(self.__text[id]) class SystemPage(BWScrolledWindow): """ """ def __init__(self, node): """ """ BWScrolledWindow.__init__(self) self.__node = node self.__font = pango.FontDescription('Monospace') self.__create_widgets() def __create_widgets(self): """ """ self.__vbox = BWVBox() self.__vbox.set_border_width(6) self.__cell = gtk.CellRendererText() self.__general_frame = BWExpander(_('General information')) 
self.__sequences_frame = BWExpander(_('Sequences')) self.__os_frame = BWExpander(_('Operating System')) self.__sequences_frame.bw_add(gtk.Label(_('No sequence information.'))) self.__os_frame.bw_add(gtk.Label(_('No OS information.'))) # general information widgets self.__general = BWTable(3, 2) self.__address_label = BWSectionLabel(_('Address:')) self.__address_list = gtk.combo_box_entry_new_text() self.__address_list.child.set_editable(False) for address in self.__node.get_info('addresses'): params = address['type'], address['addr'] address_text = SYSTEM_ADDRESS_TEXT % params if address['vendor'] is not None and address['vendor'] != '': address_text += " (%s)" % address['vendor'] self.__address_list.append_text(address_text) self.__address_list.set_active(0) self.__general.bw_attach_next(self.__address_label, yoptions=gtk.FILL, xoptions=gtk.FILL) self.__general.bw_attach_next(self.__address_list, yoptions=gtk.FILL) if self.__node.get_info('hostnames') is not None: self.__hostname_label = BWSectionLabel(_('Hostname:')) self.__hostname_list = gtk.combo_box_entry_new_text() self.__hostname_list.child.set_editable(False) for hostname in self.__node.get_info('hostnames'): params = hostname['type'], hostname['name'] self.__hostname_list.append_text(SYSTEM_ADDRESS_TEXT % params) self.__hostname_list.set_active(0) self.__general.bw_attach_next(self.__hostname_label, yoptions=gtk.FILL, xoptions=gtk.FILL) self.__general.bw_attach_next(self.__hostname_list, yoptions=gtk.FILL) if self.__node.get_info('uptime') is not None: self.__uptime_label = BWSectionLabel(_('Last boot:')) seconds = self.__node.get_info('uptime')['seconds'] lastboot = self.__node.get_info('uptime')['lastboot'] text = _('%s (%s seconds).') % (lastboot, seconds) self.__uptime_value = BWLabel(text) self.__uptime_value.set_selectable(True) self.__uptime_value.set_line_wrap(False) self.__general.bw_attach_next(self.__uptime_label, yoptions=gtk.FILL, xoptions=gtk.FILL) self.__general.bw_attach_next(self.__uptime_value, yoptions=gtk.FILL) self.__general_frame.bw_add(self.__general) self.__general_frame.set_expanded(True) sequences = self.__node.get_info('sequences') if len(sequences) > 0: self.__sequences_frame.bw_add( self.__create_sequences_widget(sequences)) # operating system information widgets self.__os = gtk.Notebook() os = self.__node.get_info('os') if os is not None: if 'matches' in os: self.__match_scroll = BWScrolledWindow() self.__match_store = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_INT, gobject.TYPE_BOOLEAN) self.__match_treeview = gtk.TreeView(self.__match_store) for os_match in os['matches']: self.__match_store.append([os_match['accuracy'], os_match['name'], #os_match['db_line'], 0, # unsupported True]) self.__match_column = list() for i in range(len(OSMATCH_HEADER)): column = gtk.TreeViewColumn(OSMATCH_HEADER[i], self.__cell, text=i) self.__match_column.append(column) self.__match_column[i].set_reorderable(True) self.__match_column[i].set_resizable(True) self.__match_column[i].set_attributes(self.__cell, text=i, editable=3) self.__match_column[i].set_sort_column_id(i) self.__match_treeview.append_column(self.__match_column[i]) self.__match_scroll.add_with_viewport(self.__match_treeview) self.__os.append_page(self.__match_scroll, BWLabel(_('Match'))) if 'classes' in os: self.__class_scroll = BWScrolledWindow() self.__class_store = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN) self.__class_treeview = 
gtk.TreeView(self.__class_store) for os_class in os['classes']: os_gen = os_class.get('os_gen', '') self.__class_store.append([os_class['accuracy'], os_class['vendor'], os_class['type'], os_class['os_family'], os_gen, True]) self.__class_column = list() for i in range(len(OSCLASS_HEADER)): column = gtk.TreeViewColumn(OSCLASS_HEADER[i], self.__cell, text=i) self.__class_column.append(column) self.__class_column[i].set_reorderable(True) self.__class_column[i].set_resizable(True) self.__class_column[i].set_attributes(self.__cell, text=i, editable=5) self.__class_column[i].set_sort_column_id(i) self.__class_treeview.append_column(self.__class_column[i]) self.__class_scroll.add_with_viewport(self.__class_treeview) self.__os.append_page(self.__class_scroll, BWLabel(_('Class'))) self.__fp_viewer = BWTextEditor() self.__fp_viewer.bw_modify_font(self.__font) self.__fp_viewer.bw_set_editable(False) self.__fp_viewer.bw_set_text(os['fingerprint']) self.__fp_ports = BWHBox() self.__fp_label = BWSectionLabel(_('Used ports:')) self.__fp_ports_list = gtk.combo_box_entry_new_text() self.__fp_ports_list.child.set_editable(False) self.__fp_vbox = BWVBox() if 'used_ports' in os: used_ports = os['used_ports'] for port in used_ports: params = port['id'], port['protocol'], port['state'] self.__fp_ports_list.append_text(USED_PORTS_TEXT % params) self.__fp_ports_list.set_active(0) self.__fp_ports.bw_pack_start_noexpand_nofill(self.__fp_label) self.__fp_ports.bw_pack_start_expand_fill(self.__fp_ports_list) self.__fp_vbox.bw_pack_start_noexpand_nofill(self.__fp_ports) self.__os.append_page(self.__fp_viewer, BWLabel(_('Fingerprint'))) self.__fp_vbox.bw_pack_start_expand_fill(self.__os) self.__os_frame.bw_add(self.__fp_vbox) self.__os_frame.set_expanded(True) self.__vbox.bw_pack_start_noexpand_nofill(self.__general_frame) self.__vbox.bw_pack_start_expand_fill(self.__os_frame) self.__vbox.bw_pack_start_noexpand_nofill(self.__sequences_frame) self.add_with_viewport(self.__vbox) def __create_sequences_widget(self, sequences): """Return a widget representing various OS detection sequences. 
The sequences argument is a dict with zero or more of the keys 'tcp', 'ip_id', and 'tcp_ts'.""" # sequences information widgets table = BWTable(5, 3) table.attach(BWSectionLabel(_('Class')), 1, 2, 0, 1) table.attach(BWSectionLabel(_('Values')), 2, 3, 0, 1) table.attach(BWSectionLabel(_('TCP *')), 0, 1, 1, 2) table.attach(BWSectionLabel(_('IP ID')), 0, 1, 2, 3) table.attach(BWSectionLabel(_('TCP Timestamp')), 0, 1, 3, 4) tcp = sequences.get('tcp') if tcp is not None: tcp_class = BWLabel(tcp['class']) tcp_class.set_selectable(True) table.attach(tcp_class, 1, 2, 1, 2) tcp_values = gtk.combo_box_entry_new_text() for value in tcp['values']: tcp_values.append_text(value) tcp_values.set_active(0) table.attach(tcp_values, 2, 3, 1, 2) tcp_note = BWLabel() tcp_note.set_selectable(True) tcp_note.set_line_wrap(False) tcp_note.set_alignment(1.0, 0.5) tcp_note.set_markup( TCP_SEQ_NOTE % (tcp['index'], tcp['difficulty'])) table.attach(tcp_note, 0, 3, 4, 5) ip_id = sequences.get('ip_id') if ip_id is not None: ip_id_class = BWLabel(ip_id['class']) ip_id_class.set_selectable(True) table.attach(ip_id_class, 1, 2, 2, 3) ip_id_values = gtk.combo_box_entry_new_text() for value in ip_id['values']: ip_id_values.append_text(value) ip_id_values.set_active(0) table.attach(ip_id_values, 2, 3, 2, 3) tcp_ts = sequences.get('tcp_ts') if tcp_ts is not None: tcp_ts_class = BWLabel(tcp_ts['class']) tcp_ts_class.set_selectable(True) table.attach(tcp_ts_class, 1, 2, 3, 4) if tcp_ts['values'] is not None: tcp_ts_values = gtk.combo_box_entry_new_text() for value in tcp_ts['values']: tcp_ts_values.append_text(value) tcp_ts_values.set_active(0) table.attach(tcp_ts_values, 2, 3, 3, 4) return table class TraceroutePage(BWVBox): """ """ def __init__(self, node): """ """ BWVBox.__init__(self) self.set_border_width(6) self.__node = node self.__create_widgets() def __create_widgets(self): """ """ trace = self.__node.get_info('trace') hops = None if trace is not None: hops = trace.get("hops") if hops is None or len(hops) == 0: self.__trace_label = gtk.Label(NO_TRACE_TEXT) self.pack_start(self.__trace_label, True, True) else: # add hops hops = self.__node.get_info('trace')['hops'] ttls = [int(i['ttl']) for i in hops] self.__cell = gtk.CellRendererText() self.__trace_scroll = BWScrolledWindow() self.__trace_scroll.set_border_width(0) self.__trace_store = gtk.ListStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN) self.__trace_treeview = gtk.TreeView(self.__trace_store) count = 0 for i in range(1, max(ttls) + 1): if i in ttls: hop = hops[count] count += 1 self.__trace_store.append([hop['ttl'], hop['rtt'], hop['ip'], hop['hostname'], HOP_COLOR['known'], True]) else: self.__trace_store.append([i, '', _('<unknown>'), '', HOP_COLOR['unknown'], True]) self.__trace_column = list() for i in range(len(TRACE_HEADER)): column = gtk.TreeViewColumn(TRACE_HEADER[i], self.__cell, text=i) self.__trace_column.append(column) self.__trace_column[i].set_reorderable(True) self.__trace_column[i].set_resizable(True) self.__trace_column[i].set_attributes(self.__cell, text=i, background=4, editable=5) self.__trace_treeview.append_column(self.__trace_column[i]) self.__trace_column[0].set_sort_column_id(0) self.__trace_scroll.add_with_viewport(self.__trace_treeview) self.__trace_info = (self.__node.get_info('trace')['port'], self.__node.get_info('trace')['protocol'], len(self.__node.get_info('trace')['hops'])) self.__trace_label = BWLabel(TRACE_TEXT % self.__trace_info) 
        self.__trace_label.set_use_markup(True)

        self.bw_pack_start_expand_fill(self.__trace_scroll)
        self.bw_pack_start_noexpand_nofill(self.__trace_label)<|fim▁end|>
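A minimal PyGTK sketch of the ListStore/TreeView pattern the node-info record above repeats for its OS-match, OS-class, and traceroute tables; the row data is invented:

import gtk

# One ListStore holds the rows; a shared CellRendererText draws each cell.
store = gtk.ListStore(str, str)
store.append(['98', 'Linux 2.6.X'])  # hypothetical accuracy/name row

treeview = gtk.TreeView(store)
cell = gtk.CellRendererText()
for i, title in enumerate(['Accuracy', 'Name']):
    column = gtk.TreeViewColumn(title, cell, text=i)
    column.set_reorderable(True)
    column.set_resizable(True)
    column.set_sort_column_id(i)
    treeview.append_column(column)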
<|file_name|>subreddits.py<|end_file_name|><|fim▁begin|>import os, sys, re, json from praw2 import Reddit reload(sys)<|fim▁hole|>except: def log(msg): print(msg) sys.setdefaultencoding("utf-8") CLIENT_ID = 'J_0zNv7dXM1n3Q' CLIENT_SECRET = 'sfiPkzKDd8LZl3Ie1WLAvpCICH4' USER_AGENT = 'sparkle streams 1.0' class SubRedditEvents(object): as_regex_str = r'(acestream://[^$\s]+)' def __init__(self, username=None, password=None, client=None): self.client = client or Reddit(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, user_agent=USER_AGENT, username=username, password=password, ) self.as_regex = re.compile(self.as_regex_str, re.IGNORECASE) @staticmethod def get_as_links(body): """ For each acestream link, return a tuple of acestream link, and link quality """ links = [] for entry in body.split('\n'): res = re.findall('(.*)(acestream://[a-z0-9]+)\s*(.*)', entry) if res: pre, acelink, post = res[0] if len(pre.strip()) > len(post.strip()): links.append((acelink.strip(), pre.strip())) else: links.append((acelink.strip(), post.strip())) return links @staticmethod def priority(entry): """ For cases where we have multiple entries for the same acestream link, prioritize based on the quality text to get the best text possible """ if not entry[0]: return (entry, 3) elif re.search('.*\[.*\].*', entry[0]): return (entry, 1) else: return (entry, 2) @staticmethod def collapse(entries): """ Collapse oure list of acestream entries to pick only one with the best quality text """ results = [] prev = None # Sort the entries by our priority logic, then iterate for entry in sorted(entries, key=lambda entry: priority(entry), reverse=True): if prev != entry[0]: results.append(entry) prev = entry[0] return results def get_events(self, subreddit, filtering=False): subs = [] path = '/r/{}'.format(subreddit) for submission in self.client.get(path): sub_id = submission.id score = submission.score title = submission.title title = title.encode('utf-8') subs.append({'submission_id': sub_id, 'title': title, 'score': score }) return sorted(subs, key=lambda d: d['score'], reverse=True) def get_event_links(self, submission_id): submission = self.client.submission(id=submission_id) links = [] scores = {} # Add the extracted links and details tuple for c in submission.comments.list(): if hasattr(c, 'body'): links.extend(self.get_as_links(c.body.encode('utf-8'))) # Add entry to our scores table taking the largest score for a given # acestream link score = c.score if hasattr(c, 'score') else 0 for entry in links: scores[entry[0]] = max(scores.get(entry[0], 0), score) if len(links) > 0: return [(s, q, a) for ((a, q), s) in zip(links, map(lambda x: scores[x[0]], links))] else: return links<|fim▁end|>
try: from xbmc import log
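A small sketch of the acestream extraction that get_as_links() performs in the record above; the sample comment line is invented:

import re

comment_line = 'HD 1080 acestream://abc123def456'  # made-up forum comment
res = re.findall('(.*)(acestream://[a-z0-9]+)\s*(.*)', comment_line)
if res:
    pre, acelink, post = res[0]
    # Keep whichever side of the link carries the longer quality text,
    # mirroring the heuristic in the record.
    quality = pre.strip() if len(pre.strip()) > len(post.strip()) else post.strip()
    print('%s (%s)' % (acelink, quality))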
<|file_name|>int_rect.rs<|end_file_name|><|fim▁begin|>use math::IntVector; use sdl2::rect::Rect as SdlRect; #[derive(Debug, Copy, PartialEq, Clone)] pub struct IntRect { pub xy: IntVector, pub width: u32, pub height: u32, } impl IntRect { fn new(x: i32, y: i32, width: u32, height: u32) -> IntRect { IntRect { xy: IntVector::new(x, y), width: width, height: height, } } pub fn x(&self) -> i32 { self.xy.x } pub fn y(&self) -> i32 {<|fim▁hole|> pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } } impl From<SdlRect> for IntRect { fn from(sdl_rect: SdlRect) -> IntRect { IntRect::new(sdl_rect.x(), sdl_rect.y(), sdl_rect.width(), sdl_rect.height()) } } impl From<IntRect> for SdlRect { fn from(int_rect: IntRect) -> SdlRect { SdlRect::new(int_rect.x(), int_rect.y(), int_rect.width(), int_rect.height()) } }<|fim▁end|>
self.xy.y }
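The records in this file appear to share one layout: prefix, <|fim▁hole|>, suffix, <|fim▁end|>, then the held-out completion (which may be empty). Assuming that layout, a short Python sketch that splices a record back into its original source:

HOLE, END = '<|fim▁hole|>', '<|fim▁end|>'

def reassemble(record):
    # prefix + completion + suffix rebuilds the file; the <|file_name|> and
    # <|fim▁begin|> header tokens are left in the prefix for brevity.
    prompt, _, completion = record.partition(END)
    prefix, _, suffix = prompt.partition(HOLE)
    return prefix + completion + suffix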
<|file_name|>SplitRGBBands.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ *************************************************************************** SplitRGBBands.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from processing.tools.system import * from processing.tools import dataobjects from processing.saga.SagaUtils import SagaUtils __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from PyQt4 import QtGui from processing.core.GeoAlgorithm import GeoAlgorithm from processing.parameters.ParameterRaster import ParameterRaster from processing.outputs.OutputRaster import OutputRaster import os class SplitRGBBands(GeoAlgorithm): INPUT = "INPUT" R = "R" G = "G" B = "B" def getIcon(self): return QtGui.QIcon(os.path.dirname(__file__) + "/../images/saga.png") def defineCharacteristics(self): self.name = "Split RGB bands" self.group = "Grid - Tools" self.addParameter(ParameterRaster(SplitRGBBands.INPUT, "Input layer", False)) self.addOutput(OutputRaster(SplitRGBBands.R, "Output R band layer")) self.addOutput(OutputRaster(SplitRGBBands.G, "Output G band layer")) self.addOutput(OutputRaster(SplitRGBBands.B, "Output B band layer")) def processAlgorithm(self, progress): #TODO:check correct num of bands input = self.getParameterValue(SplitRGBBands.INPUT) temp = getTempFilename(None).replace('.',''); basename = os.path.basename(temp) validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" safeBasename = ''.join(c for c in basename if c in validChars) temp = os.path.join(os.path.dirname(temp), safeBasename) r = self.getOutputValue(SplitRGBBands.R) g = self.getOutputValue(SplitRGBBands.G) b = self.getOutputValue(SplitRGBBands.B)<|fim▁hole|> commands.append("io_gdal 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\""); commands.append("io_gdal 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\""); commands.append("io_gdal 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\""); else: commands.append("libio_gdal 0 -GRIDS \"" + temp + "\" -FILES \"" + input + "\"") commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\""); commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\""); commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\""); SagaUtils.createSagaBatchJobFileFromSagaCommands(commands) SagaUtils.executeSaga(progress);<|fim▁end|>
commands = [] if isWindows(): commands.append("io_gdal 0 -GRIDS \"" + temp + "\" -FILES \"" + input+"\"")
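The record above whitelists characters when it builds a SAGA-safe temp basename; the same filter in isolation, with an invented input:

validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
basename = 'tmp-a1_b2.grd'  # hypothetical getTempFilename() result
safeBasename = ''.join(c for c in basename if c in validChars)
print(safeBasename)  # '-', '_' and '.' are dropped -> 'tmpa1b2grd'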
<|file_name|>File.java<|end_file_name|><|fim▁begin|>package scrum.server.files; import ilarkesto.io.IO; import scrum.server.admin.User; public class File extends GFile { // --- dependencies --- // --- ---<|fim▁hole|> public void deleteFile() { IO.delete(getJavaFile()); } public java.io.File getJavaFile() { return new java.io.File(getProject().getFileRepositoryPath() + "/" + getFilename()); } public void updateNumber() { if (isNumber(0)) setNumber(getProject().generateFileNumber()); } public boolean isVisibleFor(User user) { return getProject().isVisibleFor(user); } public String getReferenceAndLabel() { return getReference() + " (" + getLabel() + ")"; } public String getReference() { return scrum.client.files.File.REFERENCE_PREFIX + getNumber(); } public boolean isEditableBy(User user) { return getProject().isEditableBy(user); } @Override public String toString() { return getReferenceAndLabel(); } @Override public void ensureIntegrity() { super.ensureIntegrity(); updateNumber(); } }<|fim▁end|>
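A rough sketch of the reference scheme getReference() builds in the record above; the prefix value is assumed, since scrum.client.files.File is not shown:

REFERENCE_PREFIX = 'fil'  # assumed constant, defined outside the record

def reference(number):
    return '%s%s' % (REFERENCE_PREFIX, number)

def reference_and_label(number, label):
    # Mirrors getReferenceAndLabel(): "<reference> (<label>)".
    return '%s (%s)' % (reference(number), label)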
<|file_name|>PtyPartEffectTranslate.moc.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2010, Anima Games, Benjamin Karaban, Laurent Schneider, * Jérémie Comarmond, Didier Colin. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "PtyPartEffectTranslate.moc.h" #include <QtToolbox/CollapsibleWidget.moc.h> #include <QtToolbox/SingleSlidingValue.moc.h> #include <QtToolbox/SingleSlidingHDR.moc.h> #include <QGridLayout> #include <QPushButton> namespace EPI { //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- PtyPartEffectTranslate::PtyPartEffectTranslate( const Ptr<Universe::NodeEmitter>& pNodeE, const Ptr<Universe::PartEffectTranslate>& pEffect, const Core::String& title) : PtyPartEffect(pNodeE, pEffect, title) { updateProperty(); } //----------------------------------------------------------------------------- PtyPartEffectTranslate::PtyPartEffectTranslate(const Ptr<Universe::NodeEmitter>& pNodeE, const Core::String& title) : PtyPartEffect( pNodeE, Ptr<Universe::PartEffectTranslate>(new Universe::PartEffectTranslate()), title) { updateProperty(); } //----------------------------------------------------------------------------- PtyPartEffectTranslate::~PtyPartEffectTranslate() {} //----------------------------------------------------------------------------- Ptr<PropertyWidget> PtyPartEffectTranslate::internalCreatePropertyWidget(const Ptr<PropertyWidgetDataProxy>& pDataProxy, QWidget * parent) { Ptr<PtyWidgetPartEffectTranslate> pPW (new PtyWidgetPartEffectTranslate(pDataProxy, parent)); return pPW; } //----------------------------------------------------------------------------- void PtyPartEffectTranslate::updateData() { Ptr<Universe::PartEffectTranslate> pEffet = LM_DEBUG_PTR_CAST<Universe::PartEffectTranslate> (getEffect()); pEffet->setConstSpeed(_constSpeed); pEffet->setRandSpeed(_randSpeed); } //----------------------------------------------------------------------------- void PtyPartEffectTranslate::updateProperty() { Ptr<Universe::PartEffectTranslate> 
pEffet = LM_DEBUG_PTR_CAST<Universe::PartEffectTranslate> (getEffect()); _constSpeed = pEffet->getConstSpeed(); _randSpeed = pEffet->getRandSpeed(); } //----------------------------------------------------------------------------- void PtyPartEffectTranslate::internalResurrect(const Ptr<Universe::World>& pWorld, const Ptr<Universe::World>& pWorldInfoContent, const Ptr<Property>& pty) { LM_ASSERT(getEffect()==null); Ptr<Universe::IPartEffect> pEffet = Ptr<Universe::PartEffectTranslate>(new Universe::PartEffectTranslate()); setEffect(pEffet); getUniverseNodeEmitter()->addEffect(getEffect()); updateData(); } //----------------------------------------------------------------------------- Ptr<Property> PtyPartEffectTranslate::clone() const { return Ptr<Property>(new PtyPartEffectTranslate( *this )); } //----------------------------------------------------------------------------- void PtyPartEffectTranslate::internalCopy(const Ptr<Property>& pSrc) { PtyPartEffect::internalCopy(pSrc); Ptr<PtyPartEffectTranslate> pPty = LM_DEBUG_PTR_CAST<PtyPartEffectTranslate>(pSrc); _constSpeed = pPty->_constSpeed; _randSpeed = pPty->_randSpeed; <|fim▁hole|>//----------------------------------------------------------------------------- PtyWidgetPartEffectTranslate::PtyWidgetPartEffectTranslate(const Ptr<PropertyWidgetDataProxy>& data, QWidget * parent) : PropertyWidget(data, parent) { setupUi(); } //----------------------------------------------------------------------------- PtyWidgetPartEffectTranslate::~PtyWidgetPartEffectTranslate() {} //----------------------------------------------------------------------------- void PtyWidgetPartEffectTranslate::readProperty() { Ptr<PtyPartEffectTranslate> pP = LM_DEBUG_PTR_CAST<PtyPartEffectTranslate>(getDataProxy()->getProperty()); _constSpeedX->setSingleValue(pP->_constSpeed.x); _constSpeedY->setSingleValue(pP->_constSpeed.y); _constSpeedZ->setSingleValue(pP->_constSpeed.z); _randSpeedX->setSingleValue(pP->_randSpeed.x); _randSpeedY->setSingleValue(pP->_randSpeed.y); _randSpeedZ->setSingleValue(pP->_randSpeed.z); } //----------------------------------------------------------------------------- void PtyWidgetPartEffectTranslate::writeProperty(QWidget* pWidget) { Ptr<PtyPartEffectTranslate> pP = LM_DEBUG_PTR_CAST<PtyPartEffectTranslate>(getDataProxy()->getProperty()); double x = 0.0; double y = 0.0; double z = 0.0; _constSpeedX->getSingleValue(x); _constSpeedY->getSingleValue(y); _constSpeedZ->getSingleValue(z); pP->_constSpeed = Core::Vector3f(float(x), float(y), float(z)); _randSpeedX->getSingleValue(x); _randSpeedY->getSingleValue(y); _randSpeedZ->getSingleValue(z); pP->_randSpeed = Core::Vector3f(float(x), float(y), float(z)); } //----------------------------------------------------------------------------- void PtyWidgetPartEffectTranslate::setupUi() { _layout = new QGridLayout(this); _layout->setContentsMargins(0, 0, 0, 0); _layout->setSpacing(0); _groupBox = new QtToolbox::CollapsibleWidget(this, "Translate effect"); _del = new QPushButton(QIcon(":/icons/smallClearBW.png"), "", this); _constSpeedX = new QtToolbox::SingleSlidingHDR(this, "Const X", true); _constSpeedY = new QtToolbox::SingleSlidingHDR(this, "Const Y", true); _constSpeedZ = new QtToolbox::SingleSlidingHDR(this, "Const Z", true); _randSpeedX = new QtToolbox::SingleSlidingHDR(this, "Rand X", true); _randSpeedY = new QtToolbox::SingleSlidingHDR(this, "Rand Y", true); _randSpeedZ = new QtToolbox::SingleSlidingHDR(this, "Rand Z", true); _groupBox->addWidgetToTitle(_del); 
_groupBox->getLayout()->addWidget(_constSpeedX); _groupBox->getLayout()->addWidget(_constSpeedY); _groupBox->getLayout()->addWidget(_constSpeedZ); _groupBox->getLayout()->addWidget(_randSpeedX); _groupBox->getLayout()->addWidget(_randSpeedY); _groupBox->getLayout()->addWidget(_randSpeedZ); _layout->addWidget(_groupBox); setLayout(_layout); getWidgetsForUndoRedo().push_back(_constSpeedX); getWidgetsForUndoRedo().push_back(_constSpeedY); getWidgetsForUndoRedo().push_back(_constSpeedZ); getWidgetsForUndoRedo().push_back(_randSpeedX); getWidgetsForUndoRedo().push_back(_randSpeedY); getWidgetsForUndoRedo().push_back(_randSpeedZ); PropertyWidget::setupUi(); connect(_del, SIGNAL(clicked()), this, SLOT(deleteWidget())); } //----------------------------------------------------------------------------- void PtyWidgetPartEffectTranslate::deleteWidget() { emit deletePtyWidgetEffect(this); } //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- }//namespace EPI<|fim▁end|>
    updateData();
}
//-----------------------------------------------------------------------------
<|file_name|>app.js<|end_file_name|><|fim▁begin|>const { resolve } = require('path') const express = require('express') const bodyParser = require('body-parser') var proxy = require('express-http-proxy') const app = express() // parse JSON bodies app.use(bodyParser.json({ type: 'application/json' })) // the index file app.get('/', (req, res) => { res.sendFile(resolve(__dirname, '../index.html')) }) // handle Perl requests app.post('/resources/cgi-bin/scrollery-cgi.pl', proxy('http://localhost:9080/resources/cgi-bin/scrollery-cgi.pl'))<|fim▁hole|> // expose the Express app instance module.exports = app<|fim▁end|>
<|file_name|>lovins.py<|end_file_name|><|fim▁begin|>"""This module implements the Lovins stemming algorithm. Use the ``stem()`` function:: stemmed_word = stem(word) """ from whoosh.util.collections2 import defaultdict # Conditions def A(base): # A No restrictions on stem return True def B(base): # B Minimum stem length = 3 return len(base) > 2 def C(base): # C Minimum stem length = 4 return len(base) > 3 def D(base): # D Minimum stem length = 5 return len(base) > 4 def E(base): # E Do not remove ending after e return base[-1] != "e" def F(base): # F Minimum stem length = 3 and do not remove ending after e return len(base) > 2 and base[-1] != "e" def G(base): # G Minimum stem length = 3 and remove ending only after f return len(base) > 2 and base[-1] == "f" def H(base): # H Remove ending only after t or ll c1, c2 = base[-2:] return c2 == "t" or (c2 == "l" and c1 == "l") def I(base): # I Do not remove ending after o or e c = base[-1] return c != "o" and c != "e" def J(base): # J Do not remove ending after a or e c = base[-1] return c != "a" and c != "e" def K(base): # K Minimum stem length = 3 and remove ending only after l, i or u*e c = base[-1] cc = base[-3] return len(base) > 2 and (c == "l" or c == "i" or (c == "e" and cc == "u")) def L(base): # L Do not remove ending after u, x or s, unless s follows o c1, c2 = base[-2:] return c2 != "u" and c2 != "x" and (c2 != "s" or c1 == "o") def M(base): # M Do not remove ending after a, c, e or m c = base[-1] return c != "a" and c!= "c" and c != "e" and c != "m" def N(base): # N Minimum stem length = 4 after s**, elsewhere = 3 return len(base) > 3 or (len(base) == 3 and base[-1] != "s") def O(base): # O Remove ending only after l or i c = base[-1] return c == "l" or c == "i" def P(base): # P Do not remove ending after c return base[-1] != "c" def Q(base): # Q Minimum stem length = 3 and do not remove ending after l or n c = base[-1] return len(base) > 2 and (c != "l" and c != "n") def R(base): # R Remove ending only after n or r c = base[-1] return c == "n" or c == "r" def S(base): # S Remove ending only after dr or t, unless t follows t l2 = base[-2] return l2 == "rd" or (base[-1] == "t" and l2 != "tt") def T(base): # T Remove ending only after s or t, unless t follows o c1, c2 = base[-2:] return c2 == "s" or (c2 == "t" and c1 != "o") def U(base): # U Remove ending only after l, m, n or r c = base[-1] return c == "l" or c == "m" or c == "n" or c == "r" def V(base): # V Remove ending only after c return base[-1] == "c" def W(base): # W Do not remove ending after s or u c = base[-1] return c != "s" and c != "u" def X(base): # X Remove ending only after l, i or u*e c = base[-1] cc = base[-3] return c == "l" or c == "i" or (c == "e" and cc == "u") def Y(base): # Y Remove ending only after in return base[-2:] == "in" def Z(base): # Z Do not remove ending after f return base[-1] != "f" def a(base): # a Remove ending only after d, f, ph, th, l, er, or, es or t c = base[-1] l2 = base[-2:] return (c == "d" or c == "f" or l2 == "ph" or l2 == "th" or c == "l" or l2 == "er" or l2 == "or" or l2 == "es" or c == "t") def b(base): # b Minimum stem length = 3 and do not remove ending after met or ryst return len(base) > 2 and not (base.endswith("met") or base.endswith("ryst")) def c(base): # c Remove ending only after l return base[-1] == "l" # Endings m = [None] * 12 m[11] = dict(( ("alistically", B), ("arizability", A), ("izationally", B))) m[10] = dict(( ("antialness", A), ("arisations", A), ("arizations", A), ("entialness", A))) m[9] = dict(( ("allically", 
C), ("antaneous", A), ("antiality", A), ("arisation", A), ("arization", A), ("ationally", B), ("ativeness", A), ("eableness", E), ("entations", A), ("entiality", A), ("entialize", A), ("entiation", A), ("ionalness", A), ("istically", A), ("itousness", A), ("izability", A), ("izational", A))) m[8] = dict(( ("ableness", A), ("arizable", A), ("entation", A), ("entially", A), ("eousness", A), ("ibleness", A), ("icalness", A), ("ionalism", A), ("ionality", A), ("ionalize", A), ("iousness", A), ("izations", A), ("lessness", A))) m[7] = dict(( ("ability", A), ("aically", A), ("alistic", B), ("alities", A), ("ariness", E), ("aristic", A), ("arizing", A), ("ateness", A), ("atingly", A), ("ational", B), ("atively", A), ("ativism", A), ("elihood", E), ("encible", A), ("entally", A), ("entials", A), ("entiate", A), ("entness", A), ("fulness", A), ("ibility", A), ("icalism", A), ("icalist", A), ("icality", A), ("icalize", A), ("ication", G), ("icianry", A), ("ination", A), ("ingness", A), ("ionally", A), ("isation", A), ("ishness", A), ("istical", A), ("iteness", A), ("iveness", A), ("ivistic", A), ("ivities", A), ("ization", F), ("izement", A), ("oidally", A), ("ousness", A))) m[6] = dict(( ("aceous", A), ("acious", B), ("action", G), ("alness", A), ("ancial", A), ("ancies", A), ("ancing", B), ("ariser", A), ("arized", A), ("arizer", A), ("atable", A), ("ations", B), ("atives", A), ("eature", Z), ("efully", A), ("encies", A), ("encing", A), ("ential", A), ("enting", C), ("entist", A), ("eously", A), ("ialist", A), ("iality", A), ("ialize", A), ("ically", A), ("icance", A), ("icians", A), ("icists", A), ("ifully", A), ("ionals", A), ("ionate", D), ("ioning", A), ("ionist", A), ("iously", A), ("istics", A), ("izable", E), ("lessly", A), ("nesses", A), ("oidism", A))) m[5] = dict(( ("acies", A), ("acity", A), ("aging", B), ("aical", A), ("alist", A), ("alism", B), ("ality", A), ("alize", A), ("allic", b), ("anced", B), ("ances", B), ("antic", C), ("arial", A), ("aries", A), ("arily", A), ("arity", B), ("arize", A), ("aroid", A), ("ately", A), ("ating", I), ("ation", B), ("ative", A), ("ators", A), ("atory", A), ("ature", E), ("early", Y), ("ehood", A), ("eless", A), ("elily", A), ("ement", A), ("enced", A), ("ences", A), ("eness", E), ("ening", E), ("ental", A), ("ented", C), ("ently", A), ("fully", A), ("ially", A), ("icant", A), ("ician", A), ("icide", A), ("icism", A), ("icist", A), ("icity", A), ("idine", I), ("iedly", A), ("ihood", A), ("inate", A), ("iness", A), ("ingly", B), ("inism", J), ("inity", c), ("ional", A), ("ioned", A), ("ished", A), ("istic", A), ("ities", A), ("itous", A), ("ively", A), ("ivity", A), ("izers", F), ("izing", F), ("oidal", A), ("oides", A), ("otide", A), ("ously", A))) m[4] = dict(( ("able", A), ("ably", A), ("ages", B), ("ally", B), ("ance", B), ("ancy", B), ("ants", B), ("aric", A), ("arly", K), ("ated", I), ("ates", A), ("atic", B), ("ator", A), ("ealy", Y), ("edly", E), ("eful", A), ("eity", A), ("ence", A), ("ency", A), ("ened", E), ("enly", E), ("eous", A), ("hood", A), ("ials", A), ("ians", A), ("ible", A), ("ibly", A), ("ical", A), ("ides", L), ("iers", A), ("iful", A), ("ines", M), ("ings", N), ("ions", B), ("ious", A), ("isms", B), ("ists", A), ("itic", H), ("ized", F), ("izer", F), ("less", A), ("lily", A), ("ness", A), ("ogen", A), ("ward", A), ("wise", A), ("ying", B), ("yish", A))) m[3] = dict(( ("acy", A), ("age", B), ("aic", A), ("als", b), ("ant", B), ("ars", O), ("ary", F), ("ata", A), ("ate", A), ("eal", Y), ("ear", Y), ("ely", E), ("ene", E), ("ent", 
C), ("ery", E), ("ese", A), ("ful", A), ("ial", A), ("ian", A), ("ics", A), ("ide", L), ("ied", A), ("ier", A), ("ies", P), ("ily", A), ("ine", M), ("ing", N), ("ion", Q), ("ish", C), ("ism", B), ("ist", A), ("ite", a), ("ity", A), ("ium", A), ("ive", A), ("ize", F), ("oid", A), ("one", R), ("ous", A))) m[2] = dict(( ("ae", A), ("al", b), ("ar", X), ("as", B), ("ed", E), ("en", F), ("es", E), ("ia", A), ("ic", A), ("is", A), ("ly", B), ("on", S), ("or", T), ("um", U), ("us", V), ("yl", R), ("s'", A), ("'s", A))) m[1] = dict(( ("a", A), ("e", A), ("i", A), ("o", A), ("s", W), ("y", B))) def remove_ending(word): length = len(word) el = 11 while el > 0: if length - el > 1: ending = word[length-el:] cond = m[el].get(ending) if cond: base = word[:length-el] if cond(base): return base el -= 1 return word _endings = (("iev", "ief"), ("uct", "uc"), ("iev", "ief"), ("uct", "uc"), ("umpt", "um"), ("rpt", "rb"), ("urs", "ur"), ("istr", "ister"), ("metr", "meter"), ("olv", "olut"), ("ul", "l", "aoi"), ("bex", "bic"), ("dex", "dic"), ("pex", "pic"), ("tex", "tic"), ("ax", "ac"), ("ex", "ec"), ("ix", "ic"), ("lux", "luc"), ("uad", "uas"), ("vad", "vas"), ("cid", "cis"), ("lid", "lis"), ("erid", "eris"), ("pand", "pans"), ("end", "ens", "s"), ("ond", "ons"), ("lud", "lus"), ("rud", "rus"),<|fim▁hole|> ("ent", "ens", "m"), ("ert", "ers"), ("et", "es", "n"), ("yt", "ys"), ("yz", "ys")) # Hash the ending rules by the last letter of the target ending _endingrules = defaultdict(list) for rule in _endings: _endingrules[rule[0][-1]].append(rule) _doubles = frozenset(("dd", "gg", "ll", "mm", "nn", "pp", "rr", "ss", "tt")) def fix_ending(word): if word[-2:] in _doubles: word = word[:-1] for endingrule in _endingrules[word[-1]]: target, newend = endingrule[:2] if word.endswith(target): if len(endingrule) > 2: exceptafter = endingrule[2] c = word[0-(len(target)+1)] if c in exceptafter: return word return word[:0-len(target)] + newend return word def stem(word): """Returns the stemmed version of the argument string. """ return fix_ending(remove_ending(word))<|fim▁end|>
("her", "hes", "pt"), ("mit", "mis"),
<|file_name|>strategy.go<|end_file_name|><|fim▁begin|>/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|> http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storageclass import ( "fmt" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/apis/storage/validation" "k8s.io/kubernetes/pkg/genericapiserver/registry/generic" apistorage "k8s.io/kubernetes/pkg/storage" ) // storageClassStrategy implements behavior for StorageClass objects type storageClassStrategy struct { runtime.ObjectTyper names.NameGenerator } // Strategy is the default logic that applies when creating and updating // StorageClass objects via the REST API. var Strategy = storageClassStrategy{api.Scheme, names.SimpleNameGenerator} func (storageClassStrategy) NamespaceScoped() bool { return false } // ResetBeforeCreate clears the Status field which is not allowed to be set by end users on creation. func (storageClassStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { _ = obj.(*storage.StorageClass) } func (storageClassStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { storageClass := obj.(*storage.StorageClass) return validation.ValidateStorageClass(storageClass) } // Canonicalize normalizes the object after validation. func (storageClassStrategy) Canonicalize(obj runtime.Object) { } func (storageClassStrategy) AllowCreateOnUpdate() bool { return false } // PrepareForUpdate sets the Status fields which is not allowed to be set by an end user updating a PV func (storageClassStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { _ = obj.(*storage.StorageClass) _ = old.(*storage.StorageClass) } func (storageClassStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { errorList := validation.ValidateStorageClass(obj.(*storage.StorageClass)) return append(errorList, validation.ValidateStorageClassUpdate(obj.(*storage.StorageClass), old.(*storage.StorageClass))...) } func (storageClassStrategy) AllowUnconditionalUpdate() bool { return true } // GetAttrs returns labels and fields of a given object for filtering purposes. func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) { cls, ok := obj.(*storage.StorageClass) if !ok { return nil, nil, fmt.Errorf("given object is not of type StorageClass") } return labels.Set(cls.ObjectMeta.Labels), StorageClassToSelectableFields(cls), nil } // MatchStorageClass returns a generic matcher for a given label and field selector. 
func MatchStorageClasses(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate { return apistorage.SelectionPredicate{ Label: label, Field: field, GetAttrs: GetAttrs, } } // StorageClassToSelectableFields returns a label set that represents the object func StorageClassToSelectableFields(storageClass *storage.StorageClass) fields.Set { return generic.ObjectMetaFieldsSet(&storageClass.ObjectMeta, false) }<|fim▁end|>
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
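A rough Python analogue of the SelectionPredicate idea the Go record wires up: an object matches only when both its labels and its selectable fields satisfy the selectors. This illustrates the concept only; it is not the Kubernetes API:

def matches(obj_labels, obj_fields, label_selector, field_selector):
    labels_ok = all(obj_labels.get(k) == v for k, v in label_selector.items())
    fields_ok = all(obj_fields.get(k) == v for k, v in field_selector.items())
    return labels_ok and fields_ok

# Hypothetical StorageClass attributes:
print(matches({'tier': 'gold'}, {'metadata.name': 'fast'},
              {'tier': 'gold'}, {'metadata.name': 'fast'}))  # True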
<|file_name|>client.go<|end_file_name|><|fim▁begin|>package pdns import ( "encoding/json" "fmt" "io" "net/http" "strconv" "strings" "github.com/xenolf/lego/challenge/dns01" ) type Record struct { Content string `json:"content"` Disabled bool `json:"disabled"` // pre-v1 API Name string `json:"name"` Type string `json:"type"` TTL int `json:"ttl,omitempty"` } type hostedZone struct { ID string `json:"id"` Name string `json:"name"` URL string `json:"url"` RRSets []rrSet `json:"rrsets"` // pre-v1 API Records []Record `json:"records"` } type rrSet struct { Name string `json:"name"` Type string `json:"type"` Kind string `json:"kind"` ChangeType string `json:"changetype"` Records []Record `json:"records"` TTL int `json:"ttl,omitempty"` } type rrSets struct { RRSets []rrSet `json:"rrsets"` } type apiError struct { ShortMsg string `json:"error"` } func (a apiError) Error() string { return a.ShortMsg } type apiVersion struct { URL string `json:"url"` Version int `json:"version"` } func (d *DNSProvider) getHostedZone(fqdn string) (*hostedZone, error) { var zone hostedZone authZone, err := dns01.FindZoneByFqdn(fqdn) if err != nil { return nil, err } u := "/servers/localhost/zones" result, err := d.sendRequest(http.MethodGet, u, nil) if err != nil { return nil, err } var zones []hostedZone err = json.Unmarshal(result, &zones) if err != nil { return nil, err } u = "" for _, zone := range zones { if dns01.UnFqdn(zone.Name) == dns01.UnFqdn(authZone) { u = zone.URL break } } result, err = d.sendRequest(http.MethodGet, u, nil) if err != nil { return nil, err } err = json.Unmarshal(result, &zone) if err != nil { return nil, err } // convert pre-v1 API result if len(zone.Records) > 0 { zone.RRSets = []rrSet{} for _, record := range zone.Records { set := rrSet{ Name: record.Name, Type: record.Type, Records: []Record{record}, } zone.RRSets = append(zone.RRSets, set) } } return &zone, nil } func (d *DNSProvider) findTxtRecord(fqdn string) (*rrSet, error) { zone, err := d.getHostedZone(fqdn) if err != nil { return nil, err } _, err = d.sendRequest(http.MethodGet, zone.URL, nil) if err != nil { return nil, err } for _, set := range zone.RRSets { if (set.Name == dns01.UnFqdn(fqdn) || set.Name == fqdn) && set.Type == "TXT" { return &set, nil } } return nil, fmt.Errorf("no existing record found for %s", fqdn) } func (d *DNSProvider) getAPIVersion() (int, error) { result, err := d.sendRequest(http.MethodGet, "/api", nil) if err != nil { return 0, err } var versions []apiVersion err = json.Unmarshal(result, &versions) if err != nil { return 0, err } latestVersion := 0 for _, v := range versions { if v.Version > latestVersion { latestVersion = v.Version } } return latestVersion, err } func (d *DNSProvider) sendRequest(method, uri string, body io.Reader) (json.RawMessage, error) { req, err := d.makeRequest(method, uri, body) if err != nil { return nil, err } resp, err := d.config.HTTPClient.Do(req) if err != nil { return nil, fmt.Errorf("error talking to PDNS API -> %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusUnprocessableEntity && (resp.StatusCode < 200 || resp.StatusCode >= 300) { return nil, fmt.Errorf("unexpected HTTP status code %d when fetching '%s'", resp.StatusCode, req.URL) } var msg json.RawMessage err = json.NewDecoder(resp.Body).Decode(&msg) if err != nil { if err == io.EOF { // empty body return nil, nil } // other error return nil, err } // check for PowerDNS error message if len(msg) > 0 && msg[0] == '{' { var errInfo apiError err = json.Unmarshal(msg, &errInfo) if err != nil { 
return nil, err } if errInfo.ShortMsg != "" { return nil, fmt.Errorf("error talking to PDNS API -> %v", errInfo) } } return msg, nil } func (d *DNSProvider) makeRequest(method, uri string, body io.Reader) (*http.Request, error) { var path = "" if d.config.Host.Path != "/" { path = d.config.Host.Path } if !strings.HasPrefix(uri, "/") { uri = "/" + uri } if d.apiVersion > 0 && !strings.HasPrefix(uri, "/api/v") { uri = "/api/v" + strconv.Itoa(d.apiVersion) + uri } u := d.config.Host.Scheme + "://" + d.config.Host.Host + path + uri req, err := http.NewRequest(method, u, body) if err != nil { return nil, err } req.Header.Set("X-API-Key", d.config.APIKey) <|fim▁hole|> return req, nil }<|fim▁end|>
<|file_name|>pygments.py<|end_file_name|><|fim▁begin|>import importlib from pygments.lexer import RegexLexer, bygroups from pygments.styles import STYLE_MAP from pygments.token import * def load_style(full_class_string): modulename, styleclass = full_class_string.split('::') module = importlib.import_module("pygments.styles." + modulename) return getattr(module, styleclass) repl_styles = {} for name, import_info in STYLE_MAP.items():<|fim▁hole|>class FranzLexer(RegexLexer): name = 'Franz Lexer' tokens = { 'root': [ (r'"', String.Double, 'double-quote'), (r'[0-9]+(\.[0-9]+)?', Number), (r'\b(if|else|for|while|in|to|fn|ⲗ|try|rescue|assert|include|yield|return|break|continue)\b', Keyword.Reserved), (r'\b(int|str|any|float|list|dict|bool)\b', Keyword.Type), (r'\b(and|or|not)\b', Operator.Word), (r'#.*?$', Comment.Single), (r'([a-zA-Z][a-zA-Z0-9_!?\-%$]*)(\s*)(=)(\s*)(fn)', bygroups(Name.Function.Definition, Whitespace, Operator, Whitespace, Keyword.Reserved)), (r'\b([a-zA-Z][a-zA-Z0-9_!?\-%$]*)(\s*)([(])', bygroups(Name.Function, Whitespace, Punctuation)), (r'\b[a-zA-Z][a-zA-Z0-9_!?\-%$]*\b', Name), (r'\s+([*+\-^=<>%/?]+)\s+', Operator), (r'[@().,:;\[\]]', Punctuation), (r'[{}]', Punctuation.Braces), (r'\s+', Whitespace) ], 'double-quote': [ (r'\{.*?\}', String.Interpol), (r'\\.', Literal.String.Escape), (r'[^"{}\\]+', String.Double), (r'"', String.Double, '#pop'), ] }<|fim▁end|>
repl_styles[name] = load_style(import_info) repl_styles[name].styles[Whitespace] = '' # some styles underline ws
<|file_name|>knockout.kogrid-tests.ts<|end_file_name|><|fim▁begin|>export interface IGridItem { name: string; } export class Tests { public items: KnockoutObservableArray<IGridItem>; public selectedItems: KnockoutObservableArray<IGridItem>; public gridOptionsAlarms: kg.GridOptions<IGridItem>; constructor() { this.items = ko.observableArray<IGridItem>(); this.selectedItems = ko.observableArray<IGridItem>(); this.gridOptionsAlarms = this.createDefaultGridOptions(this.items, this.selectedItems); } public createDefaultGridOptions<Type>(dataArray: KnockoutObservableArray<Type>, selectedItems: KnockoutObservableArray<Type>): kg.GridOptions<Type> { return { data: dataArray, displaySelectionCheckbox: false, footerVisible: false, multiSelect: false, showColumnMenu: false,<|fim▁hole|> selectedItems: selectedItems }; } }<|fim▁end|>
plugins: null,
<|file_name|>export.js<|end_file_name|><|fim▁begin|>var auth = require( './auth' ), surveyTemplate = require( '../lib/survey_template' ), SurveyResponse = require( '../models/survey_response' ); var ExportController = function() { this.csv = function( req, res ) { var now = new Date(), cleanTitle = req.survey.title.replace( /[^a-z0-9]/ig, '' ), key = req.param( 'key' ), today = now.getFullYear() + '_' + ( now.getMonth() + 1 ) + '_' + now.getDate(), flatSurvey = [], csvData = [], survey = surveyTemplate.getSurvey( req.survey.version ); survey.pages.forEach( function( page ) { page.fields.forEach( function( q ) { if( q.type === 'gridselect' ) { for( var key in q.options ) { flatSurvey.push( { name: key, label: q.options[ key ] } ); } } else { flatSurvey.push( { name: q.name, label: q.label } ); } } ); } ); csvData.push( flatSurvey.map( function( q ) { return q.label; } ) ); SurveyResponse.find( { surveyId: req.survey.id }, function( err, responses ) { if( responses && responses.length ) { responses.forEach( function( response ) { csvData.push( flatSurvey.map( function( q ) { try { return response.response[ q.name ]; } catch( e ) { return ''; } } ) ); } ); } res.header('Content-Disposition', 'attachment; filename=' + today + '_' + cleanTitle + '.csv'); res.csv( csvData ); } ); }; };<|fim▁hole|> var exporter = new ExportController(); app.get( '/export/:key', auth.canAccessSurvey, exporter.csv ); };<|fim▁end|>
exports.setup = function( app ) {
<|file_name|>doop-list.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node var _ = require('lodash'); var async = require('async-chainable'); var asyncFlush = require('async-chainable-flush'); var colors = require('chalk'); var doop = require('.'); var glob = require('glob'); var fs = require('fs'); var fspath = require('path'); var program = require('commander'); var sha1 = require('node-sha1'); program .version(require('./package.json').version) .description('List units installed for the current project') .option('-b, --basic', 'Display a simple list, do not attempt to hash file differences') .option('-v, --verbose', 'Be verbose. Specify multiple times for increasing verbosity', function(i, v) { return v + 1 }, 0) .parse(process.argv); async() .use(asyncFlush) .then(doop.chProjectRoot) .then(doop.getUserSettings) // Get the list of units {{{ .then('units', function(next) { doop.getUnits(function(err, units) { if (err) return next(err); next(null, units.map(u => { return { id: u, path: fspath.join(doop.settings.paths.units, u), files: {}, } })); }); }) // }}} // Hash file comparisons unless program.basic {{{ // Get repo {{{ .then('repo', function(next) { if (program.basic) return next(); doop.getDoopPath(next, program.repo); }) .then(function(next) { if (program.basic) return next(); if (program.verbose) console.log('Using Doop source:', colors.cyan(this.repo)); next(); }) // }}} // Scan project + Doop file list and hash all files (unless !program.basic) {{{ .then(function(next) { if (program.basic) return next(); // Make a list of all files in both this project and in the doop repo // For each file create an object with a `local` sha1 hash and `doop` sha1 hash var hashQueue = async(); // Hash tasks to perform async() .forEach(this.units, function(next, unit) { async() .parallel([ // Hash all project files {{{ function(next) { glob(fspath.join(unit.path, '**'), {nodir: true}, function(err, files) { if (files.length) { unit.existsInProject = true; files.forEach(function(file) { hashQueue.defer(file, function(next) { if (program.verbose) console.log('Hash file (Proj)', colors.cyan(file)); sha1(fs.createReadStream(file), function(err, hash) { if (!unit.files[file]) unit.files[file] = {path: file}; unit.files[file].project = hash; next(); }); }); }); } else { unit.existsInProject = false; } next(); }); }, // }}}<|fim▁hole|> function(next) { glob(fspath.join(doop.settings.paths.doop, unit.path, '**'), {nodir: true}, function(err, files) { if (files.length) { unit.existsInDoop = true; files.forEach(function(rawFile) { var croppedPath = rawFile.substr(doop.settings.paths.doop.length + 1); var file = fspath.join(doop.settings.paths.doop, croppedPath); hashQueue.defer(file, function(next) { if (program.verbose) console.log('Hash file (Doop)', colors.cyan(croppedPath)); sha1(fs.createReadStream(file), function(err, hash) { if (!unit.files[croppedPath]) unit.files[croppedPath] = {path: file}; unit.files[croppedPath].doop = hash; next(); }); }); }); } else { unit.existsInDoop = false; } next(); }); }, // }}} ]) .end(next) }) .end(function(err) { if (err) return next(err); // Wait for hashing queue to finish hashQueue.await().end(next); }); }) // }}} // }}} // Present the list {{{ .then(function(next) { var task = this; if (program.verbose > 1) console.log(); this.units.forEach(function(unit) { if (unit.existsInProject && !unit.existsInDoop) { console.log(colors.grey(' -', unit.id)); } else if (!unit.existsInProject && unit.existsInDoop) { console.log(colors.red(' -', unit.id)); } else { // In both 
Doop + Project - examine file differences var changes = []; // Edited {{{ var items = _.filter(unit.files, f => f.project && f.doop && f.project != f.doop); if (_.get(doop.settings, 'list.changes.maxEdited') && items.length > doop.settings.list.changes.maxEdited) { changes.push(colors.yellow.bold('~' + items.length + ' items')); } else { items.forEach(f => changes.push(colors.yellow.bold('~') + f.path.substr(unit.path.length+1))); } // }}} // Created {{{ var items = _.filter(unit.files, f => f.project && !f.doop); if (_.get(doop.settings, 'list.changes.maxCreated') && items.length > doop.settings.list.changes.maxCreated) { changes.push(colors.green.bold('+' + items.length + ' items')); } else { items.forEach(f => changes.push(colors.green.bold('+') + f.path.substr(unit.path.length+1))); } // }}} // Deleted {{{ var items = _.filter(unit.files, f => f.doop && !f.project); if (_.get(doop.settings, 'list.changes.maxDeleted') && items.length > doop.settings.list.changes.maxDeleted) { changes.push(colors.red.bold('-' + items.length + ' items')); } else { items.forEach(f => changes.push(colors.red.bold('-') + f.path.substr(doop.settings.paths.doop.length+unit.path.length+2))); } // }}} if (changes.length) { console.log(' -', unit.id, colors.blue('('), changes.join(', '), colors.blue(')')); } else { console.log(' -', unit.id); } } }); next(); }) // }}} // End {{{ .flush() .end(function(err) { if (err) { console.log(colors.red('Doop Error'), err.toString()); process.exit(1); } else { process.exit(0); } }); // }}}<|fim▁end|>
// Hash all Doop files {{{
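doop-list decides between '~', '+' and '-' for a unit file by hashing it on both sides; a compact Python version of that comparison, streaming the SHA-1 so large files are never read in one piece:

import hashlib, os

def sha1_of(path):
    h = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            h.update(chunk)
    return h.hexdigest()

def classify(project_path, doop_path):
    # edited / created / deleted, as in the unit listing above
    ha = sha1_of(project_path) if os.path.exists(project_path) else None
    hb = sha1_of(doop_path) if os.path.exists(doop_path) else None
    if ha and hb and ha != hb:
        return '~'
    if ha and not hb:
        return '+'
    if hb and not ha:
        return '-'
    return None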
<|file_name|>enum-no_trailing_comma.rs<|end_file_name|><|fim▁begin|>// rustfmt-enum_trailing_comma: false enum X { A, B } enum Y { A, B } enum TupX { A(u32), B(i32, u16) } enum TupY { A(u32), B(i32, u16) } <|fim▁hole|> enum StructY { A { s: u16 }, B { u: u32, i: i32 } }<|fim▁end|>
enum StructX { A { s: u16 }, B { u: u32, i: i32 } }
<|file_name|>utils.go<|end_file_name|><|fim▁begin|>package utils import ( "code.google.com/p/goauth2/oauth" "github.com/google/go-github/github" "github.com/pinterb/hsc/config" ) // Utils manages interactions with all utilities type Utils struct { client *github.Client config *config.Config Users *UserUtils // Teams *TeamUtils // Projects *ProjectUtils // Organizations *OrganizationUtils }<|fim▁hole|>type Response struct { *github.Response } // NewUtils creates an instance of Utils func NewUtils(config *config.Config) *Utils { client := github.NewClient(nil) if config != nil { t := &oauth.Transport{ Token: &oauth.Token{AccessToken: config.Token}, } client = github.NewClient(t.Client()) } u := &Utils{config: config, client: client} u.Users = &UserUtils{Utils: u} return u } // NewResponse creates a new instance of Response func NewResponse(r *github.Response) *Response { resp := &Response{Response: r} return resp }<|fim▁end|>
// Response is a light wrapper around the go-github Response struct
<|file_name|>erroneous_aclfull_proxy.py<|end_file_name|><|fim▁begin|>from . import NetworkObject import z3 class ErroneousAclWebProxy (NetworkObject): """A caching web proxy which enforces ACLs erroneously. The idea here was to present something that is deliberately not path independent""" def _init (self, node, network, context): super(ErroneousAclWebProxy, self).init_fail(node) self.proxy = node.z3Node self.ctx = context self.constraints = list () self.acls = list () network.SaneSend(self) self._webProxyFunctions () self._webProxyConstraints () @property def z3Node (self): return self.proxy def SetPolicy (self, policy): """Wrap add acls""" self.AddAcls(policy) def AddAcls(self, acls): if not isinstance(acls, list): acls = [acls] self.acls.extend(acls) @property def ACLs (self): return self.acls def _addConstraints (self, solver): self.constraints = list () self._webProxyFunctions () self._webProxyConstraints () solver.add(self.constraints) def _webProxyConstraints (self): eh = z3.Const('__webproxy_contraint_eh_%s'%(self.proxy), self.ctx.node) eh2 = z3.Const('__webproxy_contraint_eh2_%s'%(self.proxy), self.ctx.node) a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address) i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort()) p = z3.Const('__webproxy_req_packet_%s'%(self.proxy), self.ctx.packet) p2 = z3.Const('__webproxy_req_packet_2_%s'%(self.proxy), self.ctx.packet) p3 = z3.Const('__webproxy_res_packet_%s'%(self.proxy), self.ctx.packet) e1 = z3.Const('__webproxy_e1_%s'%(self.proxy), self.ctx.node) e2 = z3.Const('__webproxy_e2_%s'%(self.proxy), self.ctx.node) e3 = z3.Const('__webproxy_e3_%s'%(self.proxy), self.ctx.node) e4 = z3.Const('__webproxy_e4_%s'%(self.proxy), self.ctx.node) e5 = z3.Const('__webproxy_e5_%s'%(self.proxy), self.ctx.node) e6 = z3.Const('__webproxy_e6_%s'%(self.proxy), self.ctx.node) # \forall e, p: send(w, e, p) \Rightarrow hostHasAddr(w, p.src) # \forall e_1, p_1: send(w, e, p_1) \Rightarrow \exists e_2, p_2: recv(e_2, w, p_2) \land # p_2.origin == p_1.origin \land p_2.dest == p_1.dest \land hostHasAddr(p_2.origin, p_2.src) self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \ self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p))))) cached_packet = z3.And(self.cached(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \ self.ctx.etime(self.proxy, p2, self.ctx.recv_event) > \ self.ctime(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \ self.ctx.etime(self.proxy, p, self.ctx.send_event) > \ self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \ self.ctx.packet.body(p) == self.cresp(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \ self.ctx.packet.orig_body(p) == self.corigbody(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \ self.ctx.packet.dest(p) == self.ctx.packet.src(p2), \ self.ctx.dest_port(p) == self.ctx.src_port(p2), \ self.ctx.src_port(p) == self.ctx.dest_port(p2), \ self.ctx.packet.options(p) == 0, \ self.ctx.packet.origin(p) == self.corigin(self.ctx.packet.dest(p2), self.ctx.packet.body(p2))) request_constraints = [z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.dest(p2))), \ self.ctx.packet.origin(p2) == self.ctx.packet.origin(p), self.ctx.packet.dest(p2) == self.ctx.packet.dest(p), \ self.ctx.packet.body(p2) == self.ctx.packet.body(p), \ self.ctx.packet.orig_body(p2) == self.ctx.packet.orig_body(p), \ self.ctx.packet.options(p) == 0, \ self.ctx.packet.seq(p2) == self.ctx.packet.seq(p), \ self.ctx.hostHasAddr(self.ctx.packet.origin(p2), 
self.ctx.packet.src(p2)), \ self.ctx.dest_port(p2) == self.ctx.dest_port(p), \ self.ctx.etime(self.proxy, p, self.ctx.send_event) > \ self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \ self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p))] if len(self.acls) != 0: acl_constraint = map(lambda (s, d): \ z3.Not(z3.And(self.ctx.packet.src(p2) == s, \ self.ctx.packet.dest(p2) == d)), self.acls) request_constraints.extend(acl_constraint) self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \ z3.Or(\ z3.Exists([p2, eh2], \ z3.And(self.ctx.recv(eh2, self.proxy, p2), \ z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\ z3.And(request_constraints))), \ z3.Exists([p2, eh2], \ z3.And(self.ctx.recv(eh2, self.proxy, p2), \ z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\ cached_packet)))))) cache_conditions = \ z3.ForAll([a, i], \ z3.Implies(self.cached(a, i), \ z3.And(\ z3.Not(self.ctx.hostHasAddr (self.proxy, a)), \ z3.Exists([e1, e2, e3, p, p2, p3], \ z3.And(\ self.ctx.recv(e1, self.proxy, p2), \ self.ctx.packet.dest(p2) == a, \ self.ctx.packet.body(p2) == i, \ self.ctx.packet.body(p) == i, \ self.ctx.packet.dest(p) == a, \ self.ctx.dest_port(p) == self.ctx.dest_port(p2), \ self.creqpacket(a, i) == p2, \ self.creqopacket(a, i) == p, \ self.ctime(a, i) > self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \ self.ctx.send(self.proxy, e2, p), \ self.ctime(a, i) > self.ctx.etime(self.proxy, p, self.ctx.send_event), \ self.ctx.recv(e3, self.proxy, p3), \ self.crespacket(a, i) == p3, \ self.ctx.src_port(p3) == self.ctx.dest_port(p), \ self.ctx.dest_port(p3) == self.ctx.src_port(p), \ self.ctx.packet.src(p3) == self.ctx.packet.dest(p), \ self.ctx.packet.dest(p3) == self.ctx.packet.src(p), \ z3.Exists([e5, e6], \ z3.And( self.ctx.hostHasAddr (e5, a), \ self.ctx.recv(e6, e5, p), \ z3.ForAll([e4], \ z3.Or(self.ctx.etime(e4, p3, self.ctx.send_event) == 0, \ self.ctx.etime(e4, p3, self.ctx.send_event) > self.ctx.etime(e5, p, self.ctx.recv_event))))), \ self.cresp(a, i) == self.ctx.packet.body(p3), \ self.corigbody(a, i) == self.ctx.packet.orig_body(p3), \ self.corigin(a, i) == self.ctx.packet.origin(p3), \ self.ctime(a, i) == self.ctx.etime(self.proxy, p3, self.ctx.recv_event), \ *request_constraints)))))<|fim▁hole|> def _webProxyFunctions (self): self.cached = z3.Function('__webproxy_cached_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.BoolSort()) self.ctime = z3.Function('__webproxy_ctime_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort()) self.cresp = z3.Function('__webproxy_cresp_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort()) self.corigbody = z3.Function('__webproxy_corigbody_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort()) self.corigin = z3.Function('__webproxy_corigin_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.node) self.crespacket = z3.Function('__webproxy_crespacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet) self.creqpacket = z3.Function('__webproxy_creqpacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet) self.creqopacket = z3.Function('__webproxy_creqopacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet) #self.corigbody = z3.Function('__webproxy_corigbody_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet) a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address) i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort()) # Model cache as a function # 
If not cached, cache time is 0 self.constraints.append(z3.ForAll([a, i], z3.Not(self.cached(a, i)) == (self.ctime(a, i) == 0))) self.constraints.append(z3.ForAll([a, i], z3.Not(self.cached(a, i)) == (self.cresp(a, i) == 0)))<|fim▁end|>
self.constraints.append(cache_conditions)
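A minimal z3py sketch of the modelling style the proxy class uses: uninterpreted functions over declared sorts, constrained under a quantifier. The sort and function names here are stand-ins:

import z3

Addr = z3.DeclareSort('Addr')
a = z3.Const('a', Addr)
i = z3.Int('i')
cached = z3.Function('cached', Addr, z3.IntSort(), z3.BoolSort())
ctime = z3.Function('ctime', Addr, z3.IntSort(), z3.IntSort())

s = z3.Solver()
# "not cached" iff "cache time is 0", echoing the constraint above.
s.add(z3.ForAll([a, i], z3.Not(cached(a, i)) == (ctime(a, i) == 0)))
print(s.check())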
<|file_name|>bitswap.go<|end_file_name|><|fim▁begin|>// package bitswap implements the IPFS Exchange interface with the BitSwap // bilateral exchange protocol. package bitswap import ( "errors" "math" "sync" "time" process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" ) var log = logging.Logger("bitswap") const ( // maxProvidersPerRequest specifies the maximum number of providers desired // from the network. This value is specified because the network streams // results. // TODO: if a 'non-nice' strategy is implemented, consider increasing this value maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 hasBlockTimeout = time.Second * 15 provideTimeout = time.Second * 15 sizeBatchRequestChan = 32 // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 HasBlockBufferSize = 256 provideKeysBufferSize = 2048 provideWorkerMax = 512 ) var rebroadcastDelay = delay.Fixed(time.Second * 10) // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. // Runs until context is cancelled. func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, nice bool) exchange.Interface { // important to use provided parent context (since it may include important // loggable data). It's probably not a good idea to allow bitswap to be // coupled to the concerns of the IPFS daemon in this way. // // FIXME(btc) Now that bitswap manages itself using a process, it probably // shouldn't accept a context anymore. Clients should probably use Close() // exclusively. We should probably find another way to share logging data ctx, cancelFunc := context.WithCancel(parent) notif := notifications.New() px := process.WithTeardown(func() error { notif.Shutdown() return nil }) bs := &Bitswap{ self: p, blockstore: bstore, notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network,<|fim▁hole|> provideKeys: make(chan key.Key, provideKeysBufferSize), wm: NewWantManager(ctx, network), } go bs.wm.Run() network.SetDelegate(bs) // Start up bitswaps async worker routines bs.startWorkers(px, ctx) // bind the context and process. // do it over here to avoid closing before all setup is done. go func() { <-px.Closing() // process closes first cancelFunc() }() procctx.CloseAfterContext(px, ctx) // parent cancelled first return bs } // Bitswap instances implement the bitswap protocol. 
type Bitswap struct { // the ID of the peer to act on behalf of self peer.ID // network delivers messages on behalf of the session network bsnet.BitSwapNetwork // the peermanager manages sending messages to peers in a way that // wont block bitswap operation wm *WantManager // blockstore is the local database // NB: ensure threadsafety blockstore blockstore.Blockstore notifications notifications.PubSub // send keys to a worker to find and connect to providers for them findKeys chan *blockRequest engine *decision.Engine process process.Process newBlocks chan *blocks.Block provideKeys chan key.Key counterLk sync.Mutex blocksRecvd int dupBlocksRecvd int dupDataRecvd uint64 } type blockRequest struct { keys []key.Key ctx context.Context } // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) { // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. Note that it is okay to // listen on parent in this scope, but NOT okay to pass |parent| to // functions called by this one. Otherwise those functions won't return // when this context's cancel func is executed. This is difficult to // enforce. May this comment keep you safe. ctx, cancelFunc := context.WithCancel(parent) ctx = logging.ContextWithLoggable(ctx, logging.Uuid("GetBlockRequest")) log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) defer func() { cancelFunc() }() promise, err := bs.GetBlocks(ctx, []key.Key{k}) if err != nil { return nil, err } select { case block, ok := <-promise: if !ok { select { case <-ctx.Done(): return nil, ctx.Err() default: return nil, errors.New("promise channel was closed") } } return block, nil case <-parent.Done(): return nil, parent.Err() } } func (bs *Bitswap) WantlistForPeer(p peer.ID) []key.Key { var out []key.Key for _, e := range bs.engine.WantlistForPeer(p) { out = append(out, e.Key) } return out } // GetBlocks returns a channel where the caller may receive blocks that // correspond to the provided |keys|. Returns an error if BitSwap is unable to // begin this request within the deadline enforced by the context. // // NB: Your request remains open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) { select { case <-bs.process.Closing(): return nil, errors.New("bitswap is closed") default: } promise := bs.notifications.Subscribe(ctx, keys...) for _, k := range keys { log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) } bs.wm.WantBlocks(keys) req := &blockRequest{ keys: keys, ctx: ctx, } select { case bs.findKeys <- req: return promise, nil case <-ctx.Done(): return nil, ctx.Err() } } // CancelWant removes a given key from the wantlist func (bs *Bitswap) CancelWants(ks []key.Key) { bs.wm.CancelWants(ks) } // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *Bitswap) HasBlock(blk *blocks.Block) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") default: } err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times if err != nil { log.Errorf("Error writing block to datastore: %s", err) return err } bs.notifications.Publish(blk) select { case bs.newBlocks <- blk: // send block off to be reprovided case <-bs.process.Closing(): return bs.process.Close() } return nil } func (bs *Bitswap) tryPutBlock(blk *blocks.Block, attempts int) error { var err error for i := 0; i < attempts; i++ { if err = bs.blockstore.Put(blk); err == nil { break } time.Sleep(time.Millisecond * time.Duration(400*(i+1))) } return err } func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) { ctx, cancel := context.WithCancel(ctx) defer cancel() // Get providers for all entries in wantlist (could take a while) wg := sync.WaitGroup{} for _, e := range entries { wg.Add(1) go func(k key.Key) { defer wg.Done() child, cancel := context.WithTimeout(ctx, providerRequestTimeout) defer cancel() providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { go func(p peer.ID) { bs.network.ConnectTo(ctx, p) }(prov) } }(e.Key) } wg.Wait() // make sure all our children do finish. } func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { // This call records changes to wantlists, blocks received, // and number of bytes transfered. bs.engine.MessageReceived(p, incoming) // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger iblocks := incoming.Blocks() if len(iblocks) == 0 { return } // quickly send out cancels, reduces chances of duplicate block receives var keys []key.Key for _, block := range iblocks { if _, found := bs.wm.wl.Contains(block.Key()); !found { log.Info("received un-asked-for block: %s", block) continue } keys = append(keys, block.Key()) } bs.wm.CancelWants(keys) wg := sync.WaitGroup{} for _, block := range iblocks { wg.Add(1) go func(b *blocks.Block) { defer wg.Done() if err := bs.updateReceiveCounters(b); err != nil { return // ignore error, is either logged previously, or ErrAlreadyHaveBlock } k := b.Key() log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) log.Debugf("got block %s from %s", b, p) if err := bs.HasBlock(b); err != nil { log.Warningf("ReceiveMessage HasBlock error: %s", err) } }(block) } wg.Wait() } var ErrAlreadyHaveBlock = errors.New("already have block") func (bs *Bitswap) updateReceiveCounters(b *blocks.Block) error { bs.counterLk.Lock() defer bs.counterLk.Unlock() bs.blocksRecvd++ has, err := bs.blockstore.Has(b.Key()) if err != nil { log.Infof("blockstore.Has error: %s", err) return err } if err == nil && has { bs.dupBlocksRecvd++ bs.dupDataRecvd += uint64(len(b.Data)) } if has { return ErrAlreadyHaveBlock } return nil } // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { bs.wm.Connected(p) } // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerDisconnected(p peer.ID) { bs.wm.Disconnected(p) bs.engine.PeerDisconnected(p) } func (bs *Bitswap) ReceiveError(err error) { log.Infof("Bitswap ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger } func (bs *Bitswap) Close() error { return bs.process.Close() } func (bs *Bitswap) GetWantlist() []key.Key { var out []key.Key for _, e := range 
bs.wm.wl.Entries() { out = append(out, e.Key) } return out }<|fim▁end|>
findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize),
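Each row in this dump pairs a prompt — file prefix and suffix around a `<|fim▁hole|>` marker, closed by `<|fim▁end|>` — with the completion that fills the hole; in the row above, the completion's three field initializers slot back into the `Bitswap{...}` literal right after `network: network,`. A minimal sketch of reassembling the original file from one row, assuming exactly one hole per prompt (the helper name is mine):

```python
# Marker strings as they appear in the rows of this dataset.
FIM_BEGIN = "<|fim\u2581begin|>"
FIM_HOLE = "<|fim\u2581hole|>"
FIM_END = "<|fim\u2581end|>"

def reassemble(prompt: str, completion: str) -> str:
    """Rebuild the full source file from one prompt/completion pair."""
    body = prompt.split(FIM_BEGIN, 1)[1]      # drop the <|file_name|> header
    body = body.rsplit(FIM_END, 1)[0]         # drop the closing marker
    prefix, suffix = body.split(FIM_HOLE, 1)  # assumes one hole per row
    return prefix + completion + suffix
```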
<|file_name|>saveMember.ts<|end_file_name|><|fim▁begin|>import { FormValidationResult } from 'lc-form-validation'; import * as toastr from 'toastr';<|fim▁hole|>import { actionTypes } from '../../../common/constants/actionTypes'; import { MemberEntity } from '../../../model'; import { memberAPI } from '../../../api/member'; import { memberFormValidation } from '../memberFormValidation'; import {trackPromise} from 'react-promise-tracker'; export const saveMemberAction = (member: MemberEntity) => (dispatch) => { trackPromise( memberFormValidation.validateForm(member) .then((formValidationResult) => { if (formValidationResult.succeeded) { saveMember(member); } dispatch(saveMemberActionCompleted(formValidationResult)); }) ); }; const saveMember = (member: MemberEntity) => { trackPromise( memberAPI.saveMember(member) .then(() => { toastr.success('Member saved.'); history.back(); }) .catch(toastr.error) ); }; const saveMemberActionCompleted = (formValidationResult: FormValidationResult) => ({ type: actionTypes.SAVE_MEMBER, payload: formValidationResult, });<|fim▁end|>
<|file_name|>main.js<|end_file_name|><|fim▁begin|>const Lib = require("../src/main"); const assert = require("assert"); describe("plain object output", function () { context("for `JSON.stringify` serializable objects", function () { it("should have resembling structure", function () { const obj1 = { a: 1, b: "b", c: true }; const res = Lib.write(obj1); const exp1 = { f: [ ["a", 1], ["b", "b"], ["c", true] ] }; assert.deepStrictEqual(res, exp1); assert.equal(Lib.stringify(obj1), JSON.stringify(res)); const obj2 = { obj1 }; const exp2 = { f: [["obj1", exp1]] }; assert.deepStrictEqual(Lib.write(obj2), exp2); assert.equal(Lib.stringify(obj2), JSON.stringify(exp2)); }); context("with `alwaysByRef: true`", function () { it("should use references environment", function () { const obj1 = { a: { a: 1 }, b: { b: "b" }, c: { c: true } }; const res = Lib.write(obj1, { alwaysByRef: true }); const exp1 = { r: 3, x: [ { f: [["c", true]] }, { f: [["b", "b"]] }, { f: [["a", 1]] }, {<|fim▁hole|> f: [ ["a", { r: 2 }], ["b", { r: 1 }], ["c", { r: 0 }] ] } ] }; assert.deepStrictEqual(res, exp1); const obj2 = Lib.read(res); assert.notStrictEqual(obj1, obj2); assert.deepStrictEqual(obj1, obj2); }); }); }); it("should have correct format for shared values", function () { const root = { val: "hi" }; root.rec1 = { obj1: root, obj2: root, obj3: { obj: root } }; root.rec2 = root; assert.deepStrictEqual(Lib.write(root), { r: 0, x: [ { f: [ ["val", "hi"], [ "rec1", { f: [ ["obj1", { r: 0 }], ["obj2", { r: 0 }], ["obj3", { f: [["obj", { r: 0 }]] }] ] } ], ["rec2", { r: 0 }] ] } ] }); }); }); describe("special values", function () { it("should correctly restore them", function () { const root = { undef: undefined, nul: null, nan: NaN }; const res = Lib.write(root); assert.deepStrictEqual(res, { f: [ ["undef", { $: "undefined" }], ["nul", null], ["nan", { $: "NaN" }] ] }); const { undef, nul, nan } = Lib.read(res); assert.strictEqual(undef, undefined); assert.strictEqual(nul, null); assert.ok(typeof nan === "number" && Number.isNaN(nan)); }); }); describe("reading plain object", function () { it("should correctly assign shared values", function () { const obj = Lib.read({ r: 0, x: [ { f: [ ["val", "hi"], [ "rec1", { f: [ ["obj1", { r: 0 }], ["obj2", { r: 0 }], ["obj3", { f: [["obj", { r: 0 }]] }] ] } ], ["rec2", { r: 0 }] ] } ] }); assert.strictEqual(Object.keys(obj).sort().join(), "rec1,rec2,val"); assert.strictEqual(obj.val, "hi"); assert.strictEqual(Object.keys(obj.rec1).sort().join(), "obj1,obj2,obj3"); assert.strictEqual(Object.keys(obj.rec1.obj3).sort().join(), "obj"); assert.strictEqual(obj.rec2, obj); assert.strictEqual(obj.rec1.obj1, obj); assert.strictEqual(obj.rec1.obj2, obj); assert.strictEqual(obj.rec1.obj3.obj, obj); }); }); describe("object with parent", function () { function MyObj() { this.a = 1; this.b = "b"; this.c = true; } Lib.regConstructor(MyObj); it("should output `$` attribute", function () { const obj1 = new MyObj(); assert.deepEqual(Lib.write(obj1), { $: "MyObj", f: [ ["a", 1], ["b", "b"], ["c", true] ] }); function Object() { this.a = obj1; } Lib.regConstructor(Object); assert.deepEqual(Lib.write(new Object()), { $: "Object_1", f: [ [ "a", { f: [ ["a", 1], ["b", "b"], ["c", true] ], $: "MyObj" } ] ] }); }); it("should use `$` attribute to resolve a type on read", function () { const obj1 = Lib.read({ $: "MyObj", f: [ ["a", 1], ["b", "b"], ["c", true] ] }); assert.strictEqual(obj1.constructor, MyObj); assert.equal(Object.keys(obj1).sort().join(), "a,b,c"); assert.strictEqual(obj1.a, 1); 
assert.strictEqual(obj1.b, "b"); assert.strictEqual(obj1.c, true); }); context("for shared values", function () { function Obj2() {} Lib.regConstructor(Obj2); it("should write shared values in `shared` map", function () { const root = new Obj2(); root.l1 = new Obj2(); root.l1.back = root.l1; assert.deepStrictEqual(Lib.write(root), { f: [["l1", { r: 0 }]], $: "Obj2", x: [{ f: [["back", { r: 0 }]], $: "Obj2" }] }); }); it("should use `#shared` keys to resolve prototypes on read", function () { const obj1 = Lib.read({ f: [["l1", { r: 0 }]], $: "Obj2", x: [{ f: [["back", { r: 0 }]], $: "Obj2" }] }); assert.strictEqual(obj1.constructor, Obj2); assert.deepEqual(Object.keys(obj1), ["l1"]); assert.deepEqual(Object.keys(obj1.l1), ["back"]); assert.strictEqual(obj1.l1.constructor, Obj2); assert.strictEqual(obj1.l1.back, obj1.l1); }); }); }); describe("prototypes chain", function () { it("should correctly store and recover all references", function () { class C1 { constructor(p) { this.p1 = p; } } class C2 extends C1 { constructor() { super("A"); this.c1 = new C1(this); } } Lib.regOpaqueObject(C1.prototype, "C1"); Lib.regOpaqueObject(C2.prototype, "C2"); const obj = new C2(); C1.prototype.p_prop_1 = "prop_1"; const res = Lib.write(obj); C1.prototype.p_prop_1 = "changed"; assert.deepEqual(res, { r: 0, x: [ { p: { $: "C2" }, f: [ ["p1", "A"], [ "c1", { p: { $: "C1", f: [["p_prop_1", "prop_1"]] }, f: [["p1", { r: 0 }]] } ] ] } ] }); const r2 = Lib.read(res); assert.ok(r2 instanceof C1); assert.ok(r2 instanceof C2); assert.strictEqual(r2.constructor, C2); assert.strictEqual(Object.getPrototypeOf(r2).constructor, C2); assert.strictEqual(r2.c1.constructor, C1); assert.strictEqual(r2.c1.p1, r2); assert.equal(r2.p1, "A"); assert.strictEqual(C1.prototype.p_prop_1, "prop_1"); class C3 { constructor(val) { this.a = val; } } Lib.regOpaqueObject(C3.prototype, "C3", { props: false }); class C4 extends C3 { constructor() { super("A"); this.b = "B"; } } Lib.regOpaqueObject(C4.prototype, "C4"); const obj2 = new C4(); const res2 = Lib.write(obj2); assert.deepEqual(res2, { p: { $: "C4" }, f: [ ["a", "A"], ["b", "B"] ] }); const obj3 = Lib.read(res2); assert.ok(obj3 instanceof C3); assert.ok(obj3 instanceof C4); assert.equal(obj3.a, "A"); assert.equal(obj3.b, "B"); assert.equal( Object.getPrototypeOf(Object.getPrototypeOf(obj3)), Object.getPrototypeOf(Object.getPrototypeOf(obj2)) ); }); }); describe("property's descriptor", function () { it("should correctly store and recover all settings", function () { const a = {}; let setCalled = 0; let getCalled = 0; let back; let val; const descr = { set(value) { assert.strictEqual(this, back); setCalled++; val = value; }, get() { assert.strictEqual(this, back); getCalled++; return a; } }; Object.defineProperty(a, "prop", descr); const psym1 = Symbol("prop"); const psym2 = Symbol("prop"); Object.defineProperty(a, psym1, { value: "B", enumerable: true }); Object.defineProperty(a, psym2, { value: "C", configurable: true }); Object.defineProperty(a, Symbol.for("prop"), { value: "D", writable: true }); Lib.regOpaqueObject(descr.set, "dset"); Lib.regOpaqueObject(descr.get, "dget"); const opts = { symsByName: new Map() }; const res = Lib.write(a, opts); assert.deepEqual(res, { f: [ ["prop", null, 15, { $: "dget" }, { $: "dset" }], [{ name: "prop" }, "B", 5], [{ name: "prop", id: 1 }, "C", 6], [{ key: "prop" }, "D", 3] ] }); back = Lib.read(res, opts); assert.deepEqual(Object.getOwnPropertySymbols(back), [ psym1, psym2, Symbol.for("prop") ]); assert.strictEqual(setCalled, 0); 
assert.strictEqual(getCalled, 0); back.prop = "A"; assert.strictEqual(setCalled, 1); assert.strictEqual(getCalled, 0); assert.strictEqual(val, "A"); assert.strictEqual(back.prop, a); assert.strictEqual( Object.getOwnPropertyDescriptor(back, Symbol("prop")), void 0 ); assert.deepEqual(Object.getOwnPropertyDescriptor(back, "prop"), { enumerable: false, configurable: false, ...descr }); assert.deepEqual(Object.getOwnPropertyDescriptor(back, psym1), { value: "B", writable: false, enumerable: true, configurable: false }); assert.deepEqual(Object.getOwnPropertyDescriptor(back, psym2), { value: "C", writable: false, enumerable: false, configurable: true }); assert.deepEqual( Object.getOwnPropertyDescriptor(back, Symbol.for("prop")), { value: "D", writable: true, enumerable: false, configurable: false } ); }); }); describe("arrays serialization", function () { context("without shared references", function () { it("should be similar to `JSON.stringify`/`JSON.parse`", function () { const obj = { arr: [1, "a", [true, [false, null]], undefined] }; const res = Lib.write(obj); assert.deepStrictEqual(res, { f: [["arr", [1, "a", [true, [false, null]], { $: "undefined" }]]] }); const back = Lib.read(res); assert.deepStrictEqual(obj, back); }); it("doesn't support Array as root", function () { assert.throws(() => Lib.write([1, 2]), TypeError); }); }); it("should handle shared references", function () { const obj = { arr: [1, "a", [true, [false, null]], undefined] }; obj.arr.push(obj.arr); const res = Lib.write(obj); assert.notStrictEqual(res, obj); assert.deepStrictEqual(res, { f: [["arr", { r: 0 }]], x: [[1, "a", [true, [false, null]], { $: "undefined" }, { r: 0 }]] }); const back = Lib.read(res); assert.notStrictEqual(res, back); assert.deepStrictEqual(obj, back); }); }); describe("`Set` serialization", function () { context("without shared references", function () { it("should output `JSON.stringify` serializable object", function () { const arr = [1, "a", [true, [false, null]], undefined]; const obj = { set: new Set(arr) }; obj.set.someNum = 100; obj.set.self = obj.set; const res = Lib.write(obj); assert.deepStrictEqual(res, { f: [["set", { r: 0 }]], x: [ { $: "Set", l: [1, "a", [true, [false, null]], { $: "undefined" }], f: [ ["someNum", 100], ["self", { r: 0 }] ] } ] }); const back = Lib.read(res); assert.deepStrictEqual(obj, back); }); }); it("should handle shared references", function () { const obj = new Set([1, "a", [true, [false, null]], undefined]); obj.add(obj); const res = Lib.write(obj); assert.notStrictEqual(res, obj); assert.deepStrictEqual(res, { r: 0, x: [ { $: "Set", l: [1, "a", [true, [false, null]], { $: "undefined" }, { r: 0 }] } ] }); const back = Lib.read(res); assert.notStrictEqual(res, back); assert.deepStrictEqual(obj, back); }); }); describe("`Map` serialization", function () { context("without shared references", function () { it("should output `JSON.stringify` serializable object", function () { const arr = [[1, "a"], [true, [false, null]], [undefined]]; const obj = { map: new Map(arr) }; const res = Lib.write(obj); assert.deepStrictEqual(res, { f: [ [ "map", { $: "Map", k: [1, true, { $: "undefined" }], v: ["a", [false, null], { $: "undefined" }] } ] ] }); const back = Lib.read(res); assert.deepStrictEqual(obj, back); }); }); it("should handle shared references", function () { const obj = new Map([[1, "a"], [true, [false, null]], [undefined]]); obj.set(obj, obj); const res = Lib.write(obj); assert.notStrictEqual(res, obj); assert.deepStrictEqual(res, { r: 0, x: [ { $: "Map", 
k: [1, true, { $: "undefined" }, { r: 0 }], v: ["a", [false, null], { $: "undefined" }, { r: 0 }] } ] }); const back = Lib.read(res); assert.notStrictEqual(res, back); assert.deepStrictEqual(obj, back); }); }); describe("opaque objects serialization", function () { it("should throw for not registered objects", function () { function a() {} assert.throws(() => Lib.write({ a }), TypeError); }); it("should not throw if `ignore:true`", function () { function a() {} assert.deepStrictEqual(Lib.write({ a }, { ignore: true }), {}); }); it("should output object's name if registered", function () { function a() {} Lib.regOpaqueObject(a); assert.deepStrictEqual(Lib.write({ a }), { f: [["a", { $: "a" }]] }); Lib.regOpaqueObject(a); assert.deepStrictEqual(Lib.read({ f: [["a", { $: "a" }]] }), { a }); (function () { function a() {} Lib.regOpaqueObject(a); assert.deepStrictEqual(Lib.write({ a }), { f: [["a", { $: "a_1" }]] }); assert.deepStrictEqual(Lib.read({ f: [["a", { $: "a_1" }]] }), { a }); })(); }); it("should not serialize properties specified before its registration", function () { const obj = { prop1: "p1", [Symbol.for("sym#a")]: "s1", [Symbol.for("sym#b")]: "s2", prop2: "p2", [3]: "N3", [4]: "N4" }; Lib.regOpaqueObject(obj, "A"); obj.prop1 = "P2"; obj.prop3 = "p3"; obj[Symbol.for("sym#a")] = "S1"; obj[Symbol.for("sym#c")] = "s3"; obj[4] = "n4"; obj[5] = "n5"; assert.deepStrictEqual(Lib.write({ obj }), { f: [ [ "obj", { $: "A", f: [ ["4", "n4"], ["5", "n5"], ["prop1", "P2"], ["prop3", "p3"], [ { key: "sym#a" }, "S1" ], [ { key: "sym#c" }, "s3" ] ] } ] ] }); }); }); describe("opaque primitive value serialization", function () { it("should output object's name if registered", function () { const a = Symbol("a"); Lib.regOpaquePrim(a, "sa"); assert.ok(!a[Lib.descriptorSymbol]); assert.deepStrictEqual(Lib.write({ a }), { f: [["a", { $: "sa" }]] }); Lib.regOpaquePrim(a, "sb"); assert.deepStrictEqual(Lib.read({ f: [["a", { $: "sa" }]] }), { a }); (function () { const a = Symbol("a"); Lib.regOpaquePrim(a, "sa"); assert.deepStrictEqual(Lib.write({ a }), { f: [["a", { $: "sa_1" }]] }); assert.deepStrictEqual(Lib.read({ f: [["a", { $: "sa_1" }]] }), { a }); })(); }); }); describe("Symbols serialization", function () { it("should keep values", function () { const a1 = Symbol("a"); const a2 = Symbol("a"); const b = Symbol("b"); const g = Symbol.for("g"); const opts = { symsByName: new Map() }; const res = Lib.write({ a1, a2, b1: b, b2: b, g }, opts); assert.deepStrictEqual(res, { f: [ ["a1", { name: "a", $: "Symbol" }], ["a2", { name: "a", id: 1, $: "Symbol" }], ["b1", { name: "b", $: "Symbol" }], ["b2", { name: "b", $: "Symbol" }], ["g", { key: "g", $: "Symbol" }] ] }); const { a1: ra1, a2: ra2, b1: rb1, b2: rb2, g: rg } = Lib.read(res, opts); assert.strictEqual(a1, ra1); assert.strictEqual(a2, ra2); assert.strictEqual(b, rb1); assert.strictEqual(b, rb2); assert.strictEqual(rg, g); const { a1: la1, a2: la2, b1: lb1, b2: lb2, g: lg } = Lib.read(res, { ignore: true }); assert.notStrictEqual(a1, la1); assert.notStrictEqual(a2, la2); assert.notStrictEqual(lb1, b); assert.notStrictEqual(lb2, b); assert.strictEqual(lg, g); assert.strictEqual(lb1, lb2); assert.equal(String(la1), "Symbol(a)"); assert.equal(String(la2), "Symbol(a)"); assert.equal(String(lb1), "Symbol(b)"); assert.equal(String(lb2), "Symbol(b)"); }); }); describe("type with `$$typeof` attribute", function () { Lib.regDescriptor({ name: "hundred", typeofTag: 100, read(ctx, json) { return { $$typeof: 100 }; }, write(ctx, value) { return { $: "hundred" 
}; }, props: false }); it("should use overriden methods", function () { assert.deepStrictEqual(Lib.write({ $$typeof: 100 }), { $: "hundred" }); assert.deepStrictEqual(Lib.read({ $: "hundred" }), { $$typeof: 100 }); }); }); describe("bind function arguments", function () { it("should be serializable", function () { const obj = {}; function f1(a1, a2, a3) { return [this, a1, a2, a3]; } const a1 = {}, a2 = {}, a3 = {}; Lib.regOpaqueObject(obj, "obj"); Lib.regOpaqueObject(f1); Lib.regOpaqueObject(a1, "arg"); Lib.regOpaqueObject(a2, "arg"); const bind = Lib.bind(f1, obj, a1, a2); bind.someNum = 100; bind.rec = bind; const fjson = Lib.write({ f: bind }); assert.deepStrictEqual(fjson, { f: [ [ "f", { r: 0 } ] ], x: [ { f: [ ["someNum", 100], [ "rec", { r: 0 } ], [ { $: "#this" }, { $: "obj" } ], [ { $: "#fun" }, { $: "f1" } ], [ { $: "#args" }, [ { $: "arg" }, { $: "arg_1" } ] ] ], $: "Bind" } ] }); const f2 = Lib.read(fjson).f; assert.notStrictEqual(f1, f2); const res = f2(a3); assert.strictEqual(res.length, 4); const [robj, ra1, ra2, ra3] = res; assert.strictEqual(obj, robj); assert.strictEqual(a1, ra1); assert.strictEqual(a2, ra2); assert.strictEqual(a3, ra3); }); }); describe("RegExp", function () { it("should be serializable", function () { const re1 = /\w+/; const re2 = /ho/g; const s1 = "uho-ho-ho"; re2.test(s1); const res = Lib.write({ re1, re2 }); assert.deepEqual(res, { f: [ ["re1", { src: "\\w+", flags: "", $: "RegExp" }], ["re2", { src: "ho", flags: "g", last: 3, $: "RegExp" }] ] }); const { re1: bre1, re2: bre2 } = Lib.read(res); assert.equal(re1.src, bre1.src); assert.equal(re1.flags, bre1.flags); assert.equal(re1.lastIndex, bre1.lastIndex); assert.equal(re2.src, bre2.src); assert.equal(re2.flags, bre2.flags); assert.equal(re2.lastIndex, bre2.lastIndex); }); }); describe("not serializable values", function () { it("should throw an exception if `ignore:falsy`", function () { function A() {} try { Lib.write({ A }); } catch (e) { assert.equal(e.constructor, TypeError); assert.equal( e.message, `not serializable value "function A() {}" at "1"(A) of "A"` ); return; } assert.fail("should throw"); }); it("should be ignored if `ignore:true`", function () { function A() {} const d = Lib.write({ A }, { ignore: true }); const r = Lib.read(d); assert.deepEqual(r, {}); }); it('should register an opaque descriptor `ignore:"opaque"`', function () { function A() {} const d = Lib.write({ A, b: A }, { ignore: "opaque" }); const r = Lib.read(d); assert.deepEqual(r, { A, b: A }); }); it("should register an opaque descriptor with auto-opaque descriptor", function () { function A() {} Lib.regAutoOpaqueConstr(A, true); const a = new A(); const d = Lib.write({ a, b: a }, { ignore: "opaque" }); const r = Lib.read(d); assert.deepEqual(r, { a, b: a }); }); it('should be converted into a not usable placeholder if `ignore:"placeholder"`', function () { function A() {} const d = Lib.write({ A }, { ignore: "placeholder" }); const r = Lib.read(d); try { r.A(); } catch (e) { assert.equal(e.constructor, TypeError); assert.equal(e.message, "apply in a not restored object"); return; } assert.fail("should throw"); }); }); describe("TypedArray", function () { it("should be serializable", function () { const arr1 = new Int32Array([1, 2, 3, 4, 5]); const arr2 = new Uint32Array(arr1.buffer, 8); const d = Lib.write({ arr1, arr2 }, {}); assert.deepStrictEqual(d, { f: [ [ "arr1", { o: 0, l: 5, b: { r: 0 }, $: "Int32Array" } ], [ "arr2", { o: 8, l: 3, b: { r: 0 }, $: "Uint32Array" } ] ], x: [ { d: 
"AQAAAAIAAAADAAAABAAAAAUAAAA=", $: "ArrayBuffer" } ] }); const { arr1: rarr1, arr2: rarr2 } = Lib.read(d); assert.equal(rarr1.constructor, Int32Array); assert.equal(rarr2.constructor, Uint32Array); assert.notStrictEqual(arr1, rarr1); assert.notStrictEqual(arr2, rarr2); assert.deepStrictEqual(arr1, rarr1); assert.deepStrictEqual(arr2, rarr2); }); }); describe("WeakSet/WeakMap", function () { it("should be serializable", function () { const set = new WeakSet(); const map = new WeakMap(); const map2 = new WeakMap(); const obj1 = {}; const obj2 = {}; Lib.regOpaqueObject(obj1, "w#obj1"); set.add(obj1).add(obj2); map.set(obj1, "obj1").set(obj2, "obj2"); map2.set(obj1, "2obj1"); assert.ok(set.has(obj1)); assert.ok(map.has(obj1)); assert.strictEqual(map.get(obj1), "obj1"); assert.strictEqual(map.get({}), void 0); const d = Lib.write({ set, map, map2, obj1, obj2 }); assert.deepStrictEqual(d, { x: [{}, { $: "w#obj1" }], f: [ ["set", { v: [{ r: 0 }, { r: 1 }], $: "WeakSet" }], [ "map", { k: [{ r: 1 }, { r: 0 }], v: ["obj1", "obj2"], $: "WeakMap" } ], [ "map2", { k: [{ r: 1 }], v: ["2obj1"], $: "WeakMap" } ], ["obj1", { r: 1 }], ["obj2", { r: 0 }] ] }); const { set: rset, map: rmap, map2: rmap2, obj1: robj1, obj2: robj2 } = Lib.read(d); assert.strictEqual(robj1, obj1); assert.notStrictEqual(robj2, obj2); assert.ok(rset.has(obj1)); assert.ok(set.delete(obj1)); assert.ok(!set.has(obj1)); assert.ok(rset.has(obj1)); assert.ok(rset.has(robj2)); assert.ok(!rset.has(obj2)); assert.ok(!set.has(robj2)); assert.strictEqual(rmap.get(obj1), "obj1"); assert.strictEqual(rmap2.get(obj1), "2obj1"); assert.ok(map.delete(obj1)); assert.strictEqual(rmap.get(obj1), "obj1"); assert.ok(rset.delete(robj2)); assert.ok(!rset.has(robj2)); assert.ok(!rset.delete(robj2)); assert.ok(!rset.has(robj2)); }); }); describe("WeakSet/WeakMap workaround", function () { it("should be serializable", function () { const set = new Lib.WeakSetWorkaround(); const map = new Lib.WeakMapWorkaround(); const map2 = new Lib.WeakMapWorkaround(); const obj1 = {}; const obj2 = {}; Lib.regOpaqueObject(obj1, "w##obj1"); set.add(obj1).add(obj2); map.set(obj1, "obj1").set(obj2, "obj2"); map2.set(obj1, "2obj1"); assert.ok(set.has(obj1)); assert.ok(map.has(obj1)); assert.strictEqual(map.get(obj1), "obj1"); assert.strictEqual(map.get({}), void 0); const d = Lib.write({ set, map, map2, obj1, obj2 }); assert.deepStrictEqual(d, { f: [ [ "set", { f: [["prop", { name: "@effectful/weakset", $: "Symbol" }]], $: "WeakSet#" } ], [ "map", { f: [["prop", { name: "@effectful/weakmap", $: "Symbol" }]], $: "WeakMap#" } ], [ "map2", { f: [["prop", { name: "@effectful/weakmap", id: 1, $: "Symbol" }]], $: "WeakMap#" } ], [ "obj1", { $: "w##obj1", f: [ [{ name: "@effectful/weakset" }, true, 2], [{ name: "@effectful/weakmap" }, "obj1", 2], [{ name: "@effectful/weakmap", id: 1 }, "2obj1", 2] ] } ], [ "obj2", { f: [ [{ name: "@effectful/weakset" }, true, 2], [{ name: "@effectful/weakmap" }, "obj2", 2] ] } ] ] }); const { set: rset, map: rmap, map2: rmap2, obj1: robj1, obj2: robj2 } = Lib.read(d); assert.strictEqual(robj1, obj1); assert.notStrictEqual(robj2, obj2); assert.ok(rset.has(obj1)); assert.ok(set.delete(obj1)); assert.ok(!set.has(obj1)); assert.ok(rset.has(obj1)); assert.ok(rset.has(robj2)); assert.ok(!rset.has(obj2)); assert.ok(!set.has(robj2)); assert.strictEqual(rmap.get(obj1), "obj1"); assert.strictEqual(rmap2.get(obj1), "2obj1"); assert.ok(map.delete(obj1)); assert.strictEqual(rmap.get(obj1), "obj1"); assert.ok(rset.delete(robj2)); assert.ok(!rset.has(robj2)); 
assert.ok(!rset.delete(robj2)); assert.ok(!rset.has(robj2)); }); }); describe("BigInt", function () { it("should be serializable", function () { const num = 2n ** 10000n; const doc = Lib.write({ num }); assert.equal(doc.f[0][0], "num"); assert.ok(doc.f[0][1].int.substr); assert.equal(doc.f[0][1].int.length, 3011); assert.strictEqual(Lib.read(doc).num, num); }); });<|fim▁end|>
<|file_name|>test_run.py<|end_file_name|><|fim▁begin|>##################################################################################### # # Copyright (c) Crossbar.io Technologies GmbH # # Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g. # you have purchased a commercial license), the license terms below apply. # # Should you enter into a separate license agreement after having received a copy of # this software, then the terms of such license agreement replace the terms below at # the time at which such license agreement becomes effective. # # In case a separate license agreement ends, and such agreement ends without being # replaced by another separate license agreement, the license terms below apply # from the time at which said agreement ends. # # LICENSE TERMS # # This program is free software: you can redistribute it and/or modify it under the # terms of the GNU Affero General Public License, version 3, as published by the # Free Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU Affero General Public License Version 3 for more details. # # You should have received a copy of the GNU Affero General Public license along # with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>. # ##################################################################################### from __future__ import absolute_import, division, print_function import json import os import sys from six import PY3 from twisted.internet.selectreactor import SelectReactor from twisted.internet.task import LoopingCall from crossbar.controller import cli from .test_cli import CLITestBase # Turn this to `True` to print the stdout/stderr of the Crossbars spawned DEBUG = False def make_lc(self, reactor, func): if DEBUG: self.stdout_length = 0 self.stderr_length = 0 def _(lc, reactor): if DEBUG: stdout = self.stdout.getvalue() stderr = self.stderr.getvalue() if self.stdout.getvalue()[self.stdout_length:]: print(self.stdout.getvalue()[self.stdout_length:], file=sys.__stdout__) if self.stderr.getvalue()[self.stderr_length:]: print(self.stderr.getvalue()[self.stderr_length:], file=sys.__stderr__) self.stdout_length = len(stdout) self.stderr_length = len(stderr) return func(lc, reactor) lc = LoopingCall(_) lc.a = (lc, reactor) lc.clock = reactor lc.start(0.1) return lc class ContainerRunningTests(CLITestBase): def setUp(self): CLITestBase.setUp(self) # Set up the configuration directories self.cbdir = os.path.abspath(self.mktemp()) os.mkdir(self.cbdir) self.config = os.path.abspath(os.path.join(self.cbdir, "config.json")) self.code_location = os.path.abspath(self.mktemp()) os.mkdir(self.code_location) def _start_run(self, config, app, stdout_expected, stderr_expected, end_on): with open(self.config, "wb") as f: f.write(json.dumps(config, ensure_ascii=False).encode('utf8')) with open(self.code_location + "/myapp.py", "w") as f: f.write(app) reactor = SelectReactor() make_lc(self, reactor, end_on) # In case it hard-locks reactor.callLater(self._subprocess_timeout, reactor.stop) cli.run("crossbar", ["start", "--cbdir={}".format(self.cbdir), "--logformat=syslogd"], reactor=reactor) out = self.stdout.getvalue() err = self.stderr.getvalue() for i in stdout_expected: if i not in out: self.fail(u"Error: '{}' not in:\n{}".format(i, out)) for i in stderr_expected: if i not in err: self.fail(u"Error: '{}' not 
in:\n{}".format(i, err)) def test_start_run(self): """ A basic start, that enters the reactor. """ expected_stdout = [ "Entering reactor event loop", "Loaded the component!" ] expected_stderr = [] def _check(lc, reactor): if "Loaded the component!" in self.stdout.getvalue(): lc.stop() try: reactor.stop() except: pass config = { "controller": { }, "workers": [ { "type": "router", "options": { "pythonpath": ["."] }, "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "directory": ".", "type": "static" }, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """#!/usr/bin/env python from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession from autobahn.wamp.exception import ApplicationError class MySession(ApplicationSession): log = Logger() def onJoin(self, details): self.log.info("Loaded the component!") """ self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_start_run_guest(self): """ A basic start of a guest. """ expected_stdout = [ "Entering reactor event loop", "Loaded the component!" ] expected_stderr = [] def _check(lc, reactor): if "Loaded the component!" in self.stdout.getvalue(): lc.stop() try: reactor.stop() except: pass config = { "controller": { }, "workers": [ { "type": "router", "options": { "pythonpath": ["."] }, "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "directory": ".", "type": "static" }, "ws": { "type": "websocket" } } } ] }, { "type": "guest", "executable": sys.executable, "arguments": [os.path.join(self.code_location, "myapp.py")] } ] } myapp = """#!/usr/bin/env python print("Loaded the component!") """ self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_start_utf8_logging(self): """ Logging things that are UTF8 but not Unicode should work fine. 
""" expected_stdout = [ "Entering reactor event loop", u"\u2603" ] expected_stderr = [] def _check(lc, reactor): if u"\u2603" in self.stdout.getvalue(): lc.stop() try: reactor.stop() except: pass config = { "controller": { }, "workers": [ { "type": "router", "options": { "pythonpath": ["."] }, "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "directory": ".", "type": "static" }, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """#!/usr/bin/env python from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession from autobahn.wamp.exception import ApplicationError class MySession(ApplicationSession): log = Logger() def onJoin(self, details): self.log.info(u"\\u2603") """ self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_run_exception_utf8(self): """ Raising an ApplicationError with Unicode will raise that error through to the caller. """ config = { "workers": [ { "type": "router", "realms": [<|fim▁hole|> "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." 
}, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """from __future__ import absolute_import, print_function from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession from autobahn.wamp.exception import ApplicationError from twisted.internet.defer import inlineCallbacks class MySession(ApplicationSession): log = Logger() @inlineCallbacks def onJoin(self, details): def _err(): raise ApplicationError(u"com.example.error.form_error", u"\\u2603") e = yield self.register(_err, u'com.example.err') try: yield self.call(u'com.example.err') except ApplicationError as e: assert e.args[0] == u"\\u2603" print("Caught error:", e) except: print('other err:', e) self.log.info("Loaded the component") """ if PY3: expected_stdout = ["Loaded the component", "\u2603", "Caught error:"] else: expected_stdout = ["Loaded the component", "\\u2603", "Caught error:"] expected_stderr = [] def _check(lc, reactor): if "Loaded the component" in self.stdout.getvalue(): lc.stop() try: reactor.stop() except: pass self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure1(self): config = { "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." }, "ws": { "type": "websocket" } } } ] }, { "type": "container", "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession class MySession(ApplicationSession): log = Logger() def __init__(self, config): self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) def onJoin(self, details): self.log.info("MySession.onJoin()") """ expected_stdout = [] expected_stderr = ["No module named"] def _check(_1, _2): pass self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure2(self): config = { "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." 
}, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession2", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """ from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession class MySession(ApplicationSession): log = Logger() def __init__(self, config): self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) def onJoin(self, details): self.log.info("MySession.onJoin()") """ def _check(_1, _2): pass expected_stdout = [] if sys.version_info >= (3, 5): expected_stderr = ["module 'myapp' has no attribute 'MySession2'"] else: expected_stderr = ["'module' object has no attribute 'MySession2'"] self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure3(self): config = { "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." }, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """ from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession class MySession(ApplicationSession): log = Logger() def __init__(self, config): a = 1 / 0 self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) def onJoin(self, details): self.log.info("MySession.onJoin()") """ def _check(_1, _2): pass expected_stdout = [] expected_stderr = ["Component instantiation failed"] if PY3: expected_stderr.append("division by zero") else: expected_stderr.append("integer division") expected_stderr.append("by zero") self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure4(self): config = { "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." 
}, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """ from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession class MySession(ApplicationSession): log = Logger() def __init__(self, config): self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) def onJoin(self, details): self.log.info("MySession.onJoin()") a = 1 / 0 # trigger exception """ def _check(_1, _2): pass expected_stdout = [] expected_stderr = ["Fatal error in component", "While firing onJoin"] if PY3: expected_stderr.append("division by zero") else: expected_stderr.append("integer division") expected_stderr.append("by zero") self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure5(self): config = { "controller": { }, "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." }, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """ from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession class MySession(ApplicationSession): log = Logger() def __init__(self, config): self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) def onJoin(self, details): self.log.info("MySession.onJoin()") self.leave() def onLeave(self, details): self.log.info("Session ended: {details}", details=details) self.disconnect() """ def _check(_1, _2): pass expected_stdout = [] expected_stderr = [ "Component 'component1' failed to start; shutting down node." ] self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure6(self): config = { "controller": { }, "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." 
}, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8080 }, "url": "ws://127.0.0.1:8080/ws" } } ] } ] } myapp = """ from twisted.logger import Logger from twisted.internet.defer import inlineCallbacks from autobahn.twisted.wamp import ApplicationSession from autobahn.twisted.util import sleep class MySession(ApplicationSession): log = Logger() def __init__(self, config): self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) @inlineCallbacks def onJoin(self, details): self.log.info("MySession.onJoin()") self.log.info("Sleeping a couple of secs and then shutting down ..") yield sleep(2) self.leave() def onLeave(self, details): self.log.info("Session ended: {details}", details=details) self.disconnect() """ def _check(_1, _2): pass expected_stdout = [ "Session ended: CloseDetails", "Sleeping a couple of secs and then shutting down", "Container is hosting no more components: shutting down" ] expected_stderr = [] self._start_run(config, myapp, expected_stdout, expected_stderr, _check) def test_failure7(self): config = { "workers": [ { "type": "router", "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "*", "publish": True, "subscribe": True, "call": True, "register": True } ] } ] } ], "transports": [ { "type": "web", "endpoint": { "type": "tcp", "port": 8080 }, "paths": { "/": { "type": "static", "directory": ".." }, "ws": { "type": "websocket" } } } ] }, { "type": "container", "options": { "pythonpath": [self.code_location] }, "components": [ { "type": "class", "classname": "myapp.MySession", "realm": "realm1", "transport": { "type": "websocket", "endpoint": { "type": "tcp", "host": "127.0.0.1", "port": 8090 }, "url": "ws://127.0.0.1:8090/ws" } } ] } ] } myapp = """ from twisted.logger import Logger from autobahn.twisted.wamp import ApplicationSession class MySession(ApplicationSession): log = Logger() def __init__(self, config): self.log.info("MySession.__init__()") ApplicationSession.__init__(self, config) def onJoin(self, details): self.log.info("MySession.onJoin()") self.leave() """ def _check(_1, _2): pass expected_stdout = [] expected_stderr = [ ("Could not connect container component to router - transport " "establishment failed") ] self._start_run(config, myapp, expected_stdout, expected_stderr, _check) class InitTests(CLITestBase): def test_hello(self): def _check(lc, reactor): if "published to 'oncounter'" in self.stdout.getvalue(): lc.stop() try: reactor.stop() except: pass appdir = self.mktemp() cbdir = os.path.join(appdir, ".crossbar") reactor = SelectReactor() cli.run("crossbar", ["init", "--appdir={}".format(appdir), "--template=hello:python"], reactor=reactor) self.assertIn("Application template initialized", self.stdout.getvalue()) reactor = SelectReactor() make_lc(self, reactor, _check) # In case it hard-locks reactor.callLater(self._subprocess_timeout, reactor.stop) cli.run("crossbar", ["start", "--cbdir={}".format(cbdir.path), "--logformat=syslogd"], reactor=reactor) stdout_expected = ["published to 'oncounter'"] for i in stdout_expected: self.assertIn(i, self.stdout.getvalue()) if not os.environ.get("CB_FULLTESTS"): del ContainerRunningTests del InitTests<|fim▁end|>
{ "name": "realm1",
<|file_name|>decorators.py<|end_file_name|><|fim▁begin|>import logging
from functools import wraps

from requests.exceptions import HTTPError

from django.utils.decorators import available_attrs
from django.conf import settings

from authclient import _get_user_session_key, SESSION_KEY
from authclient.client import auth_client

logger = logging.getLogger('authclient')

def app_auth_exempt(function=None):
    def decorator(view_func):
        @wraps(view_func)<|fim▁hole|>
        _wrapped.app_auth_exempt = True
        return _wrapped
    if function:
        return decorator(function)
    return decorator

def refresh_jwt(view_func):
    """
    Decorator that refreshes the JWT stored in the session after the
    wrapped view has produced its response.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view_func(request, *args, **kwargs):
        response = view_func(request, *args, **kwargs)
        try:
            resource_token = _get_user_session_key(request)
        except KeyError:
            pass
        else:
            try:
                resource_token = auth_client.token_refresh.call(
                    payload={'token': resource_token},
                    headers={'X-APPLICATION': settings.AUTH_API_TOKEN},
                )['resource_token']
            except HTTPError:
                logger.debug('Failed to refresh the JWT.')
            else:
                request.session[SESSION_KEY] = resource_token
        return response
    return _wrapped_view_func<|fim▁end|>
def _wrapped(request, *args, **kwargs): return view_func(request, *args, **kwargs)
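The two decorators above follow the usual Django view-decorator pattern. A hypothetical usage sketch — the import path and view bodies are invented for illustration:

```python
from django.http import HttpResponse

# Import path assumed; these are the decorators defined in the row above.
from authclient.decorators import app_auth_exempt, refresh_jwt

@app_auth_exempt
def health_check(request):
    # flagged via _wrapped.app_auth_exempt = True so auth middleware can skip it
    return HttpResponse("ok")

@refresh_jwt
def dashboard(request):
    # the session's JWT is re-issued after this view returns its response
    return HttpResponse("dashboard")
```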
<|file_name|>robotinspect.py<|end_file_name|><|fim▁begin|># Copyright 2008-2013 Nokia Siemens Networks Oyj # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys if sys.platform.startswith('java'): from org.python.core import PyReflectedFunction, PyReflectedConstructor def is_java_init(init): return isinstance(init, PyReflectedConstructor) def is_java_method(method): func = method.im_func if hasattr(method, 'im_func') else method return isinstance(func, PyReflectedFunction) <|fim▁hole|> return False def is_java_method(method): return False<|fim▁end|>
else: def is_java_init(init):
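The module above exposes Jython-aware predicates with CPython no-op fallbacks. A small hypothetical check using them — the import path follows the file name, and the class is invented:

```python
from robotinspect import is_java_init, is_java_method  # module shown above

class Handler(object):
    def run(self):
        pass

# On CPython both predicates are the stub fallbacks and return False;
# under Jython they detect PyReflectedConstructor / PyReflectedFunction.
print(is_java_init(Handler.__init__))  # False on CPython
print(is_java_method(Handler.run))     # False on CPython
```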
<|file_name|>NavigationBar.qunit.js<|end_file_name|><|fim▁begin|>/*global QUnit */ sap.ui.define([ "sap/ui/qunit/QUnitUtils", "sap/ui/qunit/utils/createAndAppendDiv", "sap/ui/ux3/NavigationBar", "sap/ui/thirdparty/jquery", "sap/ui/ux3/NavigationItem" ], function(qutils, createAndAppendDiv, NavigationBar, jQuery, NavigationItem) { "use strict"; // prepare DOM createAndAppendDiv("uiArea1").style.marginTop = "10px"; var styleElement = document.createElement("style"); styleElement.textContent = ".sapUiUx3NavBarArrow {" + " /* Enable testing of the arrow, even though it is not used outside the shell in BC */" + " display: inline-block !important;" + "}"; document.head.appendChild(styleElement); var expectedItemId; function eventHandler(oEvent) { var itemId = oEvent.getParameter("itemId"); QUnit.config.current.assert.equal(itemId, expectedItemId, "the item ID should be the one of the clicked item: " + expectedItemId); var item = oEvent.getParameter("item"); QUnit.config.current.assert.ok(item, "item should be given as well"); QUnit.config.current.assert.equal(item.getId(), expectedItemId, "the item's ID should be the one of the clicked item: " + expectedItemId); } var oCtrl = new NavigationBar("n1", {select:eventHandler}); oCtrl.placeAt("uiArea1"); QUnit.test("Initial Check", function(assert) { assert.ok(oCtrl, "NavBar should exist after creating"); var oDomRef = window.document.getElementById("n1"); assert.ok(oDomRef, "NavBar root element should exist in the page"); }); var item1 = new NavigationItem({text:"Item 1"}); var item2 = new NavigationItem({text:"Item 2", href:"http://item2.org/"}); var item3 = new NavigationItem({text:"Item 3"}); QUnit.test("Add Items", function(assert) { oCtrl.addItem(item1); oCtrl.addItem(item2); oCtrl.addItem(item3); sap.ui.getCore().applyChanges(); var oDomRef = item1.getDomRef(); assert.ok(oDomRef, "Item element should exist in the page"); assert.equal(jQuery(oDomRef).text(), "Item 1", "Item 1 text should be written to the page"); var $list = oCtrl.$("list"); assert.equal($list.children().length, 5, "3 items plus the selection arrow plus dummy should be in the NavigationBar"); assert.equal($list.children(":eq(3)").text(), "Item 3", "The text of the third item should be rendered inside the NavigationBar"); var $items = jQuery(".sapUiUx3NavBarItem"); assert.equal($items.length, 4, "3 items plus the dummy should be in the page"); assert.equal($items[2].getAttribute("href"), "http://item2.org/", "item 2 should have a URL set as href"); assert.equal($items[3].getAttribute("href"), "#", "item 3 should have no URL set as href"); }); QUnit.test("Select Item", function(assert) { oCtrl.setSelectedItem(item2); assert.equal(oCtrl.getSelectedItem(), item2.getId(), "item 2 should be selected"); var $selItems = jQuery(".sapUiUx3NavBarItemSel"); assert.equal($selItems.length, 1, "1 item should be selected"); assert.equal($selItems.children()[0].id, item2.getId(), "DOM element marked as selected should be the one with the same ID as the selected item"); }); var firstPos; QUnit.test("Selection Arrow", function(assert) { var done = assert.async(); var arrow = oCtrl.getDomRef("arrow"); assert.ok(arrow, "there should be one selection arrow"); setTimeout(function(){ var item = item2.getDomRef(); var left = item.offsetLeft; var width = item.offsetWidth; var right = left + 3 * width / 5; left = left + 2 * width / 5; var arrowPos = arrow.offsetLeft + (arrow.offsetWidth / 2); firstPos = arrowPos;<|fim▁hole|> done(); }, 600); }); QUnit.test("Selection Arrow Animation", function(assert) { 
var done = assert.async(); oCtrl.setSelectedItem(item3); var arrow = oCtrl.getDomRef("arrow"); setTimeout(function(){ var arrowPos = arrow.offsetLeft + (arrow.offsetWidth / 2); assert.ok(arrowPos > firstPos, "arrow should have moved to the right a bit in the middle of the animation (from " + firstPos + ", now " + arrowPos + ")"); setTimeout(function() { var newArrowPos = arrow.offsetLeft + (arrow.offsetWidth / 2); var item = item3.getDomRef(); var left = item.offsetLeft; var width = item.offsetWidth; var right = left + 3 * width / 5; left = left + 2 * width / 5; assert.ok(newArrowPos > arrowPos, "arrow should have moved further to the right after the animation (from " + arrowPos + ", now " + newArrowPos + ")"); assert.ok(newArrowPos > left && newArrowPos < right, "arrow position (" + newArrowPos + ") should be around the center of the newly selected item (between " + left + " and " + right + ")"); done(); }, 400); }, 300); }); QUnit.test("Item selection (mouse)", function(assert) { var done = assert.async(); assert.expect(5); // including event handler var oldSel = oCtrl.getSelectedItem(); assert.equal(oldSel, item3.getId(), "item 3 should be selected"); // make sure previous selection is right // click first item var target = item1.getDomRef(); expectedItemId = item1.getId(); qutils.triggerMouseEvent(target, "click"); // wait selection animation to be finished setTimeout(function(){ var newSel = oCtrl.getSelectedItem(); assert.equal(newSel, item1.getId(), "item 1 should be selected after clicking it"); // make sure selection is right after clicking done(); }, 600); }); QUnit.test("Render With Association", function(assert) { oCtrl.removeAllItems(); sap.ui.getCore().applyChanges(); var $list = oCtrl.$("list"); assert.equal($list.children().length, 2, "no items except for dummy and arrow should be in the NavigationBar"); oCtrl.addAssociatedItem(item1); oCtrl.addAssociatedItem(item2); oCtrl.addAssociatedItem(item3); sap.ui.getCore().applyChanges(); var oDomRef = item1.getDomRef(); assert.ok(oDomRef, "Item element should exist in the page"); assert.equal(jQuery(oDomRef).text(), "Item 1", "Item 1 text should be written to the page"); $list = oCtrl.$("list"); assert.equal($list.children().length, 5, "3 items plus dummy plus the selection arrow should be in the NavigationBar"); assert.equal($list.children(":eq(3)").text(), "Item 3", "The text of the third item should be rendered inside the NavigationBar"); }); QUnit.test("isSelectedItemValid", function(assert) { oCtrl.setSelectedItem(item1); var valid = oCtrl.isSelectedItemValid(); assert.equal(valid, true, "item1 is a valid selection"); oCtrl.setSelectedItem("item4"); valid = oCtrl.isSelectedItemValid(); assert.equal(valid, false, "the ID 'item4' is not a valid selection"); oCtrl.setSelectedItem(item1.getId()); valid = oCtrl.isSelectedItemValid(); assert.equal(valid, true, "the ID 'item1' is a valid selection"); }); QUnit.test("enabaling the overflowItemsToUppercase", function (assert) { var oOverflowMenu = oCtrl._getOverflowMenu(); assert.strictEqual(oCtrl.getOverflowItemsToUpperCase(), false, "the property is disabled by default"); oCtrl.setOverflowItemsToUpperCase(true); sap.ui.getCore().applyChanges(); assert.strictEqual(oOverflowMenu.hasStyleClass("sapUiUx3NavBarUpperCaseText"), true, "the items in the menu are uppercased"); oCtrl.setOverflowItemsToUpperCase(false); sap.ui.getCore().applyChanges(); assert.strictEqual(oOverflowMenu.hasStyleClass("sapUiUx3NavBarUpperCaseText"), false, "the items in the menu are with their original case"); 
});

QUnit.test("Overflow", function(assert) {
	var done = assert.async();
	jQuery(document.getElementById("uiArea1")).css("width", "80px");
	setTimeout(function(){
		assert.equal(isForwardVisible(), true, "forward arrow should be visible");
		assert.equal(isForwardEnabled(), true, "forward arrow should be enabled");
		assert.equal(isBackVisible(), true, "back arrow should be visible");
		assert.equal(isBackEnabled(), false, "back arrow should not be enabled");

		jQuery(document.getElementById("uiArea1")).css("width", "800px");
		setTimeout(function(){
			assert.equal(isForwardVisible(), false, "forward arrow should not be visible");
			assert.equal(isBackVisible(), false, "back arrow should not be visible");

			oCtrl.addAssociatedItem(new NavigationItem({text:"Item with some quite long text to cause overflow 4"}));
			oCtrl.addAssociatedItem(new NavigationItem({text:"Item with some quite long text to cause overflow 5"}));
			setTimeout(function(){
				assert.equal(isForwardVisible(), true, "forward arrow should be visible");
				assert.equal(isForwardEnabled(), true, "forward arrow should be enabled");
				assert.equal(isBackVisible(), true, "back arrow should be visible");
				assert.equal(isBackEnabled(), false, "back arrow should not be enabled");
				done();
			}, 500);
		}, 500);
	}, 500);
});

QUnit.test("Scrolling + Overflow", function(assert) {
	var done = assert.async();
	assert.equal(oCtrl.getDomRef("list").scrollLeft, 0, "list should not be scrolled initially");

	// click the forward scroll arrow
	var target = oCtrl.$("off");
	qutils.triggerMouseEvent(target, "click");
	setTimeout(function(){
		assert.equal(isForwardVisible(), true, "forward arrow should be visible");
		assert.equal(isBackVisible(), true, "back arrow should be visible");
		assert.ok(oCtrl.getDomRef("list").scrollLeft != 0, "list should be scrolled now");

		// scroll to end
		qutils.triggerMouseEvent(target, "click");
		setTimeout(function(){
			assert.equal(isForwardVisible(), true, "forward arrow should be visible");
			assert.equal(isForwardEnabled(), false, "forward arrow should not be enabled");
			assert.equal(isBackVisible(), true, "back arrow should be visible");
			assert.equal(isBackEnabled(), true, "back arrow should be enabled");

			// scroll to the beginning again
			target = oCtrl.getDomRef("ofb");
			qutils.triggerMouseEvent(target, "click");
			setTimeout(function(){
				qutils.triggerMouseEvent(target, "click");
				setTimeout(function(){
					assert.equal(isForwardVisible(), true, "forward arrow should be visible");
					assert.equal(isForwardEnabled(), true, "forward arrow should be enabled");
					assert.equal(isBackVisible(), true, "back arrow should be visible");
					assert.equal(isBackEnabled(), false, "back arrow should not be enabled");
					assert.equal(oCtrl.getDomRef("list").scrollLeft, 0, "list should not be scrolled now");
					done();
				}, 600);
			}, 600);
		}, 600);
	}, 600);
});

function isForwardVisible() {
	return oCtrl.$("off").is(":visible");
}
function isBackVisible() {
	return oCtrl.$("ofb").is(":visible");
}
function isForwardEnabled() {
	return oCtrl.$("off").is(":visible") && oCtrl.$().hasClass("sapUiUx3NavBarScrollForward");
}
function isBackEnabled() {
	return oCtrl.$("ofb").is(":visible") && oCtrl.$().hasClass("sapUiUx3NavBarScrollBack");
}
});<|fim▁end|>
assert.ok(arrowPos > left && arrowPos < right, "arrow position (" + arrowPos + ") should be around the center of the selected item (between " + left + " and " + right + ")");
<|file_name|>oobfuncs.py<|end_file_name|><|fim▁begin|>"""
OOB configuration.

This module should be included in (or replace) the default module set in
settings.OOB_PLUGIN_MODULES. All functions defined in this module are made
available to be called by the OOB handler.

<|fim▁hole|>
function execution - the oob protocol can execute a function directly on
                     the server. The available functions must be defined
                     as global functions via settings.OOB_PLUGIN_MODULES.
repeat func execution - the oob protocol can request a given function be
                        executed repeatedly at a regular interval. This
                        uses an internal script pool.
tracking - the oob protocol can request Evennia to track changes to
           fields on objects, as well as changes in Attributes. This is
           done by dynamically adding tracker-objects on entities. The
           behaviour of those objects can be customized via
           settings.OOB_PLUGIN_MODULES.

oob functions have the following call signature:
    function(caller, session, *args, **kwargs)

oob trackers should inherit from the OOBTracker class in
src/server/oob_msdp.py and implement a minimum of the same functionality.

A global function oob_error will be used as optional error management.
"""
# import the contents of the default msdp module
from src.server.oob_cmds import *<|fim▁end|>
See src/server/oob_msdp.py for more information.
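For reference, an OOB plugin function matching the call signature documented in the oobfuncs.py docstring above might look like the sketch below. This is a hypothetical illustration, not actual Evennia source: the name oob_echo and the session.msg(oob=...) reply call are assumptions made for the example, and oob_error appears only because the docstring names it as the optional error hook.

# A minimal sketch of an OOB plugin function, assuming the documented
# signature function(caller, session, *args, **kwargs). The function name
# and the session.msg(oob=...) reply mechanism are hypothetical.
def oob_echo(caller, session, *args, **kwargs):
    """Echo the received positional and keyword arguments back to the client."""
    try:
        session.msg(oob=("echo", args, kwargs))  # assumed reply call
    except Exception as err:
        # oob_error is the optional error hook named in the docstring above
        oob_error(caller, session, str(err))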
<|file_name|>column_puyo_list.rs<|end_file_name|><|fim▁begin|>use color::PuyoColor;
use small_int_set::SmallIntSet;

const MAX_SIZE: usize = 8;

pub struct ColumnPuyoList {
    size: [usize; 6],
    puyo: [[PuyoColor; MAX_SIZE]; 6],
    place_holders: [SmallIntSet; 6],
}

impl ColumnPuyoList {
    pub fn new() -> ColumnPuyoList {
        ColumnPuyoList {
            size: [0; 6],
            puyo: [[PuyoColor::EMPTY; MAX_SIZE]; 6],
            place_holders: [SmallIntSet::new(); 6],
        }
    }

    fn is_place_holder(c: PuyoColor) -> bool {
        c == PuyoColor::IRON
    }

    pub fn size_on(&self, x: usize) -> usize {
        self.size[x - 1]
    }

    pub fn is_empty(&self) -> bool {
        self.size() == 0
    }

    pub fn size(&self) -> usize {
        self.size[0] + self.size[1] + self.size[2] + self.size[3] + self.size[4] + self.size[5]
    }

    pub fn top(&self, x: usize) -> Option<PuyoColor> {
        if self.size_on(x) == 0 {
            None
        } else {
            Some(self.puyo[x - 1][self.size_on(x) - 1])
        }
    }

    pub fn get(&self, x: usize, i: usize) -> PuyoColor {
        debug_assert!(i < self.size_on(x));
        self.puyo[x - 1][i]
    }

    pub fn has_place_holder(&self) -> bool {
        for i in 0..6 {
            if !self.place_holders[i].is_empty() {
                return true;
            }
        }
        false
    }

    /// Adds PuyoColor `c` on column `x`.
    /// Returns true if it succeeded and false otherwise.
    /// On failure, `self` is left unchanged.
    pub fn add(&mut self, x: usize, c: PuyoColor) -> bool {
        if MAX_SIZE <= self.size[x - 1] {
            return false;
        }
        if ColumnPuyoList::is_place_holder(c) {
            self.place_holders[x - 1].set(self.size[x - 1]);
        }
        self.puyo[x - 1][self.size[x - 1]] = c;
        self.size[x - 1] += 1;
        true
    }

    /// Adds `n` puyos of PuyoColor `c` on column `x`.
    /// Returns true if it succeeded and false otherwise.
    /// On failure, `self` is left unchanged.
    pub fn add_multi(&mut self, x: usize, c: PuyoColor, n: usize) -> bool {
        if MAX_SIZE < self.size_on(x) + n {
            return false;
        }
        for i in 0..n {
            if ColumnPuyoList::is_place_holder(c) {
                self.place_holders[x - 1].set(self.size[x - 1] + i);
            }
            self.puyo[x - 1][self.size[x - 1] + i] = c;
        }
        self.size[x - 1] += n;
        true
    }

    /// Removes the top puyo from column `x`.
    pub fn remove_top(&mut self, x: usize) {
        if self.size_on(x) == 0 {
            return;
        }
        let c = self.puyo[x - 1][self.size[x - 1] - 1];
        if ColumnPuyoList::is_place_holder(c) {
            self.place_holders[x - 1].unset(self.size[x - 1] - 1);
        }
        self.size[x - 1] -= 1;
    }

    pub fn merge(&mut self, cpl: &ColumnPuyoList) -> bool {
        // Capacity check: incoming puyos fill existing place holders first,
        // so only the surplus beyond the place holders adds to the column height.
        for i in 0..6 {
            if cpl.size[i] >= self.place_holders[i].size() && MAX_SIZE < self.size[i] + (cpl.size[i] - self.place_holders[i].size()) {
                return false;
            }
        }

        for i in 0..6 {
            if cpl.size[i] < self.place_holders[i].size() {
                // Fewer incoming puyos than place holders: drop the lowest
                // surplus place holders, then fill the remaining higher ones.
                let discard = self.place_holders[i].size() - cpl.size[i];
                for _ in 0..discard {
                    self.place_holders[i].remove_smallest();
                }
                for j in 0..cpl.size[i] {
                    let k = self.place_holders[i].smallest();
                    self.place_holders[i].remove_smallest();
                    self.puyo[i][k] = cpl.puyo[i][j];
                }
            } else {
                // Fill all place holders bottom-up, then append the rest on top.
                let mut j = 0;
                while !self.place_holders[i].is_empty() {
                    let k = self.place_holders[i].smallest();
                    self.place_holders[i].remove_smallest();
                    self.puyo[i][k] = cpl.puyo[i][j];
                    j += 1;
                }
                while j < cpl.size[i] {
                    self.puyo[i][self.size[i]] = cpl.puyo[i][j];
                    self.size[i] += 1;
                    j += 1;
                }
            }

            // Rebuild the place-holder set from the merged column. This must
            // scan `self`, not `cpl`: the indices refer to positions in this
            // list, and `cpl` may be shorter than the merged column.
            let mut new_place_holder = SmallIntSet::new();
            for j in 0..self.size[i] {
                if ColumnPuyoList::is_place_holder(self.puyo[i][j]) {
                    new_place_holder.set(j);
                }
            }
            self.place_holders[i] = new_place_holder;
        }
        true
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use color::PuyoColor;

    #[test]
    fn test_constructor() {
        let cpl = ColumnPuyoList::new();
        assert_eq!(cpl.size(), 0);
        assert!(cpl.is_empty());
    }

    #[test]
    fn test_add() {
        let mut cpl = ColumnPuyoList::new();
        cpl.add(1, PuyoColor::RED);
        cpl.add(1, PuyoColor::BLUE);
        cpl.add(2, PuyoColor::YELLOW);
        assert_eq!(cpl.size_on(1), 2);
        assert_eq!(cpl.size_on(2), 1);
        assert_eq!(cpl.size_on(3), 0);
        assert_eq!(cpl.size_on(4), 0);
        assert_eq!(cpl.size_on(5), 0);
        assert_eq!(cpl.size_on(6), 0);
        assert_eq!(cpl.top(1), Some(PuyoColor::BLUE));
        assert_eq!(cpl.top(2), Some(PuyoColor::YELLOW));
        assert_eq!(cpl.top(3), None);
    }

    #[test]
    fn test_place_holder() {
        let mut cpl = ColumnPuyoList::new();
        assert!(!cpl.has_place_holder());

        cpl.add(1, PuyoColor::RED);
        assert!(!cpl.has_place_holder());

<|fim▁hole|>
        cpl.remove_top(1);
        assert!(!cpl.has_place_holder());
    }

    #[test]
    fn test_merge() {
        let mut cpl = ColumnPuyoList::new();

        let mut cpl1 = ColumnPuyoList::new();
        assert!(cpl1.add_multi(3, PuyoColor::RED, 2));
        let mut cpl2 = ColumnPuyoList::new();
        assert!(cpl2.add_multi(3, PuyoColor::BLUE, 2));
        let mut cpl3 = ColumnPuyoList::new();
        assert!(cpl3.add_multi(3, PuyoColor::BLUE, 8));

        assert!(cpl.merge(&cpl1));
        assert!(cpl.merge(&cpl2));
        assert_eq!(4, cpl.size());

        assert!(!cpl.merge(&cpl3));
        assert_eq!(4, cpl.size());
    }

    #[test]
    fn test_merge_with_place_holders_1() {
        let mut cpl1 = ColumnPuyoList::new();
        assert!(cpl1.add(3, PuyoColor::IRON));
        assert!(cpl1.add(3, PuyoColor::IRON));
        assert!(cpl1.add(3, PuyoColor::IRON));
        assert!(cpl1.add(3, PuyoColor::RED));
        assert!(cpl1.add(3, PuyoColor::RED));
        assert!(cpl1.add(3, PuyoColor::RED));

        let mut cpl2 = ColumnPuyoList::new();
        assert!(cpl2.add(3, PuyoColor::IRON));
        assert!(cpl2.add(3, PuyoColor::IRON));
        assert!(cpl2.add(3, PuyoColor::YELLOW));
        assert!(cpl2.add(3, PuyoColor::YELLOW));

        assert!(cpl1.merge(&cpl2));
        assert_eq!(7, cpl1.size());
        assert_eq!(7, cpl1.size_on(3));
        assert_eq!(PuyoColor::IRON, cpl1.get(3, 0));
        assert_eq!(PuyoColor::IRON, cpl1.get(3, 1));
        assert_eq!(PuyoColor::YELLOW, cpl1.get(3, 2));
        assert_eq!(PuyoColor::RED, cpl1.get(3, 3));
        assert_eq!(PuyoColor::RED, cpl1.get(3, 4));
        assert_eq!(PuyoColor::RED, cpl1.get(3, 5));
        assert_eq!(PuyoColor::YELLOW, cpl1.get(3, 6));
    }
#[test] fn test_merge_with_place_holders_2() { let mut cpl1 = ColumnPuyoList::new(); assert!(cpl1.add(3, PuyoColor::IRON)); assert!(cpl1.add(3, PuyoColor::IRON)); assert!(cpl1.add(3, PuyoColor::IRON)); assert!(cpl1.add(3, PuyoColor::RED)); assert!(cpl1.add(3, PuyoColor::RED)); assert!(cpl1.add(3, PuyoColor::RED)); let mut cpl2 = ColumnPuyoList::new(); assert!(cpl2.add(3, PuyoColor::YELLOW)); assert!(cpl2.add(3, PuyoColor::YELLOW)); assert!(cpl1.merge(&cpl2)); assert_eq!(6, cpl1.size()); assert_eq!(6, cpl1.size_on(3)); assert_eq!(PuyoColor::IRON, cpl1.get(3, 0)); assert_eq!(PuyoColor::YELLOW, cpl1.get(3, 1)); assert_eq!(PuyoColor::YELLOW, cpl1.get(3, 2)); assert_eq!(PuyoColor::RED, cpl1.get(3, 3)); assert_eq!(PuyoColor::RED, cpl1.get(3, 4)); assert_eq!(PuyoColor::RED, cpl1.get(3, 5)); } #[test] fn test_merge_with_place_holders_3() { let mut cpl1 = ColumnPuyoList::new(); assert!(cpl1.add(3, PuyoColor::IRON)); assert!(cpl1.add(3, PuyoColor::IRON)); assert!(cpl1.add(3, PuyoColor::IRON)); assert!(cpl1.add(3, PuyoColor::RED)); assert!(cpl1.add(3, PuyoColor::RED)); assert!(cpl1.add(3, PuyoColor::RED)); assert!(cpl1.add(3, PuyoColor::RED)); assert!(cpl1.add(3, PuyoColor::RED)); let mut cpl2 = ColumnPuyoList::new(); assert!(cpl2.add(3, PuyoColor::YELLOW)); assert!(cpl2.add(3, PuyoColor::YELLOW)); assert!(cpl2.add(3, PuyoColor::YELLOW)); assert!(cpl2.add(3, PuyoColor::YELLOW)); assert!(!cpl1.merge(&cpl2)); } }<|fim▁end|>
cpl.add(1, PuyoColor::IRON); assert!(cpl.has_place_holder());